author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2014-09-15 01:17:40 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2014-09-15 01:17:40 -0400
commit     882ebfc28c389be86535bda4a7d9e407020356bf (patch)
tree       e8f3ddeda13196f40040f3ba4701ece5d38c63cc
parent     1c84cd48a117486166f3597c081b170b76e5bd81 (diff)
parent     9e82bf014195d6f0054982c463575cdce24292be (diff)
Merge 3.17-rc5 into tty-next
We want those fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
479 files changed, 5412 insertions, 2803 deletions
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 0a523c9a5ff4..482c74947de0 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -794,6 +794,7 @@ Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer". | |||
794 | <http://www.kroah.com/log/linux/maintainer-03.html> | 794 | <http://www.kroah.com/log/linux/maintainer-03.html> |
795 | <http://www.kroah.com/log/linux/maintainer-04.html> | 795 | <http://www.kroah.com/log/linux/maintainer-04.html> |
796 | <http://www.kroah.com/log/linux/maintainer-05.html> | 796 | <http://www.kroah.com/log/linux/maintainer-05.html> |
797 | <http://www.kroah.com/log/linux/maintainer-06.html> | ||
797 | 798 | ||
798 | NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people! | 799 | NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people! |
799 | <https://lkml.org/lkml/2005/7/11/336> | 800 | <https://lkml.org/lkml/2005/7/11/336> |
diff --git a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
index 9f1d750d76de..61bca509d7b9 100644
--- a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
+++ b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt
@@ -16,9 +16,9 @@ Example: | |||
16 | * DMA client | 16 | * DMA client |
17 | 17 | ||
18 | Required properties: | 18 | Required properties: |
19 | - dmas: a list of <[DMA multiplexer phandle] [SRS/DRS value]> pairs, | 19 | - dmas: a list of <[DMA multiplexer phandle] [SRS << 8 | DRS]> pairs. |
20 | where SRS/DRS values are fixed handles, specified in the SoC | 20 | where SRS/DRS are specified in the SoC manual. |
21 | manual as the value that would be written into the PDMACHCR. | 21 | It will be written into PDMACHCR as high 16-bit parts. |
22 | - dma-names: a list of DMA channel names, one per "dmas" entry | 22 | - dma-names: a list of DMA channel names, one per "dmas" entry |
23 | 23 | ||
24 | Example: | 24 | Example: |
diff --git a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
index baef432e8369..0ac23f2ed104 100644
--- a/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
+++ b/Documentation/devicetree/bindings/input/atmel,maxtouch.txt
@@ -15,6 +15,17 @@ Optional properties for main touchpad device: | |||
15 | keycode generated by each GPIO. Linux keycodes are defined in | 15 | keycode generated by each GPIO. Linux keycodes are defined in |
16 | <dt-bindings/input/input.h>. | 16 | <dt-bindings/input/input.h>. |
17 | 17 | ||
18 | - linux,gpio-keymap: When enabled, the SPT_GPIOPWN_T19 object sends messages | ||
19 | on GPIO bit changes. An array of up to 8 entries can be provided | ||
20 | indicating the Linux keycode mapped to each bit of the status byte, | ||
21 | starting at the LSB. Linux keycodes are defined in | ||
22 | <dt-bindings/input/input.h>. | ||
23 | |||
24 | Note: the numbering of the GPIOs and the bit they start at varies between | ||
25 | maXTouch devices. You must either refer to the documentation, or | ||
26 | experiment to determine which bit corresponds to which input. Use | ||
27 | KEY_RESERVED for unused padding values. | ||
28 | |||
18 | Example: | 29 | Example: |
19 | 30 | ||
20 | touch@4b { | 31 | touch@4b { |
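The linux,gpio-keymap property described in the hunk above lends itself to a short illustration. The following C sketch shows how a driver might walk such an 8-entry keymap when a GPIO status byte arrives, starting at the LSB and skipping KEY_RESERVED padding; the keycodes and function names are made-up assumptions, not the actual maXTouch driver code:

    #include <linux/bitops.h>
    #include <linux/input.h>

    /* Illustrative mapping: bits 0-3 are unused padding, bits 4-5 carry volume keys. */
    static const unsigned int foo_keymap[8] = {
            KEY_RESERVED, KEY_RESERVED, KEY_RESERVED, KEY_RESERVED,
            KEY_VOLUMEUP, KEY_VOLUMEDOWN, KEY_RESERVED, KEY_RESERVED,
    };

    static void foo_report_gpio_keys(struct input_dev *input, u8 status)
    {
            int bit;

            for (bit = 0; bit < 8; bit++) {
                    unsigned int code = foo_keymap[bit];

                    if (code != KEY_RESERVED)       /* padding entries are ignored */
                            input_report_key(input, code, status & BIT(bit));
            }
            input_sync(input);
    }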
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 9b03c57563a4..e45ac3f926b1 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -39,6 +39,10 @@ Optional properties: | |||
39 | further clocks may be specified in derived bindings. | 39 | further clocks may be specified in derived bindings. |
40 | - clock-names: One name for each entry in the clocks property, the | 40 | - clock-names: One name for each entry in the clocks property, the |
41 | first one should be "stmmaceth". | 41 | first one should be "stmmaceth". |
42 | - clk_ptp_ref: this is the PTP reference clock; in case of the PTP is | ||
43 | available this clock is used for programming the Timestamp Addend Register. | ||
44 | If not passed then the system clock will be used and this is fine on some | ||
45 | platforms. | ||
42 | 46 | ||
43 | Examples: | 47 | Examples: |
44 | 48 | ||
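The clk_ptp_ref text added above describes an optional clock with a fallback. As a hedged sketch only (the helper and variable names are assumptions, not the actual stmmac code), the driver-side behaviour it implies looks roughly like this:

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    /*
     * Use the optional "clk_ptp_ref" clock for programming the Timestamp
     * Addend Register if the device tree provides it; otherwise fall back to
     * the main interface clock, which the binding says is fine on some platforms.
     */
    static struct clk *foo_get_ptp_clk(struct device *dev, struct clk *stmmac_clk)
    {
            struct clk *ptp_clk = devm_clk_get(dev, "clk_ptp_ref");

            if (IS_ERR(ptp_clk))
                    return stmmac_clk;

            return ptp_clk;
    }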
diff --git a/Documentation/devicetree/bindings/regulator/tps65090.txt b/Documentation/devicetree/bindings/regulator/tps65090.txt
index 340980239ea9..ca69f5e3040c 100644
--- a/Documentation/devicetree/bindings/regulator/tps65090.txt
+++ b/Documentation/devicetree/bindings/regulator/tps65090.txt
@@ -45,8 +45,8 @@ Example: | |||
45 | infet5-supply = <&some_reg>; | 45 | infet5-supply = <&some_reg>; |
46 | infet6-supply = <&some_reg>; | 46 | infet6-supply = <&some_reg>; |
47 | infet7-supply = <&some_reg>; | 47 | infet7-supply = <&some_reg>; |
48 | vsys_l1-supply = <&some_reg>; | 48 | vsys-l1-supply = <&some_reg>; |
49 | vsys_l2-supply = <&some_reg>; | 49 | vsys-l2-supply = <&some_reg>; |
50 | 50 | ||
51 | regulators { | 51 | regulators { |
52 | dcdc1 { | 52 | dcdc1 { |
diff --git a/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt b/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
index 46f344965313..4eb7997674a0 100644
--- a/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
+++ b/Documentation/devicetree/bindings/sound/adi,axi-spdif-tx.txt
@@ -1,7 +1,7 @@ | |||
1 | ADI AXI-SPDIF controller | 1 | ADI AXI-SPDIF controller |
2 | 2 | ||
3 | Required properties: | 3 | Required properties: |
4 | - compatible : Must be "adi,axi-spdif-1.00.a" | 4 | - compatible : Must be "adi,axi-spdif-tx-1.00.a" |
5 | - reg : Must contain SPDIF core's registers location and length | 5 | - reg : Must contain SPDIF core's registers location and length |
6 | - clocks : Pairs of phandle and specifier referencing the controller's clocks. | 6 | - clocks : Pairs of phandle and specifier referencing the controller's clocks. |
7 | The controller expects two clocks, the clock used for the AXI interface and | 7 | The controller expects two clocks, the clock used for the AXI interface and |
diff --git a/Documentation/devicetree/bindings/usb/mxs-phy.txt b/Documentation/devicetree/bindings/usb/mxs-phy.txt
index cef181a9d8bd..96681c93b86d 100644
--- a/Documentation/devicetree/bindings/usb/mxs-phy.txt
+++ b/Documentation/devicetree/bindings/usb/mxs-phy.txt
@@ -5,6 +5,7 @@ Required properties: | |||
5 | * "fsl,imx23-usbphy" for imx23 and imx28 | 5 | * "fsl,imx23-usbphy" for imx23 and imx28 |
6 | * "fsl,imx6q-usbphy" for imx6dq and imx6dl | 6 | * "fsl,imx6q-usbphy" for imx6dq and imx6dl |
7 | * "fsl,imx6sl-usbphy" for imx6sl | 7 | * "fsl,imx6sl-usbphy" for imx6sl |
8 | * "fsl,imx6sx-usbphy" for imx6sx | ||
8 | "fsl,imx23-usbphy" is still a fallback for other strings | 9 | "fsl,imx23-usbphy" is still a fallback for other strings |
9 | - reg: Should contain registers location and length | 10 | - reg: Should contain registers location and length |
10 | - interrupts: Should contain phy interrupt | 11 | - interrupts: Should contain phy interrupt |
diff --git a/Documentation/devicetree/bindings/video/analog-tv-connector.txt b/Documentation/devicetree/bindings/video/analog-tv-connector.txt
index 0218fcdc1299..0c0970c210ab 100644
--- a/Documentation/devicetree/bindings/video/analog-tv-connector.txt
+++ b/Documentation/devicetree/bindings/video/analog-tv-connector.txt
@@ -2,7 +2,7 @@ Analog TV Connector | |||
2 | =================== | 2 | =================== |
3 | 3 | ||
4 | Required properties: | 4 | Required properties: |
5 | - compatible: "composite-connector" or "svideo-connector" | 5 | - compatible: "composite-video-connector" or "svideo-connector" |
6 | 6 | ||
7 | Optional properties: | 7 | Optional properties: |
8 | - label: a symbolic name for the connector | 8 | - label: a symbolic name for the connector |
@@ -14,7 +14,7 @@ Example | |||
14 | ------- | 14 | ------- |
15 | 15 | ||
16 | tv: connector { | 16 | tv: connector { |
17 | compatible = "composite-connector"; | 17 | compatible = "composite-video-connector"; |
18 | label = "tv"; | 18 | label = "tv"; |
19 | 19 | ||
20 | port { | 20 | port { |
diff --git a/Documentation/filesystems/nfs/nfs-rdma.txt b/Documentation/filesystems/nfs/nfs-rdma.txt
index e386f7e4bcee..724043858b08 100644
--- a/Documentation/filesystems/nfs/nfs-rdma.txt
+++ b/Documentation/filesystems/nfs/nfs-rdma.txt
@@ -138,9 +138,9 @@ Installation | |||
138 | - Build, install, reboot | 138 | - Build, install, reboot |
139 | 139 | ||
140 | The NFS/RDMA code will be enabled automatically if NFS and RDMA | 140 | The NFS/RDMA code will be enabled automatically if NFS and RDMA |
141 | are turned on. The NFS/RDMA client and server are configured via the hidden | 141 | are turned on. The NFS/RDMA client and server are configured via the |
142 | SUNRPC_XPRT_RDMA config option that depends on SUNRPC and INFINIBAND. The | 142 | SUNRPC_XPRT_RDMA_CLIENT and SUNRPC_XPRT_RDMA_SERVER config options that both |
143 | value of SUNRPC_XPRT_RDMA will be: | 143 | depend on SUNRPC and INFINIBAND. The default value of both options will be: |
144 | 144 | ||
145 | - N if either SUNRPC or INFINIBAND are N, in this case the NFS/RDMA client | 145 | - N if either SUNRPC or INFINIBAND are N, in this case the NFS/RDMA client |
146 | and server will not be built | 146 | and server will not be built |
@@ -235,8 +235,9 @@ NFS/RDMA Setup | |||
235 | 235 | ||
236 | - Start the NFS server | 236 | - Start the NFS server |
237 | 237 | ||
238 | If the NFS/RDMA server was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in | 238 | If the NFS/RDMA server was built as a module |
239 | kernel config), load the RDMA transport module: | 239 | (CONFIG_SUNRPC_XPRT_RDMA_SERVER=m in kernel config), load the RDMA |
240 | transport module: | ||
240 | 241 | ||
241 | $ modprobe svcrdma | 242 | $ modprobe svcrdma |
242 | 243 | ||
@@ -255,8 +256,9 @@ NFS/RDMA Setup | |||
255 | 256 | ||
256 | - On the client system | 257 | - On the client system |
257 | 258 | ||
258 | If the NFS/RDMA client was built as a module (CONFIG_SUNRPC_XPRT_RDMA=m in | 259 | If the NFS/RDMA client was built as a module |
259 | kernel config), load the RDMA client module: | 260 | (CONFIG_SUNRPC_XPRT_RDMA_CLIENT=m in kernel config), load the RDMA client |
261 | module: | ||
260 | 262 | ||
261 | $ modprobe xprtrdma.ko | 263 | $ modprobe xprtrdma.ko |
262 | 264 | ||
diff --git a/Documentation/filesystems/seq_file.txt b/Documentation/filesystems/seq_file.txt
index 1fe0ccb1af55..8ea3e90ace07 100644
--- a/Documentation/filesystems/seq_file.txt
+++ b/Documentation/filesystems/seq_file.txt
@@ -235,6 +235,39 @@ be used for more than one file, you can store an arbitrary pointer in the | |||
235 | private field of the seq_file structure; that value can then be retrieved | 235 | private field of the seq_file structure; that value can then be retrieved |
236 | by the iterator functions. | 236 | by the iterator functions. |
237 | 237 | ||
238 | There is also a wrapper function to seq_open() called seq_open_private(). It | ||
239 | kmallocs a zero filled block of memory and stores a pointer to it in the | ||
240 | private field of the seq_file structure, returning 0 on success. The | ||
241 | block size is specified in a third parameter to the function, e.g.: | ||
242 | |||
243 | static int ct_open(struct inode *inode, struct file *file) | ||
244 | { | ||
245 | return seq_open_private(file, &ct_seq_ops, | ||
246 | sizeof(struct mystruct)); | ||
247 | } | ||
248 | |||
249 | There is also a variant function, __seq_open_private(), which is functionally | ||
250 | identical except that, if successful, it returns the pointer to the allocated | ||
251 | memory block, allowing further initialisation e.g.: | ||
252 | |||
253 | static int ct_open(struct inode *inode, struct file *file) | ||
254 | { | ||
255 | struct mystruct *p = | ||
256 | __seq_open_private(file, &ct_seq_ops, sizeof(*p)); | ||
257 | |||
258 | if (!p) | ||
259 | return -ENOMEM; | ||
260 | |||
261 | p->foo = bar; /* initialize my stuff */ | ||
262 | ... | ||
263 | p->baz = true; | ||
264 | |||
265 | return 0; | ||
266 | } | ||
267 | |||
268 | A corresponding close function, seq_release_private() is available which | ||
269 | frees the memory allocated in the corresponding open. | ||
270 | |||
238 | The other operations of interest - read(), llseek(), and release() - are | 271 | The other operations of interest - read(), llseek(), and release() - are |
239 | all implemented by the seq_file code itself. So a virtual file's | 272 | all implemented by the seq_file code itself. So a virtual file's |
240 | file_operations structure will look like: | 273 | file_operations structure will look like: |
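The seq_open_private() additions in the hunk above pair naturally with seq_release_private() in the file_operations structure that the surrounding text goes on to describe. A minimal sketch, reusing the ct_open() shown above (the structure name is an assumption):

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    /*
     * open allocates the private block via seq_open_private();
     * seq_release_private() frees it again on the final close.
     */
    static const struct file_operations ct_file_ops = {
            .owner   = THIS_MODULE,
            .open    = ct_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = seq_release_private,
    };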
diff --git a/Documentation/gpio/consumer.txt b/Documentation/gpio/consumer.txt
index 76546324e968..6ce544191ca6 100644
--- a/Documentation/gpio/consumer.txt
+++ b/Documentation/gpio/consumer.txt
@@ -53,7 +53,20 @@ with IS_ERR() (they will never return a NULL pointer). -ENOENT will be returned | |||
53 | if and only if no GPIO has been assigned to the device/function/index triplet, | 53 | if and only if no GPIO has been assigned to the device/function/index triplet, |
54 | other error codes are used for cases where a GPIO has been assigned but an error | 54 | other error codes are used for cases where a GPIO has been assigned but an error |
55 | occurred while trying to acquire it. This is useful to discriminate between mere | 55 | occurred while trying to acquire it. This is useful to discriminate between mere |
56 | errors and an absence of GPIO for optional GPIO parameters. | 56 | errors and an absence of GPIO for optional GPIO parameters. For the common |
57 | pattern where a GPIO is optional, the gpiod_get_optional() and | ||
58 | gpiod_get_index_optional() functions can be used. These functions return NULL | ||
59 | instead of -ENOENT if no GPIO has been assigned to the requested function: | ||
60 | |||
61 | |||
62 | struct gpio_desc *gpiod_get_optional(struct device *dev, | ||
63 | const char *con_id, | ||
64 | enum gpiod_flags flags) | ||
65 | |||
66 | struct gpio_desc *gpiod_get_index_optional(struct device *dev, | ||
67 | const char *con_id, | ||
68 | unsigned int index, | ||
69 | enum gpiod_flags flags) | ||
57 | 70 | ||
58 | Device-managed variants of these functions are also defined: | 71 | Device-managed variants of these functions are also defined: |
59 | 72 | ||
@@ -65,6 +78,15 @@ Device-managed variants of these functions are also defined: | |||
65 | unsigned int idx, | 78 | unsigned int idx, |
66 | enum gpiod_flags flags) | 79 | enum gpiod_flags flags) |
67 | 80 | ||
81 | struct gpio_desc *devm_gpiod_get_optional(struct device *dev, | ||
82 | const char *con_id, | ||
83 | enum gpiod_flags flags) | ||
84 | |||
85 | struct gpio_desc * devm_gpiod_get_index_optional(struct device *dev, | ||
86 | const char *con_id, | ||
87 | unsigned int index, | ||
88 | enum gpiod_flags flags) | ||
89 | |||
68 | A GPIO descriptor can be disposed of using the gpiod_put() function: | 90 | A GPIO descriptor can be disposed of using the gpiod_put() function: |
69 | 91 | ||
70 | void gpiod_put(struct gpio_desc *desc) | 92 | void gpiod_put(struct gpio_desc *desc) |
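Putting the optional-GPIO calls documented above into context, a hedged probe-time sketch (the driver name, the "reset" function name and the chosen flags are assumptions):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int foo_probe(struct device *dev)
    {
            struct gpio_desc *reset;

            /* NULL, not an error, when no "reset" GPIO is described for this device */
            reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);  /* a GPIO was assigned but could not be acquired */

            if (reset)
                    gpiod_set_value(reset, 1);      /* only drive it when it exists */

            return 0;
    }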
diff --git a/Documentation/i2c/dev-interface b/Documentation/i2c/dev-interface
index 3e742ba25536..2ac78ae1039d 100644
--- a/Documentation/i2c/dev-interface
+++ b/Documentation/i2c/dev-interface
@@ -57,12 +57,12 @@ Well, you are all set up now. You can now use SMBus commands or plain | |||
57 | I2C to communicate with your device. SMBus commands are preferred if | 57 | I2C to communicate with your device. SMBus commands are preferred if |
58 | the device supports them. Both are illustrated below. | 58 | the device supports them. Both are illustrated below. |
59 | 59 | ||
60 | __u8 register = 0x10; /* Device register to access */ | 60 | __u8 reg = 0x10; /* Device register to access */ |
61 | __s32 res; | 61 | __s32 res; |
62 | char buf[10]; | 62 | char buf[10]; |
63 | 63 | ||
64 | /* Using SMBus commands */ | 64 | /* Using SMBus commands */ |
65 | res = i2c_smbus_read_word_data(file, register); | 65 | res = i2c_smbus_read_word_data(file, reg); |
66 | if (res < 0) { | 66 | if (res < 0) { |
67 | /* ERROR HANDLING: i2c transaction failed */ | 67 | /* ERROR HANDLING: i2c transaction failed */ |
68 | } else { | 68 | } else { |
@@ -70,11 +70,11 @@ the device supports them. Both are illustrated below. | |||
70 | } | 70 | } |
71 | 71 | ||
72 | /* Using I2C Write, equivalent of | 72 | /* Using I2C Write, equivalent of |
73 | i2c_smbus_write_word_data(file, register, 0x6543) */ | 73 | i2c_smbus_write_word_data(file, reg, 0x6543) */ |
74 | buf[0] = register; | 74 | buf[0] = reg; |
75 | buf[1] = 0x43; | 75 | buf[1] = 0x43; |
76 | buf[2] = 0x65; | 76 | buf[2] = 0x65; |
77 | if (write(file, buf, 3) ! =3) { | 77 | if (write(file, buf, 3) != 3) { |
78 | /* ERROR HANDLING: i2c transaction failed */ | 78 | /* ERROR HANDLING: i2c transaction failed */ |
79 | } | 79 | } |
80 | 80 | ||
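The fragment above assumes that the file descriptor "file" already refers to an opened and addressed i2c-dev node. For completeness, a hedged userspace sketch of that setup (the adapter number and slave address are arbitrary examples):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/i2c-dev.h>      /* I2C_SLAVE and the SMBus access helpers */

    int main(void)
    {
            int file = open("/dev/i2c-1", O_RDWR);  /* adapter number chosen for illustration */

            if (file < 0) {
                    perror("open");
                    return 1;
            }

            if (ioctl(file, I2C_SLAVE, 0x48) < 0) { /* 0x48 is an example slave address */
                    perror("ioctl(I2C_SLAVE)");
                    close(file);
                    return 1;
            }

            /* ... SMBus or plain I2C transfers as illustrated above ... */

            close(file);
            return 0;
    }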
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 5ae8608ca9f5..10d51c2f10d7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3541,6 +3541,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. | |||
3541 | bogus residue values); | 3541 | bogus residue values); |
3542 | s = SINGLE_LUN (the device has only one | 3542 | s = SINGLE_LUN (the device has only one |
3543 | Logical Unit); | 3543 | Logical Unit); |
3544 | u = IGNORE_UAS (don't bind to the uas driver); | ||
3544 | w = NO_WP_DETECT (don't test whether the | 3545 | w = NO_WP_DETECT (don't test whether the |
3545 | medium is write-protected). | 3546 | medium is write-protected). |
3546 | Example: quirks=0419:aaf5:rl,0421:0433:rc | 3547 | Example: quirks=0419:aaf5:rl,0421:0433:rc |
diff --git a/Documentation/misc-devices/lis3lv02d b/Documentation/misc-devices/lis3lv02d
index af815b9ba413..f89960a0ff95 100644
--- a/Documentation/misc-devices/lis3lv02d
+++ b/Documentation/misc-devices/lis3lv02d
@@ -59,7 +59,7 @@ acts similar to /dev/rtc and reacts on free-fall interrupts received | |||
59 | from the device. It supports blocking operations, poll/select and | 59 | from the device. It supports blocking operations, poll/select and |
60 | fasync operation modes. You must read 1 bytes from the device. The | 60 | fasync operation modes. You must read 1 bytes from the device. The |
61 | result is number of free-fall interrupts since the last successful | 61 | result is number of free-fall interrupts since the last successful |
62 | read (or 255 if number of interrupts would not fit). See the hpfall.c | 62 | read (or 255 if number of interrupts would not fit). See the freefall.c |
63 | file for an example on using the device. | 63 | file for an example on using the device. |
64 | 64 | ||
65 | 65 | ||
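A hedged userspace sketch of the single-byte blocking read described above (the /dev/freefall node name follows the freefall.c example the document now points to):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            unsigned char count;
            int fd = open("/dev/freefall", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* Blocks until at least one free-fall interrupt has been received. */
            if (read(fd, &count, 1) == 1)
                    printf("%u free-fall interrupt(s) since the last read\n", count);

            close(fd);
            return 0;
    }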
diff --git a/Documentation/power/regulator/consumer.txt b/Documentation/power/regulator/consumer.txt
index 81c0e2b49cd8..8afb236ca765 100644
--- a/Documentation/power/regulator/consumer.txt
+++ b/Documentation/power/regulator/consumer.txt
@@ -143,8 +143,9 @@ This will cause the core to recalculate the total load on the regulator (based | |||
143 | on all its consumers) and change operating mode (if necessary and permitted) | 143 | on all its consumers) and change operating mode (if necessary and permitted) |
144 | to best match the current operating load. | 144 | to best match the current operating load. |
145 | 145 | ||
146 | The load_uA value can be determined from the consumers datasheet. e.g.most | 146 | The load_uA value can be determined from the consumer's datasheet. e.g. most |
147 | datasheets have tables showing the max current consumed in certain situations. | 147 | datasheets have tables showing the maximum current consumed in certain |
148 | situations. | ||
148 | 149 | ||
149 | Most consumers will use indirect operating mode control since they have no | 150 | Most consumers will use indirect operating mode control since they have no |
150 | knowledge of the regulator or whether the regulator is shared with other | 151 | knowledge of the regulator or whether the regulator is shared with other |
@@ -173,7 +174,7 @@ Consumers can register interest in regulator events by calling :- | |||
173 | int regulator_register_notifier(struct regulator *regulator, | 174 | int regulator_register_notifier(struct regulator *regulator, |
174 | struct notifier_block *nb); | 175 | struct notifier_block *nb); |
175 | 176 | ||
176 | Consumers can uregister interest by calling :- | 177 | Consumers can unregister interest by calling :- |
177 | 178 | ||
178 | int regulator_unregister_notifier(struct regulator *regulator, | 179 | int regulator_unregister_notifier(struct regulator *regulator, |
179 | struct notifier_block *nb); | 180 | struct notifier_block *nb); |
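A hedged consumer-side sketch of the notifier registration described above (the callback, the event handled and the driver names are illustrative assumptions):

    #include <linux/notifier.h>
    #include <linux/printk.h>
    #include <linux/regulator/consumer.h>

    static int foo_regulator_event(struct notifier_block *nb,
                                   unsigned long event, void *data)
    {
            if (event & REGULATOR_EVENT_UNDER_VOLTAGE)
                    pr_warn("foo: supply under-voltage reported\n");

            return NOTIFY_OK;
    }

    static struct notifier_block foo_regulator_nb = {
            .notifier_call = foo_regulator_event,
    };

    /* Called once a regulator handle has been obtained with regulator_get(). */
    static int foo_listen_for_events(struct regulator *regulator)
    {
            return regulator_register_notifier(regulator, &foo_regulator_nb);
    }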
diff --git a/Documentation/power/regulator/design.txt b/Documentation/power/regulator/design.txt
index f9b56b72b782..fdd919b96830 100644
--- a/Documentation/power/regulator/design.txt
+++ b/Documentation/power/regulator/design.txt
@@ -9,14 +9,14 @@ Safety | |||
9 | 9 | ||
10 | - Errors in regulator configuration can have very serious consequences | 10 | - Errors in regulator configuration can have very serious consequences |
11 | for the system, potentially including lasting hardware damage. | 11 | for the system, potentially including lasting hardware damage. |
12 | - It is not possible to automatically determine the power confugration | 12 | - It is not possible to automatically determine the power configuration |
13 | of the system - software-equivalent variants of the same chip may | 13 | of the system - software-equivalent variants of the same chip may |
14 | have different power requirments, and not all components with power | 14 | have different power requirements, and not all components with power |
15 | requirements are visible to software. | 15 | requirements are visible to software. |
16 | 16 | ||
17 | => The API should make no changes to the hardware state unless it has | 17 | => The API should make no changes to the hardware state unless it has |
18 | specific knowledge that these changes are safe to do perform on | 18 | specific knowledge that these changes are safe to perform on this |
19 | this particular system. | 19 | particular system. |
20 | 20 | ||
21 | Consumer use cases | 21 | Consumer use cases |
22 | ------------------ | 22 | ------------------ |
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index ce63af0a8e35..757e3b53dc11 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -11,7 +11,7 @@ Consider the following machine :- | |||
11 | +-> [Consumer B @ 3.3V] | 11 | +-> [Consumer B @ 3.3V] |
12 | 12 | ||
13 | The drivers for consumers A & B must be mapped to the correct regulator in | 13 | The drivers for consumers A & B must be mapped to the correct regulator in |
14 | order to control their power supply. This mapping can be achieved in machine | 14 | order to control their power supplies. This mapping can be achieved in machine |
15 | initialisation code by creating a struct regulator_consumer_supply for | 15 | initialisation code by creating a struct regulator_consumer_supply for |
16 | each regulator. | 16 | each regulator. |
17 | 17 | ||
@@ -39,7 +39,7 @@ to the 'Vcc' supply for Consumer A. | |||
39 | 39 | ||
40 | Constraints can now be registered by defining a struct regulator_init_data | 40 | Constraints can now be registered by defining a struct regulator_init_data |
41 | for each regulator power domain. This structure also maps the consumers | 41 | for each regulator power domain. This structure also maps the consumers |
42 | to their supply regulator :- | 42 | to their supply regulators :- |
43 | 43 | ||
44 | static struct regulator_init_data regulator1_data = { | 44 | static struct regulator_init_data regulator1_data = { |
45 | .constraints = { | 45 | .constraints = { |
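The consumer-to-regulator mapping discussed above can be written out as follows; this is a hedged illustration in the spirit of the document's Consumer A example, not a verbatim copy of it:

    #include <linux/kernel.h>
    #include <linux/regulator/machine.h>

    /* Map Consumer A's "Vcc" supply to Regulator-1 (the device name is an assumption). */
    static struct regulator_consumer_supply regulator1_consumers[] = {
            REGULATOR_SUPPLY("Vcc", "consumer-a.0"),
    };

    static struct regulator_init_data regulator1_data = {
            .constraints = {
                    .min_uV = 3300000,
                    .max_uV = 3300000,
                    .valid_ops_mask = REGULATOR_CHANGE_STATUS,
            },
            .num_consumer_supplies = ARRAY_SIZE(regulator1_consumers),
            .consumer_supplies = regulator1_consumers,
    };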
diff --git a/Documentation/power/regulator/overview.txt b/Documentation/power/regulator/overview.txt
index 8ed17587a74b..40ca2d6e2742 100644
--- a/Documentation/power/regulator/overview.txt
+++ b/Documentation/power/regulator/overview.txt
@@ -36,11 +36,11 @@ Some terms used in this document:- | |||
36 | Consumers can be classified into two types:- | 36 | Consumers can be classified into two types:- |
37 | 37 | ||
38 | Static: consumer does not change its supply voltage or | 38 | Static: consumer does not change its supply voltage or |
39 | current limit. It only needs to enable or disable it's | 39 | current limit. It only needs to enable or disable its |
40 | power supply. Its supply voltage is set by the hardware, | 40 | power supply. Its supply voltage is set by the hardware, |
41 | bootloader, firmware or kernel board initialisation code. | 41 | bootloader, firmware or kernel board initialisation code. |
42 | 42 | ||
43 | Dynamic: consumer needs to change it's supply voltage or | 43 | Dynamic: consumer needs to change its supply voltage or |
44 | current limit to meet operation demands. | 44 | current limit to meet operation demands. |
45 | 45 | ||
46 | 46 | ||
@@ -156,7 +156,7 @@ relevant to non SoC devices and is split into the following four interfaces:- | |||
156 | This interface is for machine specific code and allows the creation of | 156 | This interface is for machine specific code and allows the creation of |
157 | voltage/current domains (with constraints) for each regulator. It can | 157 | voltage/current domains (with constraints) for each regulator. It can |
158 | provide regulator constraints that will prevent device damage through | 158 | provide regulator constraints that will prevent device damage through |
159 | overvoltage or over current caused by buggy client drivers. It also | 159 | overvoltage or overcurrent caused by buggy client drivers. It also |
160 | allows the creation of a regulator tree whereby some regulators are | 160 | allows the creation of a regulator tree whereby some regulators are |
161 | supplied by others (similar to a clock tree). | 161 | supplied by others (similar to a clock tree). |
162 | 162 | ||
diff --git a/Documentation/power/regulator/regulator.txt b/Documentation/power/regulator/regulator.txt
index 13902778ae44..b17e5833ce21 100644
--- a/Documentation/power/regulator/regulator.txt
+++ b/Documentation/power/regulator/regulator.txt
@@ -13,7 +13,7 @@ Drivers can register a regulator by calling :- | |||
13 | struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, | 13 | struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, |
14 | const struct regulator_config *config); | 14 | const struct regulator_config *config); |
15 | 15 | ||
16 | This will register the regulators capabilities and operations to the regulator | 16 | This will register the regulator's capabilities and operations to the regulator |
17 | core. | 17 | core. |
18 | 18 | ||
19 | Regulators can be unregistered by calling :- | 19 | Regulators can be unregistered by calling :- |
@@ -23,8 +23,8 @@ void regulator_unregister(struct regulator_dev *rdev); | |||
23 | 23 | ||
24 | Regulator Events | 24 | Regulator Events |
25 | ================ | 25 | ================ |
26 | Regulators can send events (e.g. over temp, under voltage, etc) to consumer | 26 | Regulators can send events (e.g. overtemperature, undervoltage, etc) to |
27 | drivers by calling :- | 27 | consumer drivers by calling :- |
28 | 28 | ||
29 | int regulator_notifier_call_chain(struct regulator_dev *rdev, | 29 | int regulator_notifier_call_chain(struct regulator_dev *rdev, |
30 | unsigned long event, void *data); | 30 | unsigned long event, void *data); |
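A hedged driver-side sketch of raising one of the events mentioned above from a threaded interrupt handler (the handler name and the over-temperature interrupt source are assumptions):

    #include <linux/interrupt.h>
    #include <linux/regulator/consumer.h>
    #include <linux/regulator/driver.h>

    static irqreturn_t foo_overtemp_irq(int irq, void *data)
    {
            struct regulator_dev *rdev = data;

            /* Notify interested consumers that the regulator has overheated. */
            regulator_notifier_call_chain(rdev, REGULATOR_EVENT_OVER_TEMP, NULL);

            return IRQ_HANDLED;
    }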
diff --git a/MAINTAINERS b/MAINTAINERS
index cf24bb56bab9..809ecd680d88 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6424,7 +6424,8 @@ F: Documentation/scsi/NinjaSCSI.txt | |||
6424 | F: drivers/scsi/nsp32* | 6424 | F: drivers/scsi/nsp32* |
6425 | 6425 | ||
6426 | NTB DRIVER | 6426 | NTB DRIVER |
6427 | M: Jon Mason <jon.mason@intel.com> | 6427 | M: Jon Mason <jdmason@kudzu.us> |
6428 | M: Dave Jiang <dave.jiang@intel.com> | ||
6428 | S: Supported | 6429 | S: Supported |
6429 | W: https://github.com/jonmason/ntb/wiki | 6430 | W: https://github.com/jonmason/ntb/wiki |
6430 | T: git git://github.com/jonmason/ntb.git | 6431 | T: git git://github.com/jonmason/ntb.git |
@@ -7053,7 +7054,7 @@ S: Maintained | |||
7053 | F: drivers/pinctrl/sh-pfc/ | 7054 | F: drivers/pinctrl/sh-pfc/ |
7054 | 7055 | ||
7055 | PIN CONTROLLER - SAMSUNG | 7056 | PIN CONTROLLER - SAMSUNG |
7056 | M: Tomasz Figa <t.figa@samsung.com> | 7057 | M: Tomasz Figa <tomasz.figa@gmail.com> |
7057 | M: Thomas Abraham <thomas.abraham@linaro.org> | 7058 | M: Thomas Abraham <thomas.abraham@linaro.org> |
7058 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) | 7059 | L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) |
7059 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) | 7060 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) |
@@ -7899,7 +7900,8 @@ S: Supported | |||
7899 | F: drivers/media/i2c/s5k5baf.c | 7900 | F: drivers/media/i2c/s5k5baf.c |
7900 | 7901 | ||
7901 | SAMSUNG SOC CLOCK DRIVERS | 7902 | SAMSUNG SOC CLOCK DRIVERS |
7902 | M: Tomasz Figa <t.figa@samsung.com> | 7903 | M: Sylwester Nawrocki <s.nawrocki@samsung.com> |
7904 | M: Tomasz Figa <tomasz.figa@gmail.com> | ||
7903 | S: Supported | 7905 | S: Supported |
7904 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) | 7906 | L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers) |
7905 | F: drivers/clk/samsung/ | 7907 | F: drivers/clk/samsung/ |
@@ -7912,6 +7914,19 @@ S: Supported | |||
7912 | L: netdev@vger.kernel.org | 7914 | L: netdev@vger.kernel.org |
7913 | F: drivers/net/ethernet/samsung/sxgbe/ | 7915 | F: drivers/net/ethernet/samsung/sxgbe/ |
7914 | 7916 | ||
7917 | SAMSUNG USB2 PHY DRIVER | ||
7918 | M: Kamil Debski <k.debski@samsung.com> | ||
7919 | L: linux-kernel@vger.kernel.org | ||
7920 | S: Supported | ||
7921 | F: Documentation/devicetree/bindings/phy/samsung-phy.txt | ||
7922 | F: Documentation/phy/samsung-usb2.txt | ||
7923 | F: drivers/phy/phy-exynos4210-usb2.c | ||
7924 | F: drivers/phy/phy-exynos4x12-usb2.c | ||
7925 | F: drivers/phy/phy-exynos5250-usb2.c | ||
7926 | F: drivers/phy/phy-s5pv210-usb2.c | ||
7927 | F: drivers/phy/phy-samsung-usb2.c | ||
7928 | F: drivers/phy/phy-samsung-usb2.h | ||
7929 | |||
7915 | SERIAL DRIVERS | 7930 | SERIAL DRIVERS |
7916 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 7931 | M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> |
7917 | L: linux-serial@vger.kernel.org | 7932 | L: linux-serial@vger.kernel.org |
@@ -10070,9 +10085,9 @@ F: Documentation/x86/ | |||
10070 | F: arch/x86/ | 10085 | F: arch/x86/ |
10071 | 10086 | ||
10072 | X86 PLATFORM DRIVERS | 10087 | X86 PLATFORM DRIVERS |
10073 | M: Matthew Garrett <matthew.garrett@nebula.com> | 10088 | M: Darren Hart <dvhart@infradead.org> |
10074 | L: platform-driver-x86@vger.kernel.org | 10089 | L: platform-driver-x86@vger.kernel.org |
10075 | T: git git://cavan.codon.org.uk/platform-drivers-x86.git | 10090 | T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git |
10076 | S: Maintained | 10091 | S: Maintained |
10077 | F: drivers/platform/x86/ | 10092 | F: drivers/platform/x86/ |
10078 | 10093 | ||
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 17 | 2 | PATCHLEVEL = 17 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc3 | 4 | EXTRAVERSION = -rc5 |
5 | NAME = Shuffling Zombie Juror | 5 | NAME = Shuffling Zombie Juror |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index e88ddbf990e3..9e1142729fd1 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -427,7 +427,7 @@ struct ic_inv_args { | |||
427 | 427 | ||
428 | static void __ic_line_inv_vaddr_helper(void *info) | 428 | static void __ic_line_inv_vaddr_helper(void *info) |
429 | { | 429 | { |
430 | struct ic_inv *ic_inv_args = (struct ic_inv_args *) info; | 430 | struct ic_inv_args *ic_inv = info; |
431 | 431 | ||
432 | __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); | 432 | __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); |
433 | } | 433 | } |
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 9b3d2ba82f13..8689949bdba3 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -804,7 +804,7 @@ | |||
804 | 804 | ||
805 | usb1: usb@48390000 { | 805 | usb1: usb@48390000 { |
806 | compatible = "synopsys,dwc3"; | 806 | compatible = "synopsys,dwc3"; |
807 | reg = <0x48390000 0x17000>; | 807 | reg = <0x48390000 0x10000>; |
808 | interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; | 808 | interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; |
809 | phys = <&usb2_phy1>; | 809 | phys = <&usb2_phy1>; |
810 | phy-names = "usb2-phy"; | 810 | phy-names = "usb2-phy"; |
@@ -826,7 +826,7 @@ | |||
826 | 826 | ||
827 | usb2: usb@483d0000 { | 827 | usb2: usb@483d0000 { |
828 | compatible = "synopsys,dwc3"; | 828 | compatible = "synopsys,dwc3"; |
829 | reg = <0x483d0000 0x17000>; | 829 | reg = <0x483d0000 0x10000>; |
830 | interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>; | 830 | interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>; |
831 | phys = <&usb2_phy2>; | 831 | phys = <&usb2_phy2>; |
832 | phy-names = "usb2-phy"; | 832 | phy-names = "usb2-phy"; |
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index 646a6eade788..e7ac47fa6615 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -260,7 +260,7 @@ | |||
260 | status = "okay"; | 260 | status = "okay"; |
261 | pinctrl-names = "default"; | 261 | pinctrl-names = "default"; |
262 | pinctrl-0 = <&i2c0_pins>; | 262 | pinctrl-0 = <&i2c0_pins>; |
263 | clock-frequency = <400000>; | 263 | clock-frequency = <100000>; |
264 | 264 | ||
265 | tps65218: tps65218@24 { | 265 | tps65218: tps65218@24 { |
266 | reg = <0x24>; | 266 | reg = <0x24>; |
@@ -424,7 +424,7 @@ | |||
424 | ranges = <0 0 0 0x01000000>; /* minimum GPMC partition = 16MB */ | 424 | ranges = <0 0 0 0x01000000>; /* minimum GPMC partition = 16MB */ |
425 | nand@0,0 { | 425 | nand@0,0 { |
426 | reg = <0 0 4>; /* device IO registers */ | 426 | reg = <0 0 4>; /* device IO registers */ |
427 | ti,nand-ecc-opt = "bch8"; | 427 | ti,nand-ecc-opt = "bch16"; |
428 | ti,elm-id = <&elm>; | 428 | ti,elm-id = <&elm>; |
429 | nand-bus-width = <8>; | 429 | nand-bus-width = <8>; |
430 | gpmc,device-width = <1>; | 430 | gpmc,device-width = <1>; |
@@ -443,8 +443,6 @@ | |||
443 | gpmc,rd-cycle-ns = <40>; | 443 | gpmc,rd-cycle-ns = <40>; |
444 | gpmc,wr-cycle-ns = <40>; | 444 | gpmc,wr-cycle-ns = <40>; |
445 | gpmc,wait-pin = <0>; | 445 | gpmc,wait-pin = <0>; |
446 | gpmc,wait-on-read; | ||
447 | gpmc,wait-on-write; | ||
448 | gpmc,bus-turnaround-ns = <0>; | 446 | gpmc,bus-turnaround-ns = <0>; |
449 | gpmc,cycle2cycle-delay-ns = <0>; | 447 | gpmc,cycle2cycle-delay-ns = <0>; |
450 | gpmc,clk-activation-ns = <0>; | 448 | gpmc,clk-activation-ns = <0>; |
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index ed7dd2395915..ac3e4859935f 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -435,13 +435,13 @@ | |||
435 | }; | 435 | }; |
436 | 436 | ||
437 | &gpmc { | 437 | &gpmc { |
438 | status = "okay"; | 438 | status = "okay"; /* Disable QSPI when enabling GPMC (NAND) */ |
439 | pinctrl-names = "default"; | 439 | pinctrl-names = "default"; |
440 | pinctrl-0 = <&nand_flash_x8>; | 440 | pinctrl-0 = <&nand_flash_x8>; |
441 | ranges = <0 0 0x08000000 0x10000000>; /* CS0: NAND */ | 441 | ranges = <0 0 0x08000000 0x10000000>; /* CS0: NAND */ |
442 | nand@0,0 { | 442 | nand@0,0 { |
443 | reg = <0 0 0>; /* CS0, offset 0 */ | 443 | reg = <0 0 0>; /* CS0, offset 0 */ |
444 | ti,nand-ecc-opt = "bch8"; | 444 | ti,nand-ecc-opt = "bch16"; |
445 | ti,elm-id = <&elm>; | 445 | ti,elm-id = <&elm>; |
446 | nand-bus-width = <8>; | 446 | nand-bus-width = <8>; |
447 | gpmc,device-width = <1>; | 447 | gpmc,device-width = <1>; |
@@ -459,8 +459,7 @@ | |||
459 | gpmc,access-ns = <30>; /* tCEA + 4*/ | 459 | gpmc,access-ns = <30>; /* tCEA + 4*/ |
460 | gpmc,rd-cycle-ns = <40>; | 460 | gpmc,rd-cycle-ns = <40>; |
461 | gpmc,wr-cycle-ns = <40>; | 461 | gpmc,wr-cycle-ns = <40>; |
462 | gpmc,wait-on-read = "true"; | 462 | gpmc,wait-pin = <0>; |
463 | gpmc,wait-on-write = "true"; | ||
464 | gpmc,bus-turnaround-ns = <0>; | 463 | gpmc,bus-turnaround-ns = <0>; |
465 | gpmc,cycle2cycle-delay-ns = <0>; | 464 | gpmc,cycle2cycle-delay-ns = <0>; |
466 | gpmc,clk-activation-ns = <0>; | 465 | gpmc,clk-activation-ns = <0>; |
@@ -557,7 +556,7 @@ | |||
557 | }; | 556 | }; |
558 | 557 | ||
559 | &qspi { | 558 | &qspi { |
560 | status = "okay"; | 559 | status = "disabled"; /* Disable GPMC (NAND) when enabling QSPI */ |
561 | pinctrl-names = "default"; | 560 | pinctrl-names = "default"; |
562 | pinctrl-0 = <&qspi1_default>; | 561 | pinctrl-0 = <&qspi1_default>; |
563 | 562 | ||
diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi
index 65ccf564b9a5..6c97d4af61ee 100644
--- a/arch/arm/boot/dts/at91rm9200.dtsi
+++ b/arch/arm/boot/dts/at91rm9200.dtsi
@@ -149,7 +149,7 @@ | |||
149 | usb: usbck { | 149 | usb: usbck { |
150 | compatible = "atmel,at91rm9200-clk-usb"; | 150 | compatible = "atmel,at91rm9200-clk-usb"; |
151 | #clock-cells = <0>; | 151 | #clock-cells = <0>; |
152 | atmel,clk-divisors = <1 2>; | 152 | atmel,clk-divisors = <1 2 0 0>; |
153 | clocks = <&pllb>; | 153 | clocks = <&pllb>; |
154 | }; | 154 | }; |
155 | 155 | ||
diff --git a/arch/arm/boot/dts/at91sam9g20.dtsi b/arch/arm/boot/dts/at91sam9g20.dtsi
index 31f7652612fc..4e0abbd9d655 100644
--- a/arch/arm/boot/dts/at91sam9g20.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20.dtsi
@@ -40,6 +40,7 @@ | |||
40 | }; | 40 | }; |
41 | 41 | ||
42 | pllb: pllbck { | 42 | pllb: pllbck { |
43 | compatible = "atmel,at91sam9g20-clk-pllb"; | ||
43 | atmel,clk-input-range = <2000000 32000000>; | 44 | atmel,clk-input-range = <2000000 32000000>; |
44 | atmel,pll-clk-output-ranges = <30000000 100000000 0 0>; | 45 | atmel,pll-clk-output-ranges = <30000000 100000000 0 0>; |
45 | }; | 46 | }; |
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 50f8022905a1..e03fbf3c6889 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -8,6 +8,7 @@ | |||
8 | /dts-v1/; | 8 | /dts-v1/; |
9 | 9 | ||
10 | #include "dra74x.dtsi" | 10 | #include "dra74x.dtsi" |
11 | #include <dt-bindings/gpio/gpio.h> | ||
11 | 12 | ||
12 | / { | 13 | / { |
13 | model = "TI DRA742"; | 14 | model = "TI DRA742"; |
@@ -24,9 +25,29 @@ | |||
24 | regulator-min-microvolt = <3300000>; | 25 | regulator-min-microvolt = <3300000>; |
25 | regulator-max-microvolt = <3300000>; | 26 | regulator-max-microvolt = <3300000>; |
26 | }; | 27 | }; |
28 | |||
29 | vtt_fixed: fixedregulator-vtt { | ||
30 | compatible = "regulator-fixed"; | ||
31 | regulator-name = "vtt_fixed"; | ||
32 | regulator-min-microvolt = <1350000>; | ||
33 | regulator-max-microvolt = <1350000>; | ||
34 | regulator-always-on; | ||
35 | regulator-boot-on; | ||
36 | enable-active-high; | ||
37 | gpio = <&gpio7 11 GPIO_ACTIVE_HIGH>; | ||
38 | }; | ||
27 | }; | 39 | }; |
28 | 40 | ||
29 | &dra7_pmx_core { | 41 | &dra7_pmx_core { |
42 | pinctrl-names = "default"; | ||
43 | pinctrl-0 = <&vtt_pin>; | ||
44 | |||
45 | vtt_pin: pinmux_vtt_pin { | ||
46 | pinctrl-single,pins = < | ||
47 | 0x3b4 (PIN_OUTPUT | MUX_MODE14) /* spi1_cs1.gpio7_11 */ | ||
48 | >; | ||
49 | }; | ||
50 | |||
30 | i2c1_pins: pinmux_i2c1_pins { | 51 | i2c1_pins: pinmux_i2c1_pins { |
31 | pinctrl-single,pins = < | 52 | pinctrl-single,pins = < |
32 | 0x400 (PIN_INPUT | MUX_MODE0) /* i2c1_sda */ | 53 | 0x400 (PIN_INPUT | MUX_MODE0) /* i2c1_sda */ |
@@ -43,20 +64,19 @@ | |||
43 | 64 | ||
44 | i2c3_pins: pinmux_i2c3_pins { | 65 | i2c3_pins: pinmux_i2c3_pins { |
45 | pinctrl-single,pins = < | 66 | pinctrl-single,pins = < |
46 | 0x410 (PIN_INPUT | MUX_MODE0) /* i2c3_sda */ | 67 | 0x288 (PIN_INPUT | MUX_MODE9) /* gpio6_14.i2c3_sda */ |
47 | 0x414 (PIN_INPUT | MUX_MODE0) /* i2c3_scl */ | 68 | 0x28c (PIN_INPUT | MUX_MODE9) /* gpio6_15.i2c3_scl */ |
48 | >; | 69 | >; |
49 | }; | 70 | }; |
50 | 71 | ||
51 | mcspi1_pins: pinmux_mcspi1_pins { | 72 | mcspi1_pins: pinmux_mcspi1_pins { |
52 | pinctrl-single,pins = < | 73 | pinctrl-single,pins = < |
53 | 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi2_clk */ | 74 | 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi1_sclk */ |
54 | 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi2_d1 */ | 75 | 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi1_d1 */ |
55 | 0x3ac (PIN_INPUT | MUX_MODE0) /* spi2_d0 */ | 76 | 0x3ac (PIN_INPUT | MUX_MODE0) /* spi1_d0 */ |
56 | 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs0 */ | 77 | 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi1_cs0 */ |
57 | 0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs1 */ | 78 | 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs2.hdmi1_hpd */ |
58 | 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs2 */ | 79 | 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs3.hdmi1_cec */ |
59 | 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs3 */ | ||
60 | >; | 80 | >; |
61 | }; | 81 | }; |
62 | 82 | ||
@@ -284,7 +304,7 @@ | |||
284 | status = "okay"; | 304 | status = "okay"; |
285 | pinctrl-names = "default"; | 305 | pinctrl-names = "default"; |
286 | pinctrl-0 = <&i2c3_pins>; | 306 | pinctrl-0 = <&i2c3_pins>; |
287 | clock-frequency = <3400000>; | 307 | clock-frequency = <400000>; |
288 | }; | 308 | }; |
289 | 309 | ||
290 | &mcspi1 { | 310 | &mcspi1 { |
@@ -483,7 +503,7 @@ | |||
483 | reg = <0x001c0000 0x00020000>; | 503 | reg = <0x001c0000 0x00020000>; |
484 | }; | 504 | }; |
485 | partition@7 { | 505 | partition@7 { |
486 | label = "NAND.u-boot-env"; | 506 | label = "NAND.u-boot-env.backup1"; |
487 | reg = <0x001e0000 0x00020000>; | 507 | reg = <0x001e0000 0x00020000>; |
488 | }; | 508 | }; |
489 | partition@8 { | 509 | partition@8 { |
@@ -504,3 +524,8 @@ | |||
504 | &usb2_phy2 { | 524 | &usb2_phy2 { |
505 | phy-supply = <&ldousb_reg>; | 525 | phy-supply = <&ldousb_reg>; |
506 | }; | 526 | }; |
527 | |||
528 | &gpio7 { | ||
529 | ti,no-reset-on-init; | ||
530 | ti,no-idle-on-init; | ||
531 | }; | ||
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 1fe45d1f75ec..4361777a08d8 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -93,7 +93,7 @@ | |||
93 | }; | 93 | }; |
94 | 94 | ||
95 | tv: connector { | 95 | tv: connector { |
96 | compatible = "composite-connector"; | 96 | compatible = "composite-video-connector"; |
97 | label = "tv"; | 97 | label = "tv"; |
98 | 98 | ||
99 | port { | 99 | port { |
diff --git a/arch/arm/boot/dts/omap3xxx-clocks.dtsi b/arch/arm/boot/dts/omap3xxx-clocks.dtsi
index e47ff69dcf70..5c375003bad1 100644
--- a/arch/arm/boot/dts/omap3xxx-clocks.dtsi
+++ b/arch/arm/boot/dts/omap3xxx-clocks.dtsi
@@ -467,6 +467,7 @@ | |||
467 | ti,bit-shift = <0x1e>; | 467 | ti,bit-shift = <0x1e>; |
468 | reg = <0x0d00>; | 468 | reg = <0x0d00>; |
469 | ti,set-bit-to-disable; | 469 | ti,set-bit-to-disable; |
470 | ti,set-rate-parent; | ||
470 | }; | 471 | }; |
471 | 472 | ||
472 | dpll4_m6_ck: dpll4_m6_ck { | 473 | dpll4_m6_ck: dpll4_m6_ck { |
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 4a2000c620ad..3e97a669f15e 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -116,7 +116,6 @@ | |||
116 | msp2: msp@80117000 { | 116 | msp2: msp@80117000 { |
117 | pinctrl-names = "default"; | 117 | pinctrl-names = "default"; |
118 | pinctrl-0 = <&msp2_default_mode>; | 118 | pinctrl-0 = <&msp2_default_mode>; |
119 | status = "okay"; | ||
120 | }; | 119 | }; |
121 | 120 | ||
122 | msp3: msp@80125000 { | 121 | msp3: msp@80125000 { |
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index 88099175fc56..d86771abbf57 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -1443,14 +1443,14 @@ void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no) | |||
1443 | EXPORT_SYMBOL(edma_assign_channel_eventq); | 1443 | EXPORT_SYMBOL(edma_assign_channel_eventq); |
1444 | 1444 | ||
1445 | static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, | 1445 | static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, |
1446 | struct edma *edma_cc) | 1446 | struct edma *edma_cc, int cc_id) |
1447 | { | 1447 | { |
1448 | int i; | 1448 | int i; |
1449 | u32 value, cccfg; | 1449 | u32 value, cccfg; |
1450 | s8 (*queue_priority_map)[2]; | 1450 | s8 (*queue_priority_map)[2]; |
1451 | 1451 | ||
1452 | /* Decode the eDMA3 configuration from CCCFG register */ | 1452 | /* Decode the eDMA3 configuration from CCCFG register */ |
1453 | cccfg = edma_read(0, EDMA_CCCFG); | 1453 | cccfg = edma_read(cc_id, EDMA_CCCFG); |
1454 | 1454 | ||
1455 | value = GET_NUM_REGN(cccfg); | 1455 | value = GET_NUM_REGN(cccfg); |
1456 | edma_cc->num_region = BIT(value); | 1456 | edma_cc->num_region = BIT(value); |
@@ -1464,7 +1464,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, | |||
1464 | value = GET_NUM_EVQUE(cccfg); | 1464 | value = GET_NUM_EVQUE(cccfg); |
1465 | edma_cc->num_tc = value + 1; | 1465 | edma_cc->num_tc = value + 1; |
1466 | 1466 | ||
1467 | dev_dbg(dev, "eDMA3 HW configuration (cccfg: 0x%08x):\n", cccfg); | 1467 | dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id, |
1468 | cccfg); | ||
1468 | dev_dbg(dev, "num_region: %u\n", edma_cc->num_region); | 1469 | dev_dbg(dev, "num_region: %u\n", edma_cc->num_region); |
1469 | dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels); | 1470 | dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels); |
1470 | dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots); | 1471 | dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots); |
@@ -1684,7 +1685,7 @@ static int edma_probe(struct platform_device *pdev) | |||
1684 | return -ENOMEM; | 1685 | return -ENOMEM; |
1685 | 1686 | ||
1686 | /* Get eDMA3 configuration from IP */ | 1687 | /* Get eDMA3 configuration from IP */ |
1687 | ret = edma_setup_from_hw(dev, info[j], edma_cc[j]); | 1688 | ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j); |
1688 | if (ret) | 1689 | if (ret) |
1689 | return ret; | 1690 | return ret; |
1690 | 1691 | ||
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 1109017499e5..e8275ea88e88 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, | |||
26 | __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); | 26 | __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); |
27 | } | 27 | } |
28 | 28 | ||
29 | static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | 29 | void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, |
30 | size_t size, enum dma_data_direction dir, | 30 | size_t size, enum dma_data_direction dir, |
31 | struct dma_attrs *attrs) | 31 | struct dma_attrs *attrs); |
32 | { | ||
33 | if (__generic_dma_ops(hwdev)->unmap_page) | ||
34 | __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); | ||
35 | } | ||
36 | 32 | ||
37 | static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, | 33 | void xen_dma_sync_single_for_cpu(struct device *hwdev, |
38 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | 34 | dma_addr_t handle, size_t size, enum dma_data_direction dir); |
39 | { | 35 | |
40 | if (__generic_dma_ops(hwdev)->sync_single_for_cpu) | 36 | void xen_dma_sync_single_for_device(struct device *hwdev, |
41 | __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); | 37 | dma_addr_t handle, size_t size, enum dma_data_direction dir); |
42 | } | ||
43 | 38 | ||
44 | static inline void xen_dma_sync_single_for_device(struct device *hwdev, | ||
45 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
46 | { | ||
47 | if (__generic_dma_ops(hwdev)->sync_single_for_device) | ||
48 | __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); | ||
49 | } | ||
50 | #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ | 39 | #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ |
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index ded062f9b358..135c24a5ba26 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -33,7 +33,6 @@ typedef struct xpaddr { | |||
33 | #define INVALID_P2M_ENTRY (~0UL) | 33 | #define INVALID_P2M_ENTRY (~0UL) |
34 | 34 | ||
35 | unsigned long __pfn_to_mfn(unsigned long pfn); | 35 | unsigned long __pfn_to_mfn(unsigned long pfn); |
36 | unsigned long __mfn_to_pfn(unsigned long mfn); | ||
37 | extern struct rb_root phys_to_mach; | 36 | extern struct rb_root phys_to_mach; |
38 | 37 | ||
39 | static inline unsigned long pfn_to_mfn(unsigned long pfn) | 38 | static inline unsigned long pfn_to_mfn(unsigned long pfn) |
@@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn) | |||
51 | 50 | ||
52 | static inline unsigned long mfn_to_pfn(unsigned long mfn) | 51 | static inline unsigned long mfn_to_pfn(unsigned long mfn) |
53 | { | 52 | { |
54 | unsigned long pfn; | ||
55 | |||
56 | if (phys_to_mach.rb_node != NULL) { | ||
57 | pfn = __mfn_to_pfn(mfn); | ||
58 | if (pfn != INVALID_P2M_ENTRY) | ||
59 | return pfn; | ||
60 | } | ||
61 | |||
62 | return mfn; | 53 | return mfn; |
63 | } | 54 | } |
64 | 55 | ||
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 4c979d466cc1..a96a8043277c 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -93,6 +93,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
93 | else | 93 | else |
94 | kvm_vcpu_block(vcpu); | 94 | kvm_vcpu_block(vcpu); |
95 | 95 | ||
96 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
97 | |||
96 | return 1; | 98 | return 1; |
97 | } | 99 | } |
98 | 100 | ||
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 991415d978b6..3988e72d16ff 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -99,6 +99,10 @@ __do_hyp_init: | |||
99 | mrc p15, 0, r0, c10, c2, 1 | 99 | mrc p15, 0, r0, c10, c2, 1 |
100 | mcr p15, 4, r0, c10, c2, 1 | 100 | mcr p15, 4, r0, c10, c2, 1 |
101 | 101 | ||
102 | @ Invalidate the stale TLBs from Bootloader | ||
103 | mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH | ||
104 | dsb ish | ||
105 | |||
102 | @ Set the HSCTLR to: | 106 | @ Set the HSCTLR to: |
103 | @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel) | 107 | @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel) |
104 | @ - Endianness: Kernel config | 108 | @ - Endianness: Kernel config |
diff --git a/arch/arm/mach-at91/board-dt-rm9200.c b/arch/arm/mach-at91/board-dt-rm9200.c
index 3a185faee795..f4b6e91843e4 100644
--- a/arch/arm/mach-at91/board-dt-rm9200.c
+++ b/arch/arm/mach-at91/board-dt-rm9200.c
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/gpio.h> | 14 | #include <linux/gpio.h> |
15 | #include <linux/of.h> | 15 | #include <linux/of.h> |
16 | #include <linux/of_irq.h> | 16 | #include <linux/of_irq.h> |
17 | #include <linux/clk-provider.h> | ||
17 | 18 | ||
18 | #include <asm/setup.h> | 19 | #include <asm/setup.h> |
19 | #include <asm/irq.h> | 20 | #include <asm/irq.h> |
@@ -35,13 +36,21 @@ static void __init at91rm9200_dt_init_irq(void) | |||
35 | of_irq_init(irq_of_match); | 36 | of_irq_init(irq_of_match); |
36 | } | 37 | } |
37 | 38 | ||
39 | static void __init at91rm9200_dt_timer_init(void) | ||
40 | { | ||
41 | #if defined(CONFIG_COMMON_CLK) | ||
42 | of_clk_init(NULL); | ||
43 | #endif | ||
44 | at91rm9200_timer_init(); | ||
45 | } | ||
46 | |||
38 | static const char *at91rm9200_dt_board_compat[] __initdata = { | 47 | static const char *at91rm9200_dt_board_compat[] __initdata = { |
39 | "atmel,at91rm9200", | 48 | "atmel,at91rm9200", |
40 | NULL | 49 | NULL |
41 | }; | 50 | }; |
42 | 51 | ||
43 | DT_MACHINE_START(at91rm9200_dt, "Atmel AT91RM9200 (Device Tree)") | 52 | DT_MACHINE_START(at91rm9200_dt, "Atmel AT91RM9200 (Device Tree)") |
44 | .init_time = at91rm9200_timer_init, | 53 | .init_time = at91rm9200_dt_timer_init, |
45 | .map_io = at91_map_io, | 54 | .map_io = at91_map_io, |
46 | .handle_irq = at91_aic_handle_irq, | 55 | .handle_irq = at91_aic_handle_irq, |
47 | .init_early = at91rm9200_dt_initialize, | 56 | .init_early = at91rm9200_dt_initialize, |
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 9f42d5437fcc..2f97228f188a 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1207,8 +1207,7 @@ int gpmc_cs_program_settings(int cs, struct gpmc_settings *p) | |||
1207 | } | 1207 | } |
1208 | } | 1208 | } |
1209 | 1209 | ||
1210 | if ((p->wait_on_read || p->wait_on_write) && | 1210 | if (p->wait_pin > gpmc_nr_waitpins) { |
1211 | (p->wait_pin > gpmc_nr_waitpins)) { | ||
1212 | pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin); | 1211 | pr_err("%s: invalid wait-pin (%d)\n", __func__, p->wait_pin); |
1213 | return -EINVAL; | 1212 | return -EINVAL; |
1214 | } | 1213 | } |
@@ -1288,8 +1287,8 @@ void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p) | |||
1288 | p->wait_on_write = of_property_read_bool(np, | 1287 | p->wait_on_write = of_property_read_bool(np, |
1289 | "gpmc,wait-on-write"); | 1288 | "gpmc,wait-on-write"); |
1290 | if (!p->wait_on_read && !p->wait_on_write) | 1289 | if (!p->wait_on_read && !p->wait_on_write) |
1291 | pr_warn("%s: read/write wait monitoring not enabled!\n", | 1290 | pr_debug("%s: rd/wr wait monitoring not enabled!\n", |
1292 | __func__); | 1291 | __func__); |
1293 | } | 1292 | } |
1294 | } | 1293 | } |
1295 | 1294 | ||
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 12969523414c..1f85bfe6b470 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@ | |||
obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o | obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o | ||
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 98544c5f86e9..0e15f011f9c8 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -260,6 +260,12 @@ static int __init xen_guest_init(void) | |||
260 | xen_domain_type = XEN_HVM_DOMAIN; | 260 | xen_domain_type = XEN_HVM_DOMAIN; |
261 | 261 | ||
262 | xen_setup_features(); | 262 | xen_setup_features(); |
263 | |||
264 | if (!xen_feature(XENFEAT_grant_map_identity)) { | ||
265 | pr_warn("Please upgrade your Xen.\n" | ||
266 | "If your platform has any non-coherent DMA devices, they won't work properly.\n"); | ||
267 | } | ||
268 | |||
263 | if (xen_feature(XENFEAT_dom0)) | 269 | if (xen_feature(XENFEAT_dom0)) |
264 | xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; | 270 | xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; |
265 | else | 271 | else |
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
new file mode 100644
index 000000000000..3b99860fd7ae
--- /dev/null
+++ b/arch/arm/xen/mm32.c
@@ -0,0 +1,202 @@ | |||
1 | #include <linux/cpu.h> | ||
2 | #include <linux/dma-mapping.h> | ||
3 | #include <linux/gfp.h> | ||
4 | #include <linux/highmem.h> | ||
5 | |||
6 | #include <xen/features.h> | ||
7 | |||
8 | static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt); | ||
9 | static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep); | ||
10 | |||
11 | static int alloc_xen_mm32_scratch_page(int cpu) | ||
12 | { | ||
13 | struct page *page; | ||
14 | unsigned long virt; | ||
15 | pmd_t *pmdp; | ||
16 | pte_t *ptep; | ||
17 | |||
18 | if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL) | ||
19 | return 0; | ||
20 | |||
21 | page = alloc_page(GFP_KERNEL); | ||
22 | if (page == NULL) { | ||
23 | pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu); | ||
24 | return -ENOMEM; | ||
25 | } | ||
26 | |||
27 | virt = (unsigned long)__va(page_to_phys(page)); | ||
28 | pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt); | ||
29 | ptep = pte_offset_kernel(pmdp, virt); | ||
30 | |||
31 | per_cpu(xen_mm32_scratch_virt, cpu) = virt; | ||
32 | per_cpu(xen_mm32_scratch_ptep, cpu) = ptep; | ||
33 | |||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | static int xen_mm32_cpu_notify(struct notifier_block *self, | ||
38 | unsigned long action, void *hcpu) | ||
39 | { | ||
40 | int cpu = (long)hcpu; | ||
41 | switch (action) { | ||
42 | case CPU_UP_PREPARE: | ||
43 | if (alloc_xen_mm32_scratch_page(cpu)) | ||
44 | return NOTIFY_BAD; | ||
45 | break; | ||
46 | default: | ||
47 | break; | ||
48 | } | ||
49 | return NOTIFY_OK; | ||
50 | } | ||
51 | |||
52 | static struct notifier_block xen_mm32_cpu_notifier = { | ||
53 | .notifier_call = xen_mm32_cpu_notify, | ||
54 | }; | ||
55 | |||
56 | static void* xen_mm32_remap_page(dma_addr_t handle) | ||
57 | { | ||
58 | unsigned long virt = get_cpu_var(xen_mm32_scratch_virt); | ||
59 | pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep); | ||
60 | |||
61 | *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL); | ||
62 | local_flush_tlb_kernel_page(virt); | ||
63 | |||
64 | return (void*)virt; | ||
65 | } | ||
66 | |||
67 | static void xen_mm32_unmap(void *vaddr) | ||
68 | { | ||
69 | put_cpu_var(xen_mm32_scratch_virt); | ||
70 | } | ||
71 | |||
72 | |||
73 | /* functions called by SWIOTLB */ | ||
74 | |||
75 | static void dma_cache_maint(dma_addr_t handle, unsigned long offset, | ||
76 | size_t size, enum dma_data_direction dir, | ||
77 | void (*op)(const void *, size_t, int)) | ||
78 | { | ||
79 | unsigned long pfn; | ||
80 | size_t left = size; | ||
81 | |||
82 | pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE; | ||
83 | offset %= PAGE_SIZE; | ||
84 | |||
85 | do { | ||
86 | size_t len = left; | ||
87 | void *vaddr; | ||
88 | |||
89 | if (!pfn_valid(pfn)) | ||
90 | { | ||
91 | /* Cannot map the page, we don't know its physical address. | ||
92 | * Return and hope for the best */ | ||
93 | if (!xen_feature(XENFEAT_grant_map_identity)) | ||
94 | return; | ||
95 | vaddr = xen_mm32_remap_page(handle) + offset; | ||
96 | op(vaddr, len, dir); | ||
97 | xen_mm32_unmap(vaddr - offset); | ||
98 | } else { | ||
99 | struct page *page = pfn_to_page(pfn); | ||
100 | |||
101 | if (PageHighMem(page)) { | ||
102 | if (len + offset > PAGE_SIZE) | ||
103 | len = PAGE_SIZE - offset; | ||
104 | |||
105 | if (cache_is_vipt_nonaliasing()) { | ||
106 | vaddr = kmap_atomic(page); | ||
107 | op(vaddr + offset, len, dir); | ||
108 | kunmap_atomic(vaddr); | ||
109 | } else { | ||
110 | vaddr = kmap_high_get(page); | ||
111 | if (vaddr) { | ||
112 | op(vaddr + offset, len, dir); | ||
113 | kunmap_high(page); | ||
114 | } | ||
115 | } | ||
116 | } else { | ||
117 | vaddr = page_address(page) + offset; | ||
118 | op(vaddr, len, dir); | ||
119 | } | ||
120 | } | ||
121 | |||
122 | offset = 0; | ||
123 | pfn++; | ||
124 | left -= len; | ||
125 | } while (left); | ||
126 | } | ||
127 | |||
128 | static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle, | ||
129 | size_t size, enum dma_data_direction dir) | ||
130 | { | ||
131 | /* Cannot use __dma_page_dev_to_cpu because we don't have a | ||
132 | * struct page for handle */ | ||
133 | |||
134 | if (dir != DMA_TO_DEVICE) | ||
135 | outer_inv_range(handle, handle + size); | ||
136 | |||
137 | dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area); | ||
138 | } | ||
139 | |||
140 | static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle, | ||
141 | size_t size, enum dma_data_direction dir) | ||
142 | { | ||
143 | |||
144 | dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area); | ||
145 | |||
146 | if (dir == DMA_FROM_DEVICE) { | ||
147 | outer_inv_range(handle, handle + size); | ||
148 | } else { | ||
149 | outer_clean_range(handle, handle + size); | ||
150 | } | ||
151 | } | ||
152 | |||
153 | void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, | ||
154 | size_t size, enum dma_data_direction dir, | ||
155 | struct dma_attrs *attrs) | ||
156 | |||
157 | { | ||
158 | if (!__generic_dma_ops(hwdev)->unmap_page) | ||
159 | return; | ||
160 | if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) | ||
161 | return; | ||
162 | |||
163 | __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); | ||
164 | } | ||
165 | |||
166 | void xen_dma_sync_single_for_cpu(struct device *hwdev, | ||
167 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
168 | { | ||
169 | if (!__generic_dma_ops(hwdev)->sync_single_for_cpu) | ||
170 | return; | ||
171 | __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); | ||
172 | } | ||
173 | |||
174 | void xen_dma_sync_single_for_device(struct device *hwdev, | ||
175 | dma_addr_t handle, size_t size, enum dma_data_direction dir) | ||
176 | { | ||
177 | if (!__generic_dma_ops(hwdev)->sync_single_for_device) | ||
178 | return; | ||
179 | __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir); | ||
180 | } | ||
181 | |||
182 | int __init xen_mm32_init(void) | ||
183 | { | ||
184 | int cpu; | ||
185 | |||
186 | if (!xen_initial_domain()) | ||
187 | return 0; | ||
188 | |||
189 | register_cpu_notifier(&xen_mm32_cpu_notifier); | ||
190 | get_online_cpus(); | ||
191 | for_each_online_cpu(cpu) { | ||
192 | if (alloc_xen_mm32_scratch_page(cpu)) { | ||
193 | put_online_cpus(); | ||
194 | unregister_cpu_notifier(&xen_mm32_cpu_notifier); | ||
195 | return -ENOMEM; | ||
196 | } | ||
197 | } | ||
198 | put_online_cpus(); | ||
199 | |||
200 | return 0; | ||
201 | } | ||
202 | arch_initcall(xen_mm32_init); | ||
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index 97baf4427817..054857776254 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c | |||
@@ -21,14 +21,12 @@ struct xen_p2m_entry { | |||
21 | unsigned long pfn; | 21 | unsigned long pfn; |
22 | unsigned long mfn; | 22 | unsigned long mfn; |
23 | unsigned long nr_pages; | 23 | unsigned long nr_pages; |
24 | struct rb_node rbnode_mach; | ||
25 | struct rb_node rbnode_phys; | 24 | struct rb_node rbnode_phys; |
26 | }; | 25 | }; |
27 | 26 | ||
28 | static rwlock_t p2m_lock; | 27 | static rwlock_t p2m_lock; |
29 | struct rb_root phys_to_mach = RB_ROOT; | 28 | struct rb_root phys_to_mach = RB_ROOT; |
30 | EXPORT_SYMBOL_GPL(phys_to_mach); | 29 | EXPORT_SYMBOL_GPL(phys_to_mach); |
31 | static struct rb_root mach_to_phys = RB_ROOT; | ||
32 | 30 | ||
33 | static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) | 31 | static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) |
34 | { | 32 | { |
@@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) | |||
41 | parent = *link; | 39 | parent = *link; |
42 | entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); | 40 | entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); |
43 | 41 | ||
44 | if (new->mfn == entry->mfn) | ||
45 | goto err_out; | ||
46 | if (new->pfn == entry->pfn) | 42 | if (new->pfn == entry->pfn) |
47 | goto err_out; | 43 | goto err_out; |
48 | 44 | ||
@@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn) | |||
88 | } | 84 | } |
89 | EXPORT_SYMBOL_GPL(__pfn_to_mfn); | 85 | EXPORT_SYMBOL_GPL(__pfn_to_mfn); |
90 | 86 | ||
91 | static int xen_add_mach_to_phys_entry(struct xen_p2m_entry *new) | ||
92 | { | ||
93 | struct rb_node **link = &mach_to_phys.rb_node; | ||
94 | struct rb_node *parent = NULL; | ||
95 | struct xen_p2m_entry *entry; | ||
96 | int rc = 0; | ||
97 | |||
98 | while (*link) { | ||
99 | parent = *link; | ||
100 | entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach); | ||
101 | |||
102 | if (new->mfn == entry->mfn) | ||
103 | goto err_out; | ||
104 | if (new->pfn == entry->pfn) | ||
105 | goto err_out; | ||
106 | |||
107 | if (new->mfn < entry->mfn) | ||
108 | link = &(*link)->rb_left; | ||
109 | else | ||
110 | link = &(*link)->rb_right; | ||
111 | } | ||
112 | rb_link_node(&new->rbnode_mach, parent, link); | ||
113 | rb_insert_color(&new->rbnode_mach, &mach_to_phys); | ||
114 | goto out; | ||
115 | |||
116 | err_out: | ||
117 | rc = -EINVAL; | ||
118 | pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n", | ||
119 | __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn); | ||
120 | out: | ||
121 | return rc; | ||
122 | } | ||
123 | |||
124 | unsigned long __mfn_to_pfn(unsigned long mfn) | ||
125 | { | ||
126 | struct rb_node *n = mach_to_phys.rb_node; | ||
127 | struct xen_p2m_entry *entry; | ||
128 | unsigned long irqflags; | ||
129 | |||
130 | read_lock_irqsave(&p2m_lock, irqflags); | ||
131 | while (n) { | ||
132 | entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach); | ||
133 | if (entry->mfn <= mfn && | ||
134 | entry->mfn + entry->nr_pages > mfn) { | ||
135 | read_unlock_irqrestore(&p2m_lock, irqflags); | ||
136 | return entry->pfn + (mfn - entry->mfn); | ||
137 | } | ||
138 | if (mfn < entry->mfn) | ||
139 | n = n->rb_left; | ||
140 | else | ||
141 | n = n->rb_right; | ||
142 | } | ||
143 | read_unlock_irqrestore(&p2m_lock, irqflags); | ||
144 | |||
145 | return INVALID_P2M_ENTRY; | ||
146 | } | ||
147 | EXPORT_SYMBOL_GPL(__mfn_to_pfn); | ||
148 | |||
149 | int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, | 87 | int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, |
150 | struct gnttab_map_grant_ref *kmap_ops, | 88 | struct gnttab_map_grant_ref *kmap_ops, |
151 | struct page **pages, unsigned int count) | 89 | struct page **pages, unsigned int count) |
@@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn, | |||
192 | p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); | 130 | p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); |
193 | if (p2m_entry->pfn <= pfn && | 131 | if (p2m_entry->pfn <= pfn && |
194 | p2m_entry->pfn + p2m_entry->nr_pages > pfn) { | 132 | p2m_entry->pfn + p2m_entry->nr_pages > pfn) { |
195 | rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys); | ||
196 | rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach); | 133 | rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach); |
197 | write_unlock_irqrestore(&p2m_lock, irqflags); | 134 | write_unlock_irqrestore(&p2m_lock, irqflags); |
198 | kfree(p2m_entry); | 135 | kfree(p2m_entry); |
@@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn, | |||
217 | p2m_entry->mfn = mfn; | 154 | p2m_entry->mfn = mfn; |
218 | 155 | ||
219 | write_lock_irqsave(&p2m_lock, irqflags); | 156 | write_lock_irqsave(&p2m_lock, irqflags); |
220 | if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) || | 157 | if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) { |
221 | (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) { | ||
222 | write_unlock_irqrestore(&p2m_lock, irqflags); | 158 | write_unlock_irqrestore(&p2m_lock, irqflags); |
223 | return false; | 159 | return false; |
224 | } | 160 | } |
diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index c294e67d3925..ae67e88c28b9 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c | |||
@@ -150,7 +150,6 @@ static void sha2_finup(struct shash_desc *desc, const u8 *data, | |||
150 | kernel_neon_begin_partial(28); | 150 | kernel_neon_begin_partial(28); |
151 | sha2_ce_transform(blocks, data, sctx->state, NULL, len); | 151 | sha2_ce_transform(blocks, data, sctx->state, NULL, len); |
152 | kernel_neon_end(); | 152 | kernel_neon_end(); |
153 | data += blocks * SHA256_BLOCK_SIZE; | ||
154 | } | 153 | } |
155 | 154 | ||
156 | static int sha224_finup(struct shash_desc *desc, const u8 *data, | 155 | static int sha224_finup(struct shash_desc *desc, const u8 *data, |
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index d064047612b1..52b484b6aa1a 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h | |||
@@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg, | |||
79 | */ | 79 | */ |
80 | #define ARM_MAX_BRP 16 | 80 | #define ARM_MAX_BRP 16 |
81 | #define ARM_MAX_WRP 16 | 81 | #define ARM_MAX_WRP 16 |
82 | #define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP) | ||
83 | 82 | ||
84 | /* Virtual debug register bases. */ | 83 | /* Virtual debug register bases. */ |
85 | #define AARCH64_DBG_REG_BVR 0 | 84 | #define AARCH64_DBG_REG_BVR 0 |
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 3df21feeabdd..286b1bec547c 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h | |||
@@ -139,7 +139,7 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev, | |||
139 | ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) | 139 | ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) |
140 | 140 | ||
141 | #define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc) | 141 | #define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc) |
142 | #define KSTK_ESP(tsk) ((unsigned long)task_pt_regs(tsk)->sp) | 142 | #define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk)) |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * Prefetching support | 145 | * Prefetching support |
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 501000fadb6f..41ed9e13795e 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h | |||
@@ -137,7 +137,7 @@ struct pt_regs { | |||
137 | (!((regs)->pstate & PSR_F_BIT)) | 137 | (!((regs)->pstate & PSR_F_BIT)) |
138 | 138 | ||
139 | #define user_stack_pointer(regs) \ | 139 | #define user_stack_pointer(regs) \ |
140 | (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp) | 140 | (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp) |
141 | 141 | ||
142 | static inline unsigned long regs_return_value(struct pt_regs *regs) | 142 | static inline unsigned long regs_return_value(struct pt_regs *regs) |
143 | { | 143 | { |
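The user_stack_pointer() hunk above only adds an outer pair of parentheses, but that is what makes the macro safe to use inside a larger expression (as KSTK_ESP() now does). A minimal sketch of the pitfall, using hypothetical macro names rather than the kernel's:

	#include <stdio.h>

	/* Illustration only -- not the kernel macros. */
	#define SP_BAD(c, a, b)  (!(c)) ? (a) : (b)
	#define SP_GOOD(c, a, b) ((!(c)) ? (a) : (b))

	int main(void)
	{
		/* "+ 8" binds tighter than ?:, so SP_BAD only adds 8 on the else branch. */
		printf("%d %d\n", SP_BAD(0, 100, 200) + 8, SP_GOOD(0, 100, 200) + 8);
		/* prints "100 108" */
		return 0;
	}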
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index ad8aebb1cdef..3dca15634e69 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c | |||
@@ -270,6 +270,7 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self, | |||
270 | case CPU_PM_ENTER: | 270 | case CPU_PM_ENTER: |
271 | if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE)) | 271 | if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE)) |
272 | fpsimd_save_state(&current->thread.fpsimd_state); | 272 | fpsimd_save_state(&current->thread.fpsimd_state); |
273 | this_cpu_write(fpsimd_last_state, NULL); | ||
273 | break; | 274 | break; |
274 | case CPU_PM_EXIT: | 275 | case CPU_PM_EXIT: |
275 | if (current->mm) | 276 | if (current->mm) |
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index bed028364a93..873069056229 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -373,10 +373,6 @@ ENTRY(__boot_cpu_mode) | |||
373 | .long 0 | 373 | .long 0 |
374 | .popsection | 374 | .popsection |
375 | 375 | ||
376 | .align 3 | ||
377 | 2: .quad . | ||
378 | .quad PAGE_OFFSET | ||
379 | |||
380 | #ifdef CONFIG_SMP | 376 | #ifdef CONFIG_SMP |
381 | .align 3 | 377 | .align 3 |
382 | 1: .quad . | 378 | 1: .quad . |
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 0f08dfd69ebc..dfa6e3e74fdd 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c | |||
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc) | |||
97 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) | 97 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) |
98 | return false; | 98 | return false; |
99 | 99 | ||
100 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) | 100 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { |
101 | affinity = cpu_online_mask; | ||
101 | ret = true; | 102 | ret = true; |
103 | } | ||
102 | 104 | ||
103 | /* | ||
104 | * when using forced irq_set_affinity we must ensure that the cpu | ||
105 | * being offlined is not present in the affinity mask, it may be | ||
106 | * selected as the target CPU otherwise | ||
107 | */ | ||
108 | affinity = cpu_online_mask; | ||
109 | c = irq_data_get_irq_chip(d); | 105 | c = irq_data_get_irq_chip(d); |
110 | if (!c->irq_set_affinity) | 106 | if (!c->irq_set_affinity) |
111 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); | 107 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); |
112 | else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) | 108 | else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) |
113 | cpumask_copy(d->affinity, affinity); | 109 | cpumask_copy(d->affinity, affinity); |
114 | 110 | ||
115 | return ret; | 111 | return ret; |
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 422ebd63b619..6762ad705587 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c | |||
@@ -24,6 +24,12 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) | |||
24 | return regs->compat_lr; | 24 | return regs->compat_lr; |
25 | } | 25 | } |
26 | 26 | ||
27 | if ((u32)idx == PERF_REG_ARM64_SP) | ||
28 | return regs->sp; | ||
29 | |||
30 | if ((u32)idx == PERF_REG_ARM64_PC) | ||
31 | return regs->pc; | ||
32 | |||
27 | return regs->regs[idx]; | 33 | return regs->regs[idx]; |
28 | } | 34 | } |
29 | 35 | ||
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 1309d64aa926..29d48690f2ac 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -230,9 +230,27 @@ void exit_thread(void) | |||
230 | { | 230 | { |
231 | } | 231 | } |
232 | 232 | ||
233 | static void tls_thread_flush(void) | ||
234 | { | ||
235 | asm ("msr tpidr_el0, xzr"); | ||
236 | |||
237 | if (is_compat_task()) { | ||
238 | current->thread.tp_value = 0; | ||
239 | |||
240 | /* | ||
241 | * We need to ensure ordering between the shadow state and the | ||
242 | * hardware state, so that we don't corrupt the hardware state | ||
243 | * with a stale shadow state during context switch. | ||
244 | */ | ||
245 | barrier(); | ||
246 | asm ("msr tpidrro_el0, xzr"); | ||
247 | } | ||
248 | } | ||
249 | |||
233 | void flush_thread(void) | 250 | void flush_thread(void) |
234 | { | 251 | { |
235 | fpsimd_flush_thread(); | 252 | fpsimd_flush_thread(); |
253 | tls_thread_flush(); | ||
236 | flush_ptrace_hw_breakpoint(current); | 254 | flush_ptrace_hw_breakpoint(current); |
237 | } | 255 | } |
238 | 256 | ||
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 70526cfda056..fe63ac5e9bf5 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
@@ -87,7 +87,8 @@ static void ptrace_hbptriggered(struct perf_event *bp, | |||
87 | break; | 87 | break; |
88 | } | 88 | } |
89 | } | 89 | } |
90 | for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) { | 90 | |
91 | for (i = 0; i < ARM_MAX_WRP; ++i) { | ||
91 | if (current->thread.debug.hbp_watch[i] == bp) { | 92 | if (current->thread.debug.hbp_watch[i] == bp) { |
92 | info.si_errno = -((i << 1) + 1); | 93 | info.si_errno = -((i << 1) + 1); |
93 | break; | 94 | break; |
@@ -662,8 +663,10 @@ static int compat_gpr_get(struct task_struct *target, | |||
662 | kbuf += sizeof(reg); | 663 | kbuf += sizeof(reg); |
663 | } else { | 664 | } else { |
664 | ret = copy_to_user(ubuf, &reg, sizeof(reg)); | 665 | ret = copy_to_user(ubuf, &reg, sizeof(reg)); |
665 | if (ret) | 666 | if (ret) { |
667 | ret = -EFAULT; | ||
666 | break; | 668 | break; |
669 | } | ||
667 | 670 | ||
668 | ubuf += sizeof(reg); | 671 | ubuf += sizeof(reg); |
669 | } | 672 | } |
@@ -701,8 +704,10 @@ static int compat_gpr_set(struct task_struct *target, | |||
701 | kbuf += sizeof(reg); | 704 | kbuf += sizeof(reg); |
702 | } else { | 705 | } else { |
703 | ret = copy_from_user(&reg, ubuf, sizeof(reg)); | 706 | ret = copy_from_user(&reg, ubuf, sizeof(reg)); |
704 | if (ret) | 707 | if (ret) { |
705 | return ret; | 708 | ret = -EFAULT; |
709 | break; | ||
710 | } | ||
706 | 711 | ||
707 | ubuf += sizeof(reg); | 712 | ubuf += sizeof(reg); |
708 | } | 713 | } |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f6f0ccf35ae6..edb146d01857 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -78,6 +78,7 @@ unsigned int compat_elf_hwcap2 __read_mostly; | |||
78 | #endif | 78 | #endif |
79 | 79 | ||
80 | static const char *cpu_name; | 80 | static const char *cpu_name; |
81 | static const char *machine_name; | ||
81 | phys_addr_t __fdt_pointer __initdata; | 82 | phys_addr_t __fdt_pointer __initdata; |
82 | 83 | ||
83 | /* | 84 | /* |
@@ -309,6 +310,8 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) | |||
309 | while (true) | 310 | while (true) |
310 | cpu_relax(); | 311 | cpu_relax(); |
311 | } | 312 | } |
313 | |||
314 | machine_name = of_flat_dt_get_machine_name(); | ||
312 | } | 315 | } |
313 | 316 | ||
314 | /* | 317 | /* |
@@ -447,21 +450,10 @@ static int c_show(struct seq_file *m, void *v) | |||
447 | { | 450 | { |
448 | int i; | 451 | int i; |
449 | 452 | ||
450 | /* | 453 | seq_printf(m, "Processor\t: %s rev %d (%s)\n", |
451 | * Dump out the common processor features in a single line. Userspace | 454 | cpu_name, read_cpuid_id() & 15, ELF_PLATFORM); |
452 | * should read the hwcaps with getauxval(AT_HWCAP) rather than | ||
453 | * attempting to parse this. | ||
454 | */ | ||
455 | seq_puts(m, "features\t:"); | ||
456 | for (i = 0; hwcap_str[i]; i++) | ||
457 | if (elf_hwcap & (1 << i)) | ||
458 | seq_printf(m, " %s", hwcap_str[i]); | ||
459 | seq_puts(m, "\n\n"); | ||
460 | 455 | ||
461 | for_each_online_cpu(i) { | 456 | for_each_online_cpu(i) { |
462 | struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); | ||
463 | u32 midr = cpuinfo->reg_midr; | ||
464 | |||
465 | /* | 457 | /* |
466 | * glibc reads /proc/cpuinfo to determine the number of | 458 | * glibc reads /proc/cpuinfo to determine the number of |
467 | * online processors, looking for lines beginning with | 459 | * online processors, looking for lines beginning with |
@@ -470,13 +462,25 @@ static int c_show(struct seq_file *m, void *v) | |||
470 | #ifdef CONFIG_SMP | 462 | #ifdef CONFIG_SMP |
471 | seq_printf(m, "processor\t: %d\n", i); | 463 | seq_printf(m, "processor\t: %d\n", i); |
472 | #endif | 464 | #endif |
473 | seq_printf(m, "implementer\t: 0x%02x\n", | ||
474 | MIDR_IMPLEMENTOR(midr)); | ||
475 | seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr)); | ||
476 | seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr)); | ||
477 | seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr)); | ||
478 | } | 465 | } |
479 | 466 | ||
467 | /* dump out the processor features */ | ||
468 | seq_puts(m, "Features\t: "); | ||
469 | |||
470 | for (i = 0; hwcap_str[i]; i++) | ||
471 | if (elf_hwcap & (1 << i)) | ||
472 | seq_printf(m, "%s ", hwcap_str[i]); | ||
473 | |||
474 | seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); | ||
475 | seq_printf(m, "CPU architecture: AArch64\n"); | ||
476 | seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); | ||
477 | seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); | ||
478 | seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); | ||
479 | |||
480 | seq_puts(m, "\n"); | ||
481 | |||
482 | seq_printf(m, "Hardware\t: %s\n", machine_name); | ||
483 | |||
480 | return 0; | 484 | return 0; |
481 | } | 485 | } |
482 | 486 | ||
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index de2b0226e06d..dc47e53e9e28 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c | |||
@@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs) | |||
79 | 79 | ||
80 | case __ARM_NR_compat_set_tls: | 80 | case __ARM_NR_compat_set_tls: |
81 | current->thread.tp_value = regs->regs[0]; | 81 | current->thread.tp_value = regs->regs[0]; |
82 | |||
83 | /* | ||
84 | * Protect against register corruption from context switch. | ||
85 | * See comment in tls_thread_flush. | ||
86 | */ | ||
87 | barrier(); | ||
82 | asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0])); | 88 | asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0])); |
83 | return 0; | 89 | return 0; |
84 | 90 | ||
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index e28be510380c..34b8bd0711e9 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c | |||
@@ -66,6 +66,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
66 | else | 66 | else |
67 | kvm_vcpu_block(vcpu); | 67 | kvm_vcpu_block(vcpu); |
68 | 68 | ||
69 | kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); | ||
70 | |||
69 | return 1; | 71 | return 1; |
70 | } | 72 | } |
71 | 73 | ||
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index d968796f4b2d..c3191168a994 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S | |||
@@ -80,6 +80,10 @@ __do_hyp_init: | |||
80 | msr mair_el2, x4 | 80 | msr mair_el2, x4 |
81 | isb | 81 | isb |
82 | 82 | ||
83 | /* Invalidate the stale TLBs from Bootloader */ | ||
84 | tlbi alle2 | ||
85 | dsb sy | ||
86 | |||
83 | mrs x4, sctlr_el2 | 87 | mrs x4, sctlr_el2 |
84 | and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2 | 88 | and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2 |
85 | ldr x5, =SCTLR_EL2_FLAGS | 89 | ldr x5, =SCTLR_EL2_FLAGS |
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 1fcdd344c7ad..4ef7a54813e6 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <uapi/asm/unistd.h> | 4 | #include <uapi/asm/unistd.h> |
5 | 5 | ||
6 | 6 | ||
7 | #define NR_syscalls 352 | 7 | #define NR_syscalls 354 |
8 | 8 | ||
9 | #define __ARCH_WANT_OLD_READDIR | 9 | #define __ARCH_WANT_OLD_READDIR |
10 | #define __ARCH_WANT_OLD_STAT | 10 | #define __ARCH_WANT_OLD_STAT |
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 9cd82fbc7817..b419c6b7ac37 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h | |||
@@ -357,5 +357,7 @@ | |||
357 | #define __NR_sched_setattr 349 | 357 | #define __NR_sched_setattr 349 |
358 | #define __NR_sched_getattr 350 | 358 | #define __NR_sched_getattr 350 |
359 | #define __NR_renameat2 351 | 359 | #define __NR_renameat2 351 |
360 | #define __NR_getrandom 352 | ||
361 | #define __NR_memfd_create 353 | ||
360 | 362 | ||
361 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ | 363 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ |
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 501e10212789..05b46c2b08b8 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
@@ -372,4 +372,6 @@ ENTRY(sys_call_table) | |||
372 | .long sys_sched_setattr | 372 | .long sys_sched_setattr |
373 | .long sys_sched_getattr /* 350 */ | 373 | .long sys_sched_getattr /* 350 */ |
374 | .long sys_renameat2 | 374 | .long sys_renameat2 |
375 | .long sys_getrandom | ||
376 | .long sys_memfd_create | ||
375 | 377 | ||
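The three m68k hunks above only reserve the numbers and wire sys_getrandom and sys_memfd_create into the table. A rough user-space sketch of exercising the new entry points via syscall(2), assuming the libc wrappers are not yet available (the fallback numbers are the m68k values from the uapi header above and differ on other architectures):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_getrandom
	#define __NR_getrandom 352      /* m68k value from the hunk above */
	#endif
	#ifndef __NR_memfd_create
	#define __NR_memfd_create 353   /* m68k value from the hunk above */
	#endif

	int main(void)
	{
		unsigned char buf[16];
		long n = syscall(__NR_getrandom, buf, sizeof(buf), 0);
		int fd = syscall(__NR_memfd_create, "demo", 0);

		printf("getrandom: %ld bytes, memfd_create: fd %d\n", n, fd);
		return 0;
	}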
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 40e1c1dd0e24..6feded3b0c4c 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -127,7 +127,7 @@ config SECCOMP | |||
127 | 127 | ||
128 | endmenu | 128 | endmenu |
129 | 129 | ||
130 | menu "Advanced setup" | 130 | menu "Kernel features" |
131 | 131 | ||
132 | config ADVANCED_OPTIONS | 132 | config ADVANCED_OPTIONS |
133 | bool "Prompt for advanced kernel configuration options" | 133 | bool "Prompt for advanced kernel configuration options" |
@@ -248,10 +248,10 @@ config MICROBLAZE_64K_PAGES | |||
248 | 248 | ||
249 | endchoice | 249 | endchoice |
250 | 250 | ||
251 | endmenu | ||
252 | |||
253 | source "mm/Kconfig" | 251 | source "mm/Kconfig" |
254 | 252 | ||
253 | endmenu | ||
254 | |||
255 | menu "Executable file formats" | 255 | menu "Executable file formats" |
256 | 256 | ||
257 | source "fs/Kconfig.binfmt" | 257 | source "fs/Kconfig.binfmt" |
diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h index b4a4cb150aa9..596e485ae707 100644 --- a/arch/microblaze/include/asm/entry.h +++ b/arch/microblaze/include/asm/entry.h | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <asm/percpu.h> | 16 | #include <asm/percpu.h> |
17 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
18 | #include <linux/linkage.h> | ||
18 | 19 | ||
19 | /* | 20 | /* |
20 | * These are per-cpu variables required in entry.S, among other | 21 | * These are per-cpu variables required in entry.S, among other |
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 0aa005703a0b..59a89a64a865 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h | |||
@@ -98,13 +98,13 @@ static inline int access_ok(int type, const void __user *addr, | |||
98 | 98 | ||
99 | if ((get_fs().seg < ((unsigned long)addr)) || | 99 | if ((get_fs().seg < ((unsigned long)addr)) || |
100 | (get_fs().seg < ((unsigned long)addr + size - 1))) { | 100 | (get_fs().seg < ((unsigned long)addr + size - 1))) { |
101 | pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", | 101 | pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", |
102 | type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, | 102 | type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, |
103 | (u32)get_fs().seg); | 103 | (u32)get_fs().seg); |
104 | return 0; | 104 | return 0; |
105 | } | 105 | } |
106 | ok: | 106 | ok: |
107 | pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", | 107 | pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", |
108 | type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, | 108 | type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, |
109 | (u32)get_fs().seg); | 109 | (u32)get_fs().seg); |
110 | return 1; | 110 | return 1; |
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index fd56a8f66489..ea4b233647c1 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h | |||
@@ -38,6 +38,6 @@ | |||
38 | 38 | ||
39 | #endif /* __ASSEMBLY__ */ | 39 | #endif /* __ASSEMBLY__ */ |
40 | 40 | ||
41 | #define __NR_syscalls 381 | 41 | #define __NR_syscalls 387 |
42 | 42 | ||
43 | #endif /* _ASM_MICROBLAZE_UNISTD_H */ | 43 | #endif /* _ASM_MICROBLAZE_UNISTD_H */ |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 6e75e2030927..1554a6f2a5bb 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -321,6 +321,22 @@ source "fs/Kconfig" | |||
321 | 321 | ||
322 | source "arch/parisc/Kconfig.debug" | 322 | source "arch/parisc/Kconfig.debug" |
323 | 323 | ||
324 | config SECCOMP | ||
325 | def_bool y | ||
326 | prompt "Enable seccomp to safely compute untrusted bytecode" | ||
327 | ---help--- | ||
328 | This kernel feature is useful for number crunching applications | ||
329 | that may need to compute untrusted bytecode during their | ||
330 | execution. By using pipes or other transports made available to | ||
331 | the process as file descriptors supporting the read/write | ||
332 | syscalls, it's possible to isolate those applications in | ||
333 | their own address space using seccomp. Once seccomp is | ||
334 | enabled via prctl(PR_SET_SECCOMP), it cannot be disabled | ||
335 | and the task is only allowed to execute a few safe syscalls | ||
336 | defined by each seccomp mode. | ||
337 | |||
338 | If unsure, say Y. Only embedded should say N here. | ||
339 | |||
324 | source "security/Kconfig" | 340 | source "security/Kconfig" |
325 | 341 | ||
326 | source "crypto/Kconfig" | 342 | source "crypto/Kconfig" |
diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c index d9dc6cd3b7d2..e5c4da035810 100644 --- a/arch/parisc/hpux/sys_hpux.c +++ b/arch/parisc/hpux/sys_hpux.c | |||
@@ -456,7 +456,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2) | |||
456 | } | 456 | } |
457 | 457 | ||
458 | /* String could be altered by userspace after strlen_user() */ | 458 | /* String could be altered by userspace after strlen_user() */ |
459 | fsname[len] = '\0'; | 459 | fsname[len - 1] = '\0'; |
460 | 460 | ||
461 | printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname); | 461 | printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname); |
462 | if ( !strcmp(fsname, "hfs") ) { | 462 | if ( !strcmp(fsname, "hfs") ) { |
diff --git a/arch/parisc/include/asm/seccomp.h b/arch/parisc/include/asm/seccomp.h new file mode 100644 index 000000000000..015f7887aa29 --- /dev/null +++ b/arch/parisc/include/asm/seccomp.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef _ASM_PARISC_SECCOMP_H | ||
2 | #define _ASM_PARISC_SECCOMP_H | ||
3 | |||
4 | #include <linux/unistd.h> | ||
5 | |||
6 | #define __NR_seccomp_read __NR_read | ||
7 | #define __NR_seccomp_write __NR_write | ||
8 | #define __NR_seccomp_exit __NR_exit | ||
9 | #define __NR_seccomp_sigreturn __NR_rt_sigreturn | ||
10 | |||
11 | #define __NR_seccomp_read_32 __NR_read | ||
12 | #define __NR_seccomp_write_32 __NR_write | ||
13 | #define __NR_seccomp_exit_32 __NR_exit | ||
14 | #define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn | ||
15 | |||
16 | #endif /* _ASM_PARISC_SECCOMP_H */ | ||
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 4b9b10ce1f9d..a84611835549 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h | |||
@@ -60,6 +60,7 @@ struct thread_info { | |||
60 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ | 60 | #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ |
61 | #define TIF_SINGLESTEP 9 /* single stepping? */ | 61 | #define TIF_SINGLESTEP 9 /* single stepping? */ |
62 | #define TIF_BLOCKSTEP 10 /* branch stepping? */ | 62 | #define TIF_BLOCKSTEP 10 /* branch stepping? */ |
63 | #define TIF_SECCOMP 11 /* secure computing */ | ||
63 | 64 | ||
64 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 65 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
65 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) | 66 | #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) |
@@ -70,11 +71,13 @@ struct thread_info { | |||
70 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 71 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
71 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) | 72 | #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) |
72 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) | 73 | #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) |
74 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | ||
73 | 75 | ||
74 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ | 76 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ |
75 | _TIF_NEED_RESCHED) | 77 | _TIF_NEED_RESCHED) |
76 | #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ | 78 | #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ |
77 | _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT) | 79 | _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \ |
80 | _TIF_SECCOMP) | ||
78 | 81 | ||
79 | #ifdef CONFIG_64BIT | 82 | #ifdef CONFIG_64BIT |
80 | # ifdef CONFIG_COMPAT | 83 | # ifdef CONFIG_COMPAT |
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 47e0e21d2272..8667f18be238 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h | |||
@@ -830,8 +830,11 @@ | |||
830 | #define __NR_sched_getattr (__NR_Linux + 335) | 830 | #define __NR_sched_getattr (__NR_Linux + 335) |
831 | #define __NR_utimes (__NR_Linux + 336) | 831 | #define __NR_utimes (__NR_Linux + 336) |
832 | #define __NR_renameat2 (__NR_Linux + 337) | 832 | #define __NR_renameat2 (__NR_Linux + 337) |
833 | #define __NR_seccomp (__NR_Linux + 338) | ||
834 | #define __NR_getrandom (__NR_Linux + 339) | ||
835 | #define __NR_memfd_create (__NR_Linux + 340) | ||
833 | 836 | ||
834 | #define __NR_Linux_syscalls (__NR_renameat2 + 1) | 837 | #define __NR_Linux_syscalls (__NR_memfd_create + 1) |
835 | 838 | ||
836 | 839 | ||
837 | #define __IGNORE_select /* newselect */ | 840 | #define __IGNORE_select /* newselect */ |
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index e842ee233db4..3bab72462ab5 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
@@ -270,6 +270,12 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
270 | { | 270 | { |
271 | long ret = 0; | 271 | long ret = 0; |
272 | 272 | ||
273 | /* Do the secure computing check first. */ | ||
274 | if (secure_computing(regs->gr[20])) { | ||
275 | /* seccomp failures shouldn't expose any additional code. */ | ||
276 | return -1; | ||
277 | } | ||
278 | |||
273 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | 279 | if (test_thread_flag(TIF_SYSCALL_TRACE) && |
274 | tracehook_report_syscall_entry(regs)) | 280 | tracehook_report_syscall_entry(regs)) |
275 | ret = -1L; | 281 | ret = -1L; |
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 838786011037..7ef22e3387e0 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
@@ -74,7 +74,7 @@ ENTRY(linux_gateway_page) | |||
74 | /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */ | 74 | /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */ |
75 | /* Light-weight-syscall entry must always be located at 0xb0 */ | 75 | /* Light-weight-syscall entry must always be located at 0xb0 */ |
76 | /* WARNING: Keep this number updated with table size changes */ | 76 | /* WARNING: Keep this number updated with table size changes */ |
77 | #define __NR_lws_entries (2) | 77 | #define __NR_lws_entries (3) |
78 | 78 | ||
79 | lws_entry: | 79 | lws_entry: |
80 | gate lws_start, %r0 /* increase privilege */ | 80 | gate lws_start, %r0 /* increase privilege */ |
@@ -502,7 +502,7 @@ lws_exit: | |||
502 | 502 | ||
503 | 503 | ||
504 | /*************************************************** | 504 | /*************************************************** |
505 | Implementing CAS as an atomic operation: | 505 | Implementing 32bit CAS as an atomic operation: |
506 | 506 | ||
507 | %r26 - Address to examine | 507 | %r26 - Address to examine |
508 | %r25 - Old value to check (old) | 508 | %r25 - Old value to check (old) |
@@ -659,6 +659,230 @@ cas_action: | |||
659 | ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page) | 659 | ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page) |
660 | 660 | ||
661 | 661 | ||
662 | /*************************************************** | ||
663 | New CAS implementation which uses pointers and variable size | ||
664 | information. The values pointed to by old and new MUST NOT change | ||
665 | while performing CAS. The lock only protects the value at %r26. | ||
666 | |||
667 | %r26 - Address to examine | ||
668 | %r25 - Pointer to the value to check (old) | ||
669 | %r24 - Pointer to the value to set (new) | ||
670 | %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit) | ||
671 | %r28 - Return non-zero on failure | ||
672 | %r21 - Kernel error code | ||
673 | |||
674 | %r21 has the following meanings: | ||
675 | |||
676 | EAGAIN - CAS is busy, ldcw failed, try again. | ||
677 | EFAULT - Read or write failed. | ||
678 | |||
679 | Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only) | ||
680 | |||
681 | ****************************************************/ | ||
682 | |||
683 | /* ELF32 Process entry path */ | ||
684 | lws_compare_and_swap_2: | ||
685 | #ifdef CONFIG_64BIT | ||
686 | /* Clip the input registers */ | ||
687 | depdi 0, 31, 32, %r26 | ||
688 | depdi 0, 31, 32, %r25 | ||
689 | depdi 0, 31, 32, %r24 | ||
690 | depdi 0, 31, 32, %r23 | ||
691 | #endif | ||
692 | |||
693 | /* Check the validity of the size pointer */ | ||
694 | subi,>>= 4, %r23, %r0 | ||
695 | b,n lws_exit_nosys | ||
696 | |||
697 | /* Jump to the functions which will load the old and new values into | ||
698 | registers depending on their size */ | ||
699 | shlw %r23, 2, %r29 | ||
700 | blr %r29, %r0 | ||
701 | nop | ||
702 | |||
703 | /* 8bit load */ | ||
704 | 4: ldb 0(%sr3,%r25), %r25 | ||
705 | b cas2_lock_start | ||
706 | 5: ldb 0(%sr3,%r24), %r24 | ||
707 | nop | ||
708 | nop | ||
709 | nop | ||
710 | nop | ||
711 | nop | ||
712 | |||
713 | /* 16bit load */ | ||
714 | 6: ldh 0(%sr3,%r25), %r25 | ||
715 | b cas2_lock_start | ||
716 | 7: ldh 0(%sr3,%r24), %r24 | ||
717 | nop | ||
718 | nop | ||
719 | nop | ||
720 | nop | ||
721 | nop | ||
722 | |||
723 | /* 32bit load */ | ||
724 | 8: ldw 0(%sr3,%r25), %r25 | ||
725 | b cas2_lock_start | ||
726 | 9: ldw 0(%sr3,%r24), %r24 | ||
727 | nop | ||
728 | nop | ||
729 | nop | ||
730 | nop | ||
731 | nop | ||
732 | |||
733 | /* 64bit load */ | ||
734 | #ifdef CONFIG_64BIT | ||
735 | 10: ldd 0(%sr3,%r25), %r25 | ||
736 | 11: ldd 0(%sr3,%r24), %r24 | ||
737 | #else | ||
738 | /* Load old value into r22/r23 - high/low */ | ||
739 | 10: ldw 0(%sr3,%r25), %r22 | ||
740 | 11: ldw 4(%sr3,%r25), %r23 | ||
741 | /* Load new value into fr4 for atomic store later */ | ||
742 | 12: flddx 0(%sr3,%r24), %fr4 | ||
743 | #endif | ||
744 | |||
745 | cas2_lock_start: | ||
746 | /* Load start of lock table */ | ||
747 | ldil L%lws_lock_start, %r20 | ||
748 | ldo R%lws_lock_start(%r20), %r28 | ||
749 | |||
750 | /* Extract four bits from r26 and hash lock (Bits 4-7) */ | ||
751 | extru %r26, 27, 4, %r20 | ||
752 | |||
753 | /* Find the lock to use: the hash is one of 0 to | ||
754 | 15, multiplied by 16 (to keep it 16-byte aligned) | ||
755 | and added to the lock table offset. */ | ||
756 | shlw %r20, 4, %r20 | ||
757 | add %r20, %r28, %r20 | ||
758 | |||
759 | rsm PSW_SM_I, %r0 /* Disable interrupts */ | ||
760 | /* COW breaks can cause contention on UP systems */ | ||
761 | LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ | ||
762 | cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */ | ||
763 | cas2_wouldblock: | ||
764 | ldo 2(%r0), %r28 /* 2nd case */ | ||
765 | ssm PSW_SM_I, %r0 | ||
766 | b lws_exit /* Contended... */ | ||
767 | ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ | ||
768 | |||
769 | /* | ||
770 | prev = *addr; | ||
771 | if ( prev == old ) | ||
772 | *addr = new; | ||
773 | return prev; | ||
774 | */ | ||
775 | |||
776 | /* NOTES: | ||
777 | This all works because intr_do_signal | ||
778 | and schedule both check the return iasq | ||
779 | and see that we are on the kernel page, | ||
780 | so this process is never scheduled off | ||
781 | nor ever sent any signal of any sort, | ||
782 | thus it is wholly atomic from userspace's | ||
783 | perspective. | ||
784 | */ | ||
785 | cas2_action: | ||
786 | /* Jump to the correct function */ | ||
787 | blr %r29, %r0 | ||
788 | /* Set %r28 as non-zero for now */ | ||
789 | ldo 1(%r0),%r28 | ||
790 | |||
791 | /* 8bit CAS */ | ||
792 | 13: ldb,ma 0(%sr3,%r26), %r29 | ||
793 | sub,= %r29, %r25, %r0 | ||
794 | b,n cas2_end | ||
795 | 14: stb,ma %r24, 0(%sr3,%r26) | ||
796 | b cas2_end | ||
797 | copy %r0, %r28 | ||
798 | nop | ||
799 | nop | ||
800 | |||
801 | /* 16bit CAS */ | ||
802 | 15: ldh,ma 0(%sr3,%r26), %r29 | ||
803 | sub,= %r29, %r25, %r0 | ||
804 | b,n cas2_end | ||
805 | 16: sth,ma %r24, 0(%sr3,%r26) | ||
806 | b cas2_end | ||
807 | copy %r0, %r28 | ||
808 | nop | ||
809 | nop | ||
810 | |||
811 | /* 32bit CAS */ | ||
812 | 17: ldw,ma 0(%sr3,%r26), %r29 | ||
813 | sub,= %r29, %r25, %r0 | ||
814 | b,n cas2_end | ||
815 | 18: stw,ma %r24, 0(%sr3,%r26) | ||
816 | b cas2_end | ||
817 | copy %r0, %r28 | ||
818 | nop | ||
819 | nop | ||
820 | |||
821 | /* 64bit CAS */ | ||
822 | #ifdef CONFIG_64BIT | ||
823 | 19: ldd,ma 0(%sr3,%r26), %r29 | ||
824 | sub,= %r29, %r25, %r0 | ||
825 | b,n cas2_end | ||
826 | 20: std,ma %r24, 0(%sr3,%r26) | ||
827 | copy %r0, %r28 | ||
828 | #else | ||
829 | /* Compare first word */ | ||
830 | 19: ldw,ma 0(%sr3,%r26), %r29 | ||
831 | sub,= %r29, %r22, %r0 | ||
832 | b,n cas2_end | ||
833 | /* Compare second word */ | ||
834 | 20: ldw,ma 4(%sr3,%r26), %r29 | ||
835 | sub,= %r29, %r23, %r0 | ||
836 | b,n cas2_end | ||
837 | /* Perform the store */ | ||
838 | 21: fstdx %fr4, 0(%sr3,%r26) | ||
839 | copy %r0, %r28 | ||
840 | #endif | ||
841 | |||
842 | cas2_end: | ||
843 | /* Free lock */ | ||
844 | stw,ma %r20, 0(%sr2,%r20) | ||
845 | /* Enable interrupts */ | ||
846 | ssm PSW_SM_I, %r0 | ||
847 | /* Return to userspace, set no error */ | ||
848 | b lws_exit | ||
849 | copy %r0, %r21 | ||
850 | |||
851 | 22: | ||
852 | /* Error occurred on load or store */ | ||
853 | /* Free lock */ | ||
854 | stw %r20, 0(%sr2,%r20) | ||
855 | ssm PSW_SM_I, %r0 | ||
856 | ldo 1(%r0),%r28 | ||
857 | b lws_exit | ||
858 | ldo -EFAULT(%r0),%r21 /* set errno */ | ||
859 | nop | ||
860 | nop | ||
861 | nop | ||
862 | |||
863 | /* Exception table entries, for the load and store, return EFAULT. | ||
864 | Each of the entries must be relocated. */ | ||
865 | ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page) | ||
866 | ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page) | ||
867 | ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page) | ||
868 | ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page) | ||
869 | ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page) | ||
870 | ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page) | ||
871 | ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page) | ||
872 | ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page) | ||
873 | ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page) | ||
874 | ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page) | ||
875 | ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page) | ||
876 | ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page) | ||
877 | ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page) | ||
878 | ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page) | ||
879 | ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page) | ||
880 | ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page) | ||
881 | #ifndef CONFIG_64BIT | ||
882 | ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page) | ||
883 | ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page) | ||
884 | #endif | ||
885 | |||
662 | /* Make sure nothing else is placed on this page */ | 886 | /* Make sure nothing else is placed on this page */ |
663 | .align PAGE_SIZE | 887 | .align PAGE_SIZE |
664 | END(linux_gateway_page) | 888 | END(linux_gateway_page) |
@@ -675,8 +899,9 @@ ENTRY(end_linux_gateway_page) | |||
675 | /* Light-weight-syscall table */ | 899 | /* Light-weight-syscall table */ |
676 | /* Start of lws table. */ | 900 | /* Start of lws table. */ |
677 | ENTRY(lws_table) | 901 | ENTRY(lws_table) |
678 | LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */ | 902 | LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */ |
679 | LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */ | 903 | LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */ |
904 | LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */ | ||
680 | END(lws_table) | 905 | END(lws_table) |
681 | /* End of lws table */ | 906 | /* End of lws table */ |
682 | 907 | ||
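The register-level contract documented in the lws_compare_and_swap_2 comment block above can be summarised in plain C. This is only a behavioural model of what the gateway code implements, not how userspace actually enters it (that is a branch into the gateway page with arguments in registers), and the helper name is made up for illustration:

	#include <errno.h>
	#include <string.h>

	/*
	 * Model of the new LWS CAS: addr is the word to update, old/new point to
	 * operands of 1 << size bytes (size = 0/1/2/3 for 8/16/32/64 bit).
	 * Returns 0 on success and non-zero on failure, mirroring %r28; the real
	 * syscall additionally reports -EAGAIN/-EFAULT in %r21.
	 */
	static int cas2_model(void *addr, const void *old, const void *new, int size)
	{
		size_t len;

		if (size < 0 || size > 3)
			return -ENOSYS;          /* invalid size: lws_exit_nosys path */
		len = (size_t)1 << size;

		/* prev = *addr; if (prev == old) *addr = new; */
		if (memcmp(addr, old, len) != 0)
			return 1;                /* comparison failed, %r28 stays non-zero */
		memcpy(addr, new, len);
		return 0;                        /* stored, %r28 cleared */
	}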
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 84c5d3a58fa1..b563d9c8268b 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -433,6 +433,9 @@ | |||
433 | ENTRY_SAME(sched_getattr) /* 335 */ | 433 | ENTRY_SAME(sched_getattr) /* 335 */ |
434 | ENTRY_COMP(utimes) | 434 | ENTRY_COMP(utimes) |
435 | ENTRY_SAME(renameat2) | 435 | ENTRY_SAME(renameat2) |
436 | ENTRY_SAME(seccomp) | ||
437 | ENTRY_SAME(getrandom) | ||
438 | ENTRY_SAME(memfd_create) /* 340 */ | ||
436 | 439 | ||
437 | /* Nothing yet */ | 440 | /* Nothing yet */ |
438 | 441 | ||
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index 4bee1a6d41d0..45fd06cdc3e8 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig | |||
@@ -5,6 +5,7 @@ CONFIG_SMP=y | |||
5 | CONFIG_NR_CPUS=4 | 5 | CONFIG_NR_CPUS=4 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
7 | CONFIG_SYSVIPC=y | 7 | CONFIG_SYSVIPC=y |
8 | CONFIG_FHANDLE=y | ||
8 | CONFIG_IKCONFIG=y | 9 | CONFIG_IKCONFIG=y |
9 | CONFIG_IKCONFIG_PROC=y | 10 | CONFIG_IKCONFIG_PROC=y |
10 | CONFIG_LOG_BUF_SHIFT=15 | 11 | CONFIG_LOG_BUF_SHIFT=15 |
diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig index 6d7b22f41b50..77d7bf3ca2ac 100644 --- a/arch/powerpc/configs/celleb_defconfig +++ b/arch/powerpc/configs/celleb_defconfig | |||
@@ -5,6 +5,7 @@ CONFIG_SMP=y | |||
5 | CONFIG_NR_CPUS=4 | 5 | CONFIG_NR_CPUS=4 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
7 | CONFIG_SYSVIPC=y | 7 | CONFIG_SYSVIPC=y |
8 | CONFIG_FHANDLE=y | ||
8 | CONFIG_IKCONFIG=y | 9 | CONFIG_IKCONFIG=y |
9 | CONFIG_IKCONFIG_PROC=y | 10 | CONFIG_IKCONFIG_PROC=y |
10 | CONFIG_LOG_BUF_SHIFT=15 | 11 | CONFIG_LOG_BUF_SHIFT=15 |
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index 4b07bade1ba9..269d6e47c67d 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig | |||
@@ -4,6 +4,7 @@ CONFIG_ALTIVEC=y | |||
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_NR_CPUS=24 | 5 | CONFIG_NR_CPUS=24 |
6 | CONFIG_SYSVIPC=y | 6 | CONFIG_SYSVIPC=y |
7 | CONFIG_FHANDLE=y | ||
7 | CONFIG_IRQ_DOMAIN_DEBUG=y | 8 | CONFIG_IRQ_DOMAIN_DEBUG=y |
8 | CONFIG_NO_HZ=y | 9 | CONFIG_NO_HZ=y |
9 | CONFIG_HIGH_RES_TIMERS=y | 10 | CONFIG_HIGH_RES_TIMERS=y |
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 3c72fa615bd9..7594c5ac6481 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig | |||
@@ -5,6 +5,7 @@ CONFIG_NR_CPUS=4 | |||
5 | CONFIG_EXPERIMENTAL=y | 5 | CONFIG_EXPERIMENTAL=y |
6 | CONFIG_SYSVIPC=y | 6 | CONFIG_SYSVIPC=y |
7 | CONFIG_POSIX_MQUEUE=y | 7 | CONFIG_POSIX_MQUEUE=y |
8 | CONFIG_FHANDLE=y | ||
8 | CONFIG_IKCONFIG=y | 9 | CONFIG_IKCONFIG=y |
9 | CONFIG_IKCONFIG_PROC=y | 10 | CONFIG_IKCONFIG_PROC=y |
10 | CONFIG_BLK_DEV_INITRD=y | 11 | CONFIG_BLK_DEV_INITRD=y |
diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 95e545d9f25c..c8b6a9ddb21b 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig | |||
@@ -4,6 +4,7 @@ CONFIG_NR_CPUS=4 | |||
4 | CONFIG_EXPERIMENTAL=y | 4 | CONFIG_EXPERIMENTAL=y |
5 | CONFIG_SYSVIPC=y | 5 | CONFIG_SYSVIPC=y |
6 | CONFIG_POSIX_MQUEUE=y | 6 | CONFIG_POSIX_MQUEUE=y |
7 | CONFIG_FHANDLE=y | ||
7 | CONFIG_IKCONFIG=y | 8 | CONFIG_IKCONFIG=y |
8 | CONFIG_IKCONFIG_PROC=y | 9 | CONFIG_IKCONFIG_PROC=y |
9 | # CONFIG_COMPAT_BRK is not set | 10 | # CONFIG_COMPAT_BRK is not set |
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index cec044a3ff69..e5e7838af008 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig | |||
@@ -3,6 +3,7 @@ CONFIG_ALTIVEC=y | |||
3 | CONFIG_SMP=y | 3 | CONFIG_SMP=y |
4 | CONFIG_NR_CPUS=2 | 4 | CONFIG_NR_CPUS=2 |
5 | CONFIG_SYSVIPC=y | 5 | CONFIG_SYSVIPC=y |
6 | CONFIG_FHANDLE=y | ||
6 | CONFIG_NO_HZ=y | 7 | CONFIG_NO_HZ=y |
7 | CONFIG_HIGH_RES_TIMERS=y | 8 | CONFIG_HIGH_RES_TIMERS=y |
8 | CONFIG_BLK_DEV_INITRD=y | 9 | CONFIG_BLK_DEV_INITRD=y |
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index f26b267eb71f..f6c02f8cdc62 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig | |||
@@ -4,6 +4,7 @@ CONFIG_VSX=y | |||
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_SYSVIPC=y | 5 | CONFIG_SYSVIPC=y |
6 | CONFIG_POSIX_MQUEUE=y | 6 | CONFIG_POSIX_MQUEUE=y |
7 | CONFIG_FHANDLE=y | ||
7 | CONFIG_IRQ_DOMAIN_DEBUG=y | 8 | CONFIG_IRQ_DOMAIN_DEBUG=y |
8 | CONFIG_NO_HZ=y | 9 | CONFIG_NO_HZ=y |
9 | CONFIG_HIGH_RES_TIMERS=y | 10 | CONFIG_HIGH_RES_TIMERS=y |
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 438e813dc9cb..587f5514f9b1 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig | |||
@@ -3,6 +3,7 @@ CONFIG_PPC_BOOK3E_64=y | |||
3 | CONFIG_SMP=y | 3 | CONFIG_SMP=y |
4 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
5 | CONFIG_POSIX_MQUEUE=y | 5 | CONFIG_POSIX_MQUEUE=y |
6 | CONFIG_FHANDLE=y | ||
6 | CONFIG_NO_HZ=y | 7 | CONFIG_NO_HZ=y |
7 | CONFIG_HIGH_RES_TIMERS=y | 8 | CONFIG_HIGH_RES_TIMERS=y |
8 | CONFIG_TASKSTATS=y | 9 | CONFIG_TASKSTATS=y |
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index fdee37fab81c..2e637c881d2b 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig | |||
@@ -5,6 +5,7 @@ CONFIG_SMP=y | |||
5 | CONFIG_NR_CPUS=2 | 5 | CONFIG_NR_CPUS=2 |
6 | CONFIG_SYSVIPC=y | 6 | CONFIG_SYSVIPC=y |
7 | CONFIG_POSIX_MQUEUE=y | 7 | CONFIG_POSIX_MQUEUE=y |
8 | CONFIG_FHANDLE=y | ||
8 | CONFIG_HIGH_RES_TIMERS=y | 9 | CONFIG_HIGH_RES_TIMERS=y |
9 | CONFIG_BLK_DEV_INITRD=y | 10 | CONFIG_BLK_DEV_INITRD=y |
10 | CONFIG_RD_LZMA=y | 11 | CONFIG_RD_LZMA=y |
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index a905063281cc..50375f1f59e7 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig | |||
@@ -5,6 +5,7 @@ CONFIG_SMP=y | |||
5 | CONFIG_NR_CPUS=2048 | 5 | CONFIG_NR_CPUS=2048 |
6 | CONFIG_SYSVIPC=y | 6 | CONFIG_SYSVIPC=y |
7 | CONFIG_POSIX_MQUEUE=y | 7 | CONFIG_POSIX_MQUEUE=y |
8 | CONFIG_FHANDLE=y | ||
8 | CONFIG_AUDIT=y | 9 | CONFIG_AUDIT=y |
9 | CONFIG_AUDITSYSCALL=y | 10 | CONFIG_AUDITSYSCALL=y |
10 | CONFIG_IRQ_DOMAIN_DEBUG=y | 11 | CONFIG_IRQ_DOMAIN_DEBUG=y |
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig index 58e3dbf43ca4..4428ee428f4e 100644 --- a/arch/powerpc/configs/pseries_le_defconfig +++ b/arch/powerpc/configs/pseries_le_defconfig | |||
@@ -6,6 +6,7 @@ CONFIG_NR_CPUS=2048 | |||
6 | CONFIG_CPU_LITTLE_ENDIAN=y | 6 | CONFIG_CPU_LITTLE_ENDIAN=y |
7 | CONFIG_SYSVIPC=y | 7 | CONFIG_SYSVIPC=y |
8 | CONFIG_POSIX_MQUEUE=y | 8 | CONFIG_POSIX_MQUEUE=y |
9 | CONFIG_FHANDLE=y | ||
9 | CONFIG_AUDIT=y | 10 | CONFIG_AUDIT=y |
10 | CONFIG_AUDITSYSCALL=y | 11 | CONFIG_AUDITSYSCALL=y |
11 | CONFIG_IRQ_DOMAIN_DEBUG=y | 12 | CONFIG_IRQ_DOMAIN_DEBUG=y |
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 279b80f3bb29..c0c61fa9cd9e 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h | |||
@@ -47,6 +47,12 @@ | |||
47 | STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) | 47 | STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) |
48 | #define STACK_FRAME_MARKER 12 | 48 | #define STACK_FRAME_MARKER 12 |
49 | 49 | ||
50 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
51 | #define STACK_FRAME_MIN_SIZE 32 | ||
52 | #else | ||
53 | #define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD | ||
54 | #endif | ||
55 | |||
50 | /* Size of dummy stack frame allocated when calling signal handler. */ | 56 | /* Size of dummy stack frame allocated when calling signal handler. */ |
51 | #define __SIGNAL_FRAMESIZE 128 | 57 | #define __SIGNAL_FRAMESIZE 128 |
52 | #define __SIGNAL_FRAMESIZE32 64 | 58 | #define __SIGNAL_FRAMESIZE32 64 |
@@ -60,6 +66,7 @@ | |||
60 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) | 66 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) |
61 | #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD) | 67 | #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD) |
62 | #define STACK_FRAME_MARKER 2 | 68 | #define STACK_FRAME_MARKER 2 |
69 | #define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD | ||
63 | 70 | ||
64 | /* Size of stack frame allocated when calling signal handler. */ | 71 | /* Size of stack frame allocated when calling signal handler. */ |
65 | #define __SIGNAL_FRAMESIZE 64 | 72 | #define __SIGNAL_FRAMESIZE 64 |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 542bc0f0673f..7d8a60068805 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
@@ -362,3 +362,6 @@ SYSCALL(ni_syscall) /* sys_kcmp */ | |||
362 | SYSCALL_SPU(sched_setattr) | 362 | SYSCALL_SPU(sched_setattr) |
363 | SYSCALL_SPU(sched_getattr) | 363 | SYSCALL_SPU(sched_getattr) |
364 | SYSCALL_SPU(renameat2) | 364 | SYSCALL_SPU(renameat2) |
365 | SYSCALL_SPU(seccomp) | ||
366 | SYSCALL_SPU(getrandom) | ||
367 | SYSCALL_SPU(memfd_create) | ||
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 5ce5552ab9f5..4e9af3fd43e7 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #include <uapi/asm/unistd.h> | 12 | #include <uapi/asm/unistd.h> |
13 | 13 | ||
14 | 14 | ||
15 | #define __NR_syscalls 358 | 15 | #define __NR_syscalls 361 |
16 | 16 | ||
17 | #define __NR__exit __NR_exit | 17 | #define __NR__exit __NR_exit |
18 | #define NR_syscalls __NR_syscalls | 18 | #define NR_syscalls __NR_syscalls |
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 2d526f7b48da..0688fc06e183 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h | |||
@@ -380,5 +380,8 @@ | |||
380 | #define __NR_sched_setattr 355 | 380 | #define __NR_sched_setattr 355 |
381 | #define __NR_sched_getattr 356 | 381 | #define __NR_sched_getattr 356 |
382 | #define __NR_renameat2 357 | 382 | #define __NR_renameat2 357 |
383 | #define __NR_seccomp 358 | ||
384 | #define __NR_getrandom 359 | ||
385 | #define __NR_memfd_create 360 | ||
383 | 386 | ||
384 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ | 387 | #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 72c20bb16d26..79294c4c5015 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -62,10 +62,10 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | kvm->arch.hpt_cma_alloc = 0; | 64 | kvm->arch.hpt_cma_alloc = 0; |
65 | page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT)); | 65 | page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT)); |
66 | if (page) { | 66 | if (page) { |
67 | hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); | 67 | hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); |
68 | memset((void *)hpt, 0, (1 << order)); | 68 | memset((void *)hpt, 0, (1ul << order)); |
69 | kvm->arch.hpt_cma_alloc = 1; | 69 | kvm->arch.hpt_cma_alloc = 1; |
70 | } | 70 | } |
71 | 71 | ||
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 74d1e780748b..2396dda282cd 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c | |||
@@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp) | |||
35 | return 0; /* must be 16-byte aligned */ | 35 | return 0; /* must be 16-byte aligned */ |
36 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | 36 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) |
37 | return 0; | 37 | return 0; |
38 | if (sp >= prev_sp + STACK_FRAME_OVERHEAD) | 38 | if (sp >= prev_sp + STACK_FRAME_MIN_SIZE) |
39 | return 1; | 39 | return 1; |
40 | /* | 40 | /* |
41 | * sp could decrease when we jump off an interrupt stack | 41 | * sp could decrease when we jump off an interrupt stack |
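For context on the callchain hunk above: on the ELFv2 ABI a caller may allocate a stack frame as small as 32 bytes, so a back-chain walker that insists on the ELFv1 overhead of 112 bytes would reject legitimate frames and truncate call chains. A minimal user-space sketch of the same check follows; the two constants mirror the ptrace.h hunk earlier in this merge, while the frame values in main() are invented for illustration.

#include <stdio.h>

#define STACK_FRAME_OVERHEAD 112  /* ELFv1 minimum frame size */
#define STACK_FRAME_MIN_SIZE  32  /* ELFv2 minimum frame size */

/* Accept a new stack pointer only if it is 16-byte aligned and at
 * least one minimal frame above the previous one. */
static int valid_next_sp(unsigned long sp, unsigned long prev_sp)
{
        if (sp & 0xf)
                return 0;
        return sp >= prev_sp + STACK_FRAME_MIN_SIZE;
}

int main(void)
{
        unsigned long prev = 0x7fff00001000UL;

        /* A 32-byte ELFv2 frame passes the MIN_SIZE check but would
         * have failed a check against STACK_FRAME_OVERHEAD. */
        printf("min_size check: %d, old overhead check: %d\n",
               valid_next_sp(prev + 32, prev),
               (int)((prev + 32) >= prev + STACK_FRAME_OVERHEAD));
        return 0;
}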
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c index 97ac8dc33667..5e1ed1575aab 100644 --- a/arch/powerpc/platforms/powernv/opal-hmi.c +++ b/arch/powerpc/platforms/powernv/opal-hmi.c | |||
@@ -28,6 +28,7 @@ | |||
28 | 28 | ||
29 | #include <asm/opal.h> | 29 | #include <asm/opal.h> |
30 | #include <asm/cputable.h> | 30 | #include <asm/cputable.h> |
31 | #include <asm/machdep.h> | ||
31 | 32 | ||
32 | static int opal_hmi_handler_nb_init; | 33 | static int opal_hmi_handler_nb_init; |
33 | struct OpalHmiEvtNode { | 34 | struct OpalHmiEvtNode { |
@@ -185,4 +186,4 @@ static int __init opal_hmi_handler_init(void) | |||
185 | } | 186 | } |
186 | return 0; | 187 | return 0; |
187 | } | 188 | } |
188 | subsys_initcall(opal_hmi_handler_init); | 189 | machine_subsys_initcall(powernv, opal_hmi_handler_init); |
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index c904583baf4b..17ee193960a0 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c | |||
@@ -113,7 +113,7 @@ out: | |||
113 | static int pseries_remove_mem_node(struct device_node *np) | 113 | static int pseries_remove_mem_node(struct device_node *np) |
114 | { | 114 | { |
115 | const char *type; | 115 | const char *type; |
116 | const unsigned int *regs; | 116 | const __be32 *regs; |
117 | unsigned long base; | 117 | unsigned long base; |
118 | unsigned int lmb_size; | 118 | unsigned int lmb_size; |
119 | int ret = -EINVAL; | 119 | int ret = -EINVAL; |
@@ -132,8 +132,8 @@ static int pseries_remove_mem_node(struct device_node *np) | |||
132 | if (!regs) | 132 | if (!regs) |
133 | return ret; | 133 | return ret; |
134 | 134 | ||
135 | base = *(unsigned long *)regs; | 135 | base = be64_to_cpu(*(unsigned long *)regs); |
136 | lmb_size = regs[3]; | 136 | lmb_size = be32_to_cpu(regs[3]); |
137 | 137 | ||
138 | pseries_remove_memblock(base, lmb_size); | 138 | pseries_remove_memblock(base, lmb_size); |
139 | return 0; | 139 | return 0; |
@@ -153,7 +153,7 @@ static inline int pseries_remove_mem_node(struct device_node *np) | |||
153 | static int pseries_add_mem_node(struct device_node *np) | 153 | static int pseries_add_mem_node(struct device_node *np) |
154 | { | 154 | { |
155 | const char *type; | 155 | const char *type; |
156 | const unsigned int *regs; | 156 | const __be32 *regs; |
157 | unsigned long base; | 157 | unsigned long base; |
158 | unsigned int lmb_size; | 158 | unsigned int lmb_size; |
159 | int ret = -EINVAL; | 159 | int ret = -EINVAL; |
@@ -172,8 +172,8 @@ static int pseries_add_mem_node(struct device_node *np) | |||
172 | if (!regs) | 172 | if (!regs) |
173 | return ret; | 173 | return ret; |
174 | 174 | ||
175 | base = *(unsigned long *)regs; | 175 | base = be64_to_cpu(*(unsigned long *)regs); |
176 | lmb_size = regs[3]; | 176 | lmb_size = be32_to_cpu(regs[3]); |
177 | 177 | ||
178 | /* | 178 | /* |
179 | * Update memory region to represent the memory add | 179 | * Update memory region to represent the memory add |
@@ -187,14 +187,14 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr) | |||
187 | struct of_drconf_cell *new_drmem, *old_drmem; | 187 | struct of_drconf_cell *new_drmem, *old_drmem; |
188 | unsigned long memblock_size; | 188 | unsigned long memblock_size; |
189 | u32 entries; | 189 | u32 entries; |
190 | u32 *p; | 190 | __be32 *p; |
191 | int i, rc = -EINVAL; | 191 | int i, rc = -EINVAL; |
192 | 192 | ||
193 | memblock_size = pseries_memory_block_size(); | 193 | memblock_size = pseries_memory_block_size(); |
194 | if (!memblock_size) | 194 | if (!memblock_size) |
195 | return -EINVAL; | 195 | return -EINVAL; |
196 | 196 | ||
197 | p = (u32 *) pr->old_prop->value; | 197 | p = (__be32 *) pr->old_prop->value; |
198 | if (!p) | 198 | if (!p) |
199 | return -EINVAL; | 199 | return -EINVAL; |
200 | 200 | ||
@@ -203,28 +203,30 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr) | |||
203 | * entries. Get the number of entries and skip to the array of | 203 | * entries. Get the number of entries and skip to the array of |
204 | * of_drconf_cell's. | 204 | * of_drconf_cell's. |
205 | */ | 205 | */ |
206 | entries = *p++; | 206 | entries = be32_to_cpu(*p++); |
207 | old_drmem = (struct of_drconf_cell *)p; | 207 | old_drmem = (struct of_drconf_cell *)p; |
208 | 208 | ||
209 | p = (u32 *)pr->prop->value; | 209 | p = (__be32 *)pr->prop->value; |
210 | p++; | 210 | p++; |
211 | new_drmem = (struct of_drconf_cell *)p; | 211 | new_drmem = (struct of_drconf_cell *)p; |
212 | 212 | ||
213 | for (i = 0; i < entries; i++) { | 213 | for (i = 0; i < entries; i++) { |
214 | if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) && | 214 | if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) && |
215 | (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) { | 215 | (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) { |
216 | rc = pseries_remove_memblock(old_drmem[i].base_addr, | 216 | rc = pseries_remove_memblock( |
217 | be64_to_cpu(old_drmem[i].base_addr), | ||
217 | memblock_size); | 218 | memblock_size); |
218 | break; | 219 | break; |
219 | } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) && | 220 | } else if ((!(be32_to_cpu(old_drmem[i].flags) & |
220 | (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) { | 221 | DRCONF_MEM_ASSIGNED)) && |
221 | rc = memblock_add(old_drmem[i].base_addr, | 222 | (be32_to_cpu(new_drmem[i].flags) & |
223 | DRCONF_MEM_ASSIGNED)) { | ||
224 | rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr), | ||
222 | memblock_size); | 225 | memblock_size); |
223 | rc = (rc < 0) ? -EINVAL : 0; | 226 | rc = (rc < 0) ? -EINVAL : 0; |
224 | break; | 227 | break; |
225 | } | 228 | } |
226 | } | 229 | } |
227 | |||
228 | return rc; | 230 | return rc; |
229 | } | 231 | } |
230 | 232 | ||
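The pseries hotplug hunks above stop treating device-tree property cells as host-endian integers and decode them explicitly as big-endian, which is what makes the code correct on little-endian kernels. Below is an illustrative stand-alone sketch of that decode, using manual byte assembly in place of the kernel's be32_to_cpu()/be64_to_cpu(); the sample buffer layout (64-bit base followed by the size in the fourth 32-bit cell) mirrors the hunk, but the values are invented.

#include <stdint.h>
#include <stdio.h>

/* Assemble big-endian cells by hand; in the kernel this is done with
 * be32_to_cpu()/be64_to_cpu() on __be32/__be64 values. */
static uint32_t get_be32(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static uint64_t get_be64(const uint8_t *p)
{
        return ((uint64_t)get_be32(p) << 32) | get_be32(p + 4);
}

int main(void)
{
        /* "reg"-style property: 64-bit base, then two 32-bit cells with
         * the block size in the fourth cell (byte offset 12). */
        uint8_t regs[16] = { 0, 0, 0, 1, 0, 0, 0, 0,        /* base = 0x100000000 */
                             0, 0, 0, 0, 0x10, 0, 0, 0 };   /* size = 0x10000000 */
        uint64_t base = get_be64(regs);
        uint32_t lmb_size = get_be32(regs + 12);

        printf("base=0x%llx size=0x%x\n", (unsigned long long)base, lmb_size);
        return 0;
}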
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 2fcccc0c997c..c81661e756a0 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h | |||
@@ -17,12 +17,12 @@ | |||
17 | #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \ | 17 | #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \ |
18 | sizeof(struct ipl_block_fcp)) | 18 | sizeof(struct ipl_block_fcp)) |
19 | 19 | ||
20 | #define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 8) | 20 | #define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16) |
21 | 21 | ||
22 | #define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \ | 22 | #define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \ |
23 | sizeof(struct ipl_block_ccw)) | 23 | sizeof(struct ipl_block_ccw)) |
24 | 24 | ||
25 | #define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 8) | 25 | #define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16) |
26 | 26 | ||
27 | #define IPL_MAX_SUPPORTED_VERSION (0) | 27 | #define IPL_MAX_SUPPORTED_VERSION (0) |
28 | 28 | ||
@@ -38,10 +38,11 @@ struct ipl_list_hdr { | |||
38 | u8 pbt; | 38 | u8 pbt; |
39 | u8 flags; | 39 | u8 flags; |
40 | u16 reserved2; | 40 | u16 reserved2; |
41 | u8 loadparm[8]; | ||
41 | } __attribute__((packed)); | 42 | } __attribute__((packed)); |
42 | 43 | ||
43 | struct ipl_block_fcp { | 44 | struct ipl_block_fcp { |
44 | u8 reserved1[313-1]; | 45 | u8 reserved1[305-1]; |
45 | u8 opt; | 46 | u8 opt; |
46 | u8 reserved2[3]; | 47 | u8 reserved2[3]; |
47 | u16 reserved3; | 48 | u16 reserved3; |
@@ -62,7 +63,6 @@ struct ipl_block_fcp { | |||
62 | offsetof(struct ipl_block_fcp, scp_data))) | 63 | offsetof(struct ipl_block_fcp, scp_data))) |
63 | 64 | ||
64 | struct ipl_block_ccw { | 65 | struct ipl_block_ccw { |
65 | u8 load_parm[8]; | ||
66 | u8 reserved1[84]; | 66 | u8 reserved1[84]; |
67 | u8 reserved2[2]; | 67 | u8 reserved2[2]; |
68 | u16 devno; | 68 | u16 devno; |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index b76317c1f3eb..5efb2fe186e7 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -1127,7 +1127,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, | |||
1127 | unsigned long addr, pte_t *ptep) | 1127 | unsigned long addr, pte_t *ptep) |
1128 | { | 1128 | { |
1129 | pgste_t pgste; | 1129 | pgste_t pgste; |
1130 | pte_t pte; | 1130 | pte_t pte, oldpte; |
1131 | int young; | 1131 | int young; |
1132 | 1132 | ||
1133 | if (mm_has_pgste(vma->vm_mm)) { | 1133 | if (mm_has_pgste(vma->vm_mm)) { |
@@ -1135,12 +1135,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, | |||
1135 | pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); | 1135 | pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); |
1136 | } | 1136 | } |
1137 | 1137 | ||
1138 | pte = *ptep; | 1138 | oldpte = pte = *ptep; |
1139 | ptep_flush_direct(vma->vm_mm, addr, ptep); | 1139 | ptep_flush_direct(vma->vm_mm, addr, ptep); |
1140 | young = pte_young(pte); | 1140 | young = pte_young(pte); |
1141 | pte = pte_mkold(pte); | 1141 | pte = pte_mkold(pte); |
1142 | 1142 | ||
1143 | if (mm_has_pgste(vma->vm_mm)) { | 1143 | if (mm_has_pgste(vma->vm_mm)) { |
1144 | pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm); | ||
1144 | pgste = pgste_set_pte(ptep, pgste, pte); | 1145 | pgste = pgste_set_pte(ptep, pgste, pte); |
1145 | pgste_set_unlock(ptep, pgste); | 1146 | pgste_set_unlock(ptep, pgste); |
1146 | } else | 1147 | } else |
@@ -1330,6 +1331,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, | |||
1330 | ptep_flush_direct(vma->vm_mm, address, ptep); | 1331 | ptep_flush_direct(vma->vm_mm, address, ptep); |
1331 | 1332 | ||
1332 | if (mm_has_pgste(vma->vm_mm)) { | 1333 | if (mm_has_pgste(vma->vm_mm)) { |
1334 | pgste_set_key(ptep, pgste, entry, vma->vm_mm); | ||
1333 | pgste = pgste_set_pte(ptep, pgste, entry); | 1335 | pgste = pgste_set_pte(ptep, pgste, entry); |
1334 | pgste_set_unlock(ptep, pgste); | 1336 | pgste_set_unlock(ptep, pgste); |
1335 | } else | 1337 | } else |
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 22aac5885ba2..39badb9ca0b3 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c | |||
@@ -455,22 +455,6 @@ DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long) | |||
455 | DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long) | 455 | DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long) |
456 | IPL_PARMBLOCK_START->ipl_info.fcp.br_lba); | 456 | IPL_PARMBLOCK_START->ipl_info.fcp.br_lba); |
457 | 457 | ||
458 | static struct attribute *ipl_fcp_attrs[] = { | ||
459 | &sys_ipl_type_attr.attr, | ||
460 | &sys_ipl_device_attr.attr, | ||
461 | &sys_ipl_fcp_wwpn_attr.attr, | ||
462 | &sys_ipl_fcp_lun_attr.attr, | ||
463 | &sys_ipl_fcp_bootprog_attr.attr, | ||
464 | &sys_ipl_fcp_br_lba_attr.attr, | ||
465 | NULL, | ||
466 | }; | ||
467 | |||
468 | static struct attribute_group ipl_fcp_attr_group = { | ||
469 | .attrs = ipl_fcp_attrs, | ||
470 | }; | ||
471 | |||
472 | /* CCW ipl device attributes */ | ||
473 | |||
474 | static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, | 458 | static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, |
475 | struct kobj_attribute *attr, char *page) | 459 | struct kobj_attribute *attr, char *page) |
476 | { | 460 | { |
@@ -487,6 +471,23 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, | |||
487 | static struct kobj_attribute sys_ipl_ccw_loadparm_attr = | 471 | static struct kobj_attribute sys_ipl_ccw_loadparm_attr = |
488 | __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL); | 472 | __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL); |
489 | 473 | ||
474 | static struct attribute *ipl_fcp_attrs[] = { | ||
475 | &sys_ipl_type_attr.attr, | ||
476 | &sys_ipl_device_attr.attr, | ||
477 | &sys_ipl_fcp_wwpn_attr.attr, | ||
478 | &sys_ipl_fcp_lun_attr.attr, | ||
479 | &sys_ipl_fcp_bootprog_attr.attr, | ||
480 | &sys_ipl_fcp_br_lba_attr.attr, | ||
481 | &sys_ipl_ccw_loadparm_attr.attr, | ||
482 | NULL, | ||
483 | }; | ||
484 | |||
485 | static struct attribute_group ipl_fcp_attr_group = { | ||
486 | .attrs = ipl_fcp_attrs, | ||
487 | }; | ||
488 | |||
489 | /* CCW ipl device attributes */ | ||
490 | |||
490 | static struct attribute *ipl_ccw_attrs_vm[] = { | 491 | static struct attribute *ipl_ccw_attrs_vm[] = { |
491 | &sys_ipl_type_attr.attr, | 492 | &sys_ipl_type_attr.attr, |
492 | &sys_ipl_device_attr.attr, | 493 | &sys_ipl_device_attr.attr, |
@@ -765,28 +766,10 @@ DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n", | |||
765 | DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", | 766 | DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", |
766 | reipl_block_fcp->ipl_info.fcp.devno); | 767 | reipl_block_fcp->ipl_info.fcp.devno); |
767 | 768 | ||
768 | static struct attribute *reipl_fcp_attrs[] = { | ||
769 | &sys_reipl_fcp_device_attr.attr, | ||
770 | &sys_reipl_fcp_wwpn_attr.attr, | ||
771 | &sys_reipl_fcp_lun_attr.attr, | ||
772 | &sys_reipl_fcp_bootprog_attr.attr, | ||
773 | &sys_reipl_fcp_br_lba_attr.attr, | ||
774 | NULL, | ||
775 | }; | ||
776 | |||
777 | static struct attribute_group reipl_fcp_attr_group = { | ||
778 | .attrs = reipl_fcp_attrs, | ||
779 | }; | ||
780 | |||
781 | /* CCW reipl device attributes */ | ||
782 | |||
783 | DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", | ||
784 | reipl_block_ccw->ipl_info.ccw.devno); | ||
785 | |||
786 | static void reipl_get_ascii_loadparm(char *loadparm, | 769 | static void reipl_get_ascii_loadparm(char *loadparm, |
787 | struct ipl_parameter_block *ibp) | 770 | struct ipl_parameter_block *ibp) |
788 | { | 771 | { |
789 | memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN); | 772 | memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN); |
790 | EBCASC(loadparm, LOADPARM_LEN); | 773 | EBCASC(loadparm, LOADPARM_LEN); |
791 | loadparm[LOADPARM_LEN] = 0; | 774 | loadparm[LOADPARM_LEN] = 0; |
792 | strim(loadparm); | 775 | strim(loadparm); |
@@ -821,13 +804,50 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb, | |||
821 | return -EINVAL; | 804 | return -EINVAL; |
822 | } | 805 | } |
823 | /* initialize loadparm with blanks */ | 806 | /* initialize loadparm with blanks */ |
824 | memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN); | 807 | memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN); |
825 | /* copy and convert to ebcdic */ | 808 | /* copy and convert to ebcdic */ |
826 | memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len); | 809 | memcpy(ipb->hdr.loadparm, buf, lp_len); |
827 | ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN); | 810 | ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN); |
828 | return len; | 811 | return len; |
829 | } | 812 | } |
830 | 813 | ||
814 | /* FCP wrapper */ | ||
815 | static ssize_t reipl_fcp_loadparm_show(struct kobject *kobj, | ||
816 | struct kobj_attribute *attr, char *page) | ||
817 | { | ||
818 | return reipl_generic_loadparm_show(reipl_block_fcp, page); | ||
819 | } | ||
820 | |||
821 | static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj, | ||
822 | struct kobj_attribute *attr, | ||
823 | const char *buf, size_t len) | ||
824 | { | ||
825 | return reipl_generic_loadparm_store(reipl_block_fcp, buf, len); | ||
826 | } | ||
827 | |||
828 | static struct kobj_attribute sys_reipl_fcp_loadparm_attr = | ||
829 | __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show, | ||
830 | reipl_fcp_loadparm_store); | ||
831 | |||
832 | static struct attribute *reipl_fcp_attrs[] = { | ||
833 | &sys_reipl_fcp_device_attr.attr, | ||
834 | &sys_reipl_fcp_wwpn_attr.attr, | ||
835 | &sys_reipl_fcp_lun_attr.attr, | ||
836 | &sys_reipl_fcp_bootprog_attr.attr, | ||
837 | &sys_reipl_fcp_br_lba_attr.attr, | ||
838 | &sys_reipl_fcp_loadparm_attr.attr, | ||
839 | NULL, | ||
840 | }; | ||
841 | |||
842 | static struct attribute_group reipl_fcp_attr_group = { | ||
843 | .attrs = reipl_fcp_attrs, | ||
844 | }; | ||
845 | |||
846 | /* CCW reipl device attributes */ | ||
847 | |||
848 | DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", | ||
849 | reipl_block_ccw->ipl_info.ccw.devno); | ||
850 | |||
831 | /* NSS wrapper */ | 851 | /* NSS wrapper */ |
832 | static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, | 852 | static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, |
833 | struct kobj_attribute *attr, char *page) | 853 | struct kobj_attribute *attr, char *page) |
@@ -1125,11 +1145,10 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb) | |||
1125 | /* LOADPARM */ | 1145 | /* LOADPARM */ |
1126 | /* check if read scp info worked and set loadparm */ | 1146 | /* check if read scp info worked and set loadparm */ |
1127 | if (sclp_ipl_info.is_valid) | 1147 | if (sclp_ipl_info.is_valid) |
1128 | memcpy(ipb->ipl_info.ccw.load_parm, | 1148 | memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN); |
1129 | &sclp_ipl_info.loadparm, LOADPARM_LEN); | ||
1130 | else | 1149 | else |
1131 | /* read scp info failed: set empty loadparm (EBCDIC blanks) */ | 1150 | /* read scp info failed: set empty loadparm (EBCDIC blanks) */ |
1132 | memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN); | 1151 | memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN); |
1133 | ipb->hdr.flags = DIAG308_FLAGS_LP_VALID; | 1152 | ipb->hdr.flags = DIAG308_FLAGS_LP_VALID; |
1134 | 1153 | ||
1135 | /* VM PARM */ | 1154 | /* VM PARM */ |
@@ -1251,9 +1270,16 @@ static int __init reipl_fcp_init(void) | |||
1251 | return rc; | 1270 | return rc; |
1252 | } | 1271 | } |
1253 | 1272 | ||
1254 | if (ipl_info.type == IPL_TYPE_FCP) | 1273 | if (ipl_info.type == IPL_TYPE_FCP) { |
1255 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); | 1274 | memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); |
1256 | else { | 1275 | /* |
1276 | * Fix loadparm: There are systems where the (SCSI) LOADPARM | ||
1277 | * is invalid in the SCSI IPL parameter block, so take it | ||
1278 | * always from sclp_ipl_info. | ||
1279 | */ | ||
1280 | memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm, | ||
1281 | LOADPARM_LEN); | ||
1282 | } else { | ||
1257 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; | 1283 | reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; |
1258 | reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; | 1284 | reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; |
1259 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; | 1285 | reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; |
@@ -1864,7 +1890,23 @@ static void __init shutdown_actions_init(void) | |||
1864 | 1890 | ||
1865 | static int __init s390_ipl_init(void) | 1891 | static int __init s390_ipl_init(void) |
1866 | { | 1892 | { |
1893 | char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40}; | ||
1894 | |||
1867 | sclp_get_ipl_info(&sclp_ipl_info); | 1895 | sclp_get_ipl_info(&sclp_ipl_info); |
1896 | /* | ||
1897 | * Fix loadparm: There are systems where the (SCSI) LOADPARM | ||
1898 | * returned by read SCP info is invalid (contains EBCDIC blanks) | ||
1899 | * when the system has been booted via diag308. In that case we use | ||
1900 | * the value from diag308, if available. | ||
1901 | * | ||
1902 | * There are also systems where diag308 store does not work in | ||
1903 | * case the system is booted from HMC. Fortunately in this case | ||
1904 | * READ SCP info provides the correct value. | ||
1905 | */ | ||
1906 | if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 && | ||
1907 | diag308_set_works) | ||
1908 | memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm, | ||
1909 | LOADPARM_LEN); | ||
1868 | shutdown_actions_init(); | 1910 | shutdown_actions_init(); |
1869 | shutdown_triggers_init(); | 1911 | shutdown_triggers_init(); |
1870 | return 0; | 1912 | return 0; |
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S index 65fc3979c2f1..7cf18f8d4cb4 100644 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ b/arch/s390/kernel/vdso32/clock_gettime.S | |||
@@ -22,13 +22,11 @@ __kernel_clock_gettime: | |||
22 | basr %r5,0 | 22 | basr %r5,0 |
23 | 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ | 23 | 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ |
24 | chi %r2,__CLOCK_REALTIME | 24 | chi %r2,__CLOCK_REALTIME |
25 | je 10f | 25 | je 11f |
26 | chi %r2,__CLOCK_MONOTONIC | 26 | chi %r2,__CLOCK_MONOTONIC |
27 | jne 19f | 27 | jne 19f |
28 | 28 | ||
29 | /* CLOCK_MONOTONIC */ | 29 | /* CLOCK_MONOTONIC */ |
30 | ltr %r3,%r3 | ||
31 | jz 9f /* tp == NULL */ | ||
32 | 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ | 30 | 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ |
33 | tml %r4,0x0001 /* pending update ? loop */ | 31 | tml %r4,0x0001 /* pending update ? loop */ |
34 | jnz 1b | 32 | jnz 1b |
@@ -67,12 +65,10 @@ __kernel_clock_gettime: | |||
67 | j 6b | 65 | j 6b |
68 | 8: st %r2,0(%r3) /* store tp->tv_sec */ | 66 | 8: st %r2,0(%r3) /* store tp->tv_sec */ |
69 | st %r1,4(%r3) /* store tp->tv_nsec */ | 67 | st %r1,4(%r3) /* store tp->tv_nsec */ |
70 | 9: lhi %r2,0 | 68 | lhi %r2,0 |
71 | br %r14 | 69 | br %r14 |
72 | 70 | ||
73 | /* CLOCK_REALTIME */ | 71 | /* CLOCK_REALTIME */ |
74 | 10: ltr %r3,%r3 /* tp == NULL */ | ||
75 | jz 18f | ||
76 | 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ | 72 | 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ |
77 | tml %r4,0x0001 /* pending update ? loop */ | 73 | tml %r4,0x0001 /* pending update ? loop */ |
78 | jnz 11b | 74 | jnz 11b |
@@ -111,7 +107,7 @@ __kernel_clock_gettime: | |||
111 | j 15b | 107 | j 15b |
112 | 17: st %r2,0(%r3) /* store tp->tv_sec */ | 108 | 17: st %r2,0(%r3) /* store tp->tv_sec */ |
113 | st %r1,4(%r3) /* store tp->tv_nsec */ | 109 | st %r1,4(%r3) /* store tp->tv_nsec */ |
114 | 18: lhi %r2,0 | 110 | lhi %r2,0 |
115 | br %r14 | 111 | br %r14 |
116 | 112 | ||
117 | /* Fallback to system call */ | 113 | /* Fallback to system call */ |
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 91940ed33a4a..3f34e09db5f4 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S | |||
@@ -21,7 +21,7 @@ __kernel_clock_gettime: | |||
21 | .cfi_startproc | 21 | .cfi_startproc |
22 | larl %r5,_vdso_data | 22 | larl %r5,_vdso_data |
23 | cghi %r2,__CLOCK_REALTIME | 23 | cghi %r2,__CLOCK_REALTIME |
24 | je 4f | 24 | je 5f |
25 | cghi %r2,__CLOCK_THREAD_CPUTIME_ID | 25 | cghi %r2,__CLOCK_THREAD_CPUTIME_ID |
26 | je 9f | 26 | je 9f |
27 | cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ | 27 | cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ |
@@ -30,8 +30,6 @@ __kernel_clock_gettime: | |||
30 | jne 12f | 30 | jne 12f |
31 | 31 | ||
32 | /* CLOCK_MONOTONIC */ | 32 | /* CLOCK_MONOTONIC */ |
33 | ltgr %r3,%r3 | ||
34 | jz 3f /* tp == NULL */ | ||
35 | 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ | 33 | 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ |
36 | tmll %r4,0x0001 /* pending update ? loop */ | 34 | tmll %r4,0x0001 /* pending update ? loop */ |
37 | jnz 0b | 35 | jnz 0b |
@@ -53,12 +51,10 @@ __kernel_clock_gettime: | |||
53 | j 1b | 51 | j 1b |
54 | 2: stg %r0,0(%r3) /* store tp->tv_sec */ | 52 | 2: stg %r0,0(%r3) /* store tp->tv_sec */ |
55 | stg %r1,8(%r3) /* store tp->tv_nsec */ | 53 | stg %r1,8(%r3) /* store tp->tv_nsec */ |
56 | 3: lghi %r2,0 | 54 | lghi %r2,0 |
57 | br %r14 | 55 | br %r14 |
58 | 56 | ||
59 | /* CLOCK_REALTIME */ | 57 | /* CLOCK_REALTIME */ |
60 | 4: ltr %r3,%r3 /* tp == NULL */ | ||
61 | jz 8f | ||
62 | 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ | 58 | 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ |
63 | tmll %r4,0x0001 /* pending update ? loop */ | 59 | tmll %r4,0x0001 /* pending update ? loop */ |
64 | jnz 5b | 60 | jnz 5b |
@@ -80,7 +76,7 @@ __kernel_clock_gettime: | |||
80 | j 6b | 76 | j 6b |
81 | 7: stg %r0,0(%r3) /* store tp->tv_sec */ | 77 | 7: stg %r0,0(%r3) /* store tp->tv_sec */ |
82 | stg %r1,8(%r3) /* store tp->tv_nsec */ | 78 | stg %r1,8(%r3) /* store tp->tv_nsec */ |
83 | 8: lghi %r2,0 | 79 | lghi %r2,0 |
84 | br %r14 | 80 | br %r14 |
85 | 81 | ||
86 | /* CLOCK_THREAD_CPUTIME_ID for this thread */ | 82 | /* CLOCK_THREAD_CPUTIME_ID for this thread */ |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ce81eb2ab76a..81b0e11521e4 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -1317,19 +1317,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1317 | return -EINVAL; | 1317 | return -EINVAL; |
1318 | } | 1318 | } |
1319 | 1319 | ||
1320 | switch (kvm_run->exit_reason) { | ||
1321 | case KVM_EXIT_S390_SIEIC: | ||
1322 | case KVM_EXIT_UNKNOWN: | ||
1323 | case KVM_EXIT_INTR: | ||
1324 | case KVM_EXIT_S390_RESET: | ||
1325 | case KVM_EXIT_S390_UCONTROL: | ||
1326 | case KVM_EXIT_S390_TSCH: | ||
1327 | case KVM_EXIT_DEBUG: | ||
1328 | break; | ||
1329 | default: | ||
1330 | BUG(); | ||
1331 | } | ||
1332 | |||
1333 | vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; | 1320 | vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; |
1334 | vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; | 1321 | vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; |
1335 | if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) { | 1322 | if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) { |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 19daa53a3da4..5404a6261db9 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -986,11 +986,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, | |||
986 | pte_t *ptep; | 986 | pte_t *ptep; |
987 | 987 | ||
988 | down_read(&mm->mmap_sem); | 988 | down_read(&mm->mmap_sem); |
989 | retry: | ||
989 | ptep = get_locked_pte(current->mm, addr, &ptl); | 990 | ptep = get_locked_pte(current->mm, addr, &ptl); |
990 | if (unlikely(!ptep)) { | 991 | if (unlikely(!ptep)) { |
991 | up_read(&mm->mmap_sem); | 992 | up_read(&mm->mmap_sem); |
992 | return -EFAULT; | 993 | return -EFAULT; |
993 | } | 994 | } |
995 | if (!(pte_val(*ptep) & _PAGE_INVALID) && | ||
996 | (pte_val(*ptep) & _PAGE_PROTECT)) { | ||
997 | pte_unmap_unlock(*ptep, ptl); | ||
998 | if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) { | ||
999 | up_read(&mm->mmap_sem); | ||
1000 | return -EFAULT; | ||
1001 | } | ||
1002 | goto retry; | ||
1003 | } | ||
994 | 1004 | ||
995 | new = old = pgste_get_lock(ptep); | 1005 | new = old = pgste_get_lock(ptep); |
996 | pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT | | 1006 | pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT | |
diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c index bf8daf9d9c9b..37458f38b220 100644 --- a/arch/sh/mm/gup.c +++ b/arch/sh/mm/gup.c | |||
@@ -105,6 +105,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
105 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); | 105 | VM_BUG_ON(!pfn_valid(pte_pfn(pte))); |
106 | page = pte_page(pte); | 106 | page = pte_page(pte); |
107 | get_page(page); | 107 | get_page(page); |
108 | __flush_anon_page(page, addr); | ||
109 | flush_dcache_page(page); | ||
108 | pages[*nr] = page; | 110 | pages[*nr] = page; |
109 | (*nr)++; | 111 | (*nr)++; |
110 | 112 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 778178f4c7d1..36327438caf0 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -23,6 +23,7 @@ config X86 | |||
23 | def_bool y | 23 | def_bool y |
24 | select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI | 24 | select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI |
25 | select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS | 25 | select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS |
26 | select ARCH_HAS_FAST_MULTIPLIER | ||
26 | select ARCH_MIGHT_HAVE_PC_PARPORT | 27 | select ARCH_MIGHT_HAVE_PC_PARPORT |
27 | select ARCH_MIGHT_HAVE_PC_SERIO | 28 | select ARCH_MIGHT_HAVE_PC_SERIO |
28 | select HAVE_AOUT if X86_32 | 29 | select HAVE_AOUT if X86_32 |
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index afcd35d331de..cfe3b954d5e4 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
@@ -497,8 +497,6 @@ static __always_inline int fls64(__u64 x) | |||
497 | 497 | ||
498 | #include <asm-generic/bitops/sched.h> | 498 | #include <asm-generic/bitops/sched.h> |
499 | 499 | ||
500 | #define ARCH_HAS_FAST_MULTIPLIER 1 | ||
501 | |||
502 | #include <asm/arch_hweight.h> | 500 | #include <asm/arch_hweight.h> |
503 | 501 | ||
504 | #include <asm-generic/bitops/const_hweight.h> | 502 | #include <asm-generic/bitops/const_hweight.h> |
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 5be9063545d2..3874693c0e53 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h | |||
@@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512]; | |||
19 | extern pmd_t level2_kernel_pgt[512]; | 19 | extern pmd_t level2_kernel_pgt[512]; |
20 | extern pmd_t level2_fixmap_pgt[512]; | 20 | extern pmd_t level2_fixmap_pgt[512]; |
21 | extern pmd_t level2_ident_pgt[512]; | 21 | extern pmd_t level2_ident_pgt[512]; |
22 | extern pte_t level1_fixmap_pgt[512]; | ||
22 | extern pgd_t init_level4_pgt[]; | 23 | extern pgd_t init_level4_pgt[]; |
23 | 24 | ||
24 | #define swapper_pg_dir init_level4_pgt | 25 | #define swapper_pg_dir init_level4_pgt |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index e8a1201c3293..16fb0099b7f2 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, | |||
1866 | * | 1866 | * |
1867 | * We can construct this by grafting the Xen provided pagetable into | 1867 | * We can construct this by grafting the Xen provided pagetable into |
1868 | * head_64.S's preconstructed pagetables. We copy the Xen L2's into | 1868 | * head_64.S's preconstructed pagetables. We copy the Xen L2's into |
1869 | * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This | 1869 | * level2_ident_pgt, and level2_kernel_pgt. This means that only the |
1870 | * means that only the kernel has a physical mapping to start with - | 1870 | * kernel has a physical mapping to start with - but that's enough to |
1871 | * but that's enough to get __va working. We need to fill in the rest | 1871 | * get __va working. We need to fill in the rest of the physical |
1872 | * of the physical mapping once some sort of allocator has been set | 1872 | * mapping once some sort of allocator has been set up. NOTE: for |
1873 | * up. | 1873 | * PVH, the page tables are native. |
1874 | * NOTE: for PVH, the page tables are native. | ||
1875 | */ | 1874 | */ |
1876 | void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | 1875 | void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) |
1877 | { | 1876 | { |
@@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
1902 | /* L3_i[0] -> level2_ident_pgt */ | 1901 | /* L3_i[0] -> level2_ident_pgt */ |
1903 | convert_pfn_mfn(level3_ident_pgt); | 1902 | convert_pfn_mfn(level3_ident_pgt); |
1904 | /* L3_k[510] -> level2_kernel_pgt | 1903 | /* L3_k[510] -> level2_kernel_pgt |
1905 | * L3_i[511] -> level2_fixmap_pgt */ | 1904 | * L3_k[511] -> level2_fixmap_pgt */ |
1906 | convert_pfn_mfn(level3_kernel_pgt); | 1905 | convert_pfn_mfn(level3_kernel_pgt); |
1906 | |||
1907 | /* L3_k[511][506] -> level1_fixmap_pgt */ | ||
1908 | convert_pfn_mfn(level2_fixmap_pgt); | ||
1907 | } | 1909 | } |
1908 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ | 1910 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ |
1909 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); | 1911 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); |
@@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
1913 | addr[1] = (unsigned long)l3; | 1915 | addr[1] = (unsigned long)l3; |
1914 | addr[2] = (unsigned long)l2; | 1916 | addr[2] = (unsigned long)l2; |
1915 | /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem: | 1917 | /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem: |
1916 | * Both L4[272][0] and L4[511][511] have entries that point to the same | 1918 | * Both L4[272][0] and L4[511][510] have entries that point to the same |
1917 | * L2 (PMD) tables. Meaning that if you modify it in __va space | 1919 | * L2 (PMD) tables. Meaning that if you modify it in __va space |
1918 | * it will be also modified in the __ka space! (But if you just | 1920 | * it will be also modified in the __ka space! (But if you just |
1919 | * modify the PMD table to point to other PTE's or none, then you | 1921 | * modify the PMD table to point to other PTE's or none, then you |
1920 | * are OK - which is what cleanup_highmap does) */ | 1922 | * are OK - which is what cleanup_highmap does) */ |
1921 | copy_page(level2_ident_pgt, l2); | 1923 | copy_page(level2_ident_pgt, l2); |
1922 | /* Graft it onto L4[511][511] */ | 1924 | /* Graft it onto L4[511][510] */ |
1923 | copy_page(level2_kernel_pgt, l2); | 1925 | copy_page(level2_kernel_pgt, l2); |
1924 | 1926 | ||
1925 | /* Get [511][510] and graft that in level2_fixmap_pgt */ | ||
1926 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd); | ||
1927 | l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud); | ||
1928 | copy_page(level2_fixmap_pgt, l2); | ||
1929 | /* Note that we don't do anything with level1_fixmap_pgt which | ||
1930 | * we don't need. */ | ||
1931 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { | 1927 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
1932 | /* Make pagetable pieces RO */ | 1928 | /* Make pagetable pieces RO */ |
1933 | set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); | 1929 | set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); |
@@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
1937 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); | 1933 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); |
1938 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); | 1934 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); |
1939 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); | 1935 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); |
1936 | set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); | ||
1940 | 1937 | ||
1941 | /* Pin down new L4 */ | 1938 | /* Pin down new L4 */ |
1942 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, | 1939 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, |
diff --git a/block/blk-merge.c b/block/blk-merge.c index 54535831f1e1..77881798f793 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c | |||
@@ -10,10 +10,11 @@ | |||
10 | #include "blk.h" | 10 | #include "blk.h" |
11 | 11 | ||
12 | static unsigned int __blk_recalc_rq_segments(struct request_queue *q, | 12 | static unsigned int __blk_recalc_rq_segments(struct request_queue *q, |
13 | struct bio *bio) | 13 | struct bio *bio, |
14 | bool no_sg_merge) | ||
14 | { | 15 | { |
15 | struct bio_vec bv, bvprv = { NULL }; | 16 | struct bio_vec bv, bvprv = { NULL }; |
16 | int cluster, high, highprv = 1, no_sg_merge; | 17 | int cluster, high, highprv = 1; |
17 | unsigned int seg_size, nr_phys_segs; | 18 | unsigned int seg_size, nr_phys_segs; |
18 | struct bio *fbio, *bbio; | 19 | struct bio *fbio, *bbio; |
19 | struct bvec_iter iter; | 20 | struct bvec_iter iter; |
@@ -35,7 +36,6 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q, | |||
35 | cluster = blk_queue_cluster(q); | 36 | cluster = blk_queue_cluster(q); |
36 | seg_size = 0; | 37 | seg_size = 0; |
37 | nr_phys_segs = 0; | 38 | nr_phys_segs = 0; |
38 | no_sg_merge = test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags); | ||
39 | high = 0; | 39 | high = 0; |
40 | for_each_bio(bio) { | 40 | for_each_bio(bio) { |
41 | bio_for_each_segment(bv, bio, iter) { | 41 | bio_for_each_segment(bv, bio, iter) { |
@@ -88,18 +88,23 @@ new_segment: | |||
88 | 88 | ||
89 | void blk_recalc_rq_segments(struct request *rq) | 89 | void blk_recalc_rq_segments(struct request *rq) |
90 | { | 90 | { |
91 | rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio); | 91 | bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE, |
92 | &rq->q->queue_flags); | ||
93 | |||
94 | rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, | ||
95 | no_sg_merge); | ||
92 | } | 96 | } |
93 | 97 | ||
94 | void blk_recount_segments(struct request_queue *q, struct bio *bio) | 98 | void blk_recount_segments(struct request_queue *q, struct bio *bio) |
95 | { | 99 | { |
96 | if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags)) | 100 | if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) && |
101 | bio->bi_vcnt < queue_max_segments(q)) | ||
97 | bio->bi_phys_segments = bio->bi_vcnt; | 102 | bio->bi_phys_segments = bio->bi_vcnt; |
98 | else { | 103 | else { |
99 | struct bio *nxt = bio->bi_next; | 104 | struct bio *nxt = bio->bi_next; |
100 | 105 | ||
101 | bio->bi_next = NULL; | 106 | bio->bi_next = NULL; |
102 | bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio); | 107 | bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false); |
103 | bio->bi_next = nxt; | 108 | bio->bi_next = nxt; |
104 | } | 109 | } |
105 | 110 | ||
diff --git a/block/blk-mq.c b/block/blk-mq.c index 4aac82615a46..383ea0cb1f0a 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -1321,6 +1321,7 @@ static void blk_mq_free_rq_map(struct blk_mq_tag_set *set, | |||
1321 | continue; | 1321 | continue; |
1322 | set->ops->exit_request(set->driver_data, tags->rqs[i], | 1322 | set->ops->exit_request(set->driver_data, tags->rqs[i], |
1323 | hctx_idx, i); | 1323 | hctx_idx, i); |
1324 | tags->rqs[i] = NULL; | ||
1324 | } | 1325 | } |
1325 | } | 1326 | } |
1326 | 1327 | ||
@@ -1354,8 +1355,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, | |||
1354 | 1355 | ||
1355 | INIT_LIST_HEAD(&tags->page_list); | 1356 | INIT_LIST_HEAD(&tags->page_list); |
1356 | 1357 | ||
1357 | tags->rqs = kmalloc_node(set->queue_depth * sizeof(struct request *), | 1358 | tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *), |
1358 | GFP_KERNEL, set->numa_node); | 1359 | GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, |
1360 | set->numa_node); | ||
1359 | if (!tags->rqs) { | 1361 | if (!tags->rqs) { |
1360 | blk_mq_free_tags(tags); | 1362 | blk_mq_free_tags(tags); |
1361 | return NULL; | 1363 | return NULL; |
@@ -1379,8 +1381,9 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, | |||
1379 | this_order--; | 1381 | this_order--; |
1380 | 1382 | ||
1381 | do { | 1383 | do { |
1382 | page = alloc_pages_node(set->numa_node, GFP_KERNEL, | 1384 | page = alloc_pages_node(set->numa_node, |
1383 | this_order); | 1385 | GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY, |
1386 | this_order); | ||
1384 | if (page) | 1387 | if (page) |
1385 | break; | 1388 | break; |
1386 | if (!this_order--) | 1389 | if (!this_order--) |
@@ -1404,8 +1407,10 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, | |||
1404 | if (set->ops->init_request) { | 1407 | if (set->ops->init_request) { |
1405 | if (set->ops->init_request(set->driver_data, | 1408 | if (set->ops->init_request(set->driver_data, |
1406 | tags->rqs[i], hctx_idx, i, | 1409 | tags->rqs[i], hctx_idx, i, |
1407 | set->numa_node)) | 1410 | set->numa_node)) { |
1411 | tags->rqs[i] = NULL; | ||
1408 | goto fail; | 1412 | goto fail; |
1413 | } | ||
1409 | } | 1414 | } |
1410 | 1415 | ||
1411 | p += rq_size; | 1416 | p += rq_size; |
@@ -1416,7 +1421,6 @@ static struct blk_mq_tags *blk_mq_init_rq_map(struct blk_mq_tag_set *set, | |||
1416 | return tags; | 1421 | return tags; |
1417 | 1422 | ||
1418 | fail: | 1423 | fail: |
1419 | pr_warn("%s: failed to allocate requests\n", __func__); | ||
1420 | blk_mq_free_rq_map(set, tags, hctx_idx); | 1424 | blk_mq_free_rq_map(set, tags, hctx_idx); |
1421 | return NULL; | 1425 | return NULL; |
1422 | } | 1426 | } |
@@ -1936,6 +1940,61 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, | |||
1936 | return NOTIFY_OK; | 1940 | return NOTIFY_OK; |
1937 | } | 1941 | } |
1938 | 1942 | ||
1943 | static int __blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) | ||
1944 | { | ||
1945 | int i; | ||
1946 | |||
1947 | for (i = 0; i < set->nr_hw_queues; i++) { | ||
1948 | set->tags[i] = blk_mq_init_rq_map(set, i); | ||
1949 | if (!set->tags[i]) | ||
1950 | goto out_unwind; | ||
1951 | } | ||
1952 | |||
1953 | return 0; | ||
1954 | |||
1955 | out_unwind: | ||
1956 | while (--i >= 0) | ||
1957 | blk_mq_free_rq_map(set, set->tags[i], i); | ||
1958 | |||
1959 | set->tags = NULL; | ||
1960 | return -ENOMEM; | ||
1961 | } | ||
1962 | |||
1963 | /* | ||
1964 | * Allocate the request maps associated with this tag_set. Note that this | ||
1965 | * may reduce the depth asked for, if memory is tight. set->queue_depth | ||
1966 | * will be updated to reflect the allocated depth. | ||
1967 | */ | ||
1968 | static int blk_mq_alloc_rq_maps(struct blk_mq_tag_set *set) | ||
1969 | { | ||
1970 | unsigned int depth; | ||
1971 | int err; | ||
1972 | |||
1973 | depth = set->queue_depth; | ||
1974 | do { | ||
1975 | err = __blk_mq_alloc_rq_maps(set); | ||
1976 | if (!err) | ||
1977 | break; | ||
1978 | |||
1979 | set->queue_depth >>= 1; | ||
1980 | if (set->queue_depth < set->reserved_tags + BLK_MQ_TAG_MIN) { | ||
1981 | err = -ENOMEM; | ||
1982 | break; | ||
1983 | } | ||
1984 | } while (set->queue_depth); | ||
1985 | |||
1986 | if (!set->queue_depth || err) { | ||
1987 | pr_err("blk-mq: failed to allocate request map\n"); | ||
1988 | return -ENOMEM; | ||
1989 | } | ||
1990 | |||
1991 | if (depth != set->queue_depth) | ||
1992 | pr_info("blk-mq: reduced tag depth (%u -> %u)\n", | ||
1993 | depth, set->queue_depth); | ||
1994 | |||
1995 | return 0; | ||
1996 | } | ||
1997 | |||
1939 | /* | 1998 | /* |
1940 | * Alloc a tag set to be associated with one or more request queues. | 1999 | * Alloc a tag set to be associated with one or more request queues. |
1941 | * May fail with EINVAL for various error conditions. May adjust the | 2000 | * May fail with EINVAL for various error conditions. May adjust the |
@@ -1944,8 +2003,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb, | |||
1944 | */ | 2003 | */ |
1945 | int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) | 2004 | int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) |
1946 | { | 2005 | { |
1947 | int i; | ||
1948 | |||
1949 | if (!set->nr_hw_queues) | 2006 | if (!set->nr_hw_queues) |
1950 | return -EINVAL; | 2007 | return -EINVAL; |
1951 | if (!set->queue_depth) | 2008 | if (!set->queue_depth) |
@@ -1966,23 +2023,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) | |||
1966 | sizeof(struct blk_mq_tags *), | 2023 | sizeof(struct blk_mq_tags *), |
1967 | GFP_KERNEL, set->numa_node); | 2024 | GFP_KERNEL, set->numa_node); |
1968 | if (!set->tags) | 2025 | if (!set->tags) |
1969 | goto out; | 2026 | return -ENOMEM; |
1970 | 2027 | ||
1971 | for (i = 0; i < set->nr_hw_queues; i++) { | 2028 | if (blk_mq_alloc_rq_maps(set)) |
1972 | set->tags[i] = blk_mq_init_rq_map(set, i); | 2029 | goto enomem; |
1973 | if (!set->tags[i]) | ||
1974 | goto out_unwind; | ||
1975 | } | ||
1976 | 2030 | ||
1977 | mutex_init(&set->tag_list_lock); | 2031 | mutex_init(&set->tag_list_lock); |
1978 | INIT_LIST_HEAD(&set->tag_list); | 2032 | INIT_LIST_HEAD(&set->tag_list); |
1979 | 2033 | ||
1980 | return 0; | 2034 | return 0; |
1981 | 2035 | enomem: | |
1982 | out_unwind: | 2036 | kfree(set->tags); |
1983 | while (--i >= 0) | 2037 | set->tags = NULL; |
1984 | blk_mq_free_rq_map(set, set->tags[i], i); | ||
1985 | out: | ||
1986 | return -ENOMEM; | 2038 | return -ENOMEM; |
1987 | } | 2039 | } |
1988 | EXPORT_SYMBOL(blk_mq_alloc_tag_set); | 2040 | EXPORT_SYMBOL(blk_mq_alloc_tag_set); |
@@ -1997,6 +2049,7 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set) | |||
1997 | } | 2049 | } |
1998 | 2050 | ||
1999 | kfree(set->tags); | 2051 | kfree(set->tags); |
2052 | set->tags = NULL; | ||
2000 | } | 2053 | } |
2001 | EXPORT_SYMBOL(blk_mq_free_tag_set); | 2054 | EXPORT_SYMBOL(blk_mq_free_tag_set); |
2002 | 2055 | ||
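The blk-mq changes above replace a hard failure in blk_mq_alloc_tag_set() with a fallback: the request maps are retried with the queue depth halved until they fit in memory (the __GFP_NOWARN | __GFP_NORETRY flags keep the failed attempts quiet and cheap). Here is a small stand-alone sketch of that retry shape; the stub allocator and depth limits are invented for illustration and do not correspond to real blk-mq interfaces.

#include <stdbool.h>
#include <stdio.h>

#define RESERVED_TAGS 1
#define TAG_MIN       1

/* Stub allocator: pretend only depths of 64 or less can be satisfied. */
static bool alloc_rq_maps(unsigned int depth)
{
        return depth <= 64;
}

static int alloc_with_fallback(unsigned int *depth)
{
        unsigned int asked = *depth;

        do {
                if (alloc_rq_maps(*depth))
                        break;
                *depth >>= 1;                    /* halve and retry */
                if (*depth < RESERVED_TAGS + TAG_MIN)
                        return -1;
        } while (*depth);

        if (!*depth)
                return -1;
        if (*depth != asked)
                printf("reduced tag depth (%u -> %u)\n", asked, *depth);
        return 0;
}

int main(void)
{
        unsigned int depth = 256;

        return alloc_with_fallback(&depth) ? 1 : 0;
}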
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 4db5abf96b9e..17f5c84ce7bf 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -554,8 +554,10 @@ int blk_register_queue(struct gendisk *disk) | |||
554 | * Initialization must be complete by now. Finish the initial | 554 | * Initialization must be complete by now. Finish the initial |
555 | * bypass from queue allocation. | 555 | * bypass from queue allocation. |
556 | */ | 556 | */ |
557 | queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q); | 557 | if (!blk_queue_init_done(q)) { |
558 | blk_queue_bypass_end(q); | 558 | queue_flag_set_unlocked(QUEUE_FLAG_INIT_DONE, q); |
559 | blk_queue_bypass_end(q); | ||
560 | } | ||
559 | 561 | ||
560 | ret = blk_trace_init_sysfs(dev); | 562 | ret = blk_trace_init_sysfs(dev); |
561 | if (ret) | 563 | if (ret) |
diff --git a/block/genhd.c b/block/genhd.c index 791f41943132..09da5e4a8e03 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -28,10 +28,10 @@ struct kobject *block_depr; | |||
28 | /* for extended dynamic devt allocation, currently only one major is used */ | 28 | /* for extended dynamic devt allocation, currently only one major is used */ |
29 | #define NR_EXT_DEVT (1 << MINORBITS) | 29 | #define NR_EXT_DEVT (1 << MINORBITS) |
30 | 30 | ||
31 | /* For extended devt allocation. ext_devt_mutex prevents look up | 31 | /* For extended devt allocation. ext_devt_lock prevents look up |
32 | * results from going away underneath its user. | 32 | * results from going away underneath its user. |
33 | */ | 33 | */ |
34 | static DEFINE_MUTEX(ext_devt_mutex); | 34 | static DEFINE_SPINLOCK(ext_devt_lock); |
35 | static DEFINE_IDR(ext_devt_idr); | 35 | static DEFINE_IDR(ext_devt_idr); |
36 | 36 | ||
37 | static struct device_type disk_type; | 37 | static struct device_type disk_type; |
@@ -420,9 +420,13 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt) | |||
420 | } | 420 | } |
421 | 421 | ||
422 | /* allocate ext devt */ | 422 | /* allocate ext devt */ |
423 | mutex_lock(&ext_devt_mutex); | 423 | idr_preload(GFP_KERNEL); |
424 | idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL); | 424 | |
425 | mutex_unlock(&ext_devt_mutex); | 425 | spin_lock(&ext_devt_lock); |
426 | idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT); | ||
427 | spin_unlock(&ext_devt_lock); | ||
428 | |||
429 | idr_preload_end(); | ||
426 | if (idx < 0) | 430 | if (idx < 0) |
427 | return idx == -ENOSPC ? -EBUSY : idx; | 431 | return idx == -ENOSPC ? -EBUSY : idx; |
428 | 432 | ||
@@ -447,9 +451,9 @@ void blk_free_devt(dev_t devt) | |||
447 | return; | 451 | return; |
448 | 452 | ||
449 | if (MAJOR(devt) == BLOCK_EXT_MAJOR) { | 453 | if (MAJOR(devt) == BLOCK_EXT_MAJOR) { |
450 | mutex_lock(&ext_devt_mutex); | 454 | spin_lock(&ext_devt_lock); |
451 | idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); | 455 | idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); |
452 | mutex_unlock(&ext_devt_mutex); | 456 | spin_unlock(&ext_devt_lock); |
453 | } | 457 | } |
454 | } | 458 | } |
455 | 459 | ||
@@ -665,7 +669,6 @@ void del_gendisk(struct gendisk *disk) | |||
665 | sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); | 669 | sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); |
666 | pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); | 670 | pm_runtime_set_memalloc_noio(disk_to_dev(disk), false); |
667 | device_del(disk_to_dev(disk)); | 671 | device_del(disk_to_dev(disk)); |
668 | blk_free_devt(disk_to_dev(disk)->devt); | ||
669 | } | 672 | } |
670 | EXPORT_SYMBOL(del_gendisk); | 673 | EXPORT_SYMBOL(del_gendisk); |
671 | 674 | ||
@@ -690,13 +693,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno) | |||
690 | } else { | 693 | } else { |
691 | struct hd_struct *part; | 694 | struct hd_struct *part; |
692 | 695 | ||
693 | mutex_lock(&ext_devt_mutex); | 696 | spin_lock(&ext_devt_lock); |
694 | part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); | 697 | part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt))); |
695 | if (part && get_disk(part_to_disk(part))) { | 698 | if (part && get_disk(part_to_disk(part))) { |
696 | *partno = part->partno; | 699 | *partno = part->partno; |
697 | disk = part_to_disk(part); | 700 | disk = part_to_disk(part); |
698 | } | 701 | } |
699 | mutex_unlock(&ext_devt_mutex); | 702 | spin_unlock(&ext_devt_lock); |
700 | } | 703 | } |
701 | 704 | ||
702 | return disk; | 705 | return disk; |
@@ -1098,6 +1101,7 @@ static void disk_release(struct device *dev) | |||
1098 | { | 1101 | { |
1099 | struct gendisk *disk = dev_to_disk(dev); | 1102 | struct gendisk *disk = dev_to_disk(dev); |
1100 | 1103 | ||
1104 | blk_free_devt(dev->devt); | ||
1101 | disk_release_events(disk); | 1105 | disk_release_events(disk); |
1102 | kfree(disk->random); | 1106 | kfree(disk->random); |
1103 | disk_replace_part_tbl(disk, NULL); | 1107 | disk_replace_part_tbl(disk, NULL); |
diff --git a/block/partition-generic.c b/block/partition-generic.c index 789cdea05893..0d9e5f97f0a8 100644 --- a/block/partition-generic.c +++ b/block/partition-generic.c | |||
@@ -211,6 +211,7 @@ static const struct attribute_group *part_attr_groups[] = { | |||
211 | static void part_release(struct device *dev) | 211 | static void part_release(struct device *dev) |
212 | { | 212 | { |
213 | struct hd_struct *p = dev_to_part(dev); | 213 | struct hd_struct *p = dev_to_part(dev); |
214 | blk_free_devt(dev->devt); | ||
214 | free_part_stats(p); | 215 | free_part_stats(p); |
215 | free_part_info(p); | 216 | free_part_info(p); |
216 | kfree(p); | 217 | kfree(p); |
@@ -253,7 +254,6 @@ void delete_partition(struct gendisk *disk, int partno) | |||
253 | rcu_assign_pointer(ptbl->last_lookup, NULL); | 254 | rcu_assign_pointer(ptbl->last_lookup, NULL); |
254 | kobject_put(part->holder_dir); | 255 | kobject_put(part->holder_dir); |
255 | device_del(part_to_dev(part)); | 256 | device_del(part_to_dev(part)); |
256 | blk_free_devt(part_devt(part)); | ||
257 | 257 | ||
258 | hd_struct_put(part); | 258 | hd_struct_put(part); |
259 | } | 259 | } |
diff --git a/crypto/asymmetric_keys/public_key.c b/crypto/asymmetric_keys/public_key.c index 97eb001960b9..2f6e4fb1a1ea 100644 --- a/crypto/asymmetric_keys/public_key.c +++ b/crypto/asymmetric_keys/public_key.c | |||
@@ -121,6 +121,7 @@ static int public_key_verify_signature_2(const struct key *key, | |||
121 | struct asymmetric_key_subtype public_key_subtype = { | 121 | struct asymmetric_key_subtype public_key_subtype = { |
122 | .owner = THIS_MODULE, | 122 | .owner = THIS_MODULE, |
123 | .name = "public_key", | 123 | .name = "public_key", |
124 | .name_len = sizeof("public_key") - 1, | ||
124 | .describe = public_key_describe, | 125 | .describe = public_key_describe, |
125 | .destroy = public_key_destroy, | 126 | .destroy = public_key_destroy, |
126 | .verify_signature = public_key_verify_signature_2, | 127 | .verify_signature = public_key_verify_signature_2, |
diff --git a/crypto/asymmetric_keys/verify_pefile.c b/crypto/asymmetric_keys/verify_pefile.c index 79175e6ea0b2..2421f46184ce 100644 --- a/crypto/asymmetric_keys/verify_pefile.c +++ b/crypto/asymmetric_keys/verify_pefile.c | |||
@@ -128,6 +128,7 @@ static int pefile_strip_sig_wrapper(const void *pebuf, | |||
128 | { | 128 | { |
129 | struct win_certificate wrapper; | 129 | struct win_certificate wrapper; |
130 | const u8 *pkcs7; | 130 | const u8 *pkcs7; |
131 | unsigned len; | ||
131 | 132 | ||
132 | if (ctx->sig_len < sizeof(wrapper)) { | 133 | if (ctx->sig_len < sizeof(wrapper)) { |
133 | pr_debug("Signature wrapper too short\n"); | 134 | pr_debug("Signature wrapper too short\n"); |
@@ -154,33 +155,49 @@ static int pefile_strip_sig_wrapper(const void *pebuf, | |||
154 | return -ENOTSUPP; | 155 | return -ENOTSUPP; |
155 | } | 156 | } |
156 | 157 | ||
157 | /* Looks like actual pkcs signature length is in wrapper->length. | 158 | /* It looks like the pkcs signature length in wrapper->length and the |
158 | * size obtained from data dir entries lists the total size of | 159 | * size obtained from the data dir entries, which lists the total size |
159 | * certificate table which is also aligned to octawrod boundary. | 160 | * of certificate table, are both aligned to an octaword boundary, so |
160 | * | 161 | * we may have to deal with some padding. |
161 | * So set signature length field appropriately. | ||
162 | */ | 162 | */ |
163 | ctx->sig_len = wrapper.length; | 163 | ctx->sig_len = wrapper.length; |
164 | ctx->sig_offset += sizeof(wrapper); | 164 | ctx->sig_offset += sizeof(wrapper); |
165 | ctx->sig_len -= sizeof(wrapper); | 165 | ctx->sig_len -= sizeof(wrapper); |
166 | if (ctx->sig_len == 0) { | 166 | if (ctx->sig_len < 4) { |
167 | pr_debug("Signature data missing\n"); | 167 | pr_debug("Signature data missing\n"); |
168 | return -EKEYREJECTED; | 168 | return -EKEYREJECTED; |
169 | } | 169 | } |
170 | 170 | ||
171 | /* What's left should a PKCS#7 cert */ | 171 | /* What's left should be a PKCS#7 cert */ |
172 | pkcs7 = pebuf + ctx->sig_offset; | 172 | pkcs7 = pebuf + ctx->sig_offset; |
173 | if (pkcs7[0] == (ASN1_CONS_BIT | ASN1_SEQ)) { | 173 | if (pkcs7[0] != (ASN1_CONS_BIT | ASN1_SEQ)) |
174 | if (pkcs7[1] == 0x82 && | 174 | goto not_pkcs7; |
175 | pkcs7[2] == (((ctx->sig_len - 4) >> 8) & 0xff) && | 175 | |
176 | pkcs7[3] == ((ctx->sig_len - 4) & 0xff)) | 176 | switch (pkcs7[1]) { |
177 | return 0; | 177 | case 0 ... 0x7f: |
178 | if (pkcs7[1] == 0x80) | 178 | len = pkcs7[1] + 2; |
179 | return 0; | 179 | goto check_len; |
180 | if (pkcs7[1] > 0x82) | 180 | case ASN1_INDEFINITE_LENGTH: |
181 | return -EMSGSIZE; | 181 | return 0; |
182 | case 0x81: | ||
183 | len = pkcs7[2] + 3; | ||
184 | goto check_len; | ||
185 | case 0x82: | ||
186 | len = ((pkcs7[2] << 8) | pkcs7[3]) + 4; | ||
187 | goto check_len; | ||
188 | case 0x83 ... 0xff: | ||
189 | return -EMSGSIZE; | ||
190 | default: | ||
191 | goto not_pkcs7; | ||
182 | } | 192 | } |
183 | 193 | ||
194 | check_len: | ||
195 | if (len <= ctx->sig_len) { | ||
196 | /* There may be padding */ | ||
197 | ctx->sig_len = len; | ||
198 | return 0; | ||
199 | } | ||
200 | not_pkcs7: | ||
184 | pr_debug("Signature data not PKCS#7\n"); | 201 | pr_debug("Signature data not PKCS#7\n"); |
185 | return -ELIBBAD; | 202 | return -ELIBBAD; |
186 | } | 203 | } |
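The rewritten switch on pkcs7[1] above is a standard ASN.1 length decode for the outer SEQUENCE: 0x00-0x7f is the short form, 0x80 is the BER indefinite form, 0x81 and 0x82 are long forms carrying the length in the next one or two bytes, and anything wider is rejected with -EMSGSIZE; the decoded length may be smaller than sig_len because the certificate table is padded to an octaword boundary. A minimal, self-contained sketch of the same decode, using a hypothetical helper name rather than the kernel code:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Return the total size (tag + length field + content) of a DER SEQUENCE
 * starting at buf[0], 0 for the BER indefinite form, or -1 if the header
 * is truncated or uses a length field wider than two bytes.
 */
static long der_total_length(const uint8_t *buf, size_t avail)
{
        if (avail < 2 || buf[0] != 0x30)        /* constructed SEQUENCE tag */
                return -1;

        if (buf[1] < 0x80)                      /* short form: one length byte */
                return (long)buf[1] + 2;
        if (buf[1] == 0x80)                     /* indefinite form (BER only) */
                return 0;
        if (buf[1] == 0x81 && avail >= 3)       /* long form, one length byte */
                return (long)buf[2] + 3;
        if (buf[1] == 0x82 && avail >= 4)       /* long form, two length bytes */
                return (long)((buf[2] << 8) | buf[3]) + 4;

        return -1;                              /* 0x83..0xff, or truncated */
}

int main(void)
{
        /* 0x30 0x82 0x01 0x00: a SEQUENCE with a 256-byte body. */
        const uint8_t hdr[] = { 0x30, 0x82, 0x01, 0x00 };

        printf("total length: %ld\n", der_total_length(hdr, sizeof(hdr)));
        return 0;
}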
diff --git a/drivers/acpi/acpi_cmos_rtc.c b/drivers/acpi/acpi_cmos_rtc.c index 2da8660262e5..81dc75033f15 100644 --- a/drivers/acpi/acpi_cmos_rtc.c +++ b/drivers/acpi/acpi_cmos_rtc.c | |||
@@ -33,7 +33,7 @@ acpi_cmos_rtc_space_handler(u32 function, acpi_physical_address address, | |||
33 | void *handler_context, void *region_context) | 33 | void *handler_context, void *region_context) |
34 | { | 34 | { |
35 | int i; | 35 | int i; |
36 | u8 *value = (u8 *)&value64; | 36 | u8 *value = (u8 *)value64; |
37 | 37 | ||
38 | if (address > 0xff || !value64) | 38 | if (address > 0xff || !value64) |
39 | return AE_BAD_PARAMETER; | 39 | return AE_BAD_PARAMETER; |
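The one-character fix above matters because value64 is already a pointer to the caller's 64-bit value; the old cast took the address of that pointer, so the byte accesses in the handler walked over the pointer variable itself rather than the CMOS value it points to. A small user-space reproduction of the same mistake (hypothetical names, not the ACPI handler):

#include <stdint.h>
#include <stdio.h>

static void show_low_byte(uint64_t *value64)
{
        uint8_t *wrong = (uint8_t *)&value64;   /* bytes of the pointer itself   */
        uint8_t *right = (uint8_t *)value64;    /* bytes of the pointed-to value */

        printf("via &value64: 0x%02x  via value64: 0x%02x\n", wrong[0], right[0]);
}

int main(void)
{
        uint64_t v = 0x1122334455667788ULL;

        show_low_byte(&v);      /* on little-endian, only the second value is v's low byte 0x88 */
        return 0;
}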
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 9dfec48dd4e5..fddc1e86f9d0 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c | |||
@@ -610,7 +610,7 @@ static int acpi_lpss_suspend_late(struct device *dev) | |||
610 | return acpi_dev_suspend_late(dev); | 610 | return acpi_dev_suspend_late(dev); |
611 | } | 611 | } |
612 | 612 | ||
613 | static int acpi_lpss_restore_early(struct device *dev) | 613 | static int acpi_lpss_resume_early(struct device *dev) |
614 | { | 614 | { |
615 | int ret = acpi_dev_resume_early(dev); | 615 | int ret = acpi_dev_resume_early(dev); |
616 | 616 | ||
@@ -650,15 +650,15 @@ static int acpi_lpss_runtime_resume(struct device *dev) | |||
650 | static struct dev_pm_domain acpi_lpss_pm_domain = { | 650 | static struct dev_pm_domain acpi_lpss_pm_domain = { |
651 | .ops = { | 651 | .ops = { |
652 | #ifdef CONFIG_PM_SLEEP | 652 | #ifdef CONFIG_PM_SLEEP |
653 | .suspend_late = acpi_lpss_suspend_late, | ||
654 | .restore_early = acpi_lpss_restore_early, | ||
655 | .prepare = acpi_subsys_prepare, | 653 | .prepare = acpi_subsys_prepare, |
656 | .complete = acpi_subsys_complete, | 654 | .complete = acpi_subsys_complete, |
657 | .suspend = acpi_subsys_suspend, | 655 | .suspend = acpi_subsys_suspend, |
658 | .resume_early = acpi_subsys_resume_early, | 656 | .suspend_late = acpi_lpss_suspend_late, |
657 | .resume_early = acpi_lpss_resume_early, | ||
659 | .freeze = acpi_subsys_freeze, | 658 | .freeze = acpi_subsys_freeze, |
660 | .poweroff = acpi_subsys_suspend, | 659 | .poweroff = acpi_subsys_suspend, |
661 | .poweroff_late = acpi_subsys_suspend_late, | 660 | .poweroff_late = acpi_lpss_suspend_late, |
661 | .restore_early = acpi_lpss_resume_early, | ||
662 | #endif | 662 | #endif |
663 | #ifdef CONFIG_PM_RUNTIME | 663 | #ifdef CONFIG_PM_RUNTIME |
664 | .runtime_suspend = acpi_lpss_runtime_suspend, | 664 | .runtime_suspend = acpi_lpss_runtime_suspend, |
diff --git a/drivers/acpi/acpica/nsprepkg.c b/drivers/acpi/acpica/nsprepkg.c index 68f725839eb6..1b13b921dda9 100644 --- a/drivers/acpi/acpica/nsprepkg.c +++ b/drivers/acpi/acpica/nsprepkg.c | |||
@@ -316,6 +316,45 @@ acpi_ns_check_package(struct acpi_evaluate_info *info, | |||
316 | acpi_ns_check_package_list(info, package, elements, count); | 316 | acpi_ns_check_package_list(info, package, elements, count); |
317 | break; | 317 | break; |
318 | 318 | ||
319 | case ACPI_PTYPE2_UUID_PAIR: | ||
320 | |||
321 | /* The package must contain pairs of (UUID + type) */ | ||
322 | |||
323 | if (count & 1) { | ||
324 | expected_count = count + 1; | ||
325 | goto package_too_small; | ||
326 | } | ||
327 | |||
328 | while (count > 0) { | ||
329 | status = acpi_ns_check_object_type(info, elements, | ||
330 | package->ret_info. | ||
331 | object_type1, 0); | ||
332 | if (ACPI_FAILURE(status)) { | ||
333 | return (status); | ||
334 | } | ||
335 | |||
336 | /* Validate length of the UUID buffer */ | ||
337 | |||
338 | if ((*elements)->buffer.length != 16) { | ||
339 | ACPI_WARN_PREDEFINED((AE_INFO, | ||
340 | info->full_pathname, | ||
341 | info->node_flags, | ||
342 | "Invalid length for UUID Buffer")); | ||
343 | return (AE_AML_OPERAND_VALUE); | ||
344 | } | ||
345 | |||
346 | status = acpi_ns_check_object_type(info, elements + 1, | ||
347 | package->ret_info. | ||
348 | object_type2, 0); | ||
349 | if (ACPI_FAILURE(status)) { | ||
350 | return (status); | ||
351 | } | ||
352 | |||
353 | elements += 2; | ||
354 | count -= 2; | ||
355 | } | ||
356 | break; | ||
357 | |||
319 | default: | 358 | default: |
320 | 359 | ||
321 | /* Should not get here if predefined info table is correct */ | 360 | /* Should not get here if predefined info table is correct */ |
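The new ACPI_PTYPE2_UUID_PAIR branch above walks the package two elements at a time: the element count must be even, the first element of each pair must be a buffer of exactly 16 bytes (a UUID), and the second must match the expected object type. The same validation shape in a self-contained sketch (hypothetical struct, not the ACPICA types):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for an ACPI package element. */
struct pkg_elem {
        int     is_buffer;      /* non-zero when the element is a Buffer          */
        size_t  buffer_len;     /* Buffer length in bytes, when is_buffer is set  */
        int     type_ok;        /* non-zero when a pair's second element matches  */
};

/* Return 0 when elems[] holds valid (16-byte UUID, expected type) pairs. */
static int check_uuid_pairs(const struct pkg_elem *elems, size_t count)
{
        if (count & 1)                  /* the package must come in pairs */
                return -1;

        while (count > 0) {
                if (!elems[0].is_buffer || elems[0].buffer_len != 16)
                        return -1;      /* UUIDs are exactly 16 bytes */
                if (!elems[1].type_ok)
                        return -1;      /* second element has the wrong type */

                elems += 2;
                count -= 2;
        }
        return 0;
}

int main(void)
{
        struct pkg_elem pkg[] = {
                { .is_buffer = 1, .buffer_len = 16 },   /* UUID buffer   */
                { .type_ok = 1 },                       /* expected type */
        };

        printf("valid: %s\n", check_uuid_pairs(pkg, 2) == 0 ? "yes" : "no");
        return 0;
}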
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 1c162e7be045..5fdfe65fe165 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -534,20 +534,6 @@ static int acpi_battery_get_state(struct acpi_battery *battery) | |||
534 | " invalid.\n"); | 534 | " invalid.\n"); |
535 | } | 535 | } |
536 | 536 | ||
537 | /* | ||
538 | * When fully charged, some batteries wrongly report | ||
539 | * capacity_now = design_capacity instead of = full_charge_capacity | ||
540 | */ | ||
541 | if (battery->capacity_now > battery->full_charge_capacity | ||
542 | && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) { | ||
543 | if (battery->capacity_now != battery->design_capacity) | ||
544 | printk_once(KERN_WARNING FW_BUG | ||
545 | "battery: reported current charge level (%d) " | ||
546 | "is higher than reported maximum charge level (%d).\n", | ||
547 | battery->capacity_now, battery->full_charge_capacity); | ||
548 | battery->capacity_now = battery->full_charge_capacity; | ||
549 | } | ||
550 | |||
551 | if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) | 537 | if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) |
552 | && battery->capacity_now >= 0 && battery->capacity_now <= 100) | 538 | && battery->capacity_now >= 0 && battery->capacity_now <= 100) |
553 | battery->capacity_now = (battery->capacity_now * | 539 | battery->capacity_now = (battery->capacity_now * |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 9922cc46b15c..cb6066c809ea 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -1030,6 +1030,10 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { | |||
1030 | DMI_MATCH(DMI_SYS_VENDOR, "Quanta"), | 1030 | DMI_MATCH(DMI_SYS_VENDOR, "Quanta"), |
1031 | DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL}, | 1031 | DMI_MATCH(DMI_PRODUCT_NAME, "TW9/SW9"),}, NULL}, |
1032 | { | 1032 | { |
1033 | ec_flag_msi, "Clevo W350etq", { | ||
1034 | DMI_MATCH(DMI_SYS_VENDOR, "CLEVO CO."), | ||
1035 | DMI_MATCH(DMI_PRODUCT_NAME, "W35_37ET"),}, NULL}, | ||
1036 | { | ||
1033 | ec_validate_ecdt, "ASUS hardware", { | 1037 | ec_validate_ecdt, "ASUS hardware", { |
1034 | DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, | 1038 | DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL}, |
1035 | { | 1039 | { |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 3dca36d4ad26..17f9ec501972 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -1071,9 +1071,9 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) | |||
1071 | 1071 | ||
1072 | if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { | 1072 | if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) { |
1073 | 1073 | ||
1074 | cpuidle_pause_and_lock(); | ||
1075 | /* Protect against cpu-hotplug */ | 1074 | /* Protect against cpu-hotplug */ |
1076 | get_online_cpus(); | 1075 | get_online_cpus(); |
1076 | cpuidle_pause_and_lock(); | ||
1077 | 1077 | ||
1078 | /* Disable all cpuidle devices */ | 1078 | /* Disable all cpuidle devices */ |
1079 | for_each_online_cpu(cpu) { | 1079 | for_each_online_cpu(cpu) { |
@@ -1100,8 +1100,8 @@ int acpi_processor_cst_has_changed(struct acpi_processor *pr) | |||
1100 | cpuidle_enable_device(dev); | 1100 | cpuidle_enable_device(dev); |
1101 | } | 1101 | } |
1102 | } | 1102 | } |
1103 | put_online_cpus(); | ||
1104 | cpuidle_resume_and_unlock(); | 1103 | cpuidle_resume_and_unlock(); |
1104 | put_online_cpus(); | ||
1105 | } | 1105 | } |
1106 | 1106 | ||
1107 | return 0; | 1107 | return 0; |
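The two hunks above make acpi_processor_cst_has_changed() take the CPU-hotplug reference before the cpuidle lock and release them strictly in the reverse order; keeping one consistent acquisition order for a pair of locks is the usual way to rule out deadlock between them. A generic sketch of the pattern with plain pthread mutexes (hypothetical lock names):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t idle_lock    = PTHREAD_MUTEX_INITIALIZER;

/*
 * Every path that needs both locks takes hotplug_lock first and idle_lock
 * second, and drops them in the reverse order, so no two threads can ever
 * hold one lock each while waiting for the other.
 */
static void update_idle_states(void)
{
        pthread_mutex_lock(&hotplug_lock);      /* outer lock first   */
        pthread_mutex_lock(&idle_lock);         /* inner lock second  */

        printf("reprogramming idle states\n");  /* work under both locks */

        pthread_mutex_unlock(&idle_lock);       /* release inner first */
        pthread_mutex_unlock(&hotplug_lock);    /* then the outer lock */
}

int main(void)
{
        update_idle_states();
        return 0;
}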
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 9a9298994e26..3bf7764659a4 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c | |||
@@ -667,8 +667,14 @@ static ssize_t | |||
667 | acpi_device_sun_show(struct device *dev, struct device_attribute *attr, | 667 | acpi_device_sun_show(struct device *dev, struct device_attribute *attr, |
668 | char *buf) { | 668 | char *buf) { |
669 | struct acpi_device *acpi_dev = to_acpi_device(dev); | 669 | struct acpi_device *acpi_dev = to_acpi_device(dev); |
670 | acpi_status status; | ||
671 | unsigned long long sun; | ||
672 | |||
673 | status = acpi_evaluate_integer(acpi_dev->handle, "_SUN", NULL, &sun); | ||
674 | if (ACPI_FAILURE(status)) | ||
675 | return -ENODEV; | ||
670 | 676 | ||
671 | return sprintf(buf, "%lu\n", acpi_dev->pnp.sun); | 677 | return sprintf(buf, "%llu\n", sun); |
672 | } | 678 | } |
673 | static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); | 679 | static DEVICE_ATTR(sun, 0444, acpi_device_sun_show, NULL); |
674 | 680 | ||
@@ -690,7 +696,6 @@ static int acpi_device_setup_files(struct acpi_device *dev) | |||
690 | { | 696 | { |
691 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; | 697 | struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; |
692 | acpi_status status; | 698 | acpi_status status; |
693 | unsigned long long sun; | ||
694 | int result = 0; | 699 | int result = 0; |
695 | 700 | ||
696 | /* | 701 | /* |
@@ -731,14 +736,10 @@ static int acpi_device_setup_files(struct acpi_device *dev) | |||
731 | if (dev->pnp.unique_id) | 736 | if (dev->pnp.unique_id) |
732 | result = device_create_file(&dev->dev, &dev_attr_uid); | 737 | result = device_create_file(&dev->dev, &dev_attr_uid); |
733 | 738 | ||
734 | status = acpi_evaluate_integer(dev->handle, "_SUN", NULL, &sun); | 739 | if (acpi_has_method(dev->handle, "_SUN")) { |
735 | if (ACPI_SUCCESS(status)) { | ||
736 | dev->pnp.sun = (unsigned long)sun; | ||
737 | result = device_create_file(&dev->dev, &dev_attr_sun); | 740 | result = device_create_file(&dev->dev, &dev_attr_sun); |
738 | if (result) | 741 | if (result) |
739 | goto end; | 742 | goto end; |
740 | } else { | ||
741 | dev->pnp.sun = (unsigned long)-1; | ||
742 | } | 743 | } |
743 | 744 | ||
744 | if (acpi_has_method(dev->handle, "_STA")) { | 745 | if (acpi_has_method(dev->handle, "_STA")) { |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 826884392e6b..fcbda105616e 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -82,9 +82,9 @@ module_param(allow_duplicates, bool, 0644); | |||
82 | * For Windows 8 systems: used to decide if video module | 82 | * For Windows 8 systems: used to decide if video module |
83 | * should skip registering backlight interface of its own. | 83 | * should skip registering backlight interface of its own. |
84 | */ | 84 | */ |
85 | static int use_native_backlight_param = 1; | 85 | static int use_native_backlight_param = -1; |
86 | module_param_named(use_native_backlight, use_native_backlight_param, int, 0444); | 86 | module_param_named(use_native_backlight, use_native_backlight_param, int, 0444); |
87 | static bool use_native_backlight_dmi = false; | 87 | static bool use_native_backlight_dmi = true; |
88 | 88 | ||
89 | static int register_count; | 89 | static int register_count; |
90 | static struct mutex video_list_lock; | 90 | static struct mutex video_list_lock; |
@@ -417,6 +417,12 @@ static int __init video_set_use_native_backlight(const struct dmi_system_id *d) | |||
417 | return 0; | 417 | return 0; |
418 | } | 418 | } |
419 | 419 | ||
420 | static int __init video_disable_native_backlight(const struct dmi_system_id *d) | ||
421 | { | ||
422 | use_native_backlight_dmi = false; | ||
423 | return 0; | ||
424 | } | ||
425 | |||
420 | static struct dmi_system_id video_dmi_table[] __initdata = { | 426 | static struct dmi_system_id video_dmi_table[] __initdata = { |
421 | /* | 427 | /* |
422 | * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 | 428 | * Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121 |
@@ -720,6 +726,41 @@ static struct dmi_system_id video_dmi_table[] __initdata = { | |||
720 | DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"), | 726 | DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"), |
721 | }, | 727 | }, |
722 | }, | 728 | }, |
729 | |||
730 | /* | ||
731 | * These models have a working acpi_video backlight control, and using | ||
732 | * native backlight causes a regression where backlight does not work | ||
733 | * when userspace is not handling brightness key events. Disable | ||
734 | * native_backlight on these to fix this: | ||
735 | * https://bugzilla.kernel.org/show_bug.cgi?id=81691 | ||
736 | */ | ||
737 | { | ||
738 | .callback = video_disable_native_backlight, | ||
739 | .ident = "ThinkPad T420", | ||
740 | .matches = { | ||
741 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
742 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T420"), | ||
743 | }, | ||
744 | }, | ||
745 | { | ||
746 | .callback = video_disable_native_backlight, | ||
747 | .ident = "ThinkPad T520", | ||
748 | .matches = { | ||
749 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
750 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T520"), | ||
751 | }, | ||
752 | }, | ||
753 | |||
754 | /* The native backlight controls do not work on some older machines */ | ||
755 | { | ||
756 | /* https://bugs.freedesktop.org/show_bug.cgi?id=81515 */ | ||
757 | .callback = video_disable_native_backlight, | ||
758 | .ident = "HP ENVY 15 Notebook", | ||
759 | .matches = { | ||
760 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
761 | DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"), | ||
762 | }, | ||
763 | }, | ||
723 | {} | 764 | {} |
724 | }; | 765 | }; |
725 | 766 | ||
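The new entries above extend the driver's DMI quirk table: each entry names a callback to run when the vendor/product strings match at init time, and video_disable_native_backlight simply flips the module's default for the matching machines. The general shape of such a match-and-callback table, in a self-contained sketch (hypothetical types and matching logic):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct quirk {
        const char *vendor;
        const char *product;
        void (*apply)(void);            /* run when both strings match */
};

static bool use_native_backlight = true;

static void disable_native_backlight(void)
{
        use_native_backlight = false;
}

static const struct quirk quirk_table[] = {
        { "LENOVO",          "ThinkPad T420",          disable_native_backlight },
        { "LENOVO",          "ThinkPad T520",          disable_native_backlight },
        { "Hewlett-Packard", "HP ENVY 15 Notebook PC", disable_native_backlight },
        { NULL, NULL, NULL },           /* terminator, like the {} entry above */
};

/* The core scans the table once and applies every matching quirk. */
static void apply_quirks(const char *vendor, const char *product)
{
        const struct quirk *q;

        for (q = quirk_table; q->vendor; q++)
                if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
                        q->apply();
}

int main(void)
{
        apply_quirks("LENOVO", "ThinkPad T420");
        printf("use_native_backlight = %d\n", use_native_backlight);
        return 0;
}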
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index a29f8012fb08..a0cc0edafc78 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -305,6 +305,14 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
305 | { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */ | 305 | { PCI_VDEVICE(INTEL, 0x9c85), board_ahci }, /* Wildcat Point-LP RAID */ |
306 | { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ | 306 | { PCI_VDEVICE(INTEL, 0x9c87), board_ahci }, /* Wildcat Point-LP RAID */ |
307 | { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ | 307 | { PCI_VDEVICE(INTEL, 0x9c8f), board_ahci }, /* Wildcat Point-LP RAID */ |
308 | { PCI_VDEVICE(INTEL, 0x8c82), board_ahci }, /* 9 Series AHCI */ | ||
309 | { PCI_VDEVICE(INTEL, 0x8c83), board_ahci }, /* 9 Series AHCI */ | ||
310 | { PCI_VDEVICE(INTEL, 0x8c84), board_ahci }, /* 9 Series RAID */ | ||
311 | { PCI_VDEVICE(INTEL, 0x8c85), board_ahci }, /* 9 Series RAID */ | ||
312 | { PCI_VDEVICE(INTEL, 0x8c86), board_ahci }, /* 9 Series RAID */ | ||
313 | { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ | ||
314 | { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ | ||
315 | { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ | ||
308 | 316 | ||
309 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ | 317 | /* JMicron 360/1/3/5/6, match class to avoid IDE function */ |
310 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 318 | { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
@@ -442,6 +450,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { | |||
442 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), | 450 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), |
443 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ | 451 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ |
444 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), | 452 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), |
453 | .driver_data = board_ahci_yes_fbs }, /* 88se9182 */ | ||
454 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9182), | ||
445 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ | 455 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ |
446 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192), | 456 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192), |
447 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ | 457 | .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ |
@@ -1329,6 +1339,18 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1329 | else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000) | 1339 | else if (pdev->vendor == 0x1c44 && pdev->device == 0x8000) |
1330 | ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS; | 1340 | ahci_pci_bar = AHCI_PCI_BAR_ENMOTUS; |
1331 | 1341 | ||
1342 | /* | ||
1343 | * The JMicron chip 361/363 contains one SATA controller and one | ||
1344 | * PATA controller. For powering on both of these controllers, we must | ||
1345 | * follow the sequence one by one, otherwise one of them cannot be | ||
1346 | * powered on successfully, so here we disable the async suspend | ||
1347 | * method for these chips. | ||
1348 | */ | ||
1349 | if (pdev->vendor == PCI_VENDOR_ID_JMICRON && | ||
1350 | (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 || | ||
1351 | pdev->device == PCI_DEVICE_ID_JMICRON_JMB361)) | ||
1352 | device_disable_async_suspend(&pdev->dev); | ||
1353 | |||
1332 | /* acquire resources */ | 1354 | /* acquire resources */ |
1333 | rc = pcim_enable_device(pdev); | 1355 | rc = pcim_enable_device(pdev); |
1334 | if (rc) | 1356 | if (rc) |
diff --git a/drivers/ata/ahci_tegra.c b/drivers/ata/ahci_tegra.c index f1fef74e503c..032904402c95 100644 --- a/drivers/ata/ahci_tegra.c +++ b/drivers/ata/ahci_tegra.c | |||
@@ -18,14 +18,17 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | #include <linux/ahci_platform.h> | 20 | #include <linux/ahci_platform.h> |
21 | #include <linux/reset.h> | ||
22 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
23 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
24 | #include <linux/module.h> | 23 | #include <linux/module.h> |
25 | #include <linux/of_device.h> | 24 | #include <linux/of_device.h> |
26 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
27 | #include <linux/regulator/consumer.h> | 26 | #include <linux/regulator/consumer.h> |
27 | #include <linux/reset.h> | ||
28 | |||
29 | #include <soc/tegra/fuse.h> | ||
28 | #include <soc/tegra/pmc.h> | 30 | #include <soc/tegra/pmc.h> |
31 | |||
29 | #include "ahci.h" | 32 | #include "ahci.h" |
30 | 33 | ||
31 | #define SATA_CONFIGURATION_0 0x180 | 34 | #define SATA_CONFIGURATION_0 0x180 |
@@ -180,9 +183,12 @@ static int tegra_ahci_controller_init(struct ahci_host_priv *hpriv) | |||
180 | 183 | ||
181 | /* Pad calibration */ | 184 | /* Pad calibration */ |
182 | 185 | ||
183 | /* FIXME Always use calibration 0. Change this to read the calibration | 186 | ret = tegra_fuse_readl(FUSE_SATA_CALIB, &val); |
184 | * fuse once the fuse driver has landed. */ | 187 | if (ret) { |
185 | val = 0; | 188 | dev_err(&tegra->pdev->dev, |
189 | "failed to read calibration fuse: %d\n", ret); | ||
190 | return ret; | ||
191 | } | ||
186 | 192 | ||
187 | calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK]; | 193 | calib = tegra124_pad_calibration[val & FUSE_SATA_CALIB_MASK]; |
188 | 194 | ||
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index c6962300b93c..f03aab187f4d 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c | |||
@@ -78,6 +78,9 @@ | |||
78 | #define CFG_MEM_RAM_SHUTDOWN 0x00000070 | 78 | #define CFG_MEM_RAM_SHUTDOWN 0x00000070 |
79 | #define BLOCK_MEM_RDY 0x00000074 | 79 | #define BLOCK_MEM_RDY 0x00000074 |
80 | 80 | ||
81 | /* Max retry for link down */ | ||
82 | #define MAX_LINK_DOWN_RETRY 3 | ||
83 | |||
81 | struct xgene_ahci_context { | 84 | struct xgene_ahci_context { |
82 | struct ahci_host_priv *hpriv; | 85 | struct ahci_host_priv *hpriv; |
83 | struct device *dev; | 86 | struct device *dev; |
@@ -145,6 +148,14 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) | |||
145 | return rc; | 148 | return rc; |
146 | } | 149 | } |
147 | 150 | ||
151 | static bool xgene_ahci_is_memram_inited(struct xgene_ahci_context *ctx) | ||
152 | { | ||
153 | void __iomem *diagcsr = ctx->csr_diag; | ||
154 | |||
155 | return (readl(diagcsr + CFG_MEM_RAM_SHUTDOWN) == 0 && | ||
156 | readl(diagcsr + BLOCK_MEM_RDY) == 0xFFFFFFFF); | ||
157 | } | ||
158 | |||
148 | /** | 159 | /** |
149 | * xgene_ahci_read_id - Read ID data from the specified device | 160 | * xgene_ahci_read_id - Read ID data from the specified device |
150 | * @dev: device | 161 | * @dev: device |
@@ -229,8 +240,11 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel) | |||
229 | * and Gen1 (1.5Gbps). Otherwise during long IO stress test, the PHY will | 240 | * and Gen1 (1.5Gbps). Otherwise during long IO stress test, the PHY will |
230 | * report disparity error and etc. In addition, during COMRESET, there can | 241 | * report disparity error and etc. In addition, during COMRESET, there can |
231 | * be error reported in the register PORT_SCR_ERR. For SERR_DISPARITY and | 242 | * be error reported in the register PORT_SCR_ERR. For SERR_DISPARITY and |
232 | * SERR_10B_8B_ERR, the PHY receiver line must be reseted. The following | 243 | * SERR_10B_8B_ERR, the PHY receiver line must be reseted. Also during long |
233 | * algorithm is followed to proper configure the hardware PHY during COMRESET: | 244 | * reboot cycle regression, sometimes the PHY reports link down even if the |
245 | * device is present because of speed negotiation failure, so we need to retry | ||
245 | * device is present because of speed negotiation failure, so we need to retry | ||
246 | * the COMRESET to get the link up. The following algorithm is followed to | ||
247 | * properly configure the hardware PHY during COMRESET: | ||

234 | * | 248 | * |
235 | * Alg Part 1: | 249 | * Alg Part 1: |
236 | * 1. Start the PHY at Gen3 speed (default setting) | 250 | * 1. Start the PHY at Gen3 speed (default setting) |
@@ -246,9 +260,15 @@ static void xgene_ahci_set_phy_cfg(struct xgene_ahci_context *ctx, int channel) | |||
246 | * Alg Part 2: | 260 | * Alg Part 2: |
247 | * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error | 261 | * 1. On link up, if there are any SERR_DISPARITY and SERR_10B_8B_ERR error |
248 | * reported in the register PORT_SCR_ERR, then reset the PHY receiver line | 262 | * reported in the register PORT_SCR_ERR, then reset the PHY receiver line |
249 | * 2. Go to Alg Part 3 | 263 | * 2. Go to Alg Part 4 |
250 | * | 264 | * |
251 | * Alg Part 3: | 265 | * Alg Part 3: |
266 | * 1. Check the PORT_SCR_STAT to see whether device presence detected but PHY | ||
267 | * communication establishment failed and maximum link down attempts are | ||
268 | * less than Max attempts 3 then goto Alg Part 1. | ||
269 | * 2. Go to Alg Part 4. | ||
270 | * | ||
271 | * Alg Part 4: | ||
252 | * 1. Clear any pending from register PORT_SCR_ERR. | 272 | * 1. Clear any pending from register PORT_SCR_ERR. |
253 | * | 273 | * |
254 | * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition | 274 | * NOTE: For the initial version, we will NOT support Gen1/Gen2. In addition |
@@ -267,19 +287,27 @@ static int xgene_ahci_do_hardreset(struct ata_link *link, | |||
267 | u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; | 287 | u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; |
268 | void __iomem *port_mmio = ahci_port_base(ap); | 288 | void __iomem *port_mmio = ahci_port_base(ap); |
269 | struct ata_taskfile tf; | 289 | struct ata_taskfile tf; |
290 | int link_down_retry = 0; | ||
270 | int rc; | 291 | int rc; |
271 | u32 val; | 292 | u32 val, sstatus; |
272 | 293 | ||
273 | /* clear D2H reception area to properly wait for D2H FIS */ | 294 | do { |
274 | ata_tf_init(link->device, &tf); | 295 | /* clear D2H reception area to properly wait for D2H FIS */ |
275 | tf.command = ATA_BUSY; | 296 | ata_tf_init(link->device, &tf); |
276 | ata_tf_to_fis(&tf, 0, 0, d2h_fis); | 297 | tf.command = ATA_BUSY; |
277 | rc = sata_link_hardreset(link, timing, deadline, online, | 298 | ata_tf_to_fis(&tf, 0, 0, d2h_fis); |
299 | rc = sata_link_hardreset(link, timing, deadline, online, | ||
278 | ahci_check_ready); | 300 | ahci_check_ready); |
301 | if (*online) { | ||
302 | val = readl(port_mmio + PORT_SCR_ERR); | ||
303 | if (val & (SERR_DISPARITY | SERR_10B_8B_ERR)) | ||
304 | dev_warn(ctx->dev, "link has error\n"); | ||
305 | break; | ||
306 | } | ||
279 | 307 | ||
280 | val = readl(port_mmio + PORT_SCR_ERR); | 308 | sata_scr_read(link, SCR_STATUS, &sstatus); |
281 | if (val & (SERR_DISPARITY | SERR_10B_8B_ERR)) | 309 | } while (link_down_retry++ < MAX_LINK_DOWN_RETRY && |
282 | dev_warn(ctx->dev, "link has error\n"); | 310 | (sstatus & 0xff) == 0x1); |
283 | 311 | ||
284 | /* clear all errors if any pending */ | 312 | /* clear all errors if any pending */ |
285 | val = readl(port_mmio + PORT_SCR_ERR); | 313 | val = readl(port_mmio + PORT_SCR_ERR); |
@@ -467,6 +495,11 @@ static int xgene_ahci_probe(struct platform_device *pdev) | |||
467 | return -ENODEV; | 495 | return -ENODEV; |
468 | } | 496 | } |
469 | 497 | ||
498 | if (xgene_ahci_is_memram_inited(ctx)) { | ||
499 | dev_info(dev, "skip clock and PHY initialization\n"); | ||
500 | goto skip_clk_phy; | ||
501 | } | ||
502 | |||
470 | /* Due to errata, HW requires full toggle transition */ | 503 | /* Due to errata, HW requires full toggle transition */ |
471 | rc = ahci_platform_enable_clks(hpriv); | 504 | rc = ahci_platform_enable_clks(hpriv); |
472 | if (rc) | 505 | if (rc) |
@@ -479,7 +512,7 @@ static int xgene_ahci_probe(struct platform_device *pdev) | |||
479 | 512 | ||
480 | /* Configure the host controller */ | 513 | /* Configure the host controller */ |
481 | xgene_ahci_hw_init(hpriv); | 514 | xgene_ahci_hw_init(hpriv); |
482 | 515 | skip_clk_phy: | |
483 | hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ; | 516 | hpriv->flags = AHCI_HFLAG_NO_PMP | AHCI_HFLAG_NO_NCQ; |
484 | 517 | ||
485 | rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info); | 518 | rc = ahci_platform_init_host(pdev, hpriv, &xgene_ahci_port_info); |
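The do/while loop added to xgene_ahci_do_hardreset() above retries the COMRESET while the port reports a device present but no established PHY communication (the low SStatus DET field reads 0x1), giving up after MAX_LINK_DOWN_RETRY attempts. The same bounded-retry shape in a self-contained sketch (the helpers below are hypothetical stand-ins, not the libata API):

#include <stdbool.h>
#include <stdio.h>

#define MAX_LINK_DOWN_RETRY     3
#define DET_DEVICE_NO_PHY       0x1     /* device present, PHY not established */

static int attempts;

static bool try_hardreset(void)
{
        return ++attempts >= 3;         /* pretend the third COMRESET succeeds */
}

static unsigned int read_sstatus_det(void)
{
        return DET_DEVICE_NO_PHY;       /* device seen, speed negotiation failed */
}

static bool bring_up_link(void)
{
        int retries = 0;
        bool online;

        do {
                online = try_hardreset();
                if (online)
                        break;          /* link is up, stop retrying */
        } while (retries++ < MAX_LINK_DOWN_RETRY &&
                 read_sstatus_det() == DET_DEVICE_NO_PHY);

        return online;
}

int main(void)
{
        printf("link %s after %d attempt(s)\n",
               bring_up_link() ? "up" : "down", attempts);
        return 0;
}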
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c index 893e30e9a9ef..ffbe625e6fd2 100644 --- a/drivers/ata/ata_piix.c +++ b/drivers/ata/ata_piix.c | |||
@@ -340,6 +340,14 @@ static const struct pci_device_id piix_pci_tbl[] = { | |||
340 | { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, | 340 | { 0x8086, 0x0F21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_byt }, |
341 | /* SATA Controller IDE (Coleto Creek) */ | 341 | /* SATA Controller IDE (Coleto Creek) */ |
342 | { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, | 342 | { 0x8086, 0x23a6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, |
343 | /* SATA Controller IDE (9 Series) */ | ||
344 | { 0x8086, 0x8c88, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, | ||
345 | /* SATA Controller IDE (9 Series) */ | ||
346 | { 0x8086, 0x8c89, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb }, | ||
347 | /* SATA Controller IDE (9 Series) */ | ||
348 | { 0x8086, 0x8c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, | ||
349 | /* SATA Controller IDE (9 Series) */ | ||
350 | { 0x8086, 0x8c81, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, | ||
343 | 351 | ||
344 | { } /* terminate list */ | 352 | { } /* terminate list */ |
345 | }; | 353 | }; |
diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c index 4d1a5d2c4287..47e418b8c8ba 100644 --- a/drivers/ata/pata_jmicron.c +++ b/drivers/ata/pata_jmicron.c | |||
@@ -143,6 +143,18 @@ static int jmicron_init_one (struct pci_dev *pdev, const struct pci_device_id *i | |||
143 | }; | 143 | }; |
144 | const struct ata_port_info *ppi[] = { &info, NULL }; | 144 | const struct ata_port_info *ppi[] = { &info, NULL }; |
145 | 145 | ||
146 | /* | ||
147 | * The JMicron chip 361/363 contains one SATA controller and one | ||
148 | * PATA controller. For powering on both of these controllers, we must | ||
149 | * follow the sequence one by one, otherwise one of them cannot be | ||
150 | * powered on successfully, so here we disable the async suspend | ||
151 | * method for these chips. | ||
152 | */ | ||
153 | if (pdev->vendor == PCI_VENDOR_ID_JMICRON && | ||
154 | (pdev->device == PCI_DEVICE_ID_JMICRON_JMB363 || | ||
155 | pdev->device == PCI_DEVICE_ID_JMICRON_JMB361)) | ||
156 | device_disable_async_suspend(&pdev->dev); | ||
157 | |||
146 | return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0); | 158 | return ata_pci_bmdma_init_one(pdev, ppi, &jmicron_sht, NULL, 0); |
147 | } | 159 | } |
148 | 160 | ||
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 7d1326985bee..bfc90b8547f2 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h | |||
@@ -146,6 +146,9 @@ struct regcache_ops { | |||
146 | enum regcache_type type; | 146 | enum regcache_type type; |
147 | int (*init)(struct regmap *map); | 147 | int (*init)(struct regmap *map); |
148 | int (*exit)(struct regmap *map); | 148 | int (*exit)(struct regmap *map); |
149 | #ifdef CONFIG_DEBUG_FS | ||
150 | void (*debugfs_init)(struct regmap *map); | ||
151 | #endif | ||
149 | int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); | 152 | int (*read)(struct regmap *map, unsigned int reg, unsigned int *value); |
150 | int (*write)(struct regmap *map, unsigned int reg, unsigned int value); | 153 | int (*write)(struct regmap *map, unsigned int reg, unsigned int value); |
151 | int (*sync)(struct regmap *map, unsigned int min, unsigned int max); | 154 | int (*sync)(struct regmap *map, unsigned int min, unsigned int max); |
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c index 6a7e4fa12854..f3e8fe0cc650 100644 --- a/drivers/base/regmap/regcache-rbtree.c +++ b/drivers/base/regmap/regcache-rbtree.c | |||
@@ -194,10 +194,6 @@ static void rbtree_debugfs_init(struct regmap *map) | |||
194 | { | 194 | { |
195 | debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops); | 195 | debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops); |
196 | } | 196 | } |
197 | #else | ||
198 | static void rbtree_debugfs_init(struct regmap *map) | ||
199 | { | ||
200 | } | ||
201 | #endif | 197 | #endif |
202 | 198 | ||
203 | static int regcache_rbtree_init(struct regmap *map) | 199 | static int regcache_rbtree_init(struct regmap *map) |
@@ -222,8 +218,6 @@ static int regcache_rbtree_init(struct regmap *map) | |||
222 | goto err; | 218 | goto err; |
223 | } | 219 | } |
224 | 220 | ||
225 | rbtree_debugfs_init(map); | ||
226 | |||
227 | return 0; | 221 | return 0; |
228 | 222 | ||
229 | err: | 223 | err: |
@@ -532,6 +526,9 @@ struct regcache_ops regcache_rbtree_ops = { | |||
532 | .name = "rbtree", | 526 | .name = "rbtree", |
533 | .init = regcache_rbtree_init, | 527 | .init = regcache_rbtree_init, |
534 | .exit = regcache_rbtree_exit, | 528 | .exit = regcache_rbtree_exit, |
529 | #ifdef CONFIG_DEBUG_FS | ||
530 | .debugfs_init = rbtree_debugfs_init, | ||
531 | #endif | ||
535 | .read = regcache_rbtree_read, | 532 | .read = regcache_rbtree_read, |
536 | .write = regcache_rbtree_write, | 533 | .write = regcache_rbtree_write, |
537 | .sync = regcache_rbtree_sync, | 534 | .sync = regcache_rbtree_sync, |
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index 29b4128da0b0..5617da6dc898 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c | |||
@@ -698,7 +698,7 @@ int regcache_sync_block(struct regmap *map, void *block, | |||
698 | unsigned int block_base, unsigned int start, | 698 | unsigned int block_base, unsigned int start, |
699 | unsigned int end) | 699 | unsigned int end) |
700 | { | 700 | { |
701 | if (regmap_can_raw_write(map)) | 701 | if (regmap_can_raw_write(map) && !map->use_single_rw) |
702 | return regcache_sync_block_raw(map, block, cache_present, | 702 | return regcache_sync_block_raw(map, block, cache_present, |
703 | block_base, start, end); | 703 | block_base, start, end); |
704 | else | 704 | else |
diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index 45d812c0ea77..65ea7b256b3e 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c | |||
@@ -538,6 +538,9 @@ void regmap_debugfs_init(struct regmap *map, const char *name) | |||
538 | 538 | ||
539 | next = rb_next(&range_node->node); | 539 | next = rb_next(&range_node->node); |
540 | } | 540 | } |
541 | |||
542 | if (map->cache_ops && map->cache_ops->debugfs_init) | ||
543 | map->cache_ops->debugfs_init(map); | ||
541 | } | 544 | } |
542 | 545 | ||
543 | void regmap_debugfs_exit(struct regmap *map) | 546 | void regmap_debugfs_exit(struct regmap *map) |
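Taken together, the regmap hunks above move cache-specific debugfs setup behind an optional debugfs_init callback in regcache_ops, which the core invokes only when the cache backend provides it (and only once the debugfs directory actually exists). The optional-callback pattern in a self-contained sketch (hypothetical ops struct, not the regmap types):

#include <stdio.h>

struct cache_ops {
        const char *name;
        int  (*init)(void);
        void (*debugfs_init)(void);     /* optional: may be left NULL */
};

static void rbtree_debugfs_init(void)
{
        printf("rbtree: created debugfs entries\n");
}

static const struct cache_ops rbtree_ops = {
        .name         = "rbtree",
        .debugfs_init = rbtree_debugfs_init,
};

static const struct cache_ops flat_ops = {
        .name = "flat",                 /* provides no debugfs hook */
};

/* The core only calls the hook when the backend filled it in. */
static void core_debugfs_init(const struct cache_ops *ops)
{
        if (ops && ops->debugfs_init)
                ops->debugfs_init();
}

int main(void)
{
        core_debugfs_init(&rbtree_ops); /* prints            */
        core_debugfs_init(&flat_ops);   /* silently skipped  */
        return 0;
}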
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 78f43fb2fe84..1cf427bc0d4a 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c | |||
@@ -109,7 +109,7 @@ bool regmap_readable(struct regmap *map, unsigned int reg) | |||
109 | 109 | ||
110 | bool regmap_volatile(struct regmap *map, unsigned int reg) | 110 | bool regmap_volatile(struct regmap *map, unsigned int reg) |
111 | { | 111 | { |
112 | if (!regmap_readable(map, reg)) | 112 | if (!map->format.format_write && !regmap_readable(map, reg)) |
113 | return false; | 113 | return false; |
114 | 114 | ||
115 | if (map->volatile_reg) | 115 | if (map->volatile_reg) |
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index 294a7dd25190..f032ed6dd459 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c | |||
@@ -282,6 +282,7 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = { | |||
282 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, | 282 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, |
283 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) }, | 283 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) }, |
284 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, | 284 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, |
285 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xA8DB */ | ||
285 | { 0, }, | 286 | { 0, }, |
286 | }; | 287 | }; |
287 | MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl); | 288 | MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl); |
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index db1e9560d8a7..5c8e7fe07745 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
@@ -3918,7 +3918,6 @@ skip_create_disk: | |||
3918 | if (rv) { | 3918 | if (rv) { |
3919 | dev_err(&dd->pdev->dev, | 3919 | dev_err(&dd->pdev->dev, |
3920 | "Unable to allocate request queue\n"); | 3920 | "Unable to allocate request queue\n"); |
3921 | rv = -ENOMEM; | ||
3922 | goto block_queue_alloc_init_error; | 3921 | goto block_queue_alloc_init_error; |
3923 | } | 3922 | } |
3924 | 3923 | ||
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index a3b042c4d448..00d469c7f9f7 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -462,17 +462,21 @@ static int null_add_dev(void) | |||
462 | struct gendisk *disk; | 462 | struct gendisk *disk; |
463 | struct nullb *nullb; | 463 | struct nullb *nullb; |
464 | sector_t size; | 464 | sector_t size; |
465 | int rv; | ||
465 | 466 | ||
466 | nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); | 467 | nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); |
467 | if (!nullb) | 468 | if (!nullb) { |
469 | rv = -ENOMEM; | ||
468 | goto out; | 470 | goto out; |
471 | } | ||
469 | 472 | ||
470 | spin_lock_init(&nullb->lock); | 473 | spin_lock_init(&nullb->lock); |
471 | 474 | ||
472 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) | 475 | if (queue_mode == NULL_Q_MQ && use_per_node_hctx) |
473 | submit_queues = nr_online_nodes; | 476 | submit_queues = nr_online_nodes; |
474 | 477 | ||
475 | if (setup_queues(nullb)) | 478 | rv = setup_queues(nullb); |
479 | if (rv) | ||
476 | goto out_free_nullb; | 480 | goto out_free_nullb; |
477 | 481 | ||
478 | if (queue_mode == NULL_Q_MQ) { | 482 | if (queue_mode == NULL_Q_MQ) { |
@@ -484,22 +488,29 @@ static int null_add_dev(void) | |||
484 | nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; | 488 | nullb->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; |
485 | nullb->tag_set.driver_data = nullb; | 489 | nullb->tag_set.driver_data = nullb; |
486 | 490 | ||
487 | if (blk_mq_alloc_tag_set(&nullb->tag_set)) | 491 | rv = blk_mq_alloc_tag_set(&nullb->tag_set); |
492 | if (rv) | ||
488 | goto out_cleanup_queues; | 493 | goto out_cleanup_queues; |
489 | 494 | ||
490 | nullb->q = blk_mq_init_queue(&nullb->tag_set); | 495 | nullb->q = blk_mq_init_queue(&nullb->tag_set); |
491 | if (!nullb->q) | 496 | if (!nullb->q) { |
497 | rv = -ENOMEM; | ||
492 | goto out_cleanup_tags; | 498 | goto out_cleanup_tags; |
499 | } | ||
493 | } else if (queue_mode == NULL_Q_BIO) { | 500 | } else if (queue_mode == NULL_Q_BIO) { |
494 | nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); | 501 | nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node); |
495 | if (!nullb->q) | 502 | if (!nullb->q) { |
503 | rv = -ENOMEM; | ||
496 | goto out_cleanup_queues; | 504 | goto out_cleanup_queues; |
505 | } | ||
497 | blk_queue_make_request(nullb->q, null_queue_bio); | 506 | blk_queue_make_request(nullb->q, null_queue_bio); |
498 | init_driver_queues(nullb); | 507 | init_driver_queues(nullb); |
499 | } else { | 508 | } else { |
500 | nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); | 509 | nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); |
501 | if (!nullb->q) | 510 | if (!nullb->q) { |
511 | rv = -ENOMEM; | ||
502 | goto out_cleanup_queues; | 512 | goto out_cleanup_queues; |
513 | } | ||
503 | blk_queue_prep_rq(nullb->q, null_rq_prep_fn); | 514 | blk_queue_prep_rq(nullb->q, null_rq_prep_fn); |
504 | blk_queue_softirq_done(nullb->q, null_softirq_done_fn); | 515 | blk_queue_softirq_done(nullb->q, null_softirq_done_fn); |
505 | init_driver_queues(nullb); | 516 | init_driver_queues(nullb); |
@@ -509,8 +520,10 @@ static int null_add_dev(void) | |||
509 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); | 520 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); |
510 | 521 | ||
511 | disk = nullb->disk = alloc_disk_node(1, home_node); | 522 | disk = nullb->disk = alloc_disk_node(1, home_node); |
512 | if (!disk) | 523 | if (!disk) { |
524 | rv = -ENOMEM; | ||
513 | goto out_cleanup_blk_queue; | 525 | goto out_cleanup_blk_queue; |
526 | } | ||
514 | 527 | ||
515 | mutex_lock(&lock); | 528 | mutex_lock(&lock); |
516 | list_add_tail(&nullb->list, &nullb_list); | 529 | list_add_tail(&nullb->list, &nullb_list); |
@@ -544,7 +557,7 @@ out_cleanup_queues: | |||
544 | out_free_nullb: | 557 | out_free_nullb: |
545 | kfree(nullb); | 558 | kfree(nullb); |
546 | out: | 559 | out: |
547 | return -ENOMEM; | 560 | return rv; |
548 | } | 561 | } |
549 | 562 | ||
550 | static int __init null_init(void) | 563 | static int __init null_init(void) |
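The null_blk change above makes every failure path set rv to a specific error before jumping to the shared cleanup labels, so the function no longer collapses all failures into -ENOMEM at the single out: label. A compact user-space illustration of the pattern (hypothetical resources and helper):

#include <errno.h>
#include <stdlib.h>

struct dev {
        void *queues;
        void *disk;
};

static int register_disk(void *disk)
{
        (void)disk;
        return 0;                       /* pretend registration succeeded */
}

/* Returns 0 on success or a negative errno, undoing earlier steps on error. */
static int add_device(struct dev *d)
{
        int rv;

        d->queues = malloc(64);
        if (!d->queues) {
                rv = -ENOMEM;           /* allocation failures pick their code */
                goto out;
        }

        d->disk = malloc(128);
        if (!d->disk) {
                rv = -ENOMEM;
                goto out_free_queues;
        }

        rv = register_disk(d->disk);    /* helpers return their own errno */
        if (rv)
                goto out_free_disk;

        return 0;                       /* success: caller owns *d */

out_free_disk:
        free(d->disk);
out_free_queues:
        free(d->queues);
out:
        return rv;                      /* propagate the real error, not -ENOMEM */
}

int main(void)
{
        struct dev d;
        int rv = add_device(&d);

        if (rv == 0) {
                free(d.disk);
                free(d.queues);
        }
        return rv ? 1 : 0;
}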
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 623c84145b79..4b97baf8afa3 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -5087,9 +5087,11 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) | |||
5087 | set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); | 5087 | set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); |
5088 | set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only); | 5088 | set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only); |
5089 | 5089 | ||
5090 | rbd_dev->rq_wq = alloc_workqueue(rbd_dev->disk->disk_name, 0, 0); | 5090 | rbd_dev->rq_wq = alloc_workqueue("%s", 0, 0, rbd_dev->disk->disk_name); |
5091 | if (!rbd_dev->rq_wq) | 5091 | if (!rbd_dev->rq_wq) { |
5092 | ret = -ENOMEM; | ||
5092 | goto err_out_mapping; | 5093 | goto err_out_mapping; |
5094 | } | ||
5093 | 5095 | ||
5094 | ret = rbd_bus_add_dev(rbd_dev); | 5096 | ret = rbd_bus_add_dev(rbd_dev); |
5095 | if (ret) | 5097 | if (ret) |
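The rbd fix above matters because alloc_workqueue() treats its first argument as a printf-style format: passing the disk name directly would misinterpret any '%' it happens to contain, so the patch supplies a literal "%s" and passes the name as an argument (and also sets ret before jumping to the error path). The same rule applies to any formatted interface; a tiny user-space illustration:

#include <stdio.h>

int main(void)
{
        char buf[64];
        const char *name = "disk%d";    /* externally chosen string */

        /* Wrong: the name itself is used as the format string.
         *      snprintf(buf, sizeof(buf), name);
         */

        /* Right: a literal format, with the name as an argument. */
        snprintf(buf, sizeof(buf), "%s", name);
        printf("%s\n", buf);
        return 0;
}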
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index 6f550d9e7a2d..a60f26400705 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c | |||
@@ -586,6 +586,30 @@ static int arm_ccn_pmu_type_eq(u32 a, u32 b) | |||
586 | return 0; | 586 | return 0; |
587 | } | 587 | } |
588 | 588 | ||
589 | static void arm_ccn_pmu_event_destroy(struct perf_event *event) | ||
590 | { | ||
591 | struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); | ||
592 | struct hw_perf_event *hw = &event->hw; | ||
593 | |||
594 | if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) { | ||
595 | clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask); | ||
596 | } else { | ||
597 | struct arm_ccn_component *source = | ||
598 | ccn->dt.pmu_counters[hw->idx].source; | ||
599 | |||
600 | if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP && | ||
601 | CCN_CONFIG_EVENT(event->attr.config) == | ||
602 | CCN_EVENT_WATCHPOINT) | ||
603 | clear_bit(hw->config_base, source->xp.dt_cmp_mask); | ||
604 | else | ||
605 | clear_bit(hw->config_base, source->pmu_events_mask); | ||
606 | clear_bit(hw->idx, ccn->dt.pmu_counters_mask); | ||
607 | } | ||
608 | |||
609 | ccn->dt.pmu_counters[hw->idx].source = NULL; | ||
610 | ccn->dt.pmu_counters[hw->idx].event = NULL; | ||
611 | } | ||
612 | |||
589 | static int arm_ccn_pmu_event_init(struct perf_event *event) | 613 | static int arm_ccn_pmu_event_init(struct perf_event *event) |
590 | { | 614 | { |
591 | struct arm_ccn *ccn; | 615 | struct arm_ccn *ccn; |
@@ -599,6 +623,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) | |||
599 | return -ENOENT; | 623 | return -ENOENT; |
600 | 624 | ||
601 | ccn = pmu_to_arm_ccn(event->pmu); | 625 | ccn = pmu_to_arm_ccn(event->pmu); |
626 | event->destroy = arm_ccn_pmu_event_destroy; | ||
602 | 627 | ||
603 | if (hw->sample_period) { | 628 | if (hw->sample_period) { |
604 | dev_warn(ccn->dev, "Sampling not supported!\n"); | 629 | dev_warn(ccn->dev, "Sampling not supported!\n"); |
@@ -731,30 +756,6 @@ static int arm_ccn_pmu_event_init(struct perf_event *event) | |||
731 | return 0; | 756 | return 0; |
732 | } | 757 | } |
733 | 758 | ||
734 | static void arm_ccn_pmu_event_free(struct perf_event *event) | ||
735 | { | ||
736 | struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu); | ||
737 | struct hw_perf_event *hw = &event->hw; | ||
738 | |||
739 | if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) { | ||
740 | clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask); | ||
741 | } else { | ||
742 | struct arm_ccn_component *source = | ||
743 | ccn->dt.pmu_counters[hw->idx].source; | ||
744 | |||
745 | if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP && | ||
746 | CCN_CONFIG_EVENT(event->attr.config) == | ||
747 | CCN_EVENT_WATCHPOINT) | ||
748 | clear_bit(hw->config_base, source->xp.dt_cmp_mask); | ||
749 | else | ||
750 | clear_bit(hw->config_base, source->pmu_events_mask); | ||
751 | clear_bit(hw->idx, ccn->dt.pmu_counters_mask); | ||
752 | } | ||
753 | |||
754 | ccn->dt.pmu_counters[hw->idx].source = NULL; | ||
755 | ccn->dt.pmu_counters[hw->idx].event = NULL; | ||
756 | } | ||
757 | |||
758 | static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx) | 759 | static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx) |
759 | { | 760 | { |
760 | u64 res; | 761 | u64 res; |
@@ -1027,8 +1028,6 @@ static int arm_ccn_pmu_event_add(struct perf_event *event, int flags) | |||
1027 | static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) | 1028 | static void arm_ccn_pmu_event_del(struct perf_event *event, int flags) |
1028 | { | 1029 | { |
1029 | arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); | 1030 | arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE); |
1030 | |||
1031 | arm_ccn_pmu_event_free(event); | ||
1032 | } | 1031 | } |
1033 | 1032 | ||
1034 | static void arm_ccn_pmu_event_read(struct perf_event *event) | 1033 | static void arm_ccn_pmu_event_read(struct perf_event *event) |
diff --git a/drivers/cpufreq/cpufreq_opp.c b/drivers/cpufreq/cpufreq_opp.c index f7a32d2326c6..773bcde893c0 100644 --- a/drivers/cpufreq/cpufreq_opp.c +++ b/drivers/cpufreq/cpufreq_opp.c | |||
@@ -60,7 +60,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, | |||
60 | goto out; | 60 | goto out; |
61 | } | 61 | } |
62 | 62 | ||
63 | freq_table = kcalloc(sizeof(*freq_table), (max_opps + 1), GFP_ATOMIC); | 63 | freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC); |
64 | if (!freq_table) { | 64 | if (!freq_table) { |
65 | ret = -ENOMEM; | 65 | ret = -ENOMEM; |
66 | goto out; | 66 | goto out; |
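The one-line cpufreq fix swaps the kcalloc() arguments into their documented order, kcalloc(count, size, flags): the element count comes first and the element size second, mirroring userspace calloc(). A tiny illustration with calloc() (hypothetical element type):

#include <stdlib.h>

struct freq_entry {
        unsigned int frequency;
        unsigned int flags;
};

int main(void)
{
        size_t max_opps = 8;

        /* Count first, element size second - same convention as kcalloc(). */
        struct freq_entry *table = calloc(max_opps + 1, sizeof(*table));

        free(table);
        return 0;
}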
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index e396ad3f8f3f..0668b389c516 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -708,10 +708,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num) | |||
708 | 708 | ||
709 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) | 709 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) |
710 | { | 710 | { |
711 | struct cpudata *cpu; | ||
712 | |||
713 | cpu = all_cpu_data[policy->cpu]; | ||
714 | |||
715 | if (!policy->cpuinfo.max_freq) | 711 | if (!policy->cpuinfo.max_freq) |
716 | return -ENODEV; | 712 | return -ENODEV; |
717 | 713 | ||
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index 6a9d89c93b1f..ae2ab14e64b3 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c | |||
@@ -362,8 +362,9 @@ static void jz4740_dma_chan_irq(struct jz4740_dmaengine_chan *chan) | |||
362 | vchan_cyclic_callback(&chan->desc->vdesc); | 362 | vchan_cyclic_callback(&chan->desc->vdesc); |
363 | } else { | 363 | } else { |
364 | if (chan->next_sg == chan->desc->num_sgs) { | 364 | if (chan->next_sg == chan->desc->num_sgs) { |
365 | chan->desc = NULL; | 365 | list_del(&chan->desc->vdesc.node); |
366 | vchan_cookie_complete(&chan->desc->vdesc); | 366 | vchan_cookie_complete(&chan->desc->vdesc); |
367 | chan->desc = NULL; | ||
367 | } | 368 | } |
368 | } | 369 | } |
369 | } | 370 | } |
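The jz4740 hunk above matters because the old code set chan->desc to NULL and then immediately dereferenced it through chan->desc->vdesc; the fix removes the descriptor from its list and completes it first, clearing the pointer only afterwards. A stripped-down reproduction of the ordering bug (hypothetical structs, not the dmaengine types):

#include <stdio.h>
#include <stdlib.h>

struct desc {
        int cookie;
};

struct chan {
        struct desc *desc;
};

static void complete(struct desc *d)
{
        printf("completed cookie %d\n", d->cookie);
}

int main(void)
{
        struct chan ch = { .desc = malloc(sizeof(*ch.desc)) };

        if (!ch.desc)
                return 1;
        ch.desc->cookie = 42;

        /*
         * Buggy order (what the old code did; crashes on a NULL dereference):
         *      ch.desc = NULL;
         *      complete(ch.desc);
         */

        /* Fixed order: finish with the descriptor, then clear the pointer. */
        complete(ch.desc);
        free(ch.desc);
        ch.desc = NULL;
        return 0;
}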
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c index 6557147d9331..7e4c43c18960 100644 --- a/drivers/gpio/gpio-bt8xx.c +++ b/drivers/gpio/gpio-bt8xx.c | |||
@@ -241,9 +241,6 @@ static void bt8xxgpio_remove(struct pci_dev *pdev) | |||
241 | bgwrite(~0x0, BT848_INT_STAT); | 241 | bgwrite(~0x0, BT848_INT_STAT); |
242 | bgwrite(0x0, BT848_GPIO_OUT_EN); | 242 | bgwrite(0x0, BT848_GPIO_OUT_EN); |
243 | 243 | ||
244 | iounmap(bg->mmio); | ||
245 | release_mem_region(pci_resource_start(pdev, 0), | ||
246 | pci_resource_len(pdev, 0)); | ||
247 | pci_disable_device(pdev); | 244 | pci_disable_device(pdev); |
248 | } | 245 | } |
249 | 246 | ||
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index a2cc6be97983..b792194e0d9c 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c | |||
@@ -67,6 +67,7 @@ static int ast_detect_chip(struct drm_device *dev) | |||
67 | { | 67 | { |
68 | struct ast_private *ast = dev->dev_private; | 68 | struct ast_private *ast = dev->dev_private; |
69 | uint32_t data, jreg; | 69 | uint32_t data, jreg; |
70 | ast_open_key(ast); | ||
70 | 71 | ||
71 | if (dev->pdev->device == PCI_CHIP_AST1180) { | 72 | if (dev->pdev->device == PCI_CHIP_AST1180) { |
72 | ast->chip = AST1100; | 73 | ast->chip = AST1100; |
@@ -104,7 +105,7 @@ static int ast_detect_chip(struct drm_device *dev) | |||
104 | } | 105 | } |
105 | ast->vga2_clone = false; | 106 | ast->vga2_clone = false; |
106 | } else { | 107 | } else { |
107 | ast->chip = 2000; | 108 | ast->chip = AST2000; |
108 | DRM_INFO("AST 2000 detected\n"); | 109 | DRM_INFO("AST 2000 detected\n"); |
109 | } | 110 | } |
110 | } | 111 | } |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 2e7f03ad5ee2..9933c26017ed 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1336,12 +1336,17 @@ static int i915_load_modeset_init(struct drm_device *dev) | |||
1336 | 1336 | ||
1337 | intel_power_domains_init_hw(dev_priv); | 1337 | intel_power_domains_init_hw(dev_priv); |
1338 | 1338 | ||
1339 | /* | ||
1340 | * We enable some interrupt sources in our postinstall hooks, so mark | ||
1341 | * interrupts as enabled _before_ actually enabling them to avoid | ||
1342 | * special cases in our ordering checks. | ||
1343 | */ | ||
1344 | dev_priv->pm._irqs_disabled = false; | ||
1345 | |||
1339 | ret = drm_irq_install(dev, dev->pdev->irq); | 1346 | ret = drm_irq_install(dev, dev->pdev->irq); |
1340 | if (ret) | 1347 | if (ret) |
1341 | goto cleanup_gem_stolen; | 1348 | goto cleanup_gem_stolen; |
1342 | 1349 | ||
1343 | dev_priv->pm._irqs_disabled = false; | ||
1344 | |||
1345 | /* Important: The output setup functions called by modeset_init need | 1350 | /* Important: The output setup functions called by modeset_init need |
1346 | * working irqs for e.g. gmbus and dp aux transfers. */ | 1351 | * working irqs for e.g. gmbus and dp aux transfers. */ |
1347 | intel_modeset_init(dev); | 1352 | intel_modeset_init(dev); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 7a830eac5ba3..3524306d8cfb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -184,6 +184,7 @@ enum hpd_pin { | |||
184 | if ((1 << (domain)) & (mask)) | 184 | if ((1 << (domain)) & (mask)) |
185 | 185 | ||
186 | struct drm_i915_private; | 186 | struct drm_i915_private; |
187 | struct i915_mm_struct; | ||
187 | struct i915_mmu_object; | 188 | struct i915_mmu_object; |
188 | 189 | ||
189 | enum intel_dpll_id { | 190 | enum intel_dpll_id { |
@@ -1506,9 +1507,8 @@ struct drm_i915_private { | |||
1506 | struct i915_gtt gtt; /* VM representing the global address space */ | 1507 | struct i915_gtt gtt; /* VM representing the global address space */ |
1507 | 1508 | ||
1508 | struct i915_gem_mm mm; | 1509 | struct i915_gem_mm mm; |
1509 | #if defined(CONFIG_MMU_NOTIFIER) | 1510 | DECLARE_HASHTABLE(mm_structs, 7); |
1510 | DECLARE_HASHTABLE(mmu_notifiers, 7); | 1511 | struct mutex mm_lock; |
1511 | #endif | ||
1512 | 1512 | ||
1513 | /* Kernel Modesetting */ | 1513 | /* Kernel Modesetting */ |
1514 | 1514 | ||
@@ -1814,8 +1814,8 @@ struct drm_i915_gem_object { | |||
1814 | unsigned workers :4; | 1814 | unsigned workers :4; |
1815 | #define I915_GEM_USERPTR_MAX_WORKERS 15 | 1815 | #define I915_GEM_USERPTR_MAX_WORKERS 15 |
1816 | 1816 | ||
1817 | struct mm_struct *mm; | 1817 | struct i915_mm_struct *mm; |
1818 | struct i915_mmu_object *mn; | 1818 | struct i915_mmu_object *mmu_object; |
1819 | struct work_struct *work; | 1819 | struct work_struct *work; |
1820 | } userptr; | 1820 | } userptr; |
1821 | }; | 1821 | }; |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index ba7f5c6bb50d..ad55b06a3cb1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1590,10 +1590,13 @@ unlock: | |||
1590 | out: | 1590 | out: |
1591 | switch (ret) { | 1591 | switch (ret) { |
1592 | case -EIO: | 1592 | case -EIO: |
1593 | /* If this -EIO is due to a gpu hang, give the reset code a | 1593 | /* |
1594 | * chance to clean up the mess. Otherwise return the proper | 1594 | * We eat errors when the gpu is terminally wedged to avoid |
1595 | * SIGBUS. */ | 1595 | * userspace unduly crashing (gl has no provisions for mmaps to |
1596 | if (i915_terminally_wedged(&dev_priv->gpu_error)) { | 1596 | * fail). But any other -EIO isn't ours (e.g. swap in failure) |
1597 | * and so needs to be reported. | ||
1598 | */ | ||
1599 | if (!i915_terminally_wedged(&dev_priv->gpu_error)) { | ||
1597 | ret = VM_FAULT_SIGBUS; | 1600 | ret = VM_FAULT_SIGBUS; |
1598 | break; | 1601 | break; |
1599 | } | 1602 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c index fe69fc837d9e..d38413997379 100644 --- a/drivers/gpu/drm/i915/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/i915_gem_userptr.c | |||
@@ -32,6 +32,15 @@ | |||
32 | #include <linux/mempolicy.h> | 32 | #include <linux/mempolicy.h> |
33 | #include <linux/swap.h> | 33 | #include <linux/swap.h> |
34 | 34 | ||
35 | struct i915_mm_struct { | ||
36 | struct mm_struct *mm; | ||
37 | struct drm_device *dev; | ||
38 | struct i915_mmu_notifier *mn; | ||
39 | struct hlist_node node; | ||
40 | struct kref kref; | ||
41 | struct work_struct work; | ||
42 | }; | ||
43 | |||
35 | #if defined(CONFIG_MMU_NOTIFIER) | 44 | #if defined(CONFIG_MMU_NOTIFIER) |
36 | #include <linux/interval_tree.h> | 45 | #include <linux/interval_tree.h> |
37 | 46 | ||
@@ -41,16 +50,12 @@ struct i915_mmu_notifier { | |||
41 | struct mmu_notifier mn; | 50 | struct mmu_notifier mn; |
42 | struct rb_root objects; | 51 | struct rb_root objects; |
43 | struct list_head linear; | 52 | struct list_head linear; |
44 | struct drm_device *dev; | ||
45 | struct mm_struct *mm; | ||
46 | struct work_struct work; | ||
47 | unsigned long count; | ||
48 | unsigned long serial; | 53 | unsigned long serial; |
49 | bool has_linear; | 54 | bool has_linear; |
50 | }; | 55 | }; |
51 | 56 | ||
52 | struct i915_mmu_object { | 57 | struct i915_mmu_object { |
53 | struct i915_mmu_notifier *mmu; | 58 | struct i915_mmu_notifier *mn; |
54 | struct interval_tree_node it; | 59 | struct interval_tree_node it; |
55 | struct list_head link; | 60 | struct list_head link; |
56 | struct drm_i915_gem_object *obj; | 61 | struct drm_i915_gem_object *obj; |
@@ -96,18 +101,18 @@ static void *invalidate_range__linear(struct i915_mmu_notifier *mn, | |||
96 | unsigned long start, | 101 | unsigned long start, |
97 | unsigned long end) | 102 | unsigned long end) |
98 | { | 103 | { |
99 | struct i915_mmu_object *mmu; | 104 | struct i915_mmu_object *mo; |
100 | unsigned long serial; | 105 | unsigned long serial; |
101 | 106 | ||
102 | restart: | 107 | restart: |
103 | serial = mn->serial; | 108 | serial = mn->serial; |
104 | list_for_each_entry(mmu, &mn->linear, link) { | 109 | list_for_each_entry(mo, &mn->linear, link) { |
105 | struct drm_i915_gem_object *obj; | 110 | struct drm_i915_gem_object *obj; |
106 | 111 | ||
107 | if (mmu->it.last < start || mmu->it.start > end) | 112 | if (mo->it.last < start || mo->it.start > end) |
108 | continue; | 113 | continue; |
109 | 114 | ||
110 | obj = mmu->obj; | 115 | obj = mo->obj; |
111 | drm_gem_object_reference(&obj->base); | 116 | drm_gem_object_reference(&obj->base); |
112 | spin_unlock(&mn->lock); | 117 | spin_unlock(&mn->lock); |
113 | 118 | ||
@@ -160,130 +165,47 @@ static const struct mmu_notifier_ops i915_gem_userptr_notifier = { | |||
160 | }; | 165 | }; |
161 | 166 | ||
162 | static struct i915_mmu_notifier * | 167 | static struct i915_mmu_notifier * |
163 | __i915_mmu_notifier_lookup(struct drm_device *dev, struct mm_struct *mm) | 168 | i915_mmu_notifier_create(struct mm_struct *mm) |
164 | { | ||
165 | struct drm_i915_private *dev_priv = to_i915(dev); | ||
166 | struct i915_mmu_notifier *mmu; | ||
167 | |||
168 | /* Protected by dev->struct_mutex */ | ||
169 | hash_for_each_possible(dev_priv->mmu_notifiers, mmu, node, (unsigned long)mm) | ||
170 | if (mmu->mm == mm) | ||
171 | return mmu; | ||
172 | |||
173 | return NULL; | ||
174 | } | ||
175 | |||
176 | static struct i915_mmu_notifier * | ||
177 | i915_mmu_notifier_get(struct drm_device *dev, struct mm_struct *mm) | ||
178 | { | 169 | { |
179 | struct drm_i915_private *dev_priv = to_i915(dev); | 170 | struct i915_mmu_notifier *mn; |
180 | struct i915_mmu_notifier *mmu; | ||
181 | int ret; | 171 | int ret; |
182 | 172 | ||
183 | lockdep_assert_held(&dev->struct_mutex); | 173 | mn = kmalloc(sizeof(*mn), GFP_KERNEL); |
184 | 174 | if (mn == NULL) | |
185 | mmu = __i915_mmu_notifier_lookup(dev, mm); | ||
186 | if (mmu) | ||
187 | return mmu; | ||
188 | |||
189 | mmu = kmalloc(sizeof(*mmu), GFP_KERNEL); | ||
190 | if (mmu == NULL) | ||
191 | return ERR_PTR(-ENOMEM); | 175 | return ERR_PTR(-ENOMEM); |
192 | 176 | ||
193 | spin_lock_init(&mmu->lock); | 177 | spin_lock_init(&mn->lock); |
194 | mmu->dev = dev; | 178 | mn->mn.ops = &i915_gem_userptr_notifier; |
195 | mmu->mn.ops = &i915_gem_userptr_notifier; | 179 | mn->objects = RB_ROOT; |
196 | mmu->mm = mm; | 180 | mn->serial = 1; |
197 | mmu->objects = RB_ROOT; | 181 | INIT_LIST_HEAD(&mn->linear); |
198 | mmu->count = 0; | 182 | mn->has_linear = false; |
199 | mmu->serial = 1; | 183 | |
200 | INIT_LIST_HEAD(&mmu->linear); | 184 | /* Protected by mmap_sem (write-lock) */ |
201 | mmu->has_linear = false; | 185 | ret = __mmu_notifier_register(&mn->mn, mm); |
202 | |||
203 | /* Protected by mmap_sem (write-lock) */ | ||
204 | ret = __mmu_notifier_register(&mmu->mn, mm); | ||
205 | if (ret) { | 186 | if (ret) { |
206 | kfree(mmu); | 187 | kfree(mn); |
207 | return ERR_PTR(ret); | 188 | return ERR_PTR(ret); |
208 | } | 189 | } |
209 | 190 | ||
210 | /* Protected by dev->struct_mutex */ | 191 | return mn; |
211 | hash_add(dev_priv->mmu_notifiers, &mmu->node, (unsigned long)mm); | ||
212 | return mmu; | ||
213 | } | 192 | } |
214 | 193 | ||
215 | static void | 194 | static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn) |
216 | __i915_mmu_notifier_destroy_worker(struct work_struct *work) | ||
217 | { | 195 | { |
218 | struct i915_mmu_notifier *mmu = container_of(work, typeof(*mmu), work); | 196 | if (++mn->serial == 0) |
219 | mmu_notifier_unregister(&mmu->mn, mmu->mm); | 197 | mn->serial = 1; |
220 | kfree(mmu); | ||
221 | } | ||
222 | |||
223 | static void | ||
224 | __i915_mmu_notifier_destroy(struct i915_mmu_notifier *mmu) | ||
225 | { | ||
226 | lockdep_assert_held(&mmu->dev->struct_mutex); | ||
227 | |||
228 | /* Protected by dev->struct_mutex */ | ||
229 | hash_del(&mmu->node); | ||
230 | |||
231 | * Our lock ordering is: mmap_sem, mmu_notifier_srcu, struct_mutex. | ||
232 | * We enter the function holding struct_mutex, therefore we need | ||
233 | * to drop our mutex prior to calling mmu_notifier_unregister in | ||
234 | * order to prevent lock inversion (and system-wide deadlock) | ||
235 | * between the mmap_sem and struct-mutex. Hence we defer the | ||
236 | * unregistration to a workqueue where we hold no locks. | ||
237 | */ | ||
238 | INIT_WORK(&mmu->work, __i915_mmu_notifier_destroy_worker); | ||
239 | schedule_work(&mmu->work); | ||
240 | } | ||
241 | |||
242 | static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mmu) | ||
243 | { | ||
244 | if (++mmu->serial == 0) | ||
245 | mmu->serial = 1; | ||
246 | } | ||
247 | |||
248 | static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mmu) | ||
249 | { | ||
250 | struct i915_mmu_object *mn; | ||
251 | |||
252 | list_for_each_entry(mn, &mmu->linear, link) | ||
253 | if (mn->is_linear) | ||
254 | return true; | ||
255 | |||
256 | return false; | ||
257 | } | ||
258 | |||
259 | static void | ||
260 | i915_mmu_notifier_del(struct i915_mmu_notifier *mmu, | ||
261 | struct i915_mmu_object *mn) | ||
262 | { | ||
263 | lockdep_assert_held(&mmu->dev->struct_mutex); | ||
264 | |||
265 | spin_lock(&mmu->lock); | ||
266 | list_del(&mn->link); | ||
267 | if (mn->is_linear) | ||
268 | mmu->has_linear = i915_mmu_notifier_has_linear(mmu); | ||
269 | else | ||
270 | interval_tree_remove(&mn->it, &mmu->objects); | ||
271 | __i915_mmu_notifier_update_serial(mmu); | ||
272 | spin_unlock(&mmu->lock); | ||
273 | |||
274 | /* Protected against _add() by dev->struct_mutex */ | ||
275 | if (--mmu->count == 0) | ||
276 | __i915_mmu_notifier_destroy(mmu); | ||
277 | } | 198 | } |
278 | 199 | ||
279 | static int | 200 | static int |
280 | i915_mmu_notifier_add(struct i915_mmu_notifier *mmu, | 201 | i915_mmu_notifier_add(struct drm_device *dev, |
281 | struct i915_mmu_object *mn) | 202 | struct i915_mmu_notifier *mn, |
203 | struct i915_mmu_object *mo) | ||
282 | { | 204 | { |
283 | struct interval_tree_node *it; | 205 | struct interval_tree_node *it; |
284 | int ret; | 206 | int ret; |
285 | 207 | ||
286 | ret = i915_mutex_lock_interruptible(mmu->dev); | 208 | ret = i915_mutex_lock_interruptible(dev); |
287 | if (ret) | 209 | if (ret) |
288 | return ret; | 210 | return ret; |
289 | 211 | ||
@@ -291,11 +213,11 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu, | |||
291 | * remove the objects from the interval tree) before we do | 213 | * remove the objects from the interval tree) before we do |
292 | * the check for overlapping objects. | 214 | * the check for overlapping objects. |
293 | */ | 215 | */ |
294 | i915_gem_retire_requests(mmu->dev); | 216 | i915_gem_retire_requests(dev); |
295 | 217 | ||
296 | spin_lock(&mmu->lock); | 218 | spin_lock(&mn->lock); |
297 | it = interval_tree_iter_first(&mmu->objects, | 219 | it = interval_tree_iter_first(&mn->objects, |
298 | mn->it.start, mn->it.last); | 220 | mo->it.start, mo->it.last); |
299 | if (it) { | 221 | if (it) { |
300 | struct drm_i915_gem_object *obj; | 222 | struct drm_i915_gem_object *obj; |
301 | 223 | ||
@@ -312,86 +234,122 @@ i915_mmu_notifier_add(struct i915_mmu_notifier *mmu, | |||
312 | 234 | ||
313 | obj = container_of(it, struct i915_mmu_object, it)->obj; | 235 | obj = container_of(it, struct i915_mmu_object, it)->obj; |
314 | if (!obj->userptr.workers) | 236 | if (!obj->userptr.workers) |
315 | mmu->has_linear = mn->is_linear = true; | 237 | mn->has_linear = mo->is_linear = true; |
316 | else | 238 | else |
317 | ret = -EAGAIN; | 239 | ret = -EAGAIN; |
318 | } else | 240 | } else |
319 | interval_tree_insert(&mn->it, &mmu->objects); | 241 | interval_tree_insert(&mo->it, &mn->objects); |
320 | 242 | ||
321 | if (ret == 0) { | 243 | if (ret == 0) { |
322 | list_add(&mn->link, &mmu->linear); | 244 | list_add(&mo->link, &mn->linear); |
323 | __i915_mmu_notifier_update_serial(mmu); | 245 | __i915_mmu_notifier_update_serial(mn); |
324 | } | 246 | } |
325 | spin_unlock(&mmu->lock); | 247 | spin_unlock(&mn->lock); |
326 | mutex_unlock(&mmu->dev->struct_mutex); | 248 | mutex_unlock(&dev->struct_mutex); |
327 | 249 | ||
328 | return ret; | 250 | return ret; |
329 | } | 251 | } |
330 | 252 | ||
253 | static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn) | ||
254 | { | ||
255 | struct i915_mmu_object *mo; | ||
256 | |||
257 | list_for_each_entry(mo, &mn->linear, link) | ||
258 | if (mo->is_linear) | ||
259 | return true; | ||
260 | |||
261 | return false; | ||
262 | } | ||
263 | |||
264 | static void | ||
265 | i915_mmu_notifier_del(struct i915_mmu_notifier *mn, | ||
266 | struct i915_mmu_object *mo) | ||
267 | { | ||
268 | spin_lock(&mn->lock); | ||
269 | list_del(&mo->link); | ||
270 | if (mo->is_linear) | ||
271 | mn->has_linear = i915_mmu_notifier_has_linear(mn); | ||
272 | else | ||
273 | interval_tree_remove(&mo->it, &mn->objects); | ||
274 | __i915_mmu_notifier_update_serial(mn); | ||
275 | spin_unlock(&mn->lock); | ||
276 | } | ||
277 | |||
331 | static void | 278 | static void |
332 | i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) | 279 | i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj) |
333 | { | 280 | { |
334 | struct i915_mmu_object *mn; | 281 | struct i915_mmu_object *mo; |
335 | 282 | ||
336 | mn = obj->userptr.mn; | 283 | mo = obj->userptr.mmu_object; |
337 | if (mn == NULL) | 284 | if (mo == NULL) |
338 | return; | 285 | return; |
339 | 286 | ||
340 | i915_mmu_notifier_del(mn->mmu, mn); | 287 | i915_mmu_notifier_del(mo->mn, mo); |
341 | obj->userptr.mn = NULL; | 288 | kfree(mo); |
289 | |||
290 | obj->userptr.mmu_object = NULL; | ||
291 | } | ||
292 | |||
293 | static struct i915_mmu_notifier * | ||
294 | i915_mmu_notifier_find(struct i915_mm_struct *mm) | ||
295 | { | ||
296 | if (mm->mn == NULL) { | ||
297 | down_write(&mm->mm->mmap_sem); | ||
298 | mutex_lock(&to_i915(mm->dev)->mm_lock); | ||
299 | if (mm->mn == NULL) | ||
300 | mm->mn = i915_mmu_notifier_create(mm->mm); | ||
301 | mutex_unlock(&to_i915(mm->dev)->mm_lock); | ||
302 | up_write(&mm->mm->mmap_sem); | ||
303 | } | ||
304 | return mm->mn; | ||
342 | } | 305 | } |
343 | 306 | ||
344 | static int | 307 | static int |
345 | i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, | 308 | i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, |
346 | unsigned flags) | 309 | unsigned flags) |
347 | { | 310 | { |
348 | struct i915_mmu_notifier *mmu; | 311 | struct i915_mmu_notifier *mn; |
349 | struct i915_mmu_object *mn; | 312 | struct i915_mmu_object *mo; |
350 | int ret; | 313 | int ret; |
351 | 314 | ||
352 | if (flags & I915_USERPTR_UNSYNCHRONIZED) | 315 | if (flags & I915_USERPTR_UNSYNCHRONIZED) |
353 | return capable(CAP_SYS_ADMIN) ? 0 : -EPERM; | 316 | return capable(CAP_SYS_ADMIN) ? 0 : -EPERM; |
354 | 317 | ||
355 | down_write(&obj->userptr.mm->mmap_sem); | 318 | if (WARN_ON(obj->userptr.mm == NULL)) |
356 | ret = i915_mutex_lock_interruptible(obj->base.dev); | 319 | return -EINVAL; |
357 | if (ret == 0) { | ||
358 | mmu = i915_mmu_notifier_get(obj->base.dev, obj->userptr.mm); | ||
359 | if (!IS_ERR(mmu)) | ||
360 | mmu->count++; /* preemptive add to act as a refcount */ | ||
361 | else | ||
362 | ret = PTR_ERR(mmu); | ||
363 | mutex_unlock(&obj->base.dev->struct_mutex); | ||
364 | } | ||
365 | up_write(&obj->userptr.mm->mmap_sem); | ||
366 | if (ret) | ||
367 | return ret; | ||
368 | 320 | ||
369 | mn = kzalloc(sizeof(*mn), GFP_KERNEL); | 321 | mn = i915_mmu_notifier_find(obj->userptr.mm); |
370 | if (mn == NULL) { | 322 | if (IS_ERR(mn)) |
371 | ret = -ENOMEM; | 323 | return PTR_ERR(mn); |
372 | goto destroy_mmu; | ||
373 | } | ||
374 | 324 | ||
375 | mn->mmu = mmu; | 325 | mo = kzalloc(sizeof(*mo), GFP_KERNEL); |
376 | mn->it.start = obj->userptr.ptr; | 326 | if (mo == NULL) |
377 | mn->it.last = mn->it.start + obj->base.size - 1; | 327 | return -ENOMEM; |
378 | mn->obj = obj; | ||
379 | 328 | ||
380 | ret = i915_mmu_notifier_add(mmu, mn); | 329 | mo->mn = mn; |
381 | if (ret) | 330 | mo->it.start = obj->userptr.ptr; |
382 | goto free_mn; | 331 | mo->it.last = mo->it.start + obj->base.size - 1; |
332 | mo->obj = obj; | ||
383 | 333 | ||
384 | obj->userptr.mn = mn; | 334 | ret = i915_mmu_notifier_add(obj->base.dev, mn, mo); |
335 | if (ret) { | ||
336 | kfree(mo); | ||
337 | return ret; | ||
338 | } | ||
339 | |||
340 | obj->userptr.mmu_object = mo; | ||
385 | return 0; | 341 | return 0; |
342 | } | ||
343 | |||
344 | static void | ||
345 | i915_mmu_notifier_free(struct i915_mmu_notifier *mn, | ||
346 | struct mm_struct *mm) | ||
347 | { | ||
348 | if (mn == NULL) | ||
349 | return; | ||
386 | 350 | ||
387 | free_mn: | 351 | mmu_notifier_unregister(&mn->mn, mm); |
388 | kfree(mn); | 352 | kfree(mn); |
389 | destroy_mmu: | ||
390 | mutex_lock(&obj->base.dev->struct_mutex); | ||
391 | if (--mmu->count == 0) | ||
392 | __i915_mmu_notifier_destroy(mmu); | ||
393 | mutex_unlock(&obj->base.dev->struct_mutex); | ||
394 | return ret; | ||
395 | } | 353 | } |
396 | 354 | ||
397 | #else | 355 | #else |
@@ -413,15 +371,114 @@ i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj, | |||
413 | 371 | ||
414 | return 0; | 372 | return 0; |
415 | } | 373 | } |
374 | |||
375 | static void | ||
376 | i915_mmu_notifier_free(struct i915_mmu_notifier *mn, | ||
377 | struct mm_struct *mm) | ||
378 | { | ||
379 | } | ||
380 | |||
416 | #endif | 381 | #endif |
417 | 382 | ||
383 | static struct i915_mm_struct * | ||
384 | __i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real) | ||
385 | { | ||
386 | struct i915_mm_struct *mm; | ||
387 | |||
388 | /* Protected by dev_priv->mm_lock */ | ||
389 | hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real) | ||
390 | if (mm->mm == real) | ||
391 | return mm; | ||
392 | |||
393 | return NULL; | ||
394 | } | ||
395 | |||
396 | static int | ||
397 | i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj) | ||
398 | { | ||
399 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); | ||
400 | struct i915_mm_struct *mm; | ||
401 | int ret = 0; | ||
402 | |||
403 | /* During release of the GEM object we hold the struct_mutex. This | ||
404 | * precludes us from calling mmput() at that time as that may be | ||
405 | * the last reference and so call exit_mmap(). exit_mmap() will | ||
406 | * attempt to reap the vma, and if we were holding a GTT mmap | ||
407 | * would then call drm_gem_vm_close() and attempt to reacquire | ||
408 | * the struct mutex. So in order to avoid that recursion, we have | ||
409 | * to defer releasing the mm reference until after we drop the | ||
410 | * struct_mutex, i.e. we need to schedule a worker to do the clean | ||
411 | * up. | ||
412 | */ | ||
413 | mutex_lock(&dev_priv->mm_lock); | ||
414 | mm = __i915_mm_struct_find(dev_priv, current->mm); | ||
415 | if (mm == NULL) { | ||
416 | mm = kmalloc(sizeof(*mm), GFP_KERNEL); | ||
417 | if (mm == NULL) { | ||
418 | ret = -ENOMEM; | ||
419 | goto out; | ||
420 | } | ||
421 | |||
422 | kref_init(&mm->kref); | ||
423 | mm->dev = obj->base.dev; | ||
424 | |||
425 | mm->mm = current->mm; | ||
426 | atomic_inc(¤t->mm->mm_count); | ||
427 | |||
428 | mm->mn = NULL; | ||
429 | |||
430 | /* Protected by dev_priv->mm_lock */ | ||
431 | hash_add(dev_priv->mm_structs, | ||
432 | &mm->node, (unsigned long)mm->mm); | ||
433 | } else | ||
434 | kref_get(&mm->kref); | ||
435 | |||
436 | obj->userptr.mm = mm; | ||
437 | out: | ||
438 | mutex_unlock(&dev_priv->mm_lock); | ||
439 | return ret; | ||
440 | } | ||
441 | |||
442 | static void | ||
443 | __i915_mm_struct_free__worker(struct work_struct *work) | ||
444 | { | ||
445 | struct i915_mm_struct *mm = container_of(work, typeof(*mm), work); | ||
446 | i915_mmu_notifier_free(mm->mn, mm->mm); | ||
447 | mmdrop(mm->mm); | ||
448 | kfree(mm); | ||
449 | } | ||
450 | |||
451 | static void | ||
452 | __i915_mm_struct_free(struct kref *kref) | ||
453 | { | ||
454 | struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref); | ||
455 | |||
456 | /* Protected by dev_priv->mm_lock */ | ||
457 | hash_del(&mm->node); | ||
458 | mutex_unlock(&to_i915(mm->dev)->mm_lock); | ||
459 | |||
460 | INIT_WORK(&mm->work, __i915_mm_struct_free__worker); | ||
461 | schedule_work(&mm->work); | ||
462 | } | ||
463 | |||
464 | static void | ||
465 | i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj) | ||
466 | { | ||
467 | if (obj->userptr.mm == NULL) | ||
468 | return; | ||
469 | |||
470 | kref_put_mutex(&obj->userptr.mm->kref, | ||
471 | __i915_mm_struct_free, | ||
472 | &to_i915(obj->base.dev)->mm_lock); | ||
473 | obj->userptr.mm = NULL; | ||
474 | } | ||
475 | |||
418 | struct get_pages_work { | 476 | struct get_pages_work { |
419 | struct work_struct work; | 477 | struct work_struct work; |
420 | struct drm_i915_gem_object *obj; | 478 | struct drm_i915_gem_object *obj; |
421 | struct task_struct *task; | 479 | struct task_struct *task; |
422 | }; | 480 | }; |
423 | 481 | ||
424 | |||
425 | #if IS_ENABLED(CONFIG_SWIOTLB) | 482 | #if IS_ENABLED(CONFIG_SWIOTLB) |
426 | #define swiotlb_active() swiotlb_nr_tbl() | 483 | #define swiotlb_active() swiotlb_nr_tbl() |
427 | #else | 484 | #else |
@@ -479,7 +536,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) | |||
479 | if (pvec == NULL) | 536 | if (pvec == NULL) |
480 | pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); | 537 | pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); |
481 | if (pvec != NULL) { | 538 | if (pvec != NULL) { |
482 | struct mm_struct *mm = obj->userptr.mm; | 539 | struct mm_struct *mm = obj->userptr.mm->mm; |
483 | 540 | ||
484 | down_read(&mm->mmap_sem); | 541 | down_read(&mm->mmap_sem); |
485 | while (pinned < num_pages) { | 542 | while (pinned < num_pages) { |
@@ -545,7 +602,7 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) | |||
545 | 602 | ||
546 | pvec = NULL; | 603 | pvec = NULL; |
547 | pinned = 0; | 604 | pinned = 0; |
548 | if (obj->userptr.mm == current->mm) { | 605 | if (obj->userptr.mm->mm == current->mm) { |
549 | pvec = kmalloc(num_pages*sizeof(struct page *), | 606 | pvec = kmalloc(num_pages*sizeof(struct page *), |
550 | GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); | 607 | GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); |
551 | if (pvec == NULL) { | 608 | if (pvec == NULL) { |
@@ -651,17 +708,13 @@ static void | |||
651 | i915_gem_userptr_release(struct drm_i915_gem_object *obj) | 708 | i915_gem_userptr_release(struct drm_i915_gem_object *obj) |
652 | { | 709 | { |
653 | i915_gem_userptr_release__mmu_notifier(obj); | 710 | i915_gem_userptr_release__mmu_notifier(obj); |
654 | 711 | i915_gem_userptr_release__mm_struct(obj); | |
655 | if (obj->userptr.mm) { | ||
656 | mmput(obj->userptr.mm); | ||
657 | obj->userptr.mm = NULL; | ||
658 | } | ||
659 | } | 712 | } |
660 | 713 | ||
661 | static int | 714 | static int |
662 | i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) | 715 | i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj) |
663 | { | 716 | { |
664 | if (obj->userptr.mn) | 717 | if (obj->userptr.mmu_object) |
665 | return 0; | 718 | return 0; |
666 | 719 | ||
667 | return i915_gem_userptr_init__mmu_notifier(obj, 0); | 720 | return i915_gem_userptr_init__mmu_notifier(obj, 0); |
@@ -736,7 +789,6 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file | |||
736 | return -ENODEV; | 789 | return -ENODEV; |
737 | } | 790 | } |
738 | 791 | ||
739 | /* Allocate the new object */ | ||
740 | obj = i915_gem_object_alloc(dev); | 792 | obj = i915_gem_object_alloc(dev); |
741 | if (obj == NULL) | 793 | if (obj == NULL) |
742 | return -ENOMEM; | 794 | return -ENOMEM; |
@@ -754,8 +806,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file | |||
754 | * at binding. This means that we need to hook into the mmu_notifier | 806 | * at binding. This means that we need to hook into the mmu_notifier |
755 | * in order to detect if the mmu is destroyed. | 807 | * in order to detect if the mmu is destroyed. |
756 | */ | 808 | */ |
757 | ret = -ENOMEM; | 809 | ret = i915_gem_userptr_init__mm_struct(obj); |
758 | if ((obj->userptr.mm = get_task_mm(current))) | 810 | if (ret == 0) |
759 | ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags); | 811 | ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags); |
760 | if (ret == 0) | 812 | if (ret == 0) |
761 | ret = drm_gem_handle_create(file, &obj->base, &handle); | 813 | ret = drm_gem_handle_create(file, &obj->base, &handle); |
@@ -772,9 +824,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file | |||
772 | int | 824 | int |
773 | i915_gem_init_userptr(struct drm_device *dev) | 825 | i915_gem_init_userptr(struct drm_device *dev) |
774 | { | 826 | { |
775 | #if defined(CONFIG_MMU_NOTIFIER) | ||
776 | struct drm_i915_private *dev_priv = to_i915(dev); | 827 | struct drm_i915_private *dev_priv = to_i915(dev); |
777 | hash_init(dev_priv->mmu_notifiers); | 828 | mutex_init(&dev_priv->mm_lock); |
778 | #endif | 829 | hash_init(dev_priv->mm_structs); |
779 | return 0; | 830 | return 0; |
780 | } | 831 | } |
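The userptr rework above replaces the per-device notifier hash with a per-process i915_mm_struct: it is looked up or created under dev_priv->mm_lock, reference-counted with a kref, and torn down from a worker so that mmu_notifier_unregister() and mmdrop() never run under struct_mutex. A minimal sketch of that kref-plus-deferred-worker release pattern (illustrative names only, not driver code):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct demo_mm {
	struct kref kref;
	struct work_struct work;
};

static DEFINE_MUTEX(demo_lock);

static void demo_free_worker(struct work_struct *work)
{
	struct demo_mm *dm = container_of(work, typeof(*dm), work);

	/* heavy teardown (notifier unregister, mmdrop, ...) runs here, lock-free */
	kfree(dm);
}

static void demo_release(struct kref *kref)
{
	struct demo_mm *dm = container_of(kref, typeof(*dm), kref);

	/* kref_put_mutex() invokes us with demo_lock held; drop it and defer */
	mutex_unlock(&demo_lock);

	INIT_WORK(&dm->work, demo_free_worker);
	schedule_work(&dm->work);
}

static void demo_put(struct demo_mm *dm)
{
	kref_put_mutex(&dm->kref, demo_release, &demo_lock);
}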
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e4d7607da2c4..f29b44c86a2f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -334,16 +334,20 @@ | |||
334 | #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) | 334 | #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) |
335 | #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) | 335 | #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) |
336 | #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) | 336 | #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) |
337 | #define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) | 337 | |
338 | #define COLOR_BLT_CMD (2<<29 | 0x40<<22 | (5-2)) | ||
339 | #define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) | ||
338 | #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) | 340 | #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) |
339 | #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) | 341 | #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) |
340 | #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) | 342 | #define BLT_WRITE_A (2<<20) |
341 | #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) | 343 | #define BLT_WRITE_RGB (1<<20) |
344 | #define BLT_WRITE_RGBA (BLT_WRITE_RGB | BLT_WRITE_A) | ||
342 | #define BLT_DEPTH_8 (0<<24) | 345 | #define BLT_DEPTH_8 (0<<24) |
343 | #define BLT_DEPTH_16_565 (1<<24) | 346 | #define BLT_DEPTH_16_565 (1<<24) |
344 | #define BLT_DEPTH_16_1555 (2<<24) | 347 | #define BLT_DEPTH_16_1555 (2<<24) |
345 | #define BLT_DEPTH_32 (3<<24) | 348 | #define BLT_DEPTH_32 (3<<24) |
346 | #define BLT_ROP_GXCOPY (0xcc<<16) | 349 | #define BLT_ROP_SRC_COPY (0xcc<<16) |
350 | #define BLT_ROP_COLOR_COPY (0xf0<<16) | ||
347 | #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ | 351 | #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ |
348 | #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ | 352 | #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ |
349 | #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) | 353 | #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) |
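The blitter opcode and write-mask/ROP bits added above are OR'ed into single command dwords by the ring code later in this merge (see i830_dispatch_execbuffer() below). A purely illustrative packing helper, assuming the #defines from this hunk and u32 from <linux/types.h>, not something that exists in the driver:

#include <linux/types.h>

/* Illustrative only: compose the CMD and BR13 dwords; 'pitch' is the
 * destination pitch in bytes, as in the 4096 used by the ring code below.
 */
static inline void demo_blt_dwords(u32 pitch, u32 *cmd, u32 *br13)
{
	*cmd  = COLOR_BLT_CMD | BLT_WRITE_RGBA;
	*br13 = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | pitch;
}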
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index a66955037e4e..eee79e1c3222 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1123,7 +1123,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) | |||
1123 | } | 1123 | } |
1124 | } | 1124 | } |
1125 | 1125 | ||
1126 | static int __init intel_no_opregion_vbt_callback(const struct dmi_system_id *id) | 1126 | static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id) |
1127 | { | 1127 | { |
1128 | DRM_DEBUG_KMS("Falling back to manually reading VBT from " | 1128 | DRM_DEBUG_KMS("Falling back to manually reading VBT from " |
1129 | "VBIOS ROM for %s\n", | 1129 | "VBIOS ROM for %s\n", |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index e8abfce40976..9212e6504e0f 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -804,7 +804,7 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { | |||
804 | .destroy = intel_encoder_destroy, | 804 | .destroy = intel_encoder_destroy, |
805 | }; | 805 | }; |
806 | 806 | ||
807 | static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id) | 807 | static int intel_no_crt_dmi_callback(const struct dmi_system_id *id) |
808 | { | 808 | { |
809 | DRM_INFO("Skipping CRT initialization for %s\n", id->ident); | 809 | DRM_INFO("Skipping CRT initialization for %s\n", id->ident); |
810 | return 1; | 810 | return 1; |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d074d704f458..d8324c69fa86 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -2233,6 +2233,15 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
2233 | if (need_vtd_wa(dev) && alignment < 256 * 1024) | 2233 | if (need_vtd_wa(dev) && alignment < 256 * 1024) |
2234 | alignment = 256 * 1024; | 2234 | alignment = 256 * 1024; |
2235 | 2235 | ||
2236 | /* | ||
2237 | * Global gtt pte registers are special registers which actually forward | ||
2238 | * writes to a chunk of system memory. Which means that there is no risk | ||
2239 | * that the register values disappear as soon as we call | ||
2240 | * intel_runtime_pm_put(), so it is correct to wrap only the | ||
2241 | * pin/unpin/fence and not more. | ||
2242 | */ | ||
2243 | intel_runtime_pm_get(dev_priv); | ||
2244 | |||
2236 | dev_priv->mm.interruptible = false; | 2245 | dev_priv->mm.interruptible = false; |
2237 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); | 2246 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); |
2238 | if (ret) | 2247 | if (ret) |
@@ -2250,12 +2259,14 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev, | |||
2250 | i915_gem_object_pin_fence(obj); | 2259 | i915_gem_object_pin_fence(obj); |
2251 | 2260 | ||
2252 | dev_priv->mm.interruptible = true; | 2261 | dev_priv->mm.interruptible = true; |
2262 | intel_runtime_pm_put(dev_priv); | ||
2253 | return 0; | 2263 | return 0; |
2254 | 2264 | ||
2255 | err_unpin: | 2265 | err_unpin: |
2256 | i915_gem_object_unpin_from_display_plane(obj); | 2266 | i915_gem_object_unpin_from_display_plane(obj); |
2257 | err_interruptible: | 2267 | err_interruptible: |
2258 | dev_priv->mm.interruptible = true; | 2268 | dev_priv->mm.interruptible = true; |
2269 | intel_runtime_pm_put(dev_priv); | ||
2259 | return ret; | 2270 | return ret; |
2260 | } | 2271 | } |
2261 | 2272 | ||
@@ -4188,10 +4199,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
4188 | intel_set_pch_fifo_underrun_reporting(dev, pipe, false); | 4199 | intel_set_pch_fifo_underrun_reporting(dev, pipe, false); |
4189 | 4200 | ||
4190 | intel_disable_pipe(dev_priv, pipe); | 4201 | intel_disable_pipe(dev_priv, pipe); |
4191 | |||
4192 | if (intel_crtc->config.dp_encoder_is_mst) | ||
4193 | intel_ddi_set_vc_payload_alloc(crtc, false); | ||
4194 | |||
4195 | ironlake_pfit_disable(intel_crtc); | 4202 | ironlake_pfit_disable(intel_crtc); |
4196 | 4203 | ||
4197 | for_each_encoder_on_crtc(dev, crtc, encoder) | 4204 | for_each_encoder_on_crtc(dev, crtc, encoder) |
@@ -4256,6 +4263,9 @@ static void haswell_crtc_disable(struct drm_crtc *crtc) | |||
4256 | intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); | 4263 | intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); |
4257 | intel_disable_pipe(dev_priv, pipe); | 4264 | intel_disable_pipe(dev_priv, pipe); |
4258 | 4265 | ||
4266 | if (intel_crtc->config.dp_encoder_is_mst) | ||
4267 | intel_ddi_set_vc_payload_alloc(crtc, false); | ||
4268 | |||
4259 | intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); | 4269 | intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); |
4260 | 4270 | ||
4261 | ironlake_pfit_disable(intel_crtc); | 4271 | ironlake_pfit_disable(intel_crtc); |
@@ -8240,6 +8250,15 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, | |||
8240 | goto fail_locked; | 8250 | goto fail_locked; |
8241 | } | 8251 | } |
8242 | 8252 | ||
8253 | /* | ||
8254 | * Global gtt pte registers are special registers which actually | ||
8255 | * forward writes to a chunk of system memory. Which means that | ||
8256 | * there is no risk that the register values disappear as soon | ||
8257 | * as we call intel_runtime_pm_put(), so it is correct to wrap | ||
8258 | * only the pin/unpin/fence and not more. | ||
8259 | */ | ||
8260 | intel_runtime_pm_get(dev_priv); | ||
8261 | |||
8243 | /* Note that the w/a also requires 2 PTE of padding following | 8262 | /* Note that the w/a also requires 2 PTE of padding following |
8244 | * the bo. We currently fill all unused PTE with the shadow | 8263 | * the bo. We currently fill all unused PTE with the shadow |
8245 | * page and so we should always have valid PTE following the | 8264 | * page and so we should always have valid PTE following the |
@@ -8252,16 +8271,20 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, | |||
8252 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); | 8271 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, NULL); |
8253 | if (ret) { | 8272 | if (ret) { |
8254 | DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n"); | 8273 | DRM_DEBUG_KMS("failed to move cursor bo into the GTT\n"); |
8274 | intel_runtime_pm_put(dev_priv); | ||
8255 | goto fail_locked; | 8275 | goto fail_locked; |
8256 | } | 8276 | } |
8257 | 8277 | ||
8258 | ret = i915_gem_object_put_fence(obj); | 8278 | ret = i915_gem_object_put_fence(obj); |
8259 | if (ret) { | 8279 | if (ret) { |
8260 | DRM_DEBUG_KMS("failed to release fence for cursor"); | 8280 | DRM_DEBUG_KMS("failed to release fence for cursor"); |
8281 | intel_runtime_pm_put(dev_priv); | ||
8261 | goto fail_unpin; | 8282 | goto fail_unpin; |
8262 | } | 8283 | } |
8263 | 8284 | ||
8264 | addr = i915_gem_obj_ggtt_offset(obj); | 8285 | addr = i915_gem_obj_ggtt_offset(obj); |
8286 | |||
8287 | intel_runtime_pm_put(dev_priv); | ||
8265 | } else { | 8288 | } else { |
8266 | int align = IS_I830(dev) ? 16 * 1024 : 256; | 8289 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
8267 | ret = i915_gem_object_attach_phys(obj, align); | 8290 | ret = i915_gem_object_attach_phys(obj, align); |
@@ -12481,6 +12504,9 @@ static struct intel_quirk intel_quirks[] = { | |||
12481 | /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ | 12504 | /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */ |
12482 | { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, | 12505 | { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present }, |
12483 | 12506 | ||
12507 | /* Acer C720 Chromebook (Core i3 4005U) */ | ||
12508 | { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, | ||
12509 | |||
12484 | /* Toshiba CB35 Chromebook (Celeron 2955U) */ | 12510 | /* Toshiba CB35 Chromebook (Celeron 2955U) */ |
12485 | { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, | 12511 | { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, |
12486 | 12512 | ||
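The intel_display.c hunks above bracket the pin/fence work with intel_runtime_pm_get()/intel_runtime_pm_put() so the wakeref is released on every exit path, including the error ones. A hedged sketch of that bracketing shape; do_pin(), do_fence() and do_unpin() are stand-in stubs rather than i915 functions, and i915_drv.h is assumed for the runtime-PM declarations:

/* Stand-in stubs so the sketch is self-contained. */
static int do_pin(struct drm_i915_private *dev_priv)    { return 0; }
static int do_fence(struct drm_i915_private *dev_priv)  { return 0; }
static void do_unpin(struct drm_i915_private *dev_priv) { }

static int pin_and_fence_sketch(struct drm_i915_private *dev_priv)
{
	int ret;

	intel_runtime_pm_get(dev_priv);

	ret = do_pin(dev_priv);
	if (ret)
		goto out;

	ret = do_fence(dev_priv);
	if (ret)
		do_unpin(dev_priv);

out:
	intel_runtime_pm_put(dev_priv);	/* dropped on success and on every error path */
	return ret;
}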
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 67cfed6d911a..81d7681faa63 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -3661,24 +3661,12 @@ ironlake_dp_detect(struct intel_dp *intel_dp) | |||
3661 | return intel_dp_detect_dpcd(intel_dp); | 3661 | return intel_dp_detect_dpcd(intel_dp); |
3662 | } | 3662 | } |
3663 | 3663 | ||
3664 | static enum drm_connector_status | 3664 | static int g4x_digital_port_connected(struct drm_device *dev, |
3665 | g4x_dp_detect(struct intel_dp *intel_dp) | 3665 | struct intel_digital_port *intel_dig_port) |
3666 | { | 3666 | { |
3667 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
3668 | struct drm_i915_private *dev_priv = dev->dev_private; | 3667 | struct drm_i915_private *dev_priv = dev->dev_private; |
3669 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
3670 | uint32_t bit; | 3668 | uint32_t bit; |
3671 | 3669 | ||
3672 | /* Can't disconnect eDP, but you can close the lid... */ | ||
3673 | if (is_edp(intel_dp)) { | ||
3674 | enum drm_connector_status status; | ||
3675 | |||
3676 | status = intel_panel_detect(dev); | ||
3677 | if (status == connector_status_unknown) | ||
3678 | status = connector_status_connected; | ||
3679 | return status; | ||
3680 | } | ||
3681 | |||
3682 | if (IS_VALLEYVIEW(dev)) { | 3670 | if (IS_VALLEYVIEW(dev)) { |
3683 | switch (intel_dig_port->port) { | 3671 | switch (intel_dig_port->port) { |
3684 | case PORT_B: | 3672 | case PORT_B: |
@@ -3691,7 +3679,7 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
3691 | bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; | 3679 | bit = PORTD_HOTPLUG_LIVE_STATUS_VLV; |
3692 | break; | 3680 | break; |
3693 | default: | 3681 | default: |
3694 | return connector_status_unknown; | 3682 | return -EINVAL; |
3695 | } | 3683 | } |
3696 | } else { | 3684 | } else { |
3697 | switch (intel_dig_port->port) { | 3685 | switch (intel_dig_port->port) { |
@@ -3705,11 +3693,36 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
3705 | bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; | 3693 | bit = PORTD_HOTPLUG_LIVE_STATUS_G4X; |
3706 | break; | 3694 | break; |
3707 | default: | 3695 | default: |
3708 | return connector_status_unknown; | 3696 | return -EINVAL; |
3709 | } | 3697 | } |
3710 | } | 3698 | } |
3711 | 3699 | ||
3712 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) | 3700 | if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0) |
3701 | return 0; | ||
3702 | return 1; | ||
3703 | } | ||
3704 | |||
3705 | static enum drm_connector_status | ||
3706 | g4x_dp_detect(struct intel_dp *intel_dp) | ||
3707 | { | ||
3708 | struct drm_device *dev = intel_dp_to_dev(intel_dp); | ||
3709 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
3710 | int ret; | ||
3711 | |||
3712 | /* Can't disconnect eDP, but you can close the lid... */ | ||
3713 | if (is_edp(intel_dp)) { | ||
3714 | enum drm_connector_status status; | ||
3715 | |||
3716 | status = intel_panel_detect(dev); | ||
3717 | if (status == connector_status_unknown) | ||
3718 | status = connector_status_connected; | ||
3719 | return status; | ||
3720 | } | ||
3721 | |||
3722 | ret = g4x_digital_port_connected(dev, intel_dig_port); | ||
3723 | if (ret == -EINVAL) | ||
3724 | return connector_status_unknown; | ||
3725 | else if (ret == 0) | ||
3713 | return connector_status_disconnected; | 3726 | return connector_status_disconnected; |
3714 | 3727 | ||
3715 | return intel_dp_detect_dpcd(intel_dp); | 3728 | return intel_dp_detect_dpcd(intel_dp); |
@@ -4066,8 +4079,14 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) | |||
4066 | intel_display_power_get(dev_priv, power_domain); | 4079 | intel_display_power_get(dev_priv, power_domain); |
4067 | 4080 | ||
4068 | if (long_hpd) { | 4081 | if (long_hpd) { |
4069 | if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) | 4082 | |
4070 | goto mst_fail; | 4083 | if (HAS_PCH_SPLIT(dev)) { |
4084 | if (!ibx_digital_port_connected(dev_priv, intel_dig_port)) | ||
4085 | goto mst_fail; | ||
4086 | } else { | ||
4087 | if (g4x_digital_port_connected(dev, intel_dig_port) != 1) | ||
4088 | goto mst_fail; | ||
4089 | } | ||
4071 | 4090 | ||
4072 | if (!intel_dp_get_dpcd(intel_dp)) { | 4091 | if (!intel_dp_get_dpcd(intel_dp)) { |
4073 | goto mst_fail; | 4092 | goto mst_fail; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 881361c0f27e..fdf40267249c 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -538,7 +538,7 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = { | |||
538 | .destroy = intel_encoder_destroy, | 538 | .destroy = intel_encoder_destroy, |
539 | }; | 539 | }; |
540 | 540 | ||
541 | static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) | 541 | static int intel_no_lvds_dmi_callback(const struct dmi_system_id *id) |
542 | { | 542 | { |
543 | DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); | 543 | DRM_INFO("Skipping LVDS initialization for %s\n", id->ident); |
544 | return 1; | 544 | return 1; |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 59b028f0b1e8..8e374449c6b5 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -801,7 +801,7 @@ static void pch_enable_backlight(struct intel_connector *connector) | |||
801 | 801 | ||
802 | cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); | 802 | cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2); |
803 | if (cpu_ctl2 & BLM_PWM_ENABLE) { | 803 | if (cpu_ctl2 & BLM_PWM_ENABLE) { |
804 | WARN(1, "cpu backlight already enabled\n"); | 804 | DRM_DEBUG_KMS("cpu backlight already enabled\n"); |
805 | cpu_ctl2 &= ~BLM_PWM_ENABLE; | 805 | cpu_ctl2 &= ~BLM_PWM_ENABLE; |
806 | I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2); | 806 | I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2); |
807 | } | 807 | } |
@@ -845,7 +845,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector) | |||
845 | 845 | ||
846 | ctl = I915_READ(BLC_PWM_CTL); | 846 | ctl = I915_READ(BLC_PWM_CTL); |
847 | if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { | 847 | if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) { |
848 | WARN(1, "backlight already enabled\n"); | 848 | DRM_DEBUG_KMS("backlight already enabled\n"); |
849 | I915_WRITE(BLC_PWM_CTL, 0); | 849 | I915_WRITE(BLC_PWM_CTL, 0); |
850 | } | 850 | } |
851 | 851 | ||
@@ -876,7 +876,7 @@ static void i965_enable_backlight(struct intel_connector *connector) | |||
876 | 876 | ||
877 | ctl2 = I915_READ(BLC_PWM_CTL2); | 877 | ctl2 = I915_READ(BLC_PWM_CTL2); |
878 | if (ctl2 & BLM_PWM_ENABLE) { | 878 | if (ctl2 & BLM_PWM_ENABLE) { |
879 | WARN(1, "backlight already enabled\n"); | 879 | DRM_DEBUG_KMS("backlight already enabled\n"); |
880 | ctl2 &= ~BLM_PWM_ENABLE; | 880 | ctl2 &= ~BLM_PWM_ENABLE; |
881 | I915_WRITE(BLC_PWM_CTL2, ctl2); | 881 | I915_WRITE(BLC_PWM_CTL2, ctl2); |
882 | } | 882 | } |
@@ -910,7 +910,7 @@ static void vlv_enable_backlight(struct intel_connector *connector) | |||
910 | 910 | ||
911 | ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); | 911 | ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); |
912 | if (ctl2 & BLM_PWM_ENABLE) { | 912 | if (ctl2 & BLM_PWM_ENABLE) { |
913 | WARN(1, "backlight already enabled\n"); | 913 | DRM_DEBUG_KMS("backlight already enabled\n"); |
914 | ctl2 &= ~BLM_PWM_ENABLE; | 914 | ctl2 &= ~BLM_PWM_ENABLE; |
915 | I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2); | 915 | I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2); |
916 | } | 916 | } |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 16371a444426..2d068edd1adc 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -1363,54 +1363,66 @@ i965_dispatch_execbuffer(struct intel_engine_cs *ring, | |||
1363 | 1363 | ||
1364 | /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ | 1364 | /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */ |
1365 | #define I830_BATCH_LIMIT (256*1024) | 1365 | #define I830_BATCH_LIMIT (256*1024) |
1366 | #define I830_TLB_ENTRIES (2) | ||
1367 | #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT) | ||
1366 | static int | 1368 | static int |
1367 | i830_dispatch_execbuffer(struct intel_engine_cs *ring, | 1369 | i830_dispatch_execbuffer(struct intel_engine_cs *ring, |
1368 | u64 offset, u32 len, | 1370 | u64 offset, u32 len, |
1369 | unsigned flags) | 1371 | unsigned flags) |
1370 | { | 1372 | { |
1373 | u32 cs_offset = ring->scratch.gtt_offset; | ||
1371 | int ret; | 1374 | int ret; |
1372 | 1375 | ||
1373 | if (flags & I915_DISPATCH_PINNED) { | 1376 | ret = intel_ring_begin(ring, 6); |
1374 | ret = intel_ring_begin(ring, 4); | 1377 | if (ret) |
1375 | if (ret) | 1378 | return ret; |
1376 | return ret; | ||
1377 | 1379 | ||
1378 | intel_ring_emit(ring, MI_BATCH_BUFFER); | 1380 | /* Evict the invalid PTE TLBs */ |
1379 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | 1381 | intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA); |
1380 | intel_ring_emit(ring, offset + len - 8); | 1382 | intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096); |
1381 | intel_ring_emit(ring, MI_NOOP); | 1383 | intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */ |
1382 | intel_ring_advance(ring); | 1384 | intel_ring_emit(ring, cs_offset); |
1383 | } else { | 1385 | intel_ring_emit(ring, 0xdeadbeef); |
1384 | u32 cs_offset = ring->scratch.gtt_offset; | 1386 | intel_ring_emit(ring, MI_NOOP); |
1387 | intel_ring_advance(ring); | ||
1385 | 1388 | ||
1389 | if ((flags & I915_DISPATCH_PINNED) == 0) { | ||
1386 | if (len > I830_BATCH_LIMIT) | 1390 | if (len > I830_BATCH_LIMIT) |
1387 | return -ENOSPC; | 1391 | return -ENOSPC; |
1388 | 1392 | ||
1389 | ret = intel_ring_begin(ring, 9+3); | 1393 | ret = intel_ring_begin(ring, 6 + 2); |
1390 | if (ret) | 1394 | if (ret) |
1391 | return ret; | 1395 | return ret; |
1392 | /* Blit the batch (which has now all relocs applied) to the stable batch | 1396 | |
1393 | * scratch bo area (so that the CS never stumbles over its tlb | 1397 | /* Blit the batch (which has now all relocs applied) to the |
1394 | * invalidation bug) ... */ | 1398 | * stable batch scratch bo area (so that the CS never |
1395 | intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD | | 1399 | * stumbles over its tlb invalidation bug) ... |
1396 | XY_SRC_COPY_BLT_WRITE_ALPHA | | 1400 | */ |
1397 | XY_SRC_COPY_BLT_WRITE_RGB); | 1401 | intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA); |
1398 | intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096); | 1402 | intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096); |
1399 | intel_ring_emit(ring, 0); | 1403 | intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 1024); |
1400 | intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024); | ||
1401 | intel_ring_emit(ring, cs_offset); | 1404 | intel_ring_emit(ring, cs_offset); |
1402 | intel_ring_emit(ring, 0); | ||
1403 | intel_ring_emit(ring, 4096); | 1405 | intel_ring_emit(ring, 4096); |
1404 | intel_ring_emit(ring, offset); | 1406 | intel_ring_emit(ring, offset); |
1407 | |||
1405 | intel_ring_emit(ring, MI_FLUSH); | 1408 | intel_ring_emit(ring, MI_FLUSH); |
1409 | intel_ring_emit(ring, MI_NOOP); | ||
1410 | intel_ring_advance(ring); | ||
1406 | 1411 | ||
1407 | /* ... and execute it. */ | 1412 | /* ... and execute it. */ |
1408 | intel_ring_emit(ring, MI_BATCH_BUFFER); | 1413 | offset = cs_offset; |
1409 | intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | ||
1410 | intel_ring_emit(ring, cs_offset + len - 8); | ||
1411 | intel_ring_advance(ring); | ||
1412 | } | 1414 | } |
1413 | 1415 | ||
1416 | ret = intel_ring_begin(ring, 4); | ||
1417 | if (ret) | ||
1418 | return ret; | ||
1419 | |||
1420 | intel_ring_emit(ring, MI_BATCH_BUFFER); | ||
1421 | intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE)); | ||
1422 | intel_ring_emit(ring, offset + len - 8); | ||
1423 | intel_ring_emit(ring, MI_NOOP); | ||
1424 | intel_ring_advance(ring); | ||
1425 | |||
1414 | return 0; | 1426 | return 0; |
1415 | } | 1427 | } |
1416 | 1428 | ||
@@ -2200,7 +2212,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
2200 | 2212 | ||
2201 | /* Workaround batchbuffer to combat CS tlb bug. */ | 2213 | /* Workaround batchbuffer to combat CS tlb bug. */ |
2202 | if (HAS_BROKEN_CS_TLB(dev)) { | 2214 | if (HAS_BROKEN_CS_TLB(dev)) { |
2203 | obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT); | 2215 | obj = i915_gem_alloc_object(dev, I830_WA_SIZE); |
2204 | if (obj == NULL) { | 2216 | if (obj == NULL) { |
2205 | DRM_ERROR("Failed to allocate batch bo\n"); | 2217 | DRM_ERROR("Failed to allocate batch bo\n"); |
2206 | return -ENOMEM; | 2218 | return -ENOMEM; |
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c index 32186a656816..c14341ca3ef9 100644 --- a/drivers/gpu/drm/i915/intel_tv.c +++ b/drivers/gpu/drm/i915/intel_tv.c | |||
@@ -854,6 +854,10 @@ intel_enable_tv(struct intel_encoder *encoder) | |||
854 | struct drm_device *dev = encoder->base.dev; | 854 | struct drm_device *dev = encoder->base.dev; |
855 | struct drm_i915_private *dev_priv = dev->dev_private; | 855 | struct drm_i915_private *dev_priv = dev->dev_private; |
856 | 856 | ||
857 | /* Prevents vblank waits from timing out in intel_tv_detect_type() */ | ||
858 | intel_wait_for_vblank(encoder->base.dev, | ||
859 | to_intel_crtc(encoder->base.crtc)->pipe); | ||
860 | |||
857 | I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); | 861 | I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE); |
858 | } | 862 | } |
859 | 863 | ||
@@ -1311,6 +1315,7 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
1311 | { | 1315 | { |
1312 | struct drm_display_mode mode; | 1316 | struct drm_display_mode mode; |
1313 | struct intel_tv *intel_tv = intel_attached_tv(connector); | 1317 | struct intel_tv *intel_tv = intel_attached_tv(connector); |
1318 | enum drm_connector_status status; | ||
1314 | int type; | 1319 | int type; |
1315 | 1320 | ||
1316 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", | 1321 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n", |
@@ -1328,16 +1333,19 @@ intel_tv_detect(struct drm_connector *connector, bool force) | |||
1328 | if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { | 1333 | if (intel_get_load_detect_pipe(connector, &mode, &tmp, &ctx)) { |
1329 | type = intel_tv_detect_type(intel_tv, connector); | 1334 | type = intel_tv_detect_type(intel_tv, connector); |
1330 | intel_release_load_detect_pipe(connector, &tmp); | 1335 | intel_release_load_detect_pipe(connector, &tmp); |
1336 | status = type < 0 ? | ||
1337 | connector_status_disconnected : | ||
1338 | connector_status_connected; | ||
1331 | } else | 1339 | } else |
1332 | return connector_status_unknown; | 1340 | status = connector_status_unknown; |
1333 | 1341 | ||
1334 | drm_modeset_drop_locks(&ctx); | 1342 | drm_modeset_drop_locks(&ctx); |
1335 | drm_modeset_acquire_fini(&ctx); | 1343 | drm_modeset_acquire_fini(&ctx); |
1336 | } else | 1344 | } else |
1337 | return connector->status; | 1345 | return connector->status; |
1338 | 1346 | ||
1339 | if (type < 0) | 1347 | if (status != connector_status_connected) |
1340 | return connector_status_disconnected; | 1348 | return status; |
1341 | 1349 | ||
1342 | intel_tv->type = type; | 1350 | intel_tv->type = type; |
1343 | intel_tv_find_better_format(connector); | 1351 | intel_tv_find_better_format(connector); |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index a125a7e32742..c6c9b02e0ada 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c | |||
@@ -258,28 +258,30 @@ static void set_hdmi_pdev(struct drm_device *dev, | |||
258 | priv->hdmi_pdev = pdev; | 258 | priv->hdmi_pdev = pdev; |
259 | } | 259 | } |
260 | 260 | ||
261 | #ifdef CONFIG_OF | ||
262 | static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) | ||
263 | { | ||
264 | int gpio = of_get_named_gpio(of_node, name, 0); | ||
265 | if (gpio < 0) { | ||
266 | char name2[32]; | ||
267 | snprintf(name2, sizeof(name2), "%s-gpio", name); | ||
268 | gpio = of_get_named_gpio(of_node, name2, 0); | ||
269 | if (gpio < 0) { | ||
270 | dev_err(dev, "failed to get gpio: %s (%d)\n", | ||
271 | name, gpio); | ||
272 | gpio = -1; | ||
273 | } | ||
274 | } | ||
275 | return gpio; | ||
276 | } | ||
277 | #endif | ||
278 | |||
261 | static int hdmi_bind(struct device *dev, struct device *master, void *data) | 279 | static int hdmi_bind(struct device *dev, struct device *master, void *data) |
262 | { | 280 | { |
263 | static struct hdmi_platform_config config = {}; | 281 | static struct hdmi_platform_config config = {}; |
264 | #ifdef CONFIG_OF | 282 | #ifdef CONFIG_OF |
265 | struct device_node *of_node = dev->of_node; | 283 | struct device_node *of_node = dev->of_node; |
266 | 284 | ||
267 | int get_gpio(const char *name) | ||
268 | { | ||
269 | int gpio = of_get_named_gpio(of_node, name, 0); | ||
270 | if (gpio < 0) { | ||
271 | char name2[32]; | ||
272 | snprintf(name2, sizeof(name2), "%s-gpio", name); | ||
273 | gpio = of_get_named_gpio(of_node, name2, 0); | ||
274 | if (gpio < 0) { | ||
275 | dev_err(dev, "failed to get gpio: %s (%d)\n", | ||
276 | name, gpio); | ||
277 | gpio = -1; | ||
278 | } | ||
279 | } | ||
280 | return gpio; | ||
281 | } | ||
282 | |||
283 | if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) { | 285 | if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8074")) { |
284 | static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; | 286 | static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; |
285 | static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; | 287 | static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; |
@@ -312,12 +314,12 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data) | |||
312 | } | 314 | } |
313 | 315 | ||
314 | config.mmio_name = "core_physical"; | 316 | config.mmio_name = "core_physical"; |
315 | config.ddc_clk_gpio = get_gpio("qcom,hdmi-tx-ddc-clk"); | 317 | config.ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); |
316 | config.ddc_data_gpio = get_gpio("qcom,hdmi-tx-ddc-data"); | 318 | config.ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); |
317 | config.hpd_gpio = get_gpio("qcom,hdmi-tx-hpd"); | 319 | config.hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); |
318 | config.mux_en_gpio = get_gpio("qcom,hdmi-tx-mux-en"); | 320 | config.mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en"); |
319 | config.mux_sel_gpio = get_gpio("qcom,hdmi-tx-mux-sel"); | 321 | config.mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); |
320 | config.mux_lpm_gpio = get_gpio("qcom,hdmi-tx-mux-lpm"); | 322 | config.mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); |
321 | 323 | ||
322 | #else | 324 | #else |
323 | static const char *hpd_clk_names[] = { | 325 | static const char *hpd_clk_names[] = { |
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c index 902d7685d441..f408b69486a8 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | |||
@@ -15,19 +15,25 @@ | |||
15 | * this program. If not, see <http://www.gnu.org/licenses/>. | 15 | * this program. If not, see <http://www.gnu.org/licenses/>. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | #ifdef CONFIG_COMMON_CLK | ||
18 | #include <linux/clk.h> | 19 | #include <linux/clk.h> |
19 | #include <linux/clk-provider.h> | 20 | #include <linux/clk-provider.h> |
21 | #endif | ||
20 | 22 | ||
21 | #include "hdmi.h" | 23 | #include "hdmi.h" |
22 | 24 | ||
23 | struct hdmi_phy_8960 { | 25 | struct hdmi_phy_8960 { |
24 | struct hdmi_phy base; | 26 | struct hdmi_phy base; |
25 | struct hdmi *hdmi; | 27 | struct hdmi *hdmi; |
28 | #ifdef CONFIG_COMMON_CLK | ||
26 | struct clk_hw pll_hw; | 29 | struct clk_hw pll_hw; |
27 | struct clk *pll; | 30 | struct clk *pll; |
28 | unsigned long pixclk; | 31 | unsigned long pixclk; |
32 | #endif | ||
29 | }; | 33 | }; |
30 | #define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base) | 34 | #define to_hdmi_phy_8960(x) container_of(x, struct hdmi_phy_8960, base) |
35 | |||
36 | #ifdef CONFIG_COMMON_CLK | ||
31 | #define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw) | 37 | #define clk_to_phy(x) container_of(x, struct hdmi_phy_8960, pll_hw) |
32 | 38 | ||
33 | /* | 39 | /* |
@@ -374,7 +380,7 @@ static struct clk_init_data pll_init = { | |||
374 | .parent_names = hdmi_pll_parents, | 380 | .parent_names = hdmi_pll_parents, |
375 | .num_parents = ARRAY_SIZE(hdmi_pll_parents), | 381 | .num_parents = ARRAY_SIZE(hdmi_pll_parents), |
376 | }; | 382 | }; |
377 | 383 | #endif | |
378 | 384 | ||
379 | /* | 385 | /* |
380 | * HDMI Phy: | 386 | * HDMI Phy: |
@@ -480,12 +486,15 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi) | |||
480 | { | 486 | { |
481 | struct hdmi_phy_8960 *phy_8960; | 487 | struct hdmi_phy_8960 *phy_8960; |
482 | struct hdmi_phy *phy = NULL; | 488 | struct hdmi_phy *phy = NULL; |
483 | int ret, i; | 489 | int ret; |
490 | #ifdef CONFIG_COMMON_CLK | ||
491 | int i; | ||
484 | 492 | ||
485 | /* sanity check: */ | 493 | /* sanity check: */ |
486 | for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++) | 494 | for (i = 0; i < (ARRAY_SIZE(freqtbl) - 1); i++) |
487 | if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate)) | 495 | if (WARN_ON(freqtbl[i].rate < freqtbl[i+1].rate)) |
488 | return ERR_PTR(-EINVAL); | 496 | return ERR_PTR(-EINVAL); |
497 | #endif | ||
489 | 498 | ||
490 | phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL); | 499 | phy_8960 = kzalloc(sizeof(*phy_8960), GFP_KERNEL); |
491 | if (!phy_8960) { | 500 | if (!phy_8960) { |
@@ -499,6 +508,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi) | |||
499 | 508 | ||
500 | phy_8960->hdmi = hdmi; | 509 | phy_8960->hdmi = hdmi; |
501 | 510 | ||
511 | #ifdef CONFIG_COMMON_CLK | ||
502 | phy_8960->pll_hw.init = &pll_init; | 512 | phy_8960->pll_hw.init = &pll_init; |
503 | phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw); | 513 | phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw); |
504 | if (IS_ERR(phy_8960->pll)) { | 514 | if (IS_ERR(phy_8960->pll)) { |
@@ -506,6 +516,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi) | |||
506 | phy_8960->pll = NULL; | 516 | phy_8960->pll = NULL; |
507 | goto fail; | 517 | goto fail; |
508 | } | 518 | } |
519 | #endif | ||
509 | 520 | ||
510 | return phy; | 521 | return phy; |
511 | 522 | ||
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 26ee80db17af..fcf95680413d 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -52,7 +52,7 @@ module_param(reglog, bool, 0600); | |||
52 | #define reglog 0 | 52 | #define reglog 0 |
53 | #endif | 53 | #endif |
54 | 54 | ||
55 | static char *vram; | 55 | static char *vram = "16m"; |
56 | MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); | 56 | MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); |
57 | module_param(vram, charp, 0); | 57 | module_param(vram, charp, 0); |
58 | 58 | ||
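msm now defaults the vram module parameter to "16m" rather than NULL, so devices without an IOMMU or GPUMMU still get a carve-out when nothing is passed on the command line. Size strings of this form are conventionally converted with memparse(); a minimal sketch of that conversion (the driver's actual parsing lives elsewhere in msm_drv.c):

#include <linux/kernel.h>

/* "16m" -> 16 * 1024 * 1024; memparse() understands the k/m/g suffixes */
static unsigned long long vram_bytes_sketch(const char *vram)
{
	return memparse(vram, NULL);
}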
diff --git a/drivers/gpu/drm/nouveau/core/core/parent.c b/drivers/gpu/drm/nouveau/core/core/parent.c index 8701968a9743..30a2911878f8 100644 --- a/drivers/gpu/drm/nouveau/core/core/parent.c +++ b/drivers/gpu/drm/nouveau/core/core/parent.c | |||
@@ -86,7 +86,7 @@ nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size) | |||
86 | sclass = nv_parent(parent)->sclass; | 86 | sclass = nv_parent(parent)->sclass; |
87 | while (sclass) { | 87 | while (sclass) { |
88 | if (++nr < size) | 88 | if (++nr < size) |
89 | lclass[nr] = sclass->oclass->handle; | 89 | lclass[nr] = sclass->oclass->handle & 0xffff; |
90 | sclass = sclass->sclass; | 90 | sclass = sclass->sclass; |
91 | } | 91 | } |
92 | 92 | ||
@@ -96,7 +96,7 @@ nouveau_parent_lclass(struct nouveau_object *parent, u32 *lclass, int size) | |||
96 | if (engine && (oclass = engine->sclass)) { | 96 | if (engine && (oclass = engine->sclass)) { |
97 | while (oclass->ofuncs) { | 97 | while (oclass->ofuncs) { |
98 | if (++nr < size) | 98 | if (++nr < size) |
99 | lclass[nr] = oclass->handle; | 99 | lclass[nr] = oclass->handle & 0xffff; |
100 | oclass++; | 100 | oclass++; |
101 | } | 101 | } |
102 | } | 102 | } |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index b1e11f8434e2..ac14b67621d3 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -405,16 +405,13 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector) | |||
405 | u8 msg[DP_DPCD_SIZE]; | 405 | u8 msg[DP_DPCD_SIZE]; |
406 | int ret; | 406 | int ret; |
407 | 407 | ||
408 | char dpcd_hex_dump[DP_DPCD_SIZE * 3]; | ||
409 | |||
410 | ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, | 408 | ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, |
411 | DP_DPCD_SIZE); | 409 | DP_DPCD_SIZE); |
412 | if (ret > 0) { | 410 | if (ret > 0) { |
413 | memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); | 411 | memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); |
414 | 412 | ||
415 | hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd), | 413 | DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd), |
416 | 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false); | 414 | dig_connector->dpcd); |
417 | DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump); | ||
418 | 415 | ||
419 | radeon_dp_probe_oui(radeon_connector); | 416 | radeon_dp_probe_oui(radeon_connector); |
420 | 417 | ||
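The radeon hunk above drops the manual hex_dump_to_buffer() round trip in favour of printk's "%*ph" extension, which prints a small buffer (up to 64 bytes) directly as hex. A minimal sketch of the same idiom:

#include <linux/printk.h>
#include <linux/types.h>

static void dump_dpcd_sketch(const u8 *dpcd, int len)
{
	/* "%*ph" consumes (length, pointer) and emits space-separated hex bytes */
	pr_debug("DPCD: %*ph\n", len, dpcd);
}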
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index e616eb5f6e7a..3cfb50056f7a 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2769,8 +2769,8 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev, | |||
2769 | radeon_ring_write(ring, lower_32_bits(addr)); | 2769 | radeon_ring_write(ring, lower_32_bits(addr)); |
2770 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); | 2770 | radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel); |
2771 | 2771 | ||
2772 | /* PFP_SYNC_ME packet only exists on 7xx+ */ | 2772 | /* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */ |
2773 | if (emit_wait && (rdev->family >= CHIP_RV770)) { | 2773 | if (emit_wait && (rdev->family >= CHIP_CEDAR)) { |
2774 | /* Prevent the PFP from running ahead of the semaphore wait */ | 2774 | /* Prevent the PFP from running ahead of the semaphore wait */ |
2775 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); | 2775 | radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); |
2776 | radeon_ring_write(ring, 0x0); | 2776 | radeon_ring_write(ring, 0x0); |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 92b2d8dd4735..e74c7e387dde 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
@@ -447,6 +447,13 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, | |||
447 | } | 447 | } |
448 | } | 448 | } |
449 | 449 | ||
450 | /* Fujitsu D3003-S2 board lists DVI-I as DVI-I and VGA */ | ||
451 | if ((dev->pdev->device == 0x9805) && | ||
452 | (dev->pdev->subsystem_vendor == 0x1734) && | ||
453 | (dev->pdev->subsystem_device == 0x11bd)) { | ||
454 | if (*connector_type == DRM_MODE_CONNECTOR_VGA) | ||
455 | return false; | ||
456 | } | ||
450 | 457 | ||
451 | return true; | 458 | return true; |
452 | } | 459 | } |
@@ -2281,19 +2288,31 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r | |||
2281 | (controller->ucFanParameters & | 2288 | (controller->ucFanParameters & |
2282 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | 2289 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); |
2283 | rdev->pm.int_thermal_type = THERMAL_TYPE_KV; | 2290 | rdev->pm.int_thermal_type = THERMAL_TYPE_KV; |
2284 | } else if ((controller->ucType == | 2291 | } else if (controller->ucType == |
2285 | ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || | 2292 | ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { |
2286 | (controller->ucType == | 2293 | DRM_INFO("External GPIO thermal controller %s fan control\n", |
2287 | ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) || | 2294 | (controller->ucFanParameters & |
2288 | (controller->ucType == | 2295 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); |
2289 | ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { | 2296 | rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; |
2290 | DRM_INFO("Special thermal controller config\n"); | 2297 | } else if (controller->ucType == |
2298 | ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { | ||
2299 | DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", | ||
2300 | (controller->ucFanParameters & | ||
2301 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2302 | rdev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; | ||
2303 | } else if (controller->ucType == | ||
2304 | ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { | ||
2305 | DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", | ||
2306 | (controller->ucFanParameters & | ||
2307 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | ||
2308 | rdev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; | ||
2291 | } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { | 2309 | } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { |
2292 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", | 2310 | DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", |
2293 | pp_lib_thermal_controller_names[controller->ucType], | 2311 | pp_lib_thermal_controller_names[controller->ucType], |
2294 | controller->ucI2cAddress >> 1, | 2312 | controller->ucI2cAddress >> 1, |
2295 | (controller->ucFanParameters & | 2313 | (controller->ucFanParameters & |
2296 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); | 2314 | ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); |
2315 | rdev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; | ||
2297 | i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); | 2316 | i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); |
2298 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); | 2317 | rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); |
2299 | if (rdev->pm.i2c_bus) { | 2318 | if (rdev->pm.i2c_bus) { |
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c index 56d9fd66d8ae..abd6753a570a 100644 --- a/drivers/gpu/drm/radeon/radeon_semaphore.c +++ b/drivers/gpu/drm/radeon/radeon_semaphore.c | |||
@@ -34,7 +34,7 @@ | |||
34 | int radeon_semaphore_create(struct radeon_device *rdev, | 34 | int radeon_semaphore_create(struct radeon_device *rdev, |
35 | struct radeon_semaphore **semaphore) | 35 | struct radeon_semaphore **semaphore) |
36 | { | 36 | { |
37 | uint32_t *cpu_addr; | 37 | uint64_t *cpu_addr; |
38 | int i, r; | 38 | int i, r; |
39 | 39 | ||
40 | *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); | 40 | *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 7bfdaa163a33..36b871686d3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -450,11 +450,11 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, | |||
450 | res, | 450 | res, |
451 | id_loc - sw_context->buf_start); | 451 | id_loc - sw_context->buf_start); |
452 | if (unlikely(ret != 0)) | 452 | if (unlikely(ret != 0)) |
453 | goto out_err; | 453 | return ret; |
454 | 454 | ||
455 | ret = vmw_resource_val_add(sw_context, res, &node); | 455 | ret = vmw_resource_val_add(sw_context, res, &node); |
456 | if (unlikely(ret != 0)) | 456 | if (unlikely(ret != 0)) |
457 | goto out_err; | 457 | return ret; |
458 | 458 | ||
459 | if (res_type == vmw_res_context && dev_priv->has_mob && | 459 | if (res_type == vmw_res_context && dev_priv->has_mob && |
460 | node->first_usage) { | 460 | node->first_usage) { |
@@ -468,13 +468,13 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, | |||
468 | 468 | ||
469 | ret = vmw_resource_context_res_add(dev_priv, sw_context, res); | 469 | ret = vmw_resource_context_res_add(dev_priv, sw_context, res); |
470 | if (unlikely(ret != 0)) | 470 | if (unlikely(ret != 0)) |
471 | goto out_err; | 471 | return ret; |
472 | node->staged_bindings = | 472 | node->staged_bindings = |
473 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); | 473 | kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); |
474 | if (node->staged_bindings == NULL) { | 474 | if (node->staged_bindings == NULL) { |
475 | DRM_ERROR("Failed to allocate context binding " | 475 | DRM_ERROR("Failed to allocate context binding " |
476 | "information.\n"); | 476 | "information.\n"); |
477 | goto out_err; | 477 | return -ENOMEM; |
478 | } | 478 | } |
479 | INIT_LIST_HEAD(&node->staged_bindings->list); | 479 | INIT_LIST_HEAD(&node->staged_bindings->list); |
480 | } | 480 | } |
@@ -482,8 +482,7 @@ static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, | |||
482 | if (p_val) | 482 | if (p_val) |
483 | *p_val = node; | 483 | *p_val = node; |
484 | 484 | ||
485 | out_err: | 485 | return 0; |
486 | return ret; | ||
487 | } | 486 | } |
488 | 487 | ||
489 | 488 | ||
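The vmwgfx change above converts a shared "goto out_err" tail into direct returns and, in the allocation-failure branch, finally reports -ENOMEM instead of falling through with a stale ret of 0. A stripped-down sketch of that bug class (illustrative only, not vmwgfx code; names are invented):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct bindings { int unused; };

/* Buggy shape: the failure path jumps to out_err while ret is still 0. */
static int add_resource_buggy(struct bindings **out)
{
	int ret = 0;
	struct bindings *b = malloc(sizeof(*b));

	if (!b)
		goto out_err;		/* forgot to set ret, caller sees success */
	*out = b;
	return 0;
out_err:
	return ret;
}

/* Fixed shape: every failure site returns an explicit error code. */
static int add_resource_fixed(struct bindings **out)
{
	struct bindings *b = malloc(sizeof(*b));

	if (!b)
		return -ENOMEM;
	*out = b;
	return 0;
}

int main(void)
{
	struct bindings *a = NULL, *b = NULL;

	printf("buggy=%d fixed=%d\n", add_resource_buggy(&a), add_resource_fixed(&b));
	free(a);
	free(b);
	return 0;
}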
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 6ccd993e26bf..6eae14d2a3f7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
@@ -180,8 +180,9 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) | |||
180 | 180 | ||
181 | mutex_lock(&dev_priv->hw_mutex); | 181 | mutex_lock(&dev_priv->hw_mutex); |
182 | 182 | ||
183 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); | ||
183 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) | 184 | while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) |
184 | vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); | 185 | ; |
185 | 186 | ||
186 | dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); | 187 | dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE); |
187 | 188 | ||
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c index fc6f5d54e7f7..8890870309e4 100644 --- a/drivers/hwmon/ds1621.c +++ b/drivers/hwmon/ds1621.c | |||
@@ -309,6 +309,7 @@ static ssize_t set_convrate(struct device *dev, struct device_attribute *da, | |||
309 | data->conf |= (resol << DS1621_REG_CONFIG_RESOL_SHIFT); | 309 | data->conf |= (resol << DS1621_REG_CONFIG_RESOL_SHIFT); |
310 | i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf); | 310 | i2c_smbus_write_byte_data(client, DS1621_REG_CONF, data->conf); |
311 | data->update_interval = ds1721_convrates[resol]; | 311 | data->update_interval = ds1721_convrates[resol]; |
312 | data->zbits = 7 - resol; | ||
312 | mutex_unlock(&data->update_lock); | 313 | mutex_unlock(&data->update_lock); |
313 | 314 | ||
314 | return count; | 315 | return count; |
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index 79a68999a696..917d54588d95 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c | |||
@@ -101,6 +101,7 @@ struct at91_twi_dev { | |||
101 | unsigned twi_cwgr_reg; | 101 | unsigned twi_cwgr_reg; |
102 | struct at91_twi_pdata *pdata; | 102 | struct at91_twi_pdata *pdata; |
103 | bool use_dma; | 103 | bool use_dma; |
104 | bool recv_len_abort; | ||
104 | struct at91_twi_dma dma; | 105 | struct at91_twi_dma dma; |
105 | }; | 106 | }; |
106 | 107 | ||
@@ -267,12 +268,24 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev) | |||
267 | *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff; | 268 | *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff; |
268 | --dev->buf_len; | 269 | --dev->buf_len; |
269 | 270 | ||
271 | /* return if aborting, we only needed to read RHR to clear RXRDY */ | ||
272 | if (dev->recv_len_abort) | ||
273 | return; | ||
274 | |||
270 | /* handle I2C_SMBUS_BLOCK_DATA */ | 275 | /* handle I2C_SMBUS_BLOCK_DATA */ |
271 | if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) { | 276 | if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) { |
272 | dev->msg->flags &= ~I2C_M_RECV_LEN; | 277 | /* ensure length byte is a valid value */ |
273 | dev->buf_len += *dev->buf; | 278 | if (*dev->buf <= I2C_SMBUS_BLOCK_MAX && *dev->buf > 0) { |
274 | dev->msg->len = dev->buf_len + 1; | 279 | dev->msg->flags &= ~I2C_M_RECV_LEN; |
275 | dev_dbg(dev->dev, "received block length %d\n", dev->buf_len); | 280 | dev->buf_len += *dev->buf; |
281 | dev->msg->len = dev->buf_len + 1; | ||
282 | dev_dbg(dev->dev, "received block length %d\n", | ||
283 | dev->buf_len); | ||
284 | } else { | ||
285 | /* abort and send the stop by reading one more byte */ | ||
286 | dev->recv_len_abort = true; | ||
287 | dev->buf_len = 1; | ||
288 | } | ||
276 | } | 289 | } |
277 | 290 | ||
278 | /* send stop if second but last byte has been read */ | 291 | /* send stop if second but last byte has been read */ |
@@ -421,8 +434,8 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) | |||
421 | } | 434 | } |
422 | } | 435 | } |
423 | 436 | ||
424 | ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, | 437 | ret = wait_for_completion_io_timeout(&dev->cmd_complete, |
425 | dev->adapter.timeout); | 438 | dev->adapter.timeout); |
426 | if (ret == 0) { | 439 | if (ret == 0) { |
427 | dev_err(dev->dev, "controller timed out\n"); | 440 | dev_err(dev->dev, "controller timed out\n"); |
428 | at91_init_twi_bus(dev); | 441 | at91_init_twi_bus(dev); |
@@ -444,6 +457,12 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) | |||
444 | ret = -EIO; | 457 | ret = -EIO; |
445 | goto error; | 458 | goto error; |
446 | } | 459 | } |
460 | if (dev->recv_len_abort) { | ||
461 | dev_err(dev->dev, "invalid smbus block length recvd\n"); | ||
462 | ret = -EPROTO; | ||
463 | goto error; | ||
464 | } | ||
465 | |||
447 | dev_dbg(dev->dev, "transfer complete\n"); | 466 | dev_dbg(dev->dev, "transfer complete\n"); |
448 | 467 | ||
449 | return 0; | 468 | return 0; |
@@ -500,6 +519,7 @@ static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num) | |||
500 | dev->buf_len = m_start->len; | 519 | dev->buf_len = m_start->len; |
501 | dev->buf = m_start->buf; | 520 | dev->buf = m_start->buf; |
502 | dev->msg = m_start; | 521 | dev->msg = m_start; |
522 | dev->recv_len_abort = false; | ||
503 | 523 | ||
504 | ret = at91_do_twi_transfer(dev); | 524 | ret = at91_do_twi_transfer(dev); |
505 | 525 | ||
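The i2c-at91 changes above make I2C_M_RECV_LEN transfers defensive: the first byte received is the SMBus block length and is only trusted if it lies in 1..I2C_SMBUS_BLOCK_MAX; otherwise the driver reads one dummy byte so it can still issue a STOP and then fails the transfer with -EPROTO. A standalone model of that length check (not the driver itself; structure names are invented):

#include <stdbool.h>
#include <stdio.h>

#define I2C_SMBUS_BLOCK_MAX 32

struct xfer {
	unsigned int buf_len;		/* bytes still expected from the slave */
	unsigned int msg_len;		/* final length reported back to the caller */
	bool recv_len_abort;
};

/* Called once the SMBus length byte has been read into the buffer. */
static void handle_length_byte(struct xfer *x, unsigned char len_byte)
{
	if (len_byte > 0 && len_byte <= I2C_SMBUS_BLOCK_MAX) {
		x->buf_len += len_byte;		/* payload follows the length byte */
		x->msg_len = x->buf_len + 1;
	} else {
		x->recv_len_abort = true;	/* read one dummy byte, then abort */
		x->buf_len = 1;
	}
}

int main(void)
{
	struct xfer ok = { 0 }, bad = { 0 };

	handle_length_byte(&ok, 4);
	handle_length_byte(&bad, 0xff);
	printf("ok: len=%u abort=%d; bad: abort=%d\n",
	       ok.msg_len, ok.recv_len_abort, bad.recv_len_abort);
	return 0;
}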
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c index 6dc5ded86f62..2f64273d3f2b 100644 --- a/drivers/i2c/busses/i2c-mv64xxx.c +++ b/drivers/i2c/busses/i2c-mv64xxx.c | |||
@@ -746,8 +746,7 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data, | |||
746 | } | 746 | } |
747 | tclk = clk_get_rate(drv_data->clk); | 747 | tclk = clk_get_rate(drv_data->clk); |
748 | 748 | ||
749 | rc = of_property_read_u32(np, "clock-frequency", &bus_freq); | 749 | if (of_property_read_u32(np, "clock-frequency", &bus_freq)) |
750 | if (rc) | ||
751 | bus_freq = 100000; /* 100kHz by default */ | 750 | bus_freq = 100000; /* 100kHz by default */ |
752 | 751 | ||
753 | if (!mv64xxx_find_baud_factors(bus_freq, tclk, | 752 | if (!mv64xxx_find_baud_factors(bus_freq, tclk, |
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index f3c7139dfa25..1cc146cfc1f3 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/platform_device.h> | 34 | #include <linux/platform_device.h> |
35 | #include <linux/pm_runtime.h> | 35 | #include <linux/pm_runtime.h> |
36 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
37 | #include <linux/spinlock.h> | ||
37 | 38 | ||
38 | /* register offsets */ | 39 | /* register offsets */ |
39 | #define ICSCR 0x00 /* slave ctrl */ | 40 | #define ICSCR 0x00 /* slave ctrl */ |
@@ -95,6 +96,7 @@ struct rcar_i2c_priv { | |||
95 | struct i2c_msg *msg; | 96 | struct i2c_msg *msg; |
96 | struct clk *clk; | 97 | struct clk *clk; |
97 | 98 | ||
99 | spinlock_t lock; | ||
98 | wait_queue_head_t wait; | 100 | wait_queue_head_t wait; |
99 | 101 | ||
100 | int pos; | 102 | int pos; |
@@ -365,20 +367,20 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr) | |||
365 | struct rcar_i2c_priv *priv = ptr; | 367 | struct rcar_i2c_priv *priv = ptr; |
366 | u32 msr; | 368 | u32 msr; |
367 | 369 | ||
370 | /*-------------- spin lock -----------------*/ | ||
371 | spin_lock(&priv->lock); | ||
372 | |||
368 | msr = rcar_i2c_read(priv, ICMSR); | 373 | msr = rcar_i2c_read(priv, ICMSR); |
369 | 374 | ||
375 | /* Only handle interrupts that are currently enabled */ | ||
376 | msr &= rcar_i2c_read(priv, ICMIER); | ||
377 | |||
370 | /* Arbitration lost */ | 378 | /* Arbitration lost */ |
371 | if (msr & MAL) { | 379 | if (msr & MAL) { |
372 | rcar_i2c_flags_set(priv, (ID_DONE | ID_ARBLOST)); | 380 | rcar_i2c_flags_set(priv, (ID_DONE | ID_ARBLOST)); |
373 | goto out; | 381 | goto out; |
374 | } | 382 | } |
375 | 383 | ||
376 | /* Stop */ | ||
377 | if (msr & MST) { | ||
378 | rcar_i2c_flags_set(priv, ID_DONE); | ||
379 | goto out; | ||
380 | } | ||
381 | |||
382 | /* Nack */ | 384 | /* Nack */ |
383 | if (msr & MNR) { | 385 | if (msr & MNR) { |
384 | /* go to stop phase */ | 386 | /* go to stop phase */ |
@@ -388,6 +390,12 @@ static irqreturn_t rcar_i2c_irq(int irq, void *ptr) | |||
388 | goto out; | 390 | goto out; |
389 | } | 391 | } |
390 | 392 | ||
393 | /* Stop */ | ||
394 | if (msr & MST) { | ||
395 | rcar_i2c_flags_set(priv, ID_DONE); | ||
396 | goto out; | ||
397 | } | ||
398 | |||
391 | if (rcar_i2c_is_recv(priv)) | 399 | if (rcar_i2c_is_recv(priv)) |
392 | rcar_i2c_flags_set(priv, rcar_i2c_irq_recv(priv, msr)); | 400 | rcar_i2c_flags_set(priv, rcar_i2c_irq_recv(priv, msr)); |
393 | else | 401 | else |
@@ -400,6 +408,9 @@ out: | |||
400 | wake_up(&priv->wait); | 408 | wake_up(&priv->wait); |
401 | } | 409 | } |
402 | 410 | ||
411 | spin_unlock(&priv->lock); | ||
412 | /*-------------- spin unlock -----------------*/ | ||
413 | |||
403 | return IRQ_HANDLED; | 414 | return IRQ_HANDLED; |
404 | } | 415 | } |
405 | 416 | ||
@@ -409,14 +420,21 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, | |||
409 | { | 420 | { |
410 | struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); | 421 | struct rcar_i2c_priv *priv = i2c_get_adapdata(adap); |
411 | struct device *dev = rcar_i2c_priv_to_dev(priv); | 422 | struct device *dev = rcar_i2c_priv_to_dev(priv); |
423 | unsigned long flags; | ||
412 | int i, ret, timeout; | 424 | int i, ret, timeout; |
413 | 425 | ||
414 | pm_runtime_get_sync(dev); | 426 | pm_runtime_get_sync(dev); |
415 | 427 | ||
428 | /*-------------- spin lock -----------------*/ | ||
429 | spin_lock_irqsave(&priv->lock, flags); | ||
430 | |||
416 | rcar_i2c_init(priv); | 431 | rcar_i2c_init(priv); |
417 | /* start clock */ | 432 | /* start clock */ |
418 | rcar_i2c_write(priv, ICCCR, priv->icccr); | 433 | rcar_i2c_write(priv, ICCCR, priv->icccr); |
419 | 434 | ||
435 | spin_unlock_irqrestore(&priv->lock, flags); | ||
436 | /*-------------- spin unlock -----------------*/ | ||
437 | |||
420 | ret = rcar_i2c_bus_barrier(priv); | 438 | ret = rcar_i2c_bus_barrier(priv); |
421 | if (ret < 0) | 439 | if (ret < 0) |
422 | goto out; | 440 | goto out; |
@@ -428,6 +446,9 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, | |||
428 | break; | 446 | break; |
429 | } | 447 | } |
430 | 448 | ||
449 | /*-------------- spin lock -----------------*/ | ||
450 | spin_lock_irqsave(&priv->lock, flags); | ||
451 | |||
431 | /* init each data */ | 452 | /* init each data */ |
432 | priv->msg = &msgs[i]; | 453 | priv->msg = &msgs[i]; |
433 | priv->pos = 0; | 454 | priv->pos = 0; |
@@ -437,6 +458,9 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap, | |||
437 | 458 | ||
438 | ret = rcar_i2c_prepare_msg(priv); | 459 | ret = rcar_i2c_prepare_msg(priv); |
439 | 460 | ||
461 | spin_unlock_irqrestore(&priv->lock, flags); | ||
462 | /*-------------- spin unlock -----------------*/ | ||
463 | |||
440 | if (ret < 0) | 464 | if (ret < 0) |
441 | break; | 465 | break; |
442 | 466 | ||
@@ -540,6 +564,7 @@ static int rcar_i2c_probe(struct platform_device *pdev) | |||
540 | 564 | ||
541 | irq = platform_get_irq(pdev, 0); | 565 | irq = platform_get_irq(pdev, 0); |
542 | init_waitqueue_head(&priv->wait); | 566 | init_waitqueue_head(&priv->wait); |
567 | spin_lock_init(&priv->lock); | ||
543 | 568 | ||
544 | adap = &priv->adap; | 569 | adap = &priv->adap; |
545 | adap->nr = pdev->id; | 570 | adap->nr = pdev->id; |
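The i2c-rcar changes above serialize register setup against the interrupt handler with a new spinlock. The handler can take the plain spin_lock() form because hard-IRQ context already runs with local interrupts disabled, while the process-context transfer path needs spin_lock_irqsave() so the same interrupt cannot fire in the middle of it. A hedged outline of the pattern (demo names, not the driver verbatim):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_priv {
	spinlock_t lock;		/* set up with spin_lock_init() in probe */
};

static irqreturn_t demo_irq(int irq, void *ptr)
{
	struct demo_priv *priv = ptr;

	spin_lock(&priv->lock);		/* IRQs already off in hard-IRQ context */
	/* read/ack status, update transfer state */
	spin_unlock(&priv->lock);
	return IRQ_HANDLED;
}

static void demo_start_transfer(struct demo_priv *priv)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);	/* keep demo_irq() out while programming */
	/* init controller, write clock and message registers */
	spin_unlock_irqrestore(&priv->lock, flags);
}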
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 69e11853e8bf..e637c32ae517 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c | |||
@@ -323,6 +323,10 @@ static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd) | |||
323 | /* ack interrupt */ | 323 | /* ack interrupt */ |
324 | i2c_writel(i2c, REG_INT_MBRF, REG_IPD); | 324 | i2c_writel(i2c, REG_INT_MBRF, REG_IPD); |
325 | 325 | ||
326 | /* Can only handle a maximum of 32 bytes at a time */ | ||
327 | if (len > 32) | ||
328 | len = 32; | ||
329 | |||
326 | /* read the data from receive buffer */ | 330 | /* read the data from receive buffer */ |
327 | for (i = 0; i < len; ++i) { | 331 | for (i = 0; i < len; ++i) { |
328 | if (i % 4 == 0) | 332 | if (i % 4 == 0) |
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index e1e558a3d692..af8256353c7d 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c | |||
@@ -1089,6 +1089,30 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id) | |||
1089 | return err; | 1089 | return err; |
1090 | } | 1090 | } |
1091 | 1091 | ||
1092 | static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr, | ||
1093 | u64 *reg_id) | ||
1094 | { | ||
1095 | void *ib_flow; | ||
1096 | union ib_flow_spec *ib_spec; | ||
1097 | struct mlx4_dev *dev = to_mdev(qp->device)->dev; | ||
1098 | int err = 0; | ||
1099 | |||
1100 | if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) | ||
1101 | return 0; /* do nothing */ | ||
1102 | |||
1103 | ib_flow = flow_attr + 1; | ||
1104 | ib_spec = (union ib_flow_spec *)ib_flow; | ||
1105 | |||
1106 | if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1) | ||
1107 | return 0; /* do nothing */ | ||
1108 | |||
1109 | err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac, | ||
1110 | flow_attr->port, qp->qp_num, | ||
1111 | MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff), | ||
1112 | reg_id); | ||
1113 | return err; | ||
1114 | } | ||
1115 | |||
1092 | static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, | 1116 | static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, |
1093 | struct ib_flow_attr *flow_attr, | 1117 | struct ib_flow_attr *flow_attr, |
1094 | int domain) | 1118 | int domain) |
@@ -1136,6 +1160,12 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, | |||
1136 | i++; | 1160 | i++; |
1137 | } | 1161 | } |
1138 | 1162 | ||
1163 | if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { | ||
1164 | err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]); | ||
1165 | if (err) | ||
1166 | goto err_free; | ||
1167 | } | ||
1168 | |||
1139 | return &mflow->ibflow; | 1169 | return &mflow->ibflow; |
1140 | 1170 | ||
1141 | err_free: | 1171 | err_free: |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 67780452f0cf..efb9eff8906c 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -1677,9 +1677,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
1677 | } | 1677 | } |
1678 | } | 1678 | } |
1679 | 1679 | ||
1680 | if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) | 1680 | if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) { |
1681 | context->pri_path.ackto = (context->pri_path.ackto & 0xf8) | | 1681 | context->pri_path.ackto = (context->pri_path.ackto & 0xf8) | |
1682 | MLX4_IB_LINK_TYPE_ETH; | 1682 | MLX4_IB_LINK_TYPE_ETH; |
1683 | if (dev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) { | ||
1684 | /* set QP to receive both tunneled & non-tunneled packets */ | ||
1685 | if (!(context->flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET))) | ||
1686 | context->srqn = cpu_to_be32(7 << 28); | ||
1687 | } | ||
1688 | } | ||
1683 | 1689 | ||
1684 | if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) { | 1690 | if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) { |
1685 | int is_eth = rdma_port_get_link_layer( | 1691 | int is_eth = rdma_port_get_link_layer( |
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c index c30204f2fa30..fbe29fcb15c5 100644 --- a/drivers/input/input-mt.c +++ b/drivers/input/input-mt.c | |||
@@ -236,6 +236,18 @@ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count) | |||
236 | } | 236 | } |
237 | EXPORT_SYMBOL(input_mt_report_pointer_emulation); | 237 | EXPORT_SYMBOL(input_mt_report_pointer_emulation); |
238 | 238 | ||
239 | static void __input_mt_drop_unused(struct input_dev *dev, struct input_mt *mt) | ||
240 | { | ||
241 | int i; | ||
242 | |||
243 | for (i = 0; i < mt->num_slots; i++) { | ||
244 | if (!input_mt_is_used(mt, &mt->slots[i])) { | ||
245 | input_mt_slot(dev, i); | ||
246 | input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); | ||
247 | } | ||
248 | } | ||
249 | } | ||
250 | |||
239 | /** | 251 | /** |
240 | * input_mt_drop_unused() - Inactivate slots not seen in this frame | 252 | * input_mt_drop_unused() - Inactivate slots not seen in this frame |
241 | * @dev: input device with allocated MT slots | 253 | * @dev: input device with allocated MT slots |
@@ -245,19 +257,11 @@ EXPORT_SYMBOL(input_mt_report_pointer_emulation); | |||
245 | void input_mt_drop_unused(struct input_dev *dev) | 257 | void input_mt_drop_unused(struct input_dev *dev) |
246 | { | 258 | { |
247 | struct input_mt *mt = dev->mt; | 259 | struct input_mt *mt = dev->mt; |
248 | int i; | ||
249 | 260 | ||
250 | if (!mt) | 261 | if (mt) { |
251 | return; | 262 | __input_mt_drop_unused(dev, mt); |
252 | 263 | mt->frame++; | |
253 | for (i = 0; i < mt->num_slots; i++) { | ||
254 | if (!input_mt_is_used(mt, &mt->slots[i])) { | ||
255 | input_mt_slot(dev, i); | ||
256 | input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); | ||
257 | } | ||
258 | } | 264 | } |
259 | |||
260 | mt->frame++; | ||
261 | } | 265 | } |
262 | EXPORT_SYMBOL(input_mt_drop_unused); | 266 | EXPORT_SYMBOL(input_mt_drop_unused); |
263 | 267 | ||
@@ -278,12 +282,14 @@ void input_mt_sync_frame(struct input_dev *dev) | |||
278 | return; | 282 | return; |
279 | 283 | ||
280 | if (mt->flags & INPUT_MT_DROP_UNUSED) | 284 | if (mt->flags & INPUT_MT_DROP_UNUSED) |
281 | input_mt_drop_unused(dev); | 285 | __input_mt_drop_unused(dev, mt); |
282 | 286 | ||
283 | if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT)) | 287 | if ((mt->flags & INPUT_MT_POINTER) && !(mt->flags & INPUT_MT_SEMI_MT)) |
284 | use_count = true; | 288 | use_count = true; |
285 | 289 | ||
286 | input_mt_report_pointer_emulation(dev, use_count); | 290 | input_mt_report_pointer_emulation(dev, use_count); |
291 | |||
292 | mt->frame++; | ||
287 | } | 293 | } |
288 | EXPORT_SYMBOL(input_mt_sync_frame); | 294 | EXPORT_SYMBOL(input_mt_sync_frame); |
289 | 295 | ||
diff --git a/drivers/input/keyboard/cap1106.c b/drivers/input/keyboard/cap1106.c index 180b184ab90f..d70b65a14ced 100644 --- a/drivers/input/keyboard/cap1106.c +++ b/drivers/input/keyboard/cap1106.c | |||
@@ -33,8 +33,8 @@ | |||
33 | #define CAP1106_REG_SENSOR_CONFIG 0x22 | 33 | #define CAP1106_REG_SENSOR_CONFIG 0x22 |
34 | #define CAP1106_REG_SENSOR_CONFIG2 0x23 | 34 | #define CAP1106_REG_SENSOR_CONFIG2 0x23 |
35 | #define CAP1106_REG_SAMPLING_CONFIG 0x24 | 35 | #define CAP1106_REG_SAMPLING_CONFIG 0x24 |
36 | #define CAP1106_REG_CALIBRATION 0x25 | 36 | #define CAP1106_REG_CALIBRATION 0x26 |
37 | #define CAP1106_REG_INT_ENABLE 0x26 | 37 | #define CAP1106_REG_INT_ENABLE 0x27 |
38 | #define CAP1106_REG_REPEAT_RATE 0x28 | 38 | #define CAP1106_REG_REPEAT_RATE 0x28 |
39 | #define CAP1106_REG_MT_CONFIG 0x2a | 39 | #define CAP1106_REG_MT_CONFIG 0x2a |
40 | #define CAP1106_REG_MT_PATTERN_CONFIG 0x2b | 40 | #define CAP1106_REG_MT_PATTERN_CONFIG 0x2b |
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 8d2e19e81e1e..e651fa692afe 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
@@ -332,23 +332,24 @@ static int matrix_keypad_init_gpio(struct platform_device *pdev, | |||
332 | } | 332 | } |
333 | 333 | ||
334 | if (pdata->clustered_irq > 0) { | 334 | if (pdata->clustered_irq > 0) { |
335 | err = request_irq(pdata->clustered_irq, | 335 | err = request_any_context_irq(pdata->clustered_irq, |
336 | matrix_keypad_interrupt, | 336 | matrix_keypad_interrupt, |
337 | pdata->clustered_irq_flags, | 337 | pdata->clustered_irq_flags, |
338 | "matrix-keypad", keypad); | 338 | "matrix-keypad", keypad); |
339 | if (err) { | 339 | if (err < 0) { |
340 | dev_err(&pdev->dev, | 340 | dev_err(&pdev->dev, |
341 | "Unable to acquire clustered interrupt\n"); | 341 | "Unable to acquire clustered interrupt\n"); |
342 | goto err_free_rows; | 342 | goto err_free_rows; |
343 | } | 343 | } |
344 | } else { | 344 | } else { |
345 | for (i = 0; i < pdata->num_row_gpios; i++) { | 345 | for (i = 0; i < pdata->num_row_gpios; i++) { |
346 | err = request_irq(gpio_to_irq(pdata->row_gpios[i]), | 346 | err = request_any_context_irq( |
347 | gpio_to_irq(pdata->row_gpios[i]), | ||
347 | matrix_keypad_interrupt, | 348 | matrix_keypad_interrupt, |
348 | IRQF_TRIGGER_RISING | | 349 | IRQF_TRIGGER_RISING | |
349 | IRQF_TRIGGER_FALLING, | 350 | IRQF_TRIGGER_FALLING, |
350 | "matrix-keypad", keypad); | 351 | "matrix-keypad", keypad); |
351 | if (err) { | 352 | if (err < 0) { |
352 | dev_err(&pdev->dev, | 353 | dev_err(&pdev->dev, |
353 | "Unable to acquire interrupt for GPIO line %i\n", | 354 | "Unable to acquire interrupt for GPIO line %i\n", |
354 | pdata->row_gpios[i]); | 355 | pdata->row_gpios[i]); |
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index a59a1a64b674..35a49bf57227 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c | |||
@@ -2234,8 +2234,8 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) | |||
2234 | return 0; | 2234 | return 0; |
2235 | } | 2235 | } |
2236 | 2236 | ||
2237 | psmouse_info(psmouse, | 2237 | psmouse_dbg(psmouse, |
2238 | "Unknown ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec); | 2238 | "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec); |
2239 | 2239 | ||
2240 | return -EINVAL; | 2240 | return -EINVAL; |
2241 | } | 2241 | } |
@@ -2373,6 +2373,10 @@ int alps_init(struct psmouse *psmouse) | |||
2373 | dev2->keybit[BIT_WORD(BTN_LEFT)] = | 2373 | dev2->keybit[BIT_WORD(BTN_LEFT)] = |
2374 | BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); | 2374 | BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); |
2375 | 2375 | ||
2376 | __set_bit(INPUT_PROP_POINTER, dev2->propbit); | ||
2377 | if (priv->flags & ALPS_DUALPOINT) | ||
2378 | __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit); | ||
2379 | |||
2376 | if (input_register_device(priv->dev2)) | 2380 | if (input_register_device(priv->dev2)) |
2377 | goto init_fail; | 2381 | goto init_fail; |
2378 | 2382 | ||
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index ee2a04d90d20..06fc6e76ffbe 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/input/mt.h> | 18 | #include <linux/input/mt.h> |
19 | #include <linux/serio.h> | 19 | #include <linux/serio.h> |
20 | #include <linux/libps2.h> | 20 | #include <linux/libps2.h> |
21 | #include <asm/unaligned.h> | ||
21 | #include "psmouse.h" | 22 | #include "psmouse.h" |
22 | #include "elantech.h" | 23 | #include "elantech.h" |
23 | 24 | ||
@@ -403,6 +404,68 @@ static void elantech_report_absolute_v2(struct psmouse *psmouse) | |||
403 | input_sync(dev); | 404 | input_sync(dev); |
404 | } | 405 | } |
405 | 406 | ||
407 | static void elantech_report_trackpoint(struct psmouse *psmouse, | ||
408 | int packet_type) | ||
409 | { | ||
410 | /* | ||
411 | * byte 0: 0 0 sx sy 0 M R L | ||
412 | * byte 1:~sx 0 0 0 0 0 0 0 | ||
413 | * byte 2:~sy 0 0 0 0 0 0 0 | ||
414 | * byte 3: 0 0 ~sy ~sx 0 1 1 0 | ||
415 | * byte 4: x7 x6 x5 x4 x3 x2 x1 x0 | ||
416 | * byte 5: y7 y6 y5 y4 y3 y2 y1 y0 | ||
417 | * | ||
418 | * x and y are written in two's complement spread | ||
419 | * over 9 bits with sx/sy the relative top bit and | ||
420 | * x7..x0 and y7..y0 the lower bits. | ||
421 | * The sign of y is opposite to what the input driver | ||
422 | * expects for a relative movement | ||
423 | */ | ||
424 | |||
425 | struct elantech_data *etd = psmouse->private; | ||
426 | struct input_dev *tp_dev = etd->tp_dev; | ||
427 | unsigned char *packet = psmouse->packet; | ||
428 | int x, y; | ||
429 | u32 t; | ||
430 | |||
431 | if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev, | ||
432 | !tp_dev, | ||
433 | psmouse_fmt("Unexpected trackpoint message\n"))) { | ||
434 | if (etd->debug == 1) | ||
435 | elantech_packet_dump(psmouse); | ||
436 | return; | ||
437 | } | ||
438 | |||
439 | t = get_unaligned_le32(&packet[0]); | ||
440 | |||
441 | switch (t & ~7U) { | ||
442 | case 0x06000030U: | ||
443 | case 0x16008020U: | ||
444 | case 0x26800010U: | ||
445 | case 0x36808000U: | ||
446 | x = packet[4] - (int)((packet[1]^0x80) << 1); | ||
447 | y = (int)((packet[2]^0x80) << 1) - packet[5]; | ||
448 | |||
449 | input_report_key(tp_dev, BTN_LEFT, packet[0] & 0x01); | ||
450 | input_report_key(tp_dev, BTN_RIGHT, packet[0] & 0x02); | ||
451 | input_report_key(tp_dev, BTN_MIDDLE, packet[0] & 0x04); | ||
452 | |||
453 | input_report_rel(tp_dev, REL_X, x); | ||
454 | input_report_rel(tp_dev, REL_Y, y); | ||
455 | |||
456 | input_sync(tp_dev); | ||
457 | |||
458 | break; | ||
459 | |||
460 | default: | ||
461 | /* Dump unexpected packet sequences if debug=1 (default) */ | ||
462 | if (etd->debug == 1) | ||
463 | elantech_packet_dump(psmouse); | ||
464 | |||
465 | break; | ||
466 | } | ||
467 | } | ||
468 | |||
406 | /* | 469 | /* |
407 | * Interpret complete data packets and report absolute mode input events for | 470 | * Interpret complete data packets and report absolute mode input events for |
408 | * hardware version 3. (12 byte packets for two fingers) | 471 | * hardware version 3. (12 byte packets for two fingers) |
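The trackpoint bytes decoded above pack x and y as 9-bit two's complement values: bytes 4 and 5 hold the low 8 bits, the inverted sign bits sit in the MSBs of bytes 1 and 2, and y is additionally negated to match what the input core expects for relative motion. A standalone check of that arithmetic (hypothetical packet values, not driver code):

#include <stdio.h>

static int decode_x(const unsigned char p[6])
{
	return p[4] - (int)((p[1] ^ 0x80) << 1);	/* subtract 256 when sx is set */
}

static int decode_y(const unsigned char p[6])
{
	return (int)((p[2] ^ 0x80) << 1) - p[5];	/* sign flipped for the input core */
}

int main(void)
{
	/* hypothetical packet: sx=1 with x7..x0=0xfd (raw x=-3), sy=0 with y=5 */
	unsigned char pkt[6] = { 0x00, 0x00, 0x80, 0x06, 0xfd, 0x05 };

	printf("x=%d y=%d\n", decode_x(pkt), decode_y(pkt));	/* expect x=-3 y=-5 */
	return 0;
}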
@@ -715,6 +778,8 @@ static int elantech_packet_check_v3(struct psmouse *psmouse) | |||
715 | 778 | ||
716 | if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) | 779 | if ((packet[0] & 0x0c) == 0x0c && (packet[3] & 0xce) == 0x0c) |
717 | return PACKET_V3_TAIL; | 780 | return PACKET_V3_TAIL; |
781 | if ((packet[3] & 0x0f) == 0x06) | ||
782 | return PACKET_TRACKPOINT; | ||
718 | } | 783 | } |
719 | 784 | ||
720 | return PACKET_UNKNOWN; | 785 | return PACKET_UNKNOWN; |
@@ -791,14 +856,23 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse) | |||
791 | 856 | ||
792 | case 3: | 857 | case 3: |
793 | packet_type = elantech_packet_check_v3(psmouse); | 858 | packet_type = elantech_packet_check_v3(psmouse); |
794 | /* ignore debounce */ | 859 | switch (packet_type) { |
795 | if (packet_type == PACKET_DEBOUNCE) | 860 | case PACKET_UNKNOWN: |
796 | return PSMOUSE_FULL_PACKET; | ||
797 | |||
798 | if (packet_type == PACKET_UNKNOWN) | ||
799 | return PSMOUSE_BAD_DATA; | 861 | return PSMOUSE_BAD_DATA; |
800 | 862 | ||
801 | elantech_report_absolute_v3(psmouse, packet_type); | 863 | case PACKET_DEBOUNCE: |
864 | /* ignore debounce */ | ||
865 | break; | ||
866 | |||
867 | case PACKET_TRACKPOINT: | ||
868 | elantech_report_trackpoint(psmouse, packet_type); | ||
869 | break; | ||
870 | |||
871 | default: | ||
872 | elantech_report_absolute_v3(psmouse, packet_type); | ||
873 | break; | ||
874 | } | ||
875 | |||
802 | break; | 876 | break; |
803 | 877 | ||
804 | case 4: | 878 | case 4: |
@@ -1018,8 +1092,10 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, | |||
1018 | * Asus UX31 0x361f00 20, 15, 0e clickpad | 1092 | * Asus UX31 0x361f00 20, 15, 0e clickpad |
1019 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad | 1093 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad |
1020 | * Avatar AVIU-145A2 0x361f00 ? clickpad | 1094 | * Avatar AVIU-145A2 0x361f00 ? clickpad |
1095 | * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) | ||
1021 | * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons | 1096 | * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons |
1022 | * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) | 1097 | * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) |
1098 | * Lenovo L530 0x350f02 b9, 15, 0c 2 hw buttons (*) | ||
1023 | * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons | 1099 | * Samsung NF210 0x150b00 78, 14, 0a 2 hw buttons |
1024 | * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad | 1100 | * Samsung NP770Z5E 0x575f01 10, 15, 0f clickpad |
1025 | * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad | 1101 | * Samsung NP700Z5B 0x361f06 21, 15, 0f clickpad |
@@ -1029,6 +1105,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, | |||
1029 | * Samsung RF710 0x450f00 ? 2 hw buttons | 1105 | * Samsung RF710 0x450f00 ? 2 hw buttons |
1030 | * System76 Pangolin 0x250f01 ? 2 hw buttons | 1106 | * System76 Pangolin 0x250f01 ? 2 hw buttons |
1031 | * (*) + 3 trackpoint buttons | 1107 | * (*) + 3 trackpoint buttons |
1108 | * (**) + 0 trackpoint buttons | ||
1109 | * Note: Lenovo L430 and Lenovo L530 have the same fw_version/caps | ||
1032 | */ | 1110 | */ |
1033 | static void elantech_set_buttonpad_prop(struct psmouse *psmouse) | 1111 | static void elantech_set_buttonpad_prop(struct psmouse *psmouse) |
1034 | { | 1112 | { |
@@ -1253,6 +1331,13 @@ static bool elantech_is_signature_valid(const unsigned char *param) | |||
1253 | if (param[1] == 0) | 1331 | if (param[1] == 0) |
1254 | return true; | 1332 | return true; |
1255 | 1333 | ||
1334 | /* | ||
1335 | * Some models have a revision higher than 20, meaning param[2] may | ||
1336 | * be 10 or 20; skip the rates check for these. | ||
1337 | */ | ||
1338 | if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40) | ||
1339 | return true; | ||
1340 | |||
1256 | for (i = 0; i < ARRAY_SIZE(rates); i++) | 1341 | for (i = 0; i < ARRAY_SIZE(rates); i++) |
1257 | if (param[2] == rates[i]) | 1342 | if (param[2] == rates[i]) |
1258 | return false; | 1343 | return false; |
@@ -1324,6 +1409,10 @@ int elantech_detect(struct psmouse *psmouse, bool set_properties) | |||
1324 | */ | 1409 | */ |
1325 | static void elantech_disconnect(struct psmouse *psmouse) | 1410 | static void elantech_disconnect(struct psmouse *psmouse) |
1326 | { | 1411 | { |
1412 | struct elantech_data *etd = psmouse->private; | ||
1413 | |||
1414 | if (etd->tp_dev) | ||
1415 | input_unregister_device(etd->tp_dev); | ||
1327 | sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, | 1416 | sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, |
1328 | &elantech_attr_group); | 1417 | &elantech_attr_group); |
1329 | kfree(psmouse->private); | 1418 | kfree(psmouse->private); |
@@ -1438,8 +1527,10 @@ static int elantech_set_properties(struct elantech_data *etd) | |||
1438 | int elantech_init(struct psmouse *psmouse) | 1527 | int elantech_init(struct psmouse *psmouse) |
1439 | { | 1528 | { |
1440 | struct elantech_data *etd; | 1529 | struct elantech_data *etd; |
1441 | int i, error; | 1530 | int i; |
1531 | int error = -EINVAL; | ||
1442 | unsigned char param[3]; | 1532 | unsigned char param[3]; |
1533 | struct input_dev *tp_dev; | ||
1443 | 1534 | ||
1444 | psmouse->private = etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL); | 1535 | psmouse->private = etd = kzalloc(sizeof(struct elantech_data), GFP_KERNEL); |
1445 | if (!etd) | 1536 | if (!etd) |
@@ -1498,14 +1589,53 @@ int elantech_init(struct psmouse *psmouse) | |||
1498 | goto init_fail; | 1589 | goto init_fail; |
1499 | } | 1590 | } |
1500 | 1591 | ||
1592 | /* The MSB indicates the presence of the trackpoint */ | ||
1593 | if ((etd->capabilities[0] & 0x80) == 0x80) { | ||
1594 | tp_dev = input_allocate_device(); | ||
1595 | |||
1596 | if (!tp_dev) { | ||
1597 | error = -ENOMEM; | ||
1598 | goto init_fail_tp_alloc; | ||
1599 | } | ||
1600 | |||
1601 | etd->tp_dev = tp_dev; | ||
1602 | snprintf(etd->tp_phys, sizeof(etd->tp_phys), "%s/input1", | ||
1603 | psmouse->ps2dev.serio->phys); | ||
1604 | tp_dev->phys = etd->tp_phys; | ||
1605 | tp_dev->name = "Elantech PS/2 TrackPoint"; | ||
1606 | tp_dev->id.bustype = BUS_I8042; | ||
1607 | tp_dev->id.vendor = 0x0002; | ||
1608 | tp_dev->id.product = PSMOUSE_ELANTECH; | ||
1609 | tp_dev->id.version = 0x0000; | ||
1610 | tp_dev->dev.parent = &psmouse->ps2dev.serio->dev; | ||
1611 | tp_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); | ||
1612 | tp_dev->relbit[BIT_WORD(REL_X)] = | ||
1613 | BIT_MASK(REL_X) | BIT_MASK(REL_Y); | ||
1614 | tp_dev->keybit[BIT_WORD(BTN_LEFT)] = | ||
1615 | BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | | ||
1616 | BIT_MASK(BTN_RIGHT); | ||
1617 | |||
1618 | __set_bit(INPUT_PROP_POINTER, tp_dev->propbit); | ||
1619 | __set_bit(INPUT_PROP_POINTING_STICK, tp_dev->propbit); | ||
1620 | |||
1621 | error = input_register_device(etd->tp_dev); | ||
1622 | if (error < 0) | ||
1623 | goto init_fail_tp_reg; | ||
1624 | } | ||
1625 | |||
1501 | psmouse->protocol_handler = elantech_process_byte; | 1626 | psmouse->protocol_handler = elantech_process_byte; |
1502 | psmouse->disconnect = elantech_disconnect; | 1627 | psmouse->disconnect = elantech_disconnect; |
1503 | psmouse->reconnect = elantech_reconnect; | 1628 | psmouse->reconnect = elantech_reconnect; |
1504 | psmouse->pktsize = etd->hw_version > 1 ? 6 : 4; | 1629 | psmouse->pktsize = etd->hw_version > 1 ? 6 : 4; |
1505 | 1630 | ||
1506 | return 0; | 1631 | return 0; |
1507 | 1632 | init_fail_tp_reg: | |
1633 | input_free_device(tp_dev); | ||
1634 | init_fail_tp_alloc: | ||
1635 | sysfs_remove_group(&psmouse->ps2dev.serio->dev.kobj, | ||
1636 | &elantech_attr_group); | ||
1508 | init_fail: | 1637 | init_fail: |
1638 | psmouse_reset(psmouse); | ||
1509 | kfree(etd); | 1639 | kfree(etd); |
1510 | return -1; | 1640 | return error; |
1511 | } | 1641 | } |
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h index 9e0e2a1f340d..6f3afec02f03 100644 --- a/drivers/input/mouse/elantech.h +++ b/drivers/input/mouse/elantech.h | |||
@@ -94,6 +94,7 @@ | |||
94 | #define PACKET_V4_HEAD 0x05 | 94 | #define PACKET_V4_HEAD 0x05 |
95 | #define PACKET_V4_MOTION 0x06 | 95 | #define PACKET_V4_MOTION 0x06 |
96 | #define PACKET_V4_STATUS 0x07 | 96 | #define PACKET_V4_STATUS 0x07 |
97 | #define PACKET_TRACKPOINT 0x08 | ||
97 | 98 | ||
98 | /* | 99 | /* |
99 | * track up to 5 fingers for v4 hardware | 100 | * track up to 5 fingers for v4 hardware |
@@ -114,6 +115,8 @@ struct finger_pos { | |||
114 | }; | 115 | }; |
115 | 116 | ||
116 | struct elantech_data { | 117 | struct elantech_data { |
118 | struct input_dev *tp_dev; /* Relative device for trackpoint */ | ||
119 | char tp_phys[32]; | ||
117 | unsigned char reg_07; | 120 | unsigned char reg_07; |
118 | unsigned char reg_10; | 121 | unsigned char reg_10; |
119 | unsigned char reg_11; | 122 | unsigned char reg_11; |
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index cff065f6261c..b4e1f014ddc2 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c | |||
@@ -670,6 +670,8 @@ static void psmouse_apply_defaults(struct psmouse *psmouse) | |||
670 | __set_bit(REL_X, input_dev->relbit); | 670 | __set_bit(REL_X, input_dev->relbit); |
671 | __set_bit(REL_Y, input_dev->relbit); | 671 | __set_bit(REL_Y, input_dev->relbit); |
672 | 672 | ||
673 | __set_bit(INPUT_PROP_POINTER, input_dev->propbit); | ||
674 | |||
673 | psmouse->set_rate = psmouse_set_rate; | 675 | psmouse->set_rate = psmouse_set_rate; |
674 | psmouse->set_resolution = psmouse_set_resolution; | 676 | psmouse->set_resolution = psmouse_set_resolution; |
675 | psmouse->poll = psmouse_poll; | 677 | psmouse->poll = psmouse_poll; |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index e8573c68f77e..fd23181c1fb7 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -629,10 +629,61 @@ static int synaptics_parse_hw_state(const unsigned char buf[], | |||
629 | ((buf[0] & 0x04) >> 1) | | 629 | ((buf[0] & 0x04) >> 1) | |
630 | ((buf[3] & 0x04) >> 2)); | 630 | ((buf[3] & 0x04) >> 2)); |
631 | 631 | ||
632 | if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) || | ||
633 | SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) && | ||
634 | hw->w == 2) { | ||
635 | synaptics_parse_agm(buf, priv, hw); | ||
636 | return 1; | ||
637 | } | ||
638 | |||
639 | hw->x = (((buf[3] & 0x10) << 8) | | ||
640 | ((buf[1] & 0x0f) << 8) | | ||
641 | buf[4]); | ||
642 | hw->y = (((buf[3] & 0x20) << 7) | | ||
643 | ((buf[1] & 0xf0) << 4) | | ||
644 | buf[5]); | ||
645 | hw->z = buf[2]; | ||
646 | |||
632 | hw->left = (buf[0] & 0x01) ? 1 : 0; | 647 | hw->left = (buf[0] & 0x01) ? 1 : 0; |
633 | hw->right = (buf[0] & 0x02) ? 1 : 0; | 648 | hw->right = (buf[0] & 0x02) ? 1 : 0; |
634 | 649 | ||
635 | if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { | 650 | if (SYN_CAP_FORCEPAD(priv->ext_cap_0c)) { |
651 | /* | ||
652 | * ForcePads, like Clickpads, use middle button | ||
653 | * bits to report primary button clicks. | ||
654 | * Unfortunately they report the primary button not | ||
655 | * only when the user presses on the pad above a certain | ||
656 | * threshold, but also when there is more than one | ||
657 | * finger on the touchpad, which interferes with | ||
658 | * our multi-finger gestures. | ||
659 | */ | ||
660 | if (hw->z == 0) { | ||
661 | /* No contacts */ | ||
662 | priv->press = priv->report_press = false; | ||
663 | } else if (hw->w >= 4 && ((buf[0] ^ buf[3]) & 0x01)) { | ||
664 | /* | ||
665 | * Single-finger touch with pressure above | ||
666 | * the threshold. If pressure stays long | ||
667 | * enough, we'll start reporting the primary | ||
668 | * button. We rely on the device continuing | ||
669 | * to send data even if the finger does not | ||
670 | * move. | ||
671 | */ | ||
672 | if (!priv->press) { | ||
673 | priv->press_start = jiffies; | ||
674 | priv->press = true; | ||
675 | } else if (time_after(jiffies, | ||
676 | priv->press_start + | ||
677 | msecs_to_jiffies(50))) { | ||
678 | priv->report_press = true; | ||
679 | } | ||
680 | } else { | ||
681 | priv->press = false; | ||
682 | } | ||
683 | |||
684 | hw->left = priv->report_press; | ||
685 | |||
686 | } else if (SYN_CAP_CLICKPAD(priv->ext_cap_0c)) { | ||
636 | /* | 687 | /* |
637 | * Clickpad's button is transmitted as middle button, | 688 | * Clickpad's button is transmitted as middle button, |
638 | * however, since it is primary button, we will report | 689 | * however, since it is primary button, we will report |
@@ -651,21 +702,6 @@ static int synaptics_parse_hw_state(const unsigned char buf[], | |||
651 | hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; | 702 | hw->down = ((buf[0] ^ buf[3]) & 0x02) ? 1 : 0; |
652 | } | 703 | } |
653 | 704 | ||
654 | if ((SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) || | ||
655 | SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) && | ||
656 | hw->w == 2) { | ||
657 | synaptics_parse_agm(buf, priv, hw); | ||
658 | return 1; | ||
659 | } | ||
660 | |||
661 | hw->x = (((buf[3] & 0x10) << 8) | | ||
662 | ((buf[1] & 0x0f) << 8) | | ||
663 | buf[4]); | ||
664 | hw->y = (((buf[3] & 0x20) << 7) | | ||
665 | ((buf[1] & 0xf0) << 4) | | ||
666 | buf[5]); | ||
667 | hw->z = buf[2]; | ||
668 | |||
669 | if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && | 705 | if (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) && |
670 | ((buf[0] ^ buf[3]) & 0x02)) { | 706 | ((buf[0] ^ buf[3]) & 0x02)) { |
671 | switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { | 707 | switch (SYN_CAP_MULTI_BUTTON_NO(priv->ext_cap) & ~0x01) { |
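The ForcePad branch above only reports the primary button once a single-finger press has persisted for roughly 50 ms of packets, and drops it again on lift-off or when extra fingers appear. A simplified standalone model of that state machine, using explicit millisecond timestamps instead of jiffies (illustrative only, not the driver):

#include <stdbool.h>
#include <stdio.h>

struct forcepad_state {
	bool press;
	bool report_press;
	unsigned long press_start_ms;
};

static bool forcepad_update(struct forcepad_state *s, unsigned long now_ms,
			    int z, bool single_finger_press)
{
	if (z == 0) {					/* no contact at all */
		s->press = s->report_press = false;
	} else if (single_finger_press) {
		if (!s->press) {
			s->press = true;		/* arm the 50 ms timer */
			s->press_start_ms = now_ms;
		} else if (now_ms - s->press_start_ms > 50) {
			s->report_press = true;		/* now report BTN_LEFT */
		}
	} else {
		s->press = false;			/* e.g. a second finger landed */
	}
	return s->report_press;
}

int main(void)
{
	struct forcepad_state s = { 0 };
	unsigned long t;

	for (t = 0; t <= 80; t += 10)
		printf("t=%lums left=%d\n", t, forcepad_update(&s, t, 30, true));
	return 0;
}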
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index e594af0b264b..fb2e076738ae 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h | |||
@@ -78,6 +78,11 @@ | |||
78 | * 2 0x08 image sensor image sensor tracks 5 fingers, but only | 78 | * 2 0x08 image sensor image sensor tracks 5 fingers, but only |
79 | * reports 2. | 79 | * reports 2. |
80 | * 2 0x20 report min query 0x0f gives min coord reported | 80 | * 2 0x20 report min query 0x0f gives min coord reported |
81 | * 2 0x80 forcepad forcepad is a variant of clickpad that | ||
82 | * does not have physical buttons but rather | ||
83 | * uses pressure above a certain threshold to | ||
84 | * report primary clicks. Forcepads also have | ||
85 | * clickpad bit set. | ||
81 | */ | 86 | */ |
82 | #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ | 87 | #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ |
83 | #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ | 88 | #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ |
@@ -86,6 +91,7 @@ | |||
86 | #define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000) | 91 | #define SYN_CAP_ADV_GESTURE(ex0c) ((ex0c) & 0x080000) |
87 | #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) | 92 | #define SYN_CAP_REDUCED_FILTERING(ex0c) ((ex0c) & 0x000400) |
88 | #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) | 93 | #define SYN_CAP_IMAGE_SENSOR(ex0c) ((ex0c) & 0x000800) |
94 | #define SYN_CAP_FORCEPAD(ex0c) ((ex0c) & 0x008000) | ||
89 | 95 | ||
90 | /* synaptics modes query bits */ | 96 | /* synaptics modes query bits */ |
91 | #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) | 97 | #define SYN_MODE_ABSOLUTE(m) ((m) & (1 << 7)) |
@@ -177,6 +183,11 @@ struct synaptics_data { | |||
177 | */ | 183 | */ |
178 | struct synaptics_hw_state agm; | 184 | struct synaptics_hw_state agm; |
179 | bool agm_pending; /* new AGM packet received */ | 185 | bool agm_pending; /* new AGM packet received */ |
186 | |||
187 | /* ForcePad handling */ | ||
188 | unsigned long press_start; | ||
189 | bool press; | ||
190 | bool report_press; | ||
180 | }; | 191 | }; |
181 | 192 | ||
182 | void synaptics_module_init(void); | 193 | void synaptics_module_init(void); |
diff --git a/drivers/input/mouse/synaptics_usb.c b/drivers/input/mouse/synaptics_usb.c index e122bda16aab..6bcc0189c1c9 100644 --- a/drivers/input/mouse/synaptics_usb.c +++ b/drivers/input/mouse/synaptics_usb.c | |||
@@ -387,6 +387,7 @@ static int synusb_probe(struct usb_interface *intf, | |||
387 | __set_bit(EV_REL, input_dev->evbit); | 387 | __set_bit(EV_REL, input_dev->evbit); |
388 | __set_bit(REL_X, input_dev->relbit); | 388 | __set_bit(REL_X, input_dev->relbit); |
389 | __set_bit(REL_Y, input_dev->relbit); | 389 | __set_bit(REL_Y, input_dev->relbit); |
390 | __set_bit(INPUT_PROP_POINTING_STICK, input_dev->propbit); | ||
390 | input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0); | 391 | input_set_abs_params(input_dev, ABS_PRESSURE, 0, 127, 0, 0); |
391 | } else { | 392 | } else { |
392 | input_set_abs_params(input_dev, ABS_X, | 393 | input_set_abs_params(input_dev, ABS_X, |
@@ -401,6 +402,11 @@ static int synusb_probe(struct usb_interface *intf, | |||
401 | __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); | 402 | __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); |
402 | } | 403 | } |
403 | 404 | ||
405 | if (synusb->flags & SYNUSB_TOUCHSCREEN) | ||
406 | __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); | ||
407 | else | ||
408 | __set_bit(INPUT_PROP_POINTER, input_dev->propbit); | ||
409 | |||
404 | __set_bit(BTN_LEFT, input_dev->keybit); | 410 | __set_bit(BTN_LEFT, input_dev->keybit); |
405 | __set_bit(BTN_RIGHT, input_dev->keybit); | 411 | __set_bit(BTN_RIGHT, input_dev->keybit); |
406 | __set_bit(BTN_MIDDLE, input_dev->keybit); | 412 | __set_bit(BTN_MIDDLE, input_dev->keybit); |
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index ca843b6cf6bd..30c8b6998808 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c | |||
@@ -393,6 +393,9 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties) | |||
393 | if ((button_info & 0x0f) >= 3) | 393 | if ((button_info & 0x0f) >= 3) |
394 | __set_bit(BTN_MIDDLE, psmouse->dev->keybit); | 394 | __set_bit(BTN_MIDDLE, psmouse->dev->keybit); |
395 | 395 | ||
396 | __set_bit(INPUT_PROP_POINTER, psmouse->dev->propbit); | ||
397 | __set_bit(INPUT_PROP_POINTING_STICK, psmouse->dev->propbit); | ||
398 | |||
396 | trackpoint_defaults(psmouse->private); | 399 | trackpoint_defaults(psmouse->private); |
397 | 400 | ||
398 | error = trackpoint_power_on_reset(&psmouse->ps2dev); | 401 | error = trackpoint_power_on_reset(&psmouse->ps2dev); |
diff --git a/drivers/input/serio/i8042-sparcio.h b/drivers/input/serio/i8042-sparcio.h index d6aa4c67dbb6..93cb7912703c 100644 --- a/drivers/input/serio/i8042-sparcio.h +++ b/drivers/input/serio/i8042-sparcio.h | |||
@@ -17,7 +17,6 @@ static int i8042_aux_irq = -1; | |||
17 | #define I8042_MUX_PHYS_DESC "sparcps2/serio%d" | 17 | #define I8042_MUX_PHYS_DESC "sparcps2/serio%d" |
18 | 18 | ||
19 | static void __iomem *kbd_iobase; | 19 | static void __iomem *kbd_iobase; |
20 | static struct resource *kbd_res; | ||
21 | 20 | ||
22 | #define I8042_COMMAND_REG (kbd_iobase + 0x64UL) | 21 | #define I8042_COMMAND_REG (kbd_iobase + 0x64UL) |
23 | #define I8042_DATA_REG (kbd_iobase + 0x60UL) | 22 | #define I8042_DATA_REG (kbd_iobase + 0x60UL) |
@@ -44,6 +43,8 @@ static inline void i8042_write_command(int val) | |||
44 | 43 | ||
45 | #ifdef CONFIG_PCI | 44 | #ifdef CONFIG_PCI |
46 | 45 | ||
46 | static struct resource *kbd_res; | ||
47 | |||
47 | #define OBP_PS2KBD_NAME1 "kb_ps2" | 48 | #define OBP_PS2KBD_NAME1 "kb_ps2" |
48 | #define OBP_PS2KBD_NAME2 "keyboard" | 49 | #define OBP_PS2KBD_NAME2 "keyboard" |
49 | #define OBP_PS2MS_NAME1 "kdmouse" | 50 | #define OBP_PS2MS_NAME1 "kdmouse" |
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c index 0cb7ef59071b..69175b825346 100644 --- a/drivers/input/serio/serport.c +++ b/drivers/input/serio/serport.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/init.h> | 21 | #include <linux/init.h> |
22 | #include <linux/serio.h> | 22 | #include <linux/serio.h> |
23 | #include <linux/tty.h> | 23 | #include <linux/tty.h> |
24 | #include <linux/compat.h> | ||
24 | 25 | ||
25 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); | 26 | MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); |
26 | MODULE_DESCRIPTION("Input device TTY line discipline"); | 27 | MODULE_DESCRIPTION("Input device TTY line discipline"); |
@@ -198,28 +199,55 @@ static ssize_t serport_ldisc_read(struct tty_struct * tty, struct file * file, u | |||
198 | return 0; | 199 | return 0; |
199 | } | 200 | } |
200 | 201 | ||
202 | static void serport_set_type(struct tty_struct *tty, unsigned long type) | ||
203 | { | ||
204 | struct serport *serport = tty->disc_data; | ||
205 | |||
206 | serport->id.proto = type & 0x000000ff; | ||
207 | serport->id.id = (type & 0x0000ff00) >> 8; | ||
208 | serport->id.extra = (type & 0x00ff0000) >> 16; | ||
209 | } | ||
210 | |||
201 | /* | 211 | /* |
202 | * serport_ldisc_ioctl() allows to set the port protocol, and device ID | 212 | * serport_ldisc_ioctl() allows to set the port protocol, and device ID |
203 | */ | 213 | */ |
204 | 214 | ||
205 | static int serport_ldisc_ioctl(struct tty_struct * tty, struct file * file, unsigned int cmd, unsigned long arg) | 215 | static int serport_ldisc_ioctl(struct tty_struct *tty, struct file *file, |
216 | unsigned int cmd, unsigned long arg) | ||
206 | { | 217 | { |
207 | struct serport *serport = (struct serport*) tty->disc_data; | ||
208 | unsigned long type; | ||
209 | |||
210 | if (cmd == SPIOCSTYPE) { | 218 | if (cmd == SPIOCSTYPE) { |
219 | unsigned long type; | ||
220 | |||
211 | if (get_user(type, (unsigned long __user *) arg)) | 221 | if (get_user(type, (unsigned long __user *) arg)) |
212 | return -EFAULT; | 222 | return -EFAULT; |
213 | 223 | ||
214 | serport->id.proto = type & 0x000000ff; | 224 | serport_set_type(tty, type); |
215 | serport->id.id = (type & 0x0000ff00) >> 8; | 225 | return 0; |
216 | serport->id.extra = (type & 0x00ff0000) >> 16; | 226 | } |
227 | |||
228 | return -EINVAL; | ||
229 | } | ||
230 | |||
231 | #ifdef CONFIG_COMPAT | ||
232 | #define COMPAT_SPIOCSTYPE _IOW('q', 0x01, compat_ulong_t) | ||
233 | static long serport_ldisc_compat_ioctl(struct tty_struct *tty, | ||
234 | struct file *file, | ||
235 | unsigned int cmd, unsigned long arg) | ||
236 | { | ||
237 | if (cmd == COMPAT_SPIOCSTYPE) { | ||
238 | void __user *uarg = compat_ptr(arg); | ||
239 | compat_ulong_t compat_type; | ||
240 | |||
241 | if (get_user(compat_type, (compat_ulong_t __user *)uarg)) | ||
242 | return -EFAULT; | ||
217 | 243 | ||
244 | serport_set_type(tty, compat_type); | ||
218 | return 0; | 245 | return 0; |
219 | } | 246 | } |
220 | 247 | ||
221 | return -EINVAL; | 248 | return -EINVAL; |
222 | } | 249 | } |
250 | #endif | ||
223 | 251 | ||
224 | static void serport_ldisc_write_wakeup(struct tty_struct * tty) | 252 | static void serport_ldisc_write_wakeup(struct tty_struct * tty) |
225 | { | 253 | { |
@@ -243,6 +271,9 @@ static struct tty_ldisc_ops serport_ldisc = { | |||
243 | .close = serport_ldisc_close, | 271 | .close = serport_ldisc_close, |
244 | .read = serport_ldisc_read, | 272 | .read = serport_ldisc_read, |
245 | .ioctl = serport_ldisc_ioctl, | 273 | .ioctl = serport_ldisc_ioctl, |
274 | #ifdef CONFIG_COMPAT | ||
275 | .compat_ioctl = serport_ldisc_compat_ioctl, | ||
276 | #endif | ||
246 | .receive_buf = serport_ldisc_receive, | 277 | .receive_buf = serport_ldisc_receive, |
247 | .write_wakeup = serport_ldisc_write_wakeup | 278 | .write_wakeup = serport_ldisc_write_wakeup |
248 | }; | 279 | }; |
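SPIOCSTYPE packs the whole serio identity into the low 24 bits of one word (proto, id and extra, one byte each), which is why the new compat handler above can simply read a 32-bit compat_ulong_t and hand it to the same serport_set_type() helper. A standalone illustration of the unpacking (the example value is made up):

#include <stdio.h>

struct serio_id { unsigned char proto, id, extra; };

static void demo_set_type(struct serio_id *sid, unsigned long type)
{
	sid->proto = type & 0x000000ff;
	sid->id    = (type & 0x0000ff00) >> 8;
	sid->extra = (type & 0x00ff0000) >> 16;
}

int main(void)
{
	struct serio_id sid;

	demo_set_type(&sid, 0x00030201UL);	/* hypothetical proto/id/extra */
	printf("proto=%#x id=%#x extra=%#x\n", sid.proto, sid.id, sid.extra);
	return 0;
}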
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index db178ed2b47e..aaacf8bfa61f 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c | |||
@@ -837,7 +837,12 @@ static irqreturn_t mxt_process_messages_t44(struct mxt_data *data) | |||
837 | count = data->msg_buf[0]; | 837 | count = data->msg_buf[0]; |
838 | 838 | ||
839 | if (count == 0) { | 839 | if (count == 0) { |
840 | dev_warn(dev, "Interrupt triggered but zero messages\n"); | 840 | /* |
841 | * This condition is caused by the CHG line being configured | ||
842 | * in Mode 0. It results in unnecessary I2C operations but it | ||
843 | * is benign. | ||
844 | */ | ||
845 | dev_dbg(dev, "Interrupt triggered but zero messages\n"); | ||
841 | return IRQ_NONE; | 846 | return IRQ_NONE; |
842 | } else if (count > data->max_reportid) { | 847 | } else if (count > data->max_reportid) { |
843 | dev_err(dev, "T44 count %d exceeded max report id\n", count); | 848 | dev_err(dev, "T44 count %d exceeded max report id\n", count); |
@@ -1374,11 +1379,16 @@ static int mxt_get_info(struct mxt_data *data) | |||
1374 | return 0; | 1379 | return 0; |
1375 | } | 1380 | } |
1376 | 1381 | ||
1377 | static void mxt_free_object_table(struct mxt_data *data) | 1382 | static void mxt_free_input_device(struct mxt_data *data) |
1378 | { | 1383 | { |
1379 | input_unregister_device(data->input_dev); | 1384 | if (data->input_dev) { |
1380 | data->input_dev = NULL; | 1385 | input_unregister_device(data->input_dev); |
1386 | data->input_dev = NULL; | ||
1387 | } | ||
1388 | } | ||
1381 | 1389 | ||
1390 | static void mxt_free_object_table(struct mxt_data *data) | ||
1391 | { | ||
1382 | kfree(data->object_table); | 1392 | kfree(data->object_table); |
1383 | data->object_table = NULL; | 1393 | data->object_table = NULL; |
1384 | kfree(data->msg_buf); | 1394 | kfree(data->msg_buf); |
@@ -1957,11 +1967,13 @@ static int mxt_load_fw(struct device *dev, const char *fn) | |||
1957 | ret = mxt_lookup_bootloader_address(data, 0); | 1967 | ret = mxt_lookup_bootloader_address(data, 0); |
1958 | if (ret) | 1968 | if (ret) |
1959 | goto release_firmware; | 1969 | goto release_firmware; |
1970 | |||
1971 | mxt_free_input_device(data); | ||
1972 | mxt_free_object_table(data); | ||
1960 | } else { | 1973 | } else { |
1961 | enable_irq(data->irq); | 1974 | enable_irq(data->irq); |
1962 | } | 1975 | } |
1963 | 1976 | ||
1964 | mxt_free_object_table(data); | ||
1965 | reinit_completion(&data->bl_completion); | 1977 | reinit_completion(&data->bl_completion); |
1966 | 1978 | ||
1967 | ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD, false); | 1979 | ret = mxt_check_bootloader(data, MXT_WAITING_BOOTLOAD_CMD, false); |
@@ -2210,6 +2222,7 @@ static int mxt_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
2210 | return 0; | 2222 | return 0; |
2211 | 2223 | ||
2212 | err_free_object: | 2224 | err_free_object: |
2225 | mxt_free_input_device(data); | ||
2213 | mxt_free_object_table(data); | 2226 | mxt_free_object_table(data); |
2214 | err_free_irq: | 2227 | err_free_irq: |
2215 | free_irq(client->irq, data); | 2228 | free_irq(client->irq, data); |
@@ -2224,7 +2237,7 @@ static int mxt_remove(struct i2c_client *client) | |||
2224 | 2237 | ||
2225 | sysfs_remove_group(&client->dev.kobj, &mxt_attr_group); | 2238 | sysfs_remove_group(&client->dev.kobj, &mxt_attr_group); |
2226 | free_irq(data->irq, data); | 2239 | free_irq(data->irq, data); |
2227 | input_unregister_device(data->input_dev); | 2240 | mxt_free_input_device(data); |
2228 | mxt_free_object_table(data); | 2241 | mxt_free_object_table(data); |
2229 | kfree(data); | 2242 | kfree(data); |
2230 | 2243 | ||
diff --git a/drivers/input/touchscreen/wm9712.c b/drivers/input/touchscreen/wm9712.c index 16b52115c27f..705ffa1e064a 100644 --- a/drivers/input/touchscreen/wm9712.c +++ b/drivers/input/touchscreen/wm9712.c | |||
@@ -41,7 +41,7 @@ | |||
41 | */ | 41 | */ |
42 | static int rpu = 8; | 42 | static int rpu = 8; |
43 | module_param(rpu, int, 0); | 43 | module_param(rpu, int, 0); |
44 | MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect."); | 44 | MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect."); |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * Set current used for pressure measurement. | 47 | * Set current used for pressure measurement. |
diff --git a/drivers/input/touchscreen/wm9713.c b/drivers/input/touchscreen/wm9713.c index 7405353199d7..572a5a64face 100644 --- a/drivers/input/touchscreen/wm9713.c +++ b/drivers/input/touchscreen/wm9713.c | |||
@@ -41,7 +41,7 @@ | |||
41 | */ | 41 | */ |
42 | static int rpu = 8; | 42 | static int rpu = 8; |
43 | module_param(rpu, int, 0); | 43 | module_param(rpu, int, 0); |
44 | MODULE_PARM_DESC(rpu, "Set internal pull up resitor for pen detect."); | 44 | MODULE_PARM_DESC(rpu, "Set internal pull up resistor for pen detect."); |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * Set current used for pressure measurement. | 47 | * Set current used for pressure measurement. |
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index ca18d6d42a9b..a83cc2a2a2ca 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c | |||
@@ -146,6 +146,8 @@ | |||
146 | #define ID0_CTTW (1 << 14) | 146 | #define ID0_CTTW (1 << 14) |
147 | #define ID0_NUMIRPT_SHIFT 16 | 147 | #define ID0_NUMIRPT_SHIFT 16 |
148 | #define ID0_NUMIRPT_MASK 0xff | 148 | #define ID0_NUMIRPT_MASK 0xff |
149 | #define ID0_NUMSIDB_SHIFT 9 | ||
150 | #define ID0_NUMSIDB_MASK 0xf | ||
149 | #define ID0_NUMSMRG_SHIFT 0 | 151 | #define ID0_NUMSMRG_SHIFT 0 |
150 | #define ID0_NUMSMRG_MASK 0xff | 152 | #define ID0_NUMSMRG_MASK 0xff |
151 | 153 | ||
@@ -524,9 +526,18 @@ static int register_smmu_master(struct arm_smmu_device *smmu, | |||
524 | master->of_node = masterspec->np; | 526 | master->of_node = masterspec->np; |
525 | master->cfg.num_streamids = masterspec->args_count; | 527 | master->cfg.num_streamids = masterspec->args_count; |
526 | 528 | ||
527 | for (i = 0; i < master->cfg.num_streamids; ++i) | 529 | for (i = 0; i < master->cfg.num_streamids; ++i) { |
528 | master->cfg.streamids[i] = masterspec->args[i]; | 530 | u16 streamid = masterspec->args[i]; |
529 | 531 | ||
532 | if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && | ||
533 | (streamid >= smmu->num_mapping_groups)) { | ||
534 | dev_err(dev, | ||
535 | "stream ID for master device %s greater than maximum allowed (%d)\n", | ||
536 | masterspec->np->name, smmu->num_mapping_groups); | ||
537 | return -ERANGE; | ||
538 | } | ||
539 | master->cfg.streamids[i] = streamid; | ||
540 | } | ||
530 | return insert_smmu_master(smmu, master); | 541 | return insert_smmu_master(smmu, master); |
531 | } | 542 | } |
532 | 543 | ||
@@ -623,7 +634,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev) | |||
623 | 634 | ||
624 | if (fsr & FSR_IGN) | 635 | if (fsr & FSR_IGN) |
625 | dev_err_ratelimited(smmu->dev, | 636 | dev_err_ratelimited(smmu->dev, |
626 | "Unexpected context fault (fsr 0x%u)\n", | 637 | "Unexpected context fault (fsr 0x%x)\n", |
627 | fsr); | 638 | fsr); |
628 | 639 | ||
629 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); | 640 | fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); |
@@ -752,6 +763,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
752 | reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); | 763 | reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); |
753 | break; | 764 | break; |
754 | case 39: | 765 | case 39: |
766 | case 40: | ||
755 | reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); | 767 | reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); |
756 | break; | 768 | break; |
757 | case 42: | 769 | case 42: |
@@ -773,6 +785,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
773 | reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); | 785 | reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); |
774 | break; | 786 | break; |
775 | case 39: | 787 | case 39: |
788 | case 40: | ||
776 | reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); | 789 | reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); |
777 | break; | 790 | break; |
778 | case 42: | 791 | case 42: |
@@ -843,8 +856,11 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
843 | reg |= TTBCR_EAE | | 856 | reg |= TTBCR_EAE | |
844 | (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | | 857 | (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | |
845 | (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | | 858 | (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | |
846 | (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) | | 859 | (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); |
847 | (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); | 860 | |
861 | if (!stage1) | ||
862 | reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); | ||
863 | |||
848 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); | 864 | writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); |
849 | 865 | ||
850 | /* MAIR0 (stage-1 only) */ | 866 | /* MAIR0 (stage-1 only) */ |
@@ -868,10 +884,15 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) | |||
868 | static int arm_smmu_init_domain_context(struct iommu_domain *domain, | 884 | static int arm_smmu_init_domain_context(struct iommu_domain *domain, |
869 | struct arm_smmu_device *smmu) | 885 | struct arm_smmu_device *smmu) |
870 | { | 886 | { |
871 | int irq, ret, start; | 887 | int irq, start, ret = 0; |
888 | unsigned long flags; | ||
872 | struct arm_smmu_domain *smmu_domain = domain->priv; | 889 | struct arm_smmu_domain *smmu_domain = domain->priv; |
873 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; | 890 | struct arm_smmu_cfg *cfg = &smmu_domain->cfg; |
874 | 891 | ||
892 | spin_lock_irqsave(&smmu_domain->lock, flags); | ||
893 | if (smmu_domain->smmu) | ||
894 | goto out_unlock; | ||
895 | |||
875 | if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { | 896 | if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { |
876 | /* | 897 | /* |
877 | * We will likely want to change this if/when KVM gets | 898 | * We will likely want to change this if/when KVM gets |
@@ -890,7 +911,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
890 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, | 911 | ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, |
891 | smmu->num_context_banks); | 912 | smmu->num_context_banks); |
892 | if (IS_ERR_VALUE(ret)) | 913 | if (IS_ERR_VALUE(ret)) |
893 | return ret; | 914 | goto out_unlock; |
894 | 915 | ||
895 | cfg->cbndx = ret; | 916 | cfg->cbndx = ret; |
896 | if (smmu->version == 1) { | 917 | if (smmu->version == 1) { |
@@ -900,6 +921,10 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
900 | cfg->irptndx = cfg->cbndx; | 921 | cfg->irptndx = cfg->cbndx; |
901 | } | 922 | } |
902 | 923 | ||
924 | ACCESS_ONCE(smmu_domain->smmu) = smmu; | ||
925 | arm_smmu_init_context_bank(smmu_domain); | ||
926 | spin_unlock_irqrestore(&smmu_domain->lock, flags); | ||
927 | |||
903 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; | 928 | irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; |
904 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, | 929 | ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, |
905 | "arm-smmu-context-fault", domain); | 930 | "arm-smmu-context-fault", domain); |
@@ -907,15 +932,12 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, | |||
907 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", | 932 | dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", |
908 | cfg->irptndx, irq); | 933 | cfg->irptndx, irq); |
909 | cfg->irptndx = INVALID_IRPTNDX; | 934 | cfg->irptndx = INVALID_IRPTNDX; |
910 | goto out_free_context; | ||
911 | } | 935 | } |
912 | 936 | ||
913 | smmu_domain->smmu = smmu; | ||
914 | arm_smmu_init_context_bank(smmu_domain); | ||
915 | return 0; | 937 | return 0; |
916 | 938 | ||
917 | out_free_context: | 939 | out_unlock: |
918 | __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); | 940 | spin_unlock_irqrestore(&smmu_domain->lock, flags); |
919 | return ret; | 941 | return ret; |
920 | } | 942 | } |
921 | 943 | ||
@@ -975,7 +997,6 @@ static void arm_smmu_free_ptes(pmd_t *pmd) | |||
975 | { | 997 | { |
976 | pgtable_t table = pmd_pgtable(*pmd); | 998 | pgtable_t table = pmd_pgtable(*pmd); |
977 | 999 | ||
978 | pgtable_page_dtor(table); | ||
979 | __free_page(table); | 1000 | __free_page(table); |
980 | } | 1001 | } |
981 | 1002 | ||
@@ -1108,6 +1129,9 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, | |||
1108 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | 1129 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); |
1109 | struct arm_smmu_smr *smrs = cfg->smrs; | 1130 | struct arm_smmu_smr *smrs = cfg->smrs; |
1110 | 1131 | ||
1132 | if (!smrs) | ||
1133 | return; | ||
1134 | |||
1111 | /* Invalidate the SMRs before freeing back to the allocator */ | 1135 | /* Invalidate the SMRs before freeing back to the allocator */ |
1112 | for (i = 0; i < cfg->num_streamids; ++i) { | 1136 | for (i = 0; i < cfg->num_streamids; ++i) { |
1113 | u8 idx = smrs[i].idx; | 1137 | u8 idx = smrs[i].idx; |
@@ -1120,20 +1144,6 @@ static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu, | |||
1120 | kfree(smrs); | 1144 | kfree(smrs); |
1121 | } | 1145 | } |
1122 | 1146 | ||
1123 | static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu, | ||
1124 | struct arm_smmu_master_cfg *cfg) | ||
1125 | { | ||
1126 | int i; | ||
1127 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1128 | |||
1129 | for (i = 0; i < cfg->num_streamids; ++i) { | ||
1130 | u16 sid = cfg->streamids[i]; | ||
1131 | |||
1132 | writel_relaxed(S2CR_TYPE_BYPASS, | ||
1133 | gr0_base + ARM_SMMU_GR0_S2CR(sid)); | ||
1134 | } | ||
1135 | } | ||
1136 | |||
1137 | static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, | 1147 | static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, |
1138 | struct arm_smmu_master_cfg *cfg) | 1148 | struct arm_smmu_master_cfg *cfg) |
1139 | { | 1149 | { |
@@ -1160,23 +1170,30 @@ static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain, | |||
1160 | static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, | 1170 | static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, |
1161 | struct arm_smmu_master_cfg *cfg) | 1171 | struct arm_smmu_master_cfg *cfg) |
1162 | { | 1172 | { |
1173 | int i; | ||
1163 | struct arm_smmu_device *smmu = smmu_domain->smmu; | 1174 | struct arm_smmu_device *smmu = smmu_domain->smmu; |
1175 | void __iomem *gr0_base = ARM_SMMU_GR0(smmu); | ||
1164 | 1176 | ||
1165 | /* | 1177 | /* |
1166 | * We *must* clear the S2CR first, because freeing the SMR means | 1178 | * We *must* clear the S2CR first, because freeing the SMR means |
1167 | * that it can be re-allocated immediately. | 1179 | * that it can be re-allocated immediately. |
1168 | */ | 1180 | */ |
1169 | arm_smmu_bypass_stream_mapping(smmu, cfg); | 1181 | for (i = 0; i < cfg->num_streamids; ++i) { |
1182 | u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i]; | ||
1183 | |||
1184 | writel_relaxed(S2CR_TYPE_BYPASS, | ||
1185 | gr0_base + ARM_SMMU_GR0_S2CR(idx)); | ||
1186 | } | ||
1187 | |||
1170 | arm_smmu_master_free_smrs(smmu, cfg); | 1188 | arm_smmu_master_free_smrs(smmu, cfg); |
1171 | } | 1189 | } |
1172 | 1190 | ||
1173 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | 1191 | static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) |
1174 | { | 1192 | { |
1175 | int ret = -EINVAL; | 1193 | int ret; |
1176 | struct arm_smmu_domain *smmu_domain = domain->priv; | 1194 | struct arm_smmu_domain *smmu_domain = domain->priv; |
1177 | struct arm_smmu_device *smmu; | 1195 | struct arm_smmu_device *smmu, *dom_smmu; |
1178 | struct arm_smmu_master_cfg *cfg; | 1196 | struct arm_smmu_master_cfg *cfg; |
1179 | unsigned long flags; | ||
1180 | 1197 | ||
1181 | smmu = dev_get_master_dev(dev)->archdata.iommu; | 1198 | smmu = dev_get_master_dev(dev)->archdata.iommu; |
1182 | if (!smmu) { | 1199 | if (!smmu) { |
@@ -1188,20 +1205,22 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1188 | * Sanity check the domain. We don't support domains across | 1205 | * Sanity check the domain. We don't support domains across |
1189 | * different SMMUs. | 1206 | * different SMMUs. |
1190 | */ | 1207 | */ |
1191 | spin_lock_irqsave(&smmu_domain->lock, flags); | 1208 | dom_smmu = ACCESS_ONCE(smmu_domain->smmu); |
1192 | if (!smmu_domain->smmu) { | 1209 | if (!dom_smmu) { |
1193 | /* Now that we have a master, we can finalise the domain */ | 1210 | /* Now that we have a master, we can finalise the domain */ |
1194 | ret = arm_smmu_init_domain_context(domain, smmu); | 1211 | ret = arm_smmu_init_domain_context(domain, smmu); |
1195 | if (IS_ERR_VALUE(ret)) | 1212 | if (IS_ERR_VALUE(ret)) |
1196 | goto err_unlock; | 1213 | return ret; |
1197 | } else if (smmu_domain->smmu != smmu) { | 1214 | |
1215 | dom_smmu = smmu_domain->smmu; | ||
1216 | } | ||
1217 | |||
1218 | if (dom_smmu != smmu) { | ||
1198 | dev_err(dev, | 1219 | dev_err(dev, |
1199 | "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", | 1220 | "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", |
1200 | dev_name(smmu_domain->smmu->dev), | 1221 | dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); |
1201 | dev_name(smmu->dev)); | 1222 | return -EINVAL; |
1202 | goto err_unlock; | ||
1203 | } | 1223 | } |
1204 | spin_unlock_irqrestore(&smmu_domain->lock, flags); | ||
1205 | 1224 | ||
1206 | /* Looks ok, so add the device to the domain */ | 1225 | /* Looks ok, so add the device to the domain */ |
1207 | cfg = find_smmu_master_cfg(smmu_domain->smmu, dev); | 1226 | cfg = find_smmu_master_cfg(smmu_domain->smmu, dev); |
@@ -1209,10 +1228,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) | |||
1209 | return -ENODEV; | 1228 | return -ENODEV; |
1210 | 1229 | ||
1211 | return arm_smmu_domain_add_master(smmu_domain, cfg); | 1230 | return arm_smmu_domain_add_master(smmu_domain, cfg); |
1212 | |||
1213 | err_unlock: | ||
1214 | spin_unlock_irqrestore(&smmu_domain->lock, flags); | ||
1215 | return ret; | ||
1216 | } | 1231 | } |
1217 | 1232 | ||
1218 | static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) | 1233 | static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) |
@@ -1247,10 +1262,6 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, | |||
1247 | return -ENOMEM; | 1262 | return -ENOMEM; |
1248 | 1263 | ||
1249 | arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); | 1264 | arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); |
1250 | if (!pgtable_page_ctor(table)) { | ||
1251 | __free_page(table); | ||
1252 | return -ENOMEM; | ||
1253 | } | ||
1254 | pmd_populate(NULL, pmd, table); | 1265 | pmd_populate(NULL, pmd, table); |
1255 | arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); | 1266 | arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); |
1256 | } | 1267 | } |
@@ -1626,7 +1637,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) | |||
1626 | 1637 | ||
1627 | /* Mark all SMRn as invalid and all S2CRn as bypass */ | 1638 | /* Mark all SMRn as invalid and all S2CRn as bypass */ |
1628 | for (i = 0; i < smmu->num_mapping_groups; ++i) { | 1639 | for (i = 0; i < smmu->num_mapping_groups; ++i) { |
1629 | writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i)); | 1640 | writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i)); |
1630 | writel_relaxed(S2CR_TYPE_BYPASS, | 1641 | writel_relaxed(S2CR_TYPE_BYPASS, |
1631 | gr0_base + ARM_SMMU_GR0_S2CR(i)); | 1642 | gr0_base + ARM_SMMU_GR0_S2CR(i)); |
1632 | } | 1643 | } |
@@ -1761,6 +1772,9 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1761 | dev_notice(smmu->dev, | 1772 | dev_notice(smmu->dev, |
1762 | "\tstream matching with %u register groups, mask 0x%x", | 1773 | "\tstream matching with %u register groups, mask 0x%x", |
1763 | smmu->num_mapping_groups, mask); | 1774 | smmu->num_mapping_groups, mask); |
1775 | } else { | ||
1776 | smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) & | ||
1777 | ID0_NUMSIDB_MASK; | ||
1764 | } | 1778 | } |
1765 | 1779 | ||
1766 | /* ID1 */ | 1780 | /* ID1 */ |
@@ -1794,11 +1808,16 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) | |||
1794 | * Stage-1 output limited by stage-2 input size due to pgd | 1808 | * Stage-1 output limited by stage-2 input size due to pgd |
1795 | * allocation (PTRS_PER_PGD). | 1809 | * allocation (PTRS_PER_PGD). |
1796 | */ | 1810 | */ |
1811 | if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { | ||
1797 | #ifdef CONFIG_64BIT | 1812 | #ifdef CONFIG_64BIT |
1798 | smmu->s1_output_size = min_t(unsigned long, VA_BITS, size); | 1813 | smmu->s1_output_size = min_t(unsigned long, VA_BITS, size); |
1799 | #else | 1814 | #else |
1800 | smmu->s1_output_size = min(32UL, size); | 1815 | smmu->s1_output_size = min(32UL, size); |
1801 | #endif | 1816 | #endif |
1817 | } else { | ||
1818 | smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, | ||
1819 | size); | ||
1820 | } | ||
1802 | 1821 | ||
1803 | /* The stage-2 output mask is also applied for bypass */ | 1822 | /* The stage-2 output mask is also applied for bypass */ |
1804 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); | 1823 | size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); |
@@ -1889,6 +1908,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1889 | smmu->irqs[i] = irq; | 1908 | smmu->irqs[i] = irq; |
1890 | } | 1909 | } |
1891 | 1910 | ||
1911 | err = arm_smmu_device_cfg_probe(smmu); | ||
1912 | if (err) | ||
1913 | return err; | ||
1914 | |||
1892 | i = 0; | 1915 | i = 0; |
1893 | smmu->masters = RB_ROOT; | 1916 | smmu->masters = RB_ROOT; |
1894 | while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters", | 1917 | while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters", |
@@ -1905,10 +1928,6 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
1905 | } | 1928 | } |
1906 | dev_notice(dev, "registered %d master devices\n", i); | 1929 | dev_notice(dev, "registered %d master devices\n", i); |
1907 | 1930 | ||
1908 | err = arm_smmu_device_cfg_probe(smmu); | ||
1909 | if (err) | ||
1910 | goto out_put_masters; | ||
1911 | |||
1912 | parse_driver_options(smmu); | 1931 | parse_driver_options(smmu); |
1913 | 1932 | ||
1914 | if (smmu->version > 1 && | 1933 | if (smmu->version > 1 && |
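The arm-smmu rework above finalises the domain under smmu_domain->lock and then publishes smmu_domain->smmu exactly once, so the attach fast path can read it with ACCESS_ONCE() and skip the lock entirely. A stripped-down sketch of that publish-once pattern, using made-up names rather than the driver's real structures:

	/* Illustrative double-checked initialisation, 3.x-era kernel style;
	 * 'demo_domain' and 'demo_smmu' are invented for this sketch. */
	#include <linux/spinlock.h>
	#include <linux/compiler.h>
	#include <linux/errno.h>

	struct demo_smmu;

	struct demo_domain {
		spinlock_t lock;
		struct demo_smmu *smmu;		/* written once, under 'lock' */
	};

	static int demo_attach(struct demo_domain *dom, struct demo_smmu *smmu)
	{
		struct demo_smmu *cur = ACCESS_ONCE(dom->smmu);	/* lock-free fast path */
		unsigned long flags;

		if (!cur) {
			spin_lock_irqsave(&dom->lock, flags);
			if (!dom->smmu)				/* re-check under the lock */
				ACCESS_ONCE(dom->smmu) = smmu;	/* publish exactly once */
			cur = dom->smmu;
			spin_unlock_irqrestore(&dom->lock, flags);
		}

		return cur == smmu ? 0 : -EINVAL;	/* refuse cross-SMMU attach */
	}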
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 60ab474bfff3..06d268abe951 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -678,8 +678,7 @@ static int __init dmar_acpi_dev_scope_init(void) | |||
678 | andd->device_name); | 678 | andd->device_name); |
679 | continue; | 679 | continue; |
680 | } | 680 | } |
681 | acpi_bus_get_device(h, &adev); | 681 | if (acpi_bus_get_device(h, &adev)) { |
682 | if (!adev) { | ||
683 | pr_err("Failed to get device for ACPI object %s\n", | 682 | pr_err("Failed to get device for ACPI object %s\n", |
684 | andd->device_name); | 683 | andd->device_name); |
685 | continue; | 684 | continue; |
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c index 61d1dafa242d..56feed7cec15 100644 --- a/drivers/iommu/fsl_pamu_domain.c +++ b/drivers/iommu/fsl_pamu_domain.c | |||
@@ -984,7 +984,7 @@ static int fsl_pamu_add_device(struct device *dev) | |||
984 | struct iommu_group *group = ERR_PTR(-ENODEV); | 984 | struct iommu_group *group = ERR_PTR(-ENODEV); |
985 | struct pci_dev *pdev; | 985 | struct pci_dev *pdev; |
986 | const u32 *prop; | 986 | const u32 *prop; |
987 | int ret, len; | 987 | int ret = 0, len; |
988 | 988 | ||
989 | /* | 989 | /* |
990 | * For platform devices we allocate a separate group for | 990 | * For platform devices we allocate a separate group for |
@@ -1007,7 +1007,13 @@ static int fsl_pamu_add_device(struct device *dev) | |||
1007 | if (IS_ERR(group)) | 1007 | if (IS_ERR(group)) |
1008 | return PTR_ERR(group); | 1008 | return PTR_ERR(group); |
1009 | 1009 | ||
1010 | ret = iommu_group_add_device(group, dev); | 1010 | /* |
1011 | * Check if device has already been added to an iommu group. | ||
1012 | * Group could have already been created for a PCI device in | ||
1013 | * the iommu_group_get_for_dev path. | ||
1014 | */ | ||
1015 | if (!dev->iommu_group) | ||
1016 | ret = iommu_group_add_device(group, dev); | ||
1011 | 1017 | ||
1012 | iommu_group_put(group); | 1018 | iommu_group_put(group); |
1013 | return ret; | 1019 | return ret; |
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index ac4adb337038..0639b9274b11 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c | |||
@@ -678,15 +678,17 @@ static struct iommu_group *iommu_group_get_for_pci_dev(struct pci_dev *pdev) | |||
678 | */ | 678 | */ |
679 | struct iommu_group *iommu_group_get_for_dev(struct device *dev) | 679 | struct iommu_group *iommu_group_get_for_dev(struct device *dev) |
680 | { | 680 | { |
681 | struct iommu_group *group = ERR_PTR(-EIO); | 681 | struct iommu_group *group; |
682 | int ret; | 682 | int ret; |
683 | 683 | ||
684 | group = iommu_group_get(dev); | 684 | group = iommu_group_get(dev); |
685 | if (group) | 685 | if (group) |
686 | return group; | 686 | return group; |
687 | 687 | ||
688 | if (dev_is_pci(dev)) | 688 | if (!dev_is_pci(dev)) |
689 | group = iommu_group_get_for_pci_dev(to_pci_dev(dev)); | 689 | return ERR_PTR(-EINVAL); |
690 | |||
691 | group = iommu_group_get_for_pci_dev(to_pci_dev(dev)); | ||
690 | 692 | ||
691 | if (IS_ERR(group)) | 693 | if (IS_ERR(group)) |
692 | return group; | 694 | return group; |
diff --git a/drivers/irqchip/exynos-combiner.c b/drivers/irqchip/exynos-combiner.c index f8636a650cf6..5945223b73fa 100644 --- a/drivers/irqchip/exynos-combiner.c +++ b/drivers/irqchip/exynos-combiner.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/irqdomain.h> | 16 | #include <linux/irqdomain.h> |
17 | #include <linux/irqchip/chained_irq.h> | 17 | #include <linux/irqchip/chained_irq.h> |
18 | #include <linux/interrupt.h> | ||
18 | #include <linux/of_address.h> | 19 | #include <linux/of_address.h> |
19 | #include <linux/of_irq.h> | 20 | #include <linux/of_irq.h> |
20 | 21 | ||
diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c index 85c2985d8bcb..bbbaf5de65d2 100644 --- a/drivers/irqchip/irq-crossbar.c +++ b/drivers/irqchip/irq-crossbar.c | |||
@@ -220,7 +220,7 @@ static int __init crossbar_of_init(struct device_node *node) | |||
220 | of_property_read_u32_index(node, | 220 | of_property_read_u32_index(node, |
221 | "ti,irqs-reserved", | 221 | "ti,irqs-reserved", |
222 | i, &entry); | 222 | i, &entry); |
223 | if (entry > max) { | 223 | if (entry >= max) { |
224 | pr_err("Invalid reserved entry\n"); | 224 | pr_err("Invalid reserved entry\n"); |
225 | ret = -EINVAL; | 225 | ret = -EINVAL; |
226 | goto err_irq_map; | 226 | goto err_irq_map; |
@@ -238,7 +238,7 @@ static int __init crossbar_of_init(struct device_node *node) | |||
238 | of_property_read_u32_index(node, | 238 | of_property_read_u32_index(node, |
239 | "ti,irqs-skip", | 239 | "ti,irqs-skip", |
240 | i, &entry); | 240 | i, &entry); |
241 | if (entry > max) { | 241 | if (entry >= max) { |
242 | pr_err("Invalid skip entry\n"); | 242 | pr_err("Invalid skip entry\n"); |
243 | ret = -EINVAL; | 243 | ret = -EINVAL; |
244 | goto err_irq_map; | 244 | goto err_irq_map; |
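The two crossbar checks above are off-by-one fixes: 'entry' indexes an array with 'max' elements, so 'entry == max' is already out of range. A tiny standalone illustration with a made-up array:

	/* Hypothetical demo of the bounds check: valid indexes are 0..max-1. */
	#include <stdio.h>

	int main(void)
	{
		unsigned int max = 4;
		unsigned int table[4] = { 0 };
		unsigned int entry = 4;		/* slips past 'entry > max', still invalid */

		if (entry >= max) {		/* the corrected check */
			printf("Invalid reserved entry\n");
			return 1;
		}
		table[entry] = 1;
		printf("table[%u] set\n", entry);
		return 0;
	}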
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 57eaa5a0b1e3..a0698b4f0303 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c | |||
@@ -36,7 +36,7 @@ | |||
36 | struct gic_chip_data { | 36 | struct gic_chip_data { |
37 | void __iomem *dist_base; | 37 | void __iomem *dist_base; |
38 | void __iomem **redist_base; | 38 | void __iomem **redist_base; |
39 | void __percpu __iomem **rdist; | 39 | void __iomem * __percpu *rdist; |
40 | struct irq_domain *domain; | 40 | struct irq_domain *domain; |
41 | u64 redist_stride; | 41 | u64 redist_stride; |
42 | u32 redist_regions; | 42 | u32 redist_regions; |
@@ -104,7 +104,7 @@ static void gic_redist_wait_for_rwp(void) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | /* Low level accessors */ | 106 | /* Low level accessors */ |
107 | static u64 gic_read_iar(void) | 107 | static u64 __maybe_unused gic_read_iar(void) |
108 | { | 108 | { |
109 | u64 irqstat; | 109 | u64 irqstat; |
110 | 110 | ||
@@ -112,24 +112,24 @@ static u64 gic_read_iar(void) | |||
112 | return irqstat; | 112 | return irqstat; |
113 | } | 113 | } |
114 | 114 | ||
115 | static void gic_write_pmr(u64 val) | 115 | static void __maybe_unused gic_write_pmr(u64 val) |
116 | { | 116 | { |
117 | asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val)); | 117 | asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" (val)); |
118 | } | 118 | } |
119 | 119 | ||
120 | static void gic_write_ctlr(u64 val) | 120 | static void __maybe_unused gic_write_ctlr(u64 val) |
121 | { | 121 | { |
122 | asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val)); | 122 | asm volatile("msr_s " __stringify(ICC_CTLR_EL1) ", %0" : : "r" (val)); |
123 | isb(); | 123 | isb(); |
124 | } | 124 | } |
125 | 125 | ||
126 | static void gic_write_grpen1(u64 val) | 126 | static void __maybe_unused gic_write_grpen1(u64 val) |
127 | { | 127 | { |
128 | asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val)); | 128 | asm volatile("msr_s " __stringify(ICC_GRPEN1_EL1) ", %0" : : "r" (val)); |
129 | isb(); | 129 | isb(); |
130 | } | 130 | } |
131 | 131 | ||
132 | static void gic_write_sgi1r(u64 val) | 132 | static void __maybe_unused gic_write_sgi1r(u64 val) |
133 | { | 133 | { |
134 | asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val)); | 134 | asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val)); |
135 | } | 135 | } |
@@ -200,19 +200,6 @@ static void gic_poke_irq(struct irq_data *d, u32 offset) | |||
200 | rwp_wait(); | 200 | rwp_wait(); |
201 | } | 201 | } |
202 | 202 | ||
203 | static int gic_peek_irq(struct irq_data *d, u32 offset) | ||
204 | { | ||
205 | u32 mask = 1 << (gic_irq(d) % 32); | ||
206 | void __iomem *base; | ||
207 | |||
208 | if (gic_irq_in_rdist(d)) | ||
209 | base = gic_data_rdist_sgi_base(); | ||
210 | else | ||
211 | base = gic_data.dist_base; | ||
212 | |||
213 | return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); | ||
214 | } | ||
215 | |||
216 | static void gic_mask_irq(struct irq_data *d) | 203 | static void gic_mask_irq(struct irq_data *d) |
217 | { | 204 | { |
218 | gic_poke_irq(d, GICD_ICENABLER); | 205 | gic_poke_irq(d, GICD_ICENABLER); |
@@ -401,6 +388,19 @@ static void gic_cpu_init(void) | |||
401 | } | 388 | } |
402 | 389 | ||
403 | #ifdef CONFIG_SMP | 390 | #ifdef CONFIG_SMP |
391 | static int gic_peek_irq(struct irq_data *d, u32 offset) | ||
392 | { | ||
393 | u32 mask = 1 << (gic_irq(d) % 32); | ||
394 | void __iomem *base; | ||
395 | |||
396 | if (gic_irq_in_rdist(d)) | ||
397 | base = gic_data_rdist_sgi_base(); | ||
398 | else | ||
399 | base = gic_data.dist_base; | ||
400 | |||
401 | return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask); | ||
402 | } | ||
403 | |||
404 | static int gic_secondary_init(struct notifier_block *nfb, | 404 | static int gic_secondary_init(struct notifier_block *nfb, |
405 | unsigned long action, void *hcpu) | 405 | unsigned long action, void *hcpu) |
406 | { | 406 | { |
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 4b959e606fe8..dda6dbc23565 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c | |||
@@ -867,7 +867,7 @@ static int gic_routable_irq_domain_xlate(struct irq_domain *d, | |||
867 | return 0; | 867 | return 0; |
868 | } | 868 | } |
869 | 869 | ||
870 | const struct irq_domain_ops gic_default_routable_irq_domain_ops = { | 870 | static const struct irq_domain_ops gic_default_routable_irq_domain_ops = { |
871 | .map = gic_routable_irq_domain_map, | 871 | .map = gic_routable_irq_domain_map, |
872 | .unmap = gic_routable_irq_domain_unmap, | 872 | .unmap = gic_routable_irq_domain_unmap, |
873 | .xlate = gic_routable_irq_domain_xlate, | 873 | .xlate = gic_routable_irq_domain_xlate, |
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index 129729d35478..aa29198fca3e 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c | |||
@@ -15,10 +15,10 @@ | |||
15 | #include <linux/list.h> | 15 | #include <linux/list.h> |
16 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
18 | #include <linux/timer.h> | ||
18 | #include <linux/err.h> | 19 | #include <linux/err.h> |
19 | #include <linux/ctype.h> | 20 | #include <linux/ctype.h> |
20 | #include <linux/leds.h> | 21 | #include <linux/leds.h> |
21 | #include <linux/workqueue.h> | ||
22 | #include "leds.h" | 22 | #include "leds.h" |
23 | 23 | ||
24 | static struct class *leds_class; | 24 | static struct class *leds_class; |
@@ -97,10 +97,9 @@ static const struct attribute_group *led_groups[] = { | |||
97 | NULL, | 97 | NULL, |
98 | }; | 98 | }; |
99 | 99 | ||
100 | static void led_work_function(struct work_struct *ws) | 100 | static void led_timer_function(unsigned long data) |
101 | { | 101 | { |
102 | struct led_classdev *led_cdev = | 102 | struct led_classdev *led_cdev = (void *)data; |
103 | container_of(ws, struct led_classdev, blink_work.work); | ||
104 | unsigned long brightness; | 103 | unsigned long brightness; |
105 | unsigned long delay; | 104 | unsigned long delay; |
106 | 105 | ||
@@ -144,8 +143,7 @@ static void led_work_function(struct work_struct *ws) | |||
144 | } | 143 | } |
145 | } | 144 | } |
146 | 145 | ||
147 | queue_delayed_work(system_wq, &led_cdev->blink_work, | 146 | mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay)); |
148 | msecs_to_jiffies(delay)); | ||
149 | } | 147 | } |
150 | 148 | ||
151 | static void set_brightness_delayed(struct work_struct *ws) | 149 | static void set_brightness_delayed(struct work_struct *ws) |
@@ -233,7 +231,9 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) | |||
233 | 231 | ||
234 | INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed); | 232 | INIT_WORK(&led_cdev->set_brightness_work, set_brightness_delayed); |
235 | 233 | ||
236 | INIT_DELAYED_WORK(&led_cdev->blink_work, led_work_function); | 234 | init_timer(&led_cdev->blink_timer); |
235 | led_cdev->blink_timer.function = led_timer_function; | ||
236 | led_cdev->blink_timer.data = (unsigned long)led_cdev; | ||
237 | 237 | ||
238 | #ifdef CONFIG_LEDS_TRIGGERS | 238 | #ifdef CONFIG_LEDS_TRIGGERS |
239 | led_trigger_set_default(led_cdev); | 239 | led_trigger_set_default(led_cdev); |
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c index 4bb116867b88..71b40d3bf776 100644 --- a/drivers/leds/led-core.c +++ b/drivers/leds/led-core.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | #include <linux/rwsem.h> | 17 | #include <linux/rwsem.h> |
18 | #include <linux/leds.h> | 18 | #include <linux/leds.h> |
19 | #include <linux/workqueue.h> | ||
20 | #include "leds.h" | 19 | #include "leds.h" |
21 | 20 | ||
22 | DECLARE_RWSEM(leds_list_lock); | 21 | DECLARE_RWSEM(leds_list_lock); |
@@ -52,7 +51,7 @@ static void led_set_software_blink(struct led_classdev *led_cdev, | |||
52 | return; | 51 | return; |
53 | } | 52 | } |
54 | 53 | ||
55 | queue_delayed_work(system_wq, &led_cdev->blink_work, 1); | 54 | mod_timer(&led_cdev->blink_timer, jiffies + 1); |
56 | } | 55 | } |
57 | 56 | ||
58 | 57 | ||
@@ -76,7 +75,7 @@ void led_blink_set(struct led_classdev *led_cdev, | |||
76 | unsigned long *delay_on, | 75 | unsigned long *delay_on, |
77 | unsigned long *delay_off) | 76 | unsigned long *delay_off) |
78 | { | 77 | { |
79 | cancel_delayed_work_sync(&led_cdev->blink_work); | 78 | del_timer_sync(&led_cdev->blink_timer); |
80 | 79 | ||
81 | led_cdev->flags &= ~LED_BLINK_ONESHOT; | 80 | led_cdev->flags &= ~LED_BLINK_ONESHOT; |
82 | led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP; | 81 | led_cdev->flags &= ~LED_BLINK_ONESHOT_STOP; |
@@ -91,7 +90,7 @@ void led_blink_set_oneshot(struct led_classdev *led_cdev, | |||
91 | int invert) | 90 | int invert) |
92 | { | 91 | { |
93 | if ((led_cdev->flags & LED_BLINK_ONESHOT) && | 92 | if ((led_cdev->flags & LED_BLINK_ONESHOT) && |
94 | delayed_work_pending(&led_cdev->blink_work)) | 93 | timer_pending(&led_cdev->blink_timer)) |
95 | return; | 94 | return; |
96 | 95 | ||
97 | led_cdev->flags |= LED_BLINK_ONESHOT; | 96 | led_cdev->flags |= LED_BLINK_ONESHOT; |
@@ -108,7 +107,7 @@ EXPORT_SYMBOL(led_blink_set_oneshot); | |||
108 | 107 | ||
109 | void led_stop_software_blink(struct led_classdev *led_cdev) | 108 | void led_stop_software_blink(struct led_classdev *led_cdev) |
110 | { | 109 | { |
111 | cancel_delayed_work_sync(&led_cdev->blink_work); | 110 | del_timer_sync(&led_cdev->blink_timer); |
112 | led_cdev->blink_delay_on = 0; | 111 | led_cdev->blink_delay_on = 0; |
113 | led_cdev->blink_delay_off = 0; | 112 | led_cdev->blink_delay_off = 0; |
114 | } | 113 | } |
@@ -117,7 +116,7 @@ EXPORT_SYMBOL_GPL(led_stop_software_blink); | |||
117 | void led_set_brightness(struct led_classdev *led_cdev, | 116 | void led_set_brightness(struct led_classdev *led_cdev, |
118 | enum led_brightness brightness) | 117 | enum led_brightness brightness) |
119 | { | 118 | { |
120 | /* delay brightness setting if need to stop soft-blink work */ | 119 | /* delay brightness setting if need to stop soft-blink timer */ |
121 | if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) { | 120 | if (led_cdev->blink_delay_on || led_cdev->blink_delay_off) { |
122 | led_cdev->delayed_set_value = brightness; | 121 | led_cdev->delayed_set_value = brightness; |
123 | schedule_work(&led_cdev->set_brightness_work); | 122 | schedule_work(&led_cdev->set_brightness_work); |
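The two LED hunks above move the soft-blink handling from a delayed work item back to a plain kernel timer. The 3.17-era timer API used here passes the object through the 'data' cookie and re-arms itself with mod_timer(); a minimal sketch with invented names:

	/* Rough sketch of the pre-timer_setup() pattern used by the patch;
	 * 'blinker' and 'blink_fn' are illustrative, not the LED core's names. */
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	struct blinker {
		struct timer_list timer;
		unsigned long delay_ms;
	};

	static void blink_fn(unsigned long data)
	{
		struct blinker *b = (struct blinker *)data;

		/* ... toggle the LED here ... */
		mod_timer(&b->timer, jiffies + msecs_to_jiffies(b->delay_ms));
	}

	static void blinker_init(struct blinker *b)
	{
		init_timer(&b->timer);
		b->timer.function = blink_fn;
		b->timer.data = (unsigned long)b;
	}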
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1af40ee209e2..7130505c2425 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -895,8 +895,8 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg) | |||
895 | struct cache *cache = mg->cache; | 895 | struct cache *cache = mg->cache; |
896 | 896 | ||
897 | if (mg->writeback) { | 897 | if (mg->writeback) { |
898 | cell_defer(cache, mg->old_ocell, false); | ||
899 | clear_dirty(cache, mg->old_oblock, mg->cblock); | 898 | clear_dirty(cache, mg->old_oblock, mg->cblock); |
899 | cell_defer(cache, mg->old_ocell, false); | ||
900 | cleanup_migration(mg); | 900 | cleanup_migration(mg); |
901 | return; | 901 | return; |
902 | 902 | ||
@@ -951,13 +951,13 @@ static void migration_success_post_commit(struct dm_cache_migration *mg) | |||
951 | } | 951 | } |
952 | 952 | ||
953 | } else { | 953 | } else { |
954 | clear_dirty(cache, mg->new_oblock, mg->cblock); | ||
954 | if (mg->requeue_holder) | 955 | if (mg->requeue_holder) |
955 | cell_defer(cache, mg->new_ocell, true); | 956 | cell_defer(cache, mg->new_ocell, true); |
956 | else { | 957 | else { |
957 | bio_endio(mg->new_ocell->holder, 0); | 958 | bio_endio(mg->new_ocell->holder, 0); |
958 | cell_defer(cache, mg->new_ocell, false); | 959 | cell_defer(cache, mg->new_ocell, false); |
959 | } | 960 | } |
960 | clear_dirty(cache, mg->new_oblock, mg->cblock); | ||
961 | cleanup_migration(mg); | 961 | cleanup_migration(mg); |
962 | } | 962 | } |
963 | } | 963 | } |
diff --git a/drivers/misc/lattice-ecp3-config.c b/drivers/misc/lattice-ecp3-config.c index 7ffdb589841e..7e1efd5f58f0 100644 --- a/drivers/misc/lattice-ecp3-config.c +++ b/drivers/misc/lattice-ecp3-config.c | |||
@@ -79,6 +79,11 @@ static void firmware_load(const struct firmware *fw, void *context) | |||
79 | u32 jedec_id; | 79 | u32 jedec_id; |
80 | u32 status; | 80 | u32 status; |
81 | 81 | ||
82 | if (fw == NULL) { | ||
83 | dev_err(&spi->dev, "Cannot load firmware, aborting\n"); | ||
84 | return; | ||
85 | } | ||
86 | |||
82 | if (fw->size == 0) { | 87 | if (fw->size == 0) { |
83 | dev_err(&spi->dev, "Error: Firmware size is 0!\n"); | 88 | dev_err(&spi->dev, "Error: Firmware size is 0!\n"); |
84 | return; | 89 | return; |
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c index 5a4bfe33112a..46c4643b7a07 100644 --- a/drivers/mtd/chips/cfi_cmdset_0002.c +++ b/drivers/mtd/chips/cfi_cmdset_0002.c | |||
@@ -1434,6 +1434,10 @@ static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len, | |||
1434 | 1434 | ||
1435 | mutex_lock(&chip->mutex); | 1435 | mutex_lock(&chip->mutex); |
1436 | ret = get_chip(map, chip, base, FL_LOCKING); | 1436 | ret = get_chip(map, chip, base, FL_LOCKING); |
1437 | if (ret) { | ||
1438 | mutex_unlock(&chip->mutex); | ||
1439 | return ret; | ||
1440 | } | ||
1437 | 1441 | ||
1438 | /* Enter lock register command */ | 1442 | /* Enter lock register command */ |
1439 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, | 1443 | cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, |
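The cfi_cmdset_0002 fix above is an unlock-on-error repair: get_chip() can fail, and returning without dropping chip->mutex would leave the chip locked for good. A generic sketch of the idiom, with illustrative names only:

	/* Illustrative only: every early return taken after a lock is held
	 * must release that lock first. */
	#include <linux/mutex.h>

	static int demo_locked_op(struct mutex *lock, int (*prepare)(void))
	{
		int ret;

		mutex_lock(lock);
		ret = prepare();
		if (ret) {
			mutex_unlock(lock);	/* don't leak the lock on failure */
			return ret;
		}

		/* ... do the work under the lock ... */

		mutex_unlock(lock);
		return 0;
	}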
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 059c7414e303..3fe45c705933 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c | |||
@@ -2177,10 +2177,10 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2177 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2177 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2178 | 2178 | ||
2179 | vp->tx_ring[entry].frag[i+1].addr = | 2179 | vp->tx_ring[entry].frag[i+1].addr = |
2180 | cpu_to_le32(pci_map_single( | 2180 | cpu_to_le32(skb_frag_dma_map( |
2181 | VORTEX_PCI(vp), | 2181 | &VORTEX_PCI(vp)->dev, |
2182 | (void *)skb_frag_address(frag), | 2182 | frag, |
2183 | skb_frag_size(frag), PCI_DMA_TODEVICE)); | 2183 | frag->page_offset, frag->size, DMA_TO_DEVICE)); |
2184 | 2184 | ||
2185 | if (i == skb_shinfo(skb)->nr_frags-1) | 2185 | if (i == skb_shinfo(skb)->nr_frags-1) |
2186 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); | 2186 | vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); |
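The 3c59x change above stops mapping fragment payloads through pci_map_single() on a kernel virtual address, which is not valid for fragments that live in highmem, and maps each fragment page-wise with skb_frag_dma_map() instead. A hedged sketch of that mapping loop in a hypothetical driver context (the helper name and error handling are assumptions, not 3c59x code):

	/* Illustrative fragment-mapping loop; 'map_frags' is a made-up helper. */
	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	static int map_frags(struct device *dev, struct sk_buff *skb, dma_addr_t *addrs)
	{
		int i;

		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			addrs[i] = skb_frag_dma_map(dev, frag, 0,
						    skb_frag_size(frag), DMA_TO_DEVICE);
			if (dma_mapping_error(dev, addrs[i]))
				return -ENOMEM;
		}
		return 0;
	}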
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 23578dfee249..3005155e412b 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c | |||
@@ -123,6 +123,12 @@ static inline void greth_enable_tx(struct greth_private *greth) | |||
123 | GRETH_REGORIN(greth->regs->control, GRETH_TXEN); | 123 | GRETH_REGORIN(greth->regs->control, GRETH_TXEN); |
124 | } | 124 | } |
125 | 125 | ||
126 | static inline void greth_enable_tx_and_irq(struct greth_private *greth) | ||
127 | { | ||
128 | wmb(); /* BDs must been written to memory before enabling TX */ | ||
129 | GRETH_REGORIN(greth->regs->control, GRETH_TXEN | GRETH_TXI); | ||
130 | } | ||
131 | |||
126 | static inline void greth_disable_tx(struct greth_private *greth) | 132 | static inline void greth_disable_tx(struct greth_private *greth) |
127 | { | 133 | { |
128 | GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN); | 134 | GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN); |
@@ -447,29 +453,30 @@ out: | |||
447 | return err; | 453 | return err; |
448 | } | 454 | } |
449 | 455 | ||
456 | static inline u16 greth_num_free_bds(u16 tx_last, u16 tx_next) | ||
457 | { | ||
458 | if (tx_next < tx_last) | ||
459 | return (tx_last - tx_next) - 1; | ||
460 | else | ||
461 | return GRETH_TXBD_NUM - (tx_next - tx_last) - 1; | ||
462 | } | ||
450 | 463 | ||
451 | static netdev_tx_t | 464 | static netdev_tx_t |
452 | greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | 465 | greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) |
453 | { | 466 | { |
454 | struct greth_private *greth = netdev_priv(dev); | 467 | struct greth_private *greth = netdev_priv(dev); |
455 | struct greth_bd *bdp; | 468 | struct greth_bd *bdp; |
456 | u32 status = 0, dma_addr, ctrl; | 469 | u32 status, dma_addr; |
457 | int curr_tx, nr_frags, i, err = NETDEV_TX_OK; | 470 | int curr_tx, nr_frags, i, err = NETDEV_TX_OK; |
458 | unsigned long flags; | 471 | unsigned long flags; |
472 | u16 tx_last; | ||
459 | 473 | ||
460 | nr_frags = skb_shinfo(skb)->nr_frags; | 474 | nr_frags = skb_shinfo(skb)->nr_frags; |
475 | tx_last = greth->tx_last; | ||
476 | rmb(); /* tx_last is updated by the poll task */ | ||
461 | 477 | ||
462 | /* Clean TX Ring */ | 478 | if (greth_num_free_bds(tx_last, greth->tx_next) < nr_frags + 1) { |
463 | greth_clean_tx_gbit(dev); | ||
464 | |||
465 | if (greth->tx_free < nr_frags + 1) { | ||
466 | spin_lock_irqsave(&greth->devlock, flags);/*save from poll/irq*/ | ||
467 | ctrl = GRETH_REGLOAD(greth->regs->control); | ||
468 | /* Enable TX IRQ only if not already in poll() routine */ | ||
469 | if (ctrl & GRETH_RXI) | ||
470 | GRETH_REGSAVE(greth->regs->control, ctrl | GRETH_TXI); | ||
471 | netif_stop_queue(dev); | 479 | netif_stop_queue(dev); |
472 | spin_unlock_irqrestore(&greth->devlock, flags); | ||
473 | err = NETDEV_TX_BUSY; | 480 | err = NETDEV_TX_BUSY; |
474 | goto out; | 481 | goto out; |
475 | } | 482 | } |
@@ -488,6 +495,8 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
488 | /* Linear buf */ | 495 | /* Linear buf */ |
489 | if (nr_frags != 0) | 496 | if (nr_frags != 0) |
490 | status = GRETH_TXBD_MORE; | 497 | status = GRETH_TXBD_MORE; |
498 | else | ||
499 | status = GRETH_BD_IE; | ||
491 | 500 | ||
492 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 501 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
493 | status |= GRETH_TXBD_CSALL; | 502 | status |= GRETH_TXBD_CSALL; |
@@ -545,14 +554,12 @@ greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev) | |||
545 | 554 | ||
546 | /* Enable the descriptor chain by enabling the first descriptor */ | 555 | /* Enable the descriptor chain by enabling the first descriptor */ |
547 | bdp = greth->tx_bd_base + greth->tx_next; | 556 | bdp = greth->tx_bd_base + greth->tx_next; |
548 | greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN); | 557 | greth_write_bd(&bdp->stat, |
549 | greth->tx_next = curr_tx; | 558 | greth_read_bd(&bdp->stat) | GRETH_BD_EN); |
550 | greth->tx_free -= nr_frags + 1; | ||
551 | |||
552 | wmb(); | ||
553 | 559 | ||
554 | spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ | 560 | spin_lock_irqsave(&greth->devlock, flags); /*save from poll/irq*/ |
555 | greth_enable_tx(greth); | 561 | greth->tx_next = curr_tx; |
562 | greth_enable_tx_and_irq(greth); | ||
556 | spin_unlock_irqrestore(&greth->devlock, flags); | 563 | spin_unlock_irqrestore(&greth->devlock, flags); |
557 | 564 | ||
558 | return NETDEV_TX_OK; | 565 | return NETDEV_TX_OK; |
@@ -648,7 +655,6 @@ static void greth_clean_tx(struct net_device *dev) | |||
648 | if (greth->tx_free > 0) { | 655 | if (greth->tx_free > 0) { |
649 | netif_wake_queue(dev); | 656 | netif_wake_queue(dev); |
650 | } | 657 | } |
651 | |||
652 | } | 658 | } |
653 | 659 | ||
654 | static inline void greth_update_tx_stats(struct net_device *dev, u32 stat) | 660 | static inline void greth_update_tx_stats(struct net_device *dev, u32 stat) |
@@ -670,20 +676,22 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
670 | { | 676 | { |
671 | struct greth_private *greth; | 677 | struct greth_private *greth; |
672 | struct greth_bd *bdp, *bdp_last_frag; | 678 | struct greth_bd *bdp, *bdp_last_frag; |
673 | struct sk_buff *skb; | 679 | struct sk_buff *skb = NULL; |
674 | u32 stat; | 680 | u32 stat; |
675 | int nr_frags, i; | 681 | int nr_frags, i; |
682 | u16 tx_last; | ||
676 | 683 | ||
677 | greth = netdev_priv(dev); | 684 | greth = netdev_priv(dev); |
685 | tx_last = greth->tx_last; | ||
678 | 686 | ||
679 | while (greth->tx_free < GRETH_TXBD_NUM) { | 687 | while (tx_last != greth->tx_next) { |
680 | 688 | ||
681 | skb = greth->tx_skbuff[greth->tx_last]; | 689 | skb = greth->tx_skbuff[tx_last]; |
682 | 690 | ||
683 | nr_frags = skb_shinfo(skb)->nr_frags; | 691 | nr_frags = skb_shinfo(skb)->nr_frags; |
684 | 692 | ||
685 | /* We only clean fully completed SKBs */ | 693 | /* We only clean fully completed SKBs */ |
686 | bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags); | 694 | bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags); |
687 | 695 | ||
688 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); | 696 | GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX); |
689 | mb(); | 697 | mb(); |
@@ -692,14 +700,14 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
692 | if (stat & GRETH_BD_EN) | 700 | if (stat & GRETH_BD_EN) |
693 | break; | 701 | break; |
694 | 702 | ||
695 | greth->tx_skbuff[greth->tx_last] = NULL; | 703 | greth->tx_skbuff[tx_last] = NULL; |
696 | 704 | ||
697 | greth_update_tx_stats(dev, stat); | 705 | greth_update_tx_stats(dev, stat); |
698 | dev->stats.tx_bytes += skb->len; | 706 | dev->stats.tx_bytes += skb->len; |
699 | 707 | ||
700 | bdp = greth->tx_bd_base + greth->tx_last; | 708 | bdp = greth->tx_bd_base + tx_last; |
701 | 709 | ||
702 | greth->tx_last = NEXT_TX(greth->tx_last); | 710 | tx_last = NEXT_TX(tx_last); |
703 | 711 | ||
704 | dma_unmap_single(greth->dev, | 712 | dma_unmap_single(greth->dev, |
705 | greth_read_bd(&bdp->addr), | 713 | greth_read_bd(&bdp->addr), |
@@ -708,21 +716,26 @@ static void greth_clean_tx_gbit(struct net_device *dev) | |||
708 | 716 | ||
709 | for (i = 0; i < nr_frags; i++) { | 717 | for (i = 0; i < nr_frags; i++) { |
710 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 718 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
711 | bdp = greth->tx_bd_base + greth->tx_last; | 719 | bdp = greth->tx_bd_base + tx_last; |
712 | 720 | ||
713 | dma_unmap_page(greth->dev, | 721 | dma_unmap_page(greth->dev, |
714 | greth_read_bd(&bdp->addr), | 722 | greth_read_bd(&bdp->addr), |
715 | skb_frag_size(frag), | 723 | skb_frag_size(frag), |
716 | DMA_TO_DEVICE); | 724 | DMA_TO_DEVICE); |
717 | 725 | ||
718 | greth->tx_last = NEXT_TX(greth->tx_last); | 726 | tx_last = NEXT_TX(tx_last); |
719 | } | 727 | } |
720 | greth->tx_free += nr_frags+1; | ||
721 | dev_kfree_skb(skb); | 728 | dev_kfree_skb(skb); |
722 | } | 729 | } |
730 | if (skb) { /* skb is set only if the above while loop was entered */ | ||
731 | wmb(); | ||
732 | greth->tx_last = tx_last; | ||
723 | 733 | ||
724 | if (netif_queue_stopped(dev) && (greth->tx_free > (MAX_SKB_FRAGS+1))) | 734 | if (netif_queue_stopped(dev) && |
725 | netif_wake_queue(dev); | 735 | (greth_num_free_bds(tx_last, greth->tx_next) > |
736 | (MAX_SKB_FRAGS+1))) | ||
737 | netif_wake_queue(dev); | ||
738 | } | ||
726 | } | 739 | } |
727 | 740 | ||
728 | static int greth_rx(struct net_device *dev, int limit) | 741 | static int greth_rx(struct net_device *dev, int limit) |
@@ -965,16 +978,12 @@ static int greth_poll(struct napi_struct *napi, int budget) | |||
965 | greth = container_of(napi, struct greth_private, napi); | 978 | greth = container_of(napi, struct greth_private, napi); |
966 | 979 | ||
967 | restart_txrx_poll: | 980 | restart_txrx_poll: |
968 | if (netif_queue_stopped(greth->netdev)) { | ||
969 | if (greth->gbit_mac) | ||
970 | greth_clean_tx_gbit(greth->netdev); | ||
971 | else | ||
972 | greth_clean_tx(greth->netdev); | ||
973 | } | ||
974 | |||
975 | if (greth->gbit_mac) { | 981 | if (greth->gbit_mac) { |
982 | greth_clean_tx_gbit(greth->netdev); | ||
976 | work_done += greth_rx_gbit(greth->netdev, budget - work_done); | 983 | work_done += greth_rx_gbit(greth->netdev, budget - work_done); |
977 | } else { | 984 | } else { |
985 | if (netif_queue_stopped(greth->netdev)) | ||
986 | greth_clean_tx(greth->netdev); | ||
978 | work_done += greth_rx(greth->netdev, budget - work_done); | 987 | work_done += greth_rx(greth->netdev, budget - work_done); |
979 | } | 988 | } |
980 | 989 | ||
@@ -983,7 +992,8 @@ restart_txrx_poll: | |||
983 | spin_lock_irqsave(&greth->devlock, flags); | 992 | spin_lock_irqsave(&greth->devlock, flags); |
984 | 993 | ||
985 | ctrl = GRETH_REGLOAD(greth->regs->control); | 994 | ctrl = GRETH_REGLOAD(greth->regs->control); |
986 | if (netif_queue_stopped(greth->netdev)) { | 995 | if ((greth->gbit_mac && (greth->tx_last != greth->tx_next)) || |
996 | (!greth->gbit_mac && netif_queue_stopped(greth->netdev))) { | ||
987 | GRETH_REGSAVE(greth->regs->control, | 997 | GRETH_REGSAVE(greth->regs->control, |
988 | ctrl | GRETH_TXI | GRETH_RXI); | 998 | ctrl | GRETH_TXI | GRETH_RXI); |
989 | mask = GRETH_INT_RX | GRETH_INT_RE | | 999 | mask = GRETH_INT_RX | GRETH_INT_RE | |
diff --git a/drivers/net/ethernet/aeroflex/greth.h b/drivers/net/ethernet/aeroflex/greth.h index 232a622a85b7..ae16ac94daf8 100644 --- a/drivers/net/ethernet/aeroflex/greth.h +++ b/drivers/net/ethernet/aeroflex/greth.h | |||
@@ -107,7 +107,7 @@ struct greth_private { | |||
107 | 107 | ||
108 | u16 tx_next; | 108 | u16 tx_next; |
109 | u16 tx_last; | 109 | u16 tx_last; |
110 | u16 tx_free; | 110 | u16 tx_free; /* only used on 10/100Mbit */ |
111 | u16 rx_cur; | 111 | u16 rx_cur; |
112 | 112 | ||
113 | struct greth_regs *regs; /* Address of controller registers. */ | 113 | struct greth_regs *regs; /* Address of controller registers. */ |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 346592dca33c..a3c11355a34d 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | |||
@@ -272,8 +272,8 @@ static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer, | |||
272 | struct xgbe_prv_data *pdata = filp->private_data; | 272 | struct xgbe_prv_data *pdata = filp->private_data; |
273 | unsigned int value; | 273 | unsigned int value; |
274 | 274 | ||
275 | value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd, | 275 | value = XMDIO_READ(pdata, pdata->debugfs_xpcs_mmd, |
276 | pdata->debugfs_xpcs_reg); | 276 | pdata->debugfs_xpcs_reg); |
277 | 277 | ||
278 | return xgbe_common_read(buffer, count, ppos, value); | 278 | return xgbe_common_read(buffer, count, ppos, value); |
279 | } | 279 | } |
@@ -290,8 +290,8 @@ static ssize_t xpcs_reg_value_write(struct file *filp, | |||
290 | if (len < 0) | 290 | if (len < 0) |
291 | return len; | 291 | return len; |
292 | 292 | ||
293 | pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd, | 293 | XMDIO_WRITE(pdata, pdata->debugfs_xpcs_mmd, pdata->debugfs_xpcs_reg, |
294 | pdata->debugfs_xpcs_reg, value); | 294 | value); |
295 | 295 | ||
296 | return len; | 296 | return len; |
297 | } | 297 | } |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c index edaca4496264..ea273836d999 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c | |||
@@ -348,7 +348,7 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata) | |||
348 | 348 | ||
349 | /* Clear MAC flow control */ | 349 | /* Clear MAC flow control */ |
350 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; | 350 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; |
351 | q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); | 351 | q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); |
352 | reg = MAC_Q0TFCR; | 352 | reg = MAC_Q0TFCR; |
353 | for (i = 0; i < q_count; i++) { | 353 | for (i = 0; i < q_count; i++) { |
354 | reg_val = XGMAC_IOREAD(pdata, reg); | 354 | reg_val = XGMAC_IOREAD(pdata, reg); |
@@ -373,7 +373,7 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata) | |||
373 | 373 | ||
374 | /* Set MAC flow control */ | 374 | /* Set MAC flow control */ |
375 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; | 375 | max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES; |
376 | q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count); | 376 | q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count); |
377 | reg = MAC_Q0TFCR; | 377 | reg = MAC_Q0TFCR; |
378 | for (i = 0; i < q_count; i++) { | 378 | for (i = 0; i < q_count; i++) { |
379 | reg_val = XGMAC_IOREAD(pdata, reg); | 379 | reg_val = XGMAC_IOREAD(pdata, reg); |
@@ -509,8 +509,8 @@ static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata) | |||
509 | XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); | 509 | XGMAC_IOWRITE(pdata, MAC_IER, mac_ier); |
510 | 510 | ||
511 | /* Enable all counter interrupts */ | 511 | /* Enable all counter interrupts */ |
512 | XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff); | 512 | XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff); |
513 | XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff); | 513 | XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff); |
514 | } | 514 | } |
515 | 515 | ||
516 | static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata) | 516 | static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata) |
@@ -1633,6 +1633,9 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata) | |||
1633 | { | 1633 | { |
1634 | unsigned int i, count; | 1634 | unsigned int i, count; |
1635 | 1635 | ||
1636 | if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21) | ||
1637 | return 0; | ||
1638 | |||
1636 | for (i = 0; i < pdata->tx_q_count; i++) | 1639 | for (i = 0; i < pdata->tx_q_count; i++) |
1637 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); | 1640 | XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1); |
1638 | 1641 | ||
@@ -1703,8 +1706,8 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata) | |||
1703 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); | 1706 | XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); |
1704 | } | 1707 | } |
1705 | 1708 | ||
1706 | static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size, | 1709 | static unsigned int xgbe_calculate_per_queue_fifo(unsigned int fifo_size, |
1707 | unsigned char queue_count) | 1710 | unsigned int queue_count) |
1708 | { | 1711 | { |
1709 | unsigned int q_fifo_size = 0; | 1712 | unsigned int q_fifo_size = 0; |
1710 | enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256; | 1713 | enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256; |
@@ -1748,6 +1751,10 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size, | |||
1748 | q_fifo_size = XGBE_FIFO_SIZE_KB(256); | 1751 | q_fifo_size = XGBE_FIFO_SIZE_KB(256); |
1749 | break; | 1752 | break; |
1750 | } | 1753 | } |
1754 | |||
1755 | /* The configured value is not the actual amount of fifo RAM */ | ||
1756 | q_fifo_size = min_t(unsigned int, XGBE_FIFO_MAX, q_fifo_size); | ||
1757 | |||
1751 | q_fifo_size = q_fifo_size / queue_count; | 1758 | q_fifo_size = q_fifo_size / queue_count; |
1752 | 1759 | ||
1753 | /* Set the queue fifo size programmable value */ | 1760 | /* Set the queue fifo size programmable value */ |
@@ -1947,6 +1954,32 @@ static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata) | |||
1947 | xgbe_disable_rx_vlan_stripping(pdata); | 1954 | xgbe_disable_rx_vlan_stripping(pdata); |
1948 | } | 1955 | } |
1949 | 1956 | ||
1957 | static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo) | ||
1958 | { | ||
1959 | bool read_hi; | ||
1960 | u64 val; | ||
1961 | |||
1962 | switch (reg_lo) { | ||
1963 | /* These registers are always 64 bit */ | ||
1964 | case MMC_TXOCTETCOUNT_GB_LO: | ||
1965 | case MMC_TXOCTETCOUNT_G_LO: | ||
1966 | case MMC_RXOCTETCOUNT_GB_LO: | ||
1967 | case MMC_RXOCTETCOUNT_G_LO: | ||
1968 | read_hi = true; | ||
1969 | break; | ||
1970 | |||
1971 | default: | ||
1972 | read_hi = false; | ||
1973 | }; | ||
1974 | |||
1975 | val = XGMAC_IOREAD(pdata, reg_lo); | ||
1976 | |||
1977 | if (read_hi) | ||
1978 | val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32); | ||
1979 | |||
1980 | return val; | ||
1981 | } | ||
1982 | |||
1950 | static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) | 1983 | static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) |
1951 | { | 1984 | { |
1952 | struct xgbe_mmc_stats *stats = &pdata->mmc_stats; | 1985 | struct xgbe_mmc_stats *stats = &pdata->mmc_stats; |
@@ -1954,75 +1987,75 @@ static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata) | |||
1954 | 1987 | ||
1955 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) | 1988 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB)) |
1956 | stats->txoctetcount_gb += | 1989 | stats->txoctetcount_gb += |
1957 | XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); | 1990 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); |
1958 | 1991 | ||
1959 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) | 1992 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB)) |
1960 | stats->txframecount_gb += | 1993 | stats->txframecount_gb += |
1961 | XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); | 1994 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); |
1962 | 1995 | ||
1963 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) | 1996 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G)) |
1964 | stats->txbroadcastframes_g += | 1997 | stats->txbroadcastframes_g += |
1965 | XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); | 1998 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); |
1966 | 1999 | ||
1967 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) | 2000 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G)) |
1968 | stats->txmulticastframes_g += | 2001 | stats->txmulticastframes_g += |
1969 | XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); | 2002 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); |
1970 | 2003 | ||
1971 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) | 2004 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB)) |
1972 | stats->tx64octets_gb += | 2005 | stats->tx64octets_gb += |
1973 | XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); | 2006 | xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); |
1974 | 2007 | ||
1975 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) | 2008 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB)) |
1976 | stats->tx65to127octets_gb += | 2009 | stats->tx65to127octets_gb += |
1977 | XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); | 2010 | xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); |
1978 | 2011 | ||
1979 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) | 2012 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB)) |
1980 | stats->tx128to255octets_gb += | 2013 | stats->tx128to255octets_gb += |
1981 | XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); | 2014 | xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); |
1982 | 2015 | ||
1983 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) | 2016 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB)) |
1984 | stats->tx256to511octets_gb += | 2017 | stats->tx256to511octets_gb += |
1985 | XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); | 2018 | xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); |
1986 | 2019 | ||
1987 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) | 2020 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB)) |
1988 | stats->tx512to1023octets_gb += | 2021 | stats->tx512to1023octets_gb += |
1989 | XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); | 2022 | xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); |
1990 | 2023 | ||
1991 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) | 2024 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB)) |
1992 | stats->tx1024tomaxoctets_gb += | 2025 | stats->tx1024tomaxoctets_gb += |
1993 | XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); | 2026 | xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); |
1994 | 2027 | ||
1995 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) | 2028 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB)) |
1996 | stats->txunicastframes_gb += | 2029 | stats->txunicastframes_gb += |
1997 | XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); | 2030 | xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); |
1998 | 2031 | ||
1999 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) | 2032 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB)) |
2000 | stats->txmulticastframes_gb += | 2033 | stats->txmulticastframes_gb += |
2001 | XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); | 2034 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); |
2002 | 2035 | ||
2003 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) | 2036 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB)) |
2004 | stats->txbroadcastframes_g += | 2037 | stats->txbroadcastframes_g += |
2005 | XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); | 2038 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); |
2006 | 2039 | ||
2007 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) | 2040 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR)) |
2008 | stats->txunderflowerror += | 2041 | stats->txunderflowerror += |
2009 | XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); | 2042 | xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); |
2010 | 2043 | ||
2011 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) | 2044 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G)) |
2012 | stats->txoctetcount_g += | 2045 | stats->txoctetcount_g += |
2013 | XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); | 2046 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); |
2014 | 2047 | ||
2015 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) | 2048 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G)) |
2016 | stats->txframecount_g += | 2049 | stats->txframecount_g += |
2017 | XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); | 2050 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); |
2018 | 2051 | ||
2019 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) | 2052 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES)) |
2020 | stats->txpauseframes += | 2053 | stats->txpauseframes += |
2021 | XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); | 2054 | xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); |
2022 | 2055 | ||
2023 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) | 2056 | if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G)) |
2024 | stats->txvlanframes_g += | 2057 | stats->txvlanframes_g += |
2025 | XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); | 2058 | xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); |
2026 | } | 2059 | } |
2027 | 2060 | ||
2028 | static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) | 2061 | static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) |
@@ -2032,95 +2065,95 @@ static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata) | |||
2032 | 2065 | ||
2033 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) | 2066 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB)) |
2034 | stats->rxframecount_gb += | 2067 | stats->rxframecount_gb += |
2035 | XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); | 2068 | xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); |
2036 | 2069 | ||
2037 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) | 2070 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB)) |
2038 | stats->rxoctetcount_gb += | 2071 | stats->rxoctetcount_gb += |
2039 | XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); | 2072 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); |
2040 | 2073 | ||
2041 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) | 2074 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G)) |
2042 | stats->rxoctetcount_g += | 2075 | stats->rxoctetcount_g += |
2043 | XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); | 2076 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); |
2044 | 2077 | ||
2045 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) | 2078 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G)) |
2046 | stats->rxbroadcastframes_g += | 2079 | stats->rxbroadcastframes_g += |
2047 | XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); | 2080 | xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); |
2048 | 2081 | ||
2049 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) | 2082 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G)) |
2050 | stats->rxmulticastframes_g += | 2083 | stats->rxmulticastframes_g += |
2051 | XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); | 2084 | xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); |
2052 | 2085 | ||
2053 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) | 2086 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR)) |
2054 | stats->rxcrcerror += | 2087 | stats->rxcrcerror += |
2055 | XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); | 2088 | xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); |
2056 | 2089 | ||
2057 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) | 2090 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR)) |
2058 | stats->rxrunterror += | 2091 | stats->rxrunterror += |
2059 | XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); | 2092 | xgbe_mmc_read(pdata, MMC_RXRUNTERROR); |
2060 | 2093 | ||
2061 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) | 2094 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR)) |
2062 | stats->rxjabbererror += | 2095 | stats->rxjabbererror += |
2063 | XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); | 2096 | xgbe_mmc_read(pdata, MMC_RXJABBERERROR); |
2064 | 2097 | ||
2065 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) | 2098 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G)) |
2066 | stats->rxundersize_g += | 2099 | stats->rxundersize_g += |
2067 | XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); | 2100 | xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); |
2068 | 2101 | ||
2069 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) | 2102 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G)) |
2070 | stats->rxoversize_g += | 2103 | stats->rxoversize_g += |
2071 | XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); | 2104 | xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); |
2072 | 2105 | ||
2073 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) | 2106 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB)) |
2074 | stats->rx64octets_gb += | 2107 | stats->rx64octets_gb += |
2075 | XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); | 2108 | xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); |
2076 | 2109 | ||
2077 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) | 2110 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB)) |
2078 | stats->rx65to127octets_gb += | 2111 | stats->rx65to127octets_gb += |
2079 | XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); | 2112 | xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); |
2080 | 2113 | ||
2081 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) | 2114 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB)) |
2082 | stats->rx128to255octets_gb += | 2115 | stats->rx128to255octets_gb += |
2083 | XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); | 2116 | xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); |
2084 | 2117 | ||
2085 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) | 2118 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB)) |
2086 | stats->rx256to511octets_gb += | 2119 | stats->rx256to511octets_gb += |
2087 | XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); | 2120 | xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); |
2088 | 2121 | ||
2089 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) | 2122 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB)) |
2090 | stats->rx512to1023octets_gb += | 2123 | stats->rx512to1023octets_gb += |
2091 | XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); | 2124 | xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); |
2092 | 2125 | ||
2093 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) | 2126 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB)) |
2094 | stats->rx1024tomaxoctets_gb += | 2127 | stats->rx1024tomaxoctets_gb += |
2095 | XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); | 2128 | xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); |
2096 | 2129 | ||
2097 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) | 2130 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G)) |
2098 | stats->rxunicastframes_g += | 2131 | stats->rxunicastframes_g += |
2099 | XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); | 2132 | xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); |
2100 | 2133 | ||
2101 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) | 2134 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR)) |
2102 | stats->rxlengtherror += | 2135 | stats->rxlengtherror += |
2103 | XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); | 2136 | xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); |
2104 | 2137 | ||
2105 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) | 2138 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE)) |
2106 | stats->rxoutofrangetype += | 2139 | stats->rxoutofrangetype += |
2107 | XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); | 2140 | xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); |
2108 | 2141 | ||
2109 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) | 2142 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES)) |
2110 | stats->rxpauseframes += | 2143 | stats->rxpauseframes += |
2111 | XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); | 2144 | xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); |
2112 | 2145 | ||
2113 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) | 2146 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW)) |
2114 | stats->rxfifooverflow += | 2147 | stats->rxfifooverflow += |
2115 | XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); | 2148 | xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); |
2116 | 2149 | ||
2117 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) | 2150 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB)) |
2118 | stats->rxvlanframes_gb += | 2151 | stats->rxvlanframes_gb += |
2119 | XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); | 2152 | xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); |
2120 | 2153 | ||
2121 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) | 2154 | if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR)) |
2122 | stats->rxwatchdogerror += | 2155 | stats->rxwatchdogerror += |
2123 | XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); | 2156 | xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); |
2124 | } | 2157 | } |
2125 | 2158 | ||
2126 | static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) | 2159 | static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) |
@@ -2131,127 +2164,127 @@ static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata) | |||
2131 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); | 2164 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1); |
2132 | 2165 | ||
2133 | stats->txoctetcount_gb += | 2166 | stats->txoctetcount_gb += |
2134 | XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO); | 2167 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO); |
2135 | 2168 | ||
2136 | stats->txframecount_gb += | 2169 | stats->txframecount_gb += |
2137 | XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO); | 2170 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO); |
2138 | 2171 | ||
2139 | stats->txbroadcastframes_g += | 2172 | stats->txbroadcastframes_g += |
2140 | XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO); | 2173 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO); |
2141 | 2174 | ||
2142 | stats->txmulticastframes_g += | 2175 | stats->txmulticastframes_g += |
2143 | XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO); | 2176 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO); |
2144 | 2177 | ||
2145 | stats->tx64octets_gb += | 2178 | stats->tx64octets_gb += |
2146 | XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO); | 2179 | xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO); |
2147 | 2180 | ||
2148 | stats->tx65to127octets_gb += | 2181 | stats->tx65to127octets_gb += |
2149 | XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO); | 2182 | xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO); |
2150 | 2183 | ||
2151 | stats->tx128to255octets_gb += | 2184 | stats->tx128to255octets_gb += |
2152 | XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO); | 2185 | xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO); |
2153 | 2186 | ||
2154 | stats->tx256to511octets_gb += | 2187 | stats->tx256to511octets_gb += |
2155 | XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO); | 2188 | xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO); |
2156 | 2189 | ||
2157 | stats->tx512to1023octets_gb += | 2190 | stats->tx512to1023octets_gb += |
2158 | XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO); | 2191 | xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO); |
2159 | 2192 | ||
2160 | stats->tx1024tomaxoctets_gb += | 2193 | stats->tx1024tomaxoctets_gb += |
2161 | XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); | 2194 | xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO); |
2162 | 2195 | ||
2163 | stats->txunicastframes_gb += | 2196 | stats->txunicastframes_gb += |
2164 | XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO); | 2197 | xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO); |
2165 | 2198 | ||
2166 | stats->txmulticastframes_gb += | 2199 | stats->txmulticastframes_gb += |
2167 | XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO); | 2200 | xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO); |
2168 | 2201 | ||
2169 | stats->txbroadcastframes_g += | 2202 | stats->txbroadcastframes_g += |
2170 | XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO); | 2203 | xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO); |
2171 | 2204 | ||
2172 | stats->txunderflowerror += | 2205 | stats->txunderflowerror += |
2173 | XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO); | 2206 | xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO); |
2174 | 2207 | ||
2175 | stats->txoctetcount_g += | 2208 | stats->txoctetcount_g += |
2176 | XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO); | 2209 | xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO); |
2177 | 2210 | ||
2178 | stats->txframecount_g += | 2211 | stats->txframecount_g += |
2179 | XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO); | 2212 | xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO); |
2180 | 2213 | ||
2181 | stats->txpauseframes += | 2214 | stats->txpauseframes += |
2182 | XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO); | 2215 | xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO); |
2183 | 2216 | ||
2184 | stats->txvlanframes_g += | 2217 | stats->txvlanframes_g += |
2185 | XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO); | 2218 | xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO); |
2186 | 2219 | ||
2187 | stats->rxframecount_gb += | 2220 | stats->rxframecount_gb += |
2188 | XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO); | 2221 | xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO); |
2189 | 2222 | ||
2190 | stats->rxoctetcount_gb += | 2223 | stats->rxoctetcount_gb += |
2191 | XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO); | 2224 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO); |
2192 | 2225 | ||
2193 | stats->rxoctetcount_g += | 2226 | stats->rxoctetcount_g += |
2194 | XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO); | 2227 | xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO); |
2195 | 2228 | ||
2196 | stats->rxbroadcastframes_g += | 2229 | stats->rxbroadcastframes_g += |
2197 | XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO); | 2230 | xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO); |
2198 | 2231 | ||
2199 | stats->rxmulticastframes_g += | 2232 | stats->rxmulticastframes_g += |
2200 | XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO); | 2233 | xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO); |
2201 | 2234 | ||
2202 | stats->rxcrcerror += | 2235 | stats->rxcrcerror += |
2203 | XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO); | 2236 | xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO); |
2204 | 2237 | ||
2205 | stats->rxrunterror += | 2238 | stats->rxrunterror += |
2206 | XGMAC_IOREAD(pdata, MMC_RXRUNTERROR); | 2239 | xgbe_mmc_read(pdata, MMC_RXRUNTERROR); |
2207 | 2240 | ||
2208 | stats->rxjabbererror += | 2241 | stats->rxjabbererror += |
2209 | XGMAC_IOREAD(pdata, MMC_RXJABBERERROR); | 2242 | xgbe_mmc_read(pdata, MMC_RXJABBERERROR); |
2210 | 2243 | ||
2211 | stats->rxundersize_g += | 2244 | stats->rxundersize_g += |
2212 | XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G); | 2245 | xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G); |
2213 | 2246 | ||
2214 | stats->rxoversize_g += | 2247 | stats->rxoversize_g += |
2215 | XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G); | 2248 | xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G); |
2216 | 2249 | ||
2217 | stats->rx64octets_gb += | 2250 | stats->rx64octets_gb += |
2218 | XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO); | 2251 | xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO); |
2219 | 2252 | ||
2220 | stats->rx65to127octets_gb += | 2253 | stats->rx65to127octets_gb += |
2221 | XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO); | 2254 | xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO); |
2222 | 2255 | ||
2223 | stats->rx128to255octets_gb += | 2256 | stats->rx128to255octets_gb += |
2224 | XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO); | 2257 | xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO); |
2225 | 2258 | ||
2226 | stats->rx256to511octets_gb += | 2259 | stats->rx256to511octets_gb += |
2227 | XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO); | 2260 | xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO); |
2228 | 2261 | ||
2229 | stats->rx512to1023octets_gb += | 2262 | stats->rx512to1023octets_gb += |
2230 | XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO); | 2263 | xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO); |
2231 | 2264 | ||
2232 | stats->rx1024tomaxoctets_gb += | 2265 | stats->rx1024tomaxoctets_gb += |
2233 | XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); | 2266 | xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO); |
2234 | 2267 | ||
2235 | stats->rxunicastframes_g += | 2268 | stats->rxunicastframes_g += |
2236 | XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO); | 2269 | xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO); |
2237 | 2270 | ||
2238 | stats->rxlengtherror += | 2271 | stats->rxlengtherror += |
2239 | XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO); | 2272 | xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO); |
2240 | 2273 | ||
2241 | stats->rxoutofrangetype += | 2274 | stats->rxoutofrangetype += |
2242 | XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO); | 2275 | xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO); |
2243 | 2276 | ||
2244 | stats->rxpauseframes += | 2277 | stats->rxpauseframes += |
2245 | XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO); | 2278 | xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO); |
2246 | 2279 | ||
2247 | stats->rxfifooverflow += | 2280 | stats->rxfifooverflow += |
2248 | XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO); | 2281 | xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO); |
2249 | 2282 | ||
2250 | stats->rxvlanframes_gb += | 2283 | stats->rxvlanframes_gb += |
2251 | XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO); | 2284 | xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO); |
2252 | 2285 | ||
2253 | stats->rxwatchdogerror += | 2286 | stats->rxwatchdogerror += |
2254 | XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR); | 2287 | xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR); |
2255 | 2288 | ||
2256 | /* Un-freeze counters */ | 2289 | /* Un-freeze counters */ |
2257 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); | 2290 | XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0); |
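The xgbe-dev.c hunks above introduce an xgbe_mmc_read() helper so the four always-64-bit octet counters also pick up their high 32 bits instead of only the low register. Below is a minimal user-space sketch of that lo/hi composition pattern; the register array, the read32() accessor and the offsets are hypothetical stand-ins, not the driver's real XGMAC_IOREAD() I/O layer.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register file: offsets are byte addresses into a
 * 32-bit register window (stand-in for XGMAC_IOREAD). */
static uint32_t regs[64];

static uint32_t read32(unsigned int off)
{
	return regs[off / 4];
}

/* Compose a 64-bit counter from a lo/hi register pair: read the low
 * word, then merge in the adjacent high word, as xgbe_mmc_read() does
 * for the counters that are always 64 bits wide. */
static uint64_t read_counter64(unsigned int reg_lo)
{
	uint64_t val = read32(reg_lo);

	val |= (uint64_t)read32(reg_lo + 4) << 32;
	return val;
}

int main(void)
{
	regs[0x10 / 4] = 0xdeadbeef;	/* low word */
	regs[0x14 / 4] = 0x00000002;	/* high word */

	printf("counter = 0x%llx\n",
	       (unsigned long long)read_counter64(0x10));
	return 0;
}
```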
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index dc84f7193c2d..b26d75856553 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c | |||
@@ -361,6 +361,8 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata) | |||
361 | 361 | ||
362 | memset(hw_feat, 0, sizeof(*hw_feat)); | 362 | memset(hw_feat, 0, sizeof(*hw_feat)); |
363 | 363 | ||
364 | hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR); | ||
365 | |||
364 | /* Hardware feature register 0 */ | 366 | /* Hardware feature register 0 */ |
365 | hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); | 367 | hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL); |
366 | hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); | 368 | hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH); |
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index a076aca138a1..46f613028e9c 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | |||
@@ -361,15 +361,16 @@ static void xgbe_get_drvinfo(struct net_device *netdev, | |||
361 | struct ethtool_drvinfo *drvinfo) | 361 | struct ethtool_drvinfo *drvinfo) |
362 | { | 362 | { |
363 | struct xgbe_prv_data *pdata = netdev_priv(netdev); | 363 | struct xgbe_prv_data *pdata = netdev_priv(netdev); |
364 | struct xgbe_hw_features *hw_feat = &pdata->hw_feat; | ||
364 | 365 | ||
365 | strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver)); | 366 | strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver)); |
366 | strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version)); | 367 | strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version)); |
367 | strlcpy(drvinfo->bus_info, dev_name(pdata->dev), | 368 | strlcpy(drvinfo->bus_info, dev_name(pdata->dev), |
368 | sizeof(drvinfo->bus_info)); | 369 | sizeof(drvinfo->bus_info)); |
369 | snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", | 370 | snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d", |
370 | XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER), | 371 | XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER), |
371 | XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID), | 372 | XGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID), |
372 | XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER)); | 373 | XGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER)); |
373 | drvinfo->n_stats = XGBE_STATS_COUNT; | 374 | drvinfo->n_stats = XGBE_STATS_COUNT; |
374 | } | 375 | } |
375 | 376 | ||
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 8aa6a9353f7b..bdf9cfa70e88 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c | |||
@@ -172,7 +172,7 @@ static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata) | |||
172 | } | 172 | } |
173 | 173 | ||
174 | if (i < pdata->rx_ring_count) { | 174 | if (i < pdata->rx_ring_count) { |
175 | spin_lock_init(&tx_ring->lock); | 175 | spin_lock_init(&rx_ring->lock); |
176 | channel->rx_ring = rx_ring++; | 176 | channel->rx_ring = rx_ring++; |
177 | } | 177 | } |
178 | 178 | ||
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index 07bf70a82908..e9fe6e6ddcc3 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h | |||
@@ -183,6 +183,7 @@ | |||
183 | #define XGMAC_DRIVER_CONTEXT 1 | 183 | #define XGMAC_DRIVER_CONTEXT 1 |
184 | #define XGMAC_IOCTL_CONTEXT 2 | 184 | #define XGMAC_IOCTL_CONTEXT 2 |
185 | 185 | ||
186 | #define XGBE_FIFO_MAX 81920 | ||
186 | #define XGBE_FIFO_SIZE_B(x) (x) | 187 | #define XGBE_FIFO_SIZE_B(x) (x) |
187 | #define XGBE_FIFO_SIZE_KB(x) (x * 1024) | 188 | #define XGBE_FIFO_SIZE_KB(x) (x * 1024) |
188 | 189 | ||
@@ -526,6 +527,9 @@ struct xgbe_desc_if { | |||
526 | * or configurations are present in the device. | 527 | * or configurations are present in the device. |
527 | */ | 528 | */ |
528 | struct xgbe_hw_features { | 529 | struct xgbe_hw_features { |
530 | /* HW Version */ | ||
531 | unsigned int version; | ||
532 | |||
529 | /* HW Feature Register0 */ | 533 | /* HW Feature Register0 */ |
530 | unsigned int gmii; /* 1000 Mbps support */ | 534 | unsigned int gmii; /* 1000 Mbps support */ |
531 | unsigned int vlhash; /* VLAN Hash Filter */ | 535 | unsigned int vlhash; /* VLAN Hash Filter */ |
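The new XGBE_FIFO_MAX define in xgbe.h pairs with the min_t() clamp added to xgbe_calculate_per_queue_fifo() above: the advertised FIFO size is capped at 81920 bytes before being divided among the queues. A small sketch of that arithmetic follows; it keeps only the clamp-and-divide step and does not try to reproduce the driver's full programmable-size lookup table.

```c
#include <stdio.h>

#define XGBE_FIFO_MAX		81920
#define XGBE_FIFO_SIZE_KB(x)	((x) * 1024)

static unsigned int per_queue_fifo(unsigned int fifo_size,
				   unsigned int queue_count)
{
	/* The configured value is not the actual amount of FIFO RAM,
	 * so clamp it before splitting it across the queues. */
	if (fifo_size > XGBE_FIFO_MAX)
		fifo_size = XGBE_FIFO_MAX;

	return fifo_size / queue_count;
}

int main(void)
{
	/* e.g. hardware reports 256 KB but only 80 KB is usable */
	printf("%u bytes per queue\n",
	       per_queue_fifo(XGBE_FIFO_SIZE_KB(256), 8));
	return 0;
}
```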
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig index 616dff6d3f5f..f4054d242f3c 100644 --- a/drivers/net/ethernet/apm/xgene/Kconfig +++ b/drivers/net/ethernet/apm/xgene/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | config NET_XGENE | 1 | config NET_XGENE |
2 | tristate "APM X-Gene SoC Ethernet Driver" | 2 | tristate "APM X-Gene SoC Ethernet Driver" |
3 | depends on HAS_DMA | ||
3 | select PHYLIB | 4 | select PHYLIB |
4 | help | 5 | help |
5 | This is the Ethernet driver for the on-chip ethernet interface on the | 6 | This is the Ethernet driver for the on-chip ethernet interface on the |
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 7dcfb19a31c8..d8d07a818b89 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig | |||
@@ -84,7 +84,7 @@ config BNX2 | |||
84 | 84 | ||
85 | config CNIC | 85 | config CNIC |
86 | tristate "QLogic CNIC support" | 86 | tristate "QLogic CNIC support" |
87 | depends on PCI | 87 | depends on PCI && (IPV6 || IPV6=n) |
88 | select BNX2 | 88 | select BNX2 |
89 | select UIO | 89 | select UIO |
90 | ---help--- | 90 | ---help--- |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5ba8af50c84f..c4daa068f1db 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | |||
@@ -2233,7 +2233,12 @@ struct shmem2_region { | |||
2233 | u32 reserved3; /* Offset 0x14C */ | 2233 | u32 reserved3; /* Offset 0x14C */ |
2234 | u32 reserved4; /* Offset 0x150 */ | 2234 | u32 reserved4; /* Offset 0x150 */ |
2235 | u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ | 2235 | u32 link_attr_sync[PORT_MAX]; /* Offset 0x154 */ |
2236 | #define LINK_ATTR_SYNC_KR2_ENABLE (1<<0) | 2236 | #define LINK_ATTR_SYNC_KR2_ENABLE 0x00000001 |
2237 | #define LINK_SFP_EEPROM_COMP_CODE_MASK 0x0000ff00 | ||
2238 | #define LINK_SFP_EEPROM_COMP_CODE_SHIFT 8 | ||
2239 | #define LINK_SFP_EEPROM_COMP_CODE_SR 0x00001000 | ||
2240 | #define LINK_SFP_EEPROM_COMP_CODE_LR 0x00002000 | ||
2241 | #define LINK_SFP_EEPROM_COMP_CODE_LRM 0x00004000 | ||
2237 | 2242 | ||
2238 | u32 reserved5[2]; | 2243 | u32 reserved5[2]; |
2239 | u32 reserved6[PORT_MAX]; | 2244 | u32 reserved6[PORT_MAX]; |
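The new LINK_SFP_EEPROM_COMP_CODE_* definitions above reserve bits 8-15 of the shmem2 link_attr_sync word for the SFP module's 10G compliance code, next to the existing KR2 enable bit. A hedged sketch of the pack/unpack pattern using those mask and shift values:

```c
#include <stdint.h>
#include <stdio.h>

#define LINK_SFP_EEPROM_COMP_CODE_MASK	0x0000ff00
#define LINK_SFP_EEPROM_COMP_CODE_SHIFT	8

/* Replace the compliance-code field while leaving the other
 * link_attr_sync bits (e.g. the KR2 enable flag) untouched. */
static uint32_t set_comp_code(uint32_t attr, uint8_t comp_code)
{
	attr &= ~LINK_SFP_EEPROM_COMP_CODE_MASK;
	attr |= (uint32_t)comp_code << LINK_SFP_EEPROM_COMP_CODE_SHIFT;
	return attr;
}

static uint8_t get_comp_code(uint32_t attr)
{
	return (attr & LINK_SFP_EEPROM_COMP_CODE_MASK) >>
	       LINK_SFP_EEPROM_COMP_CODE_SHIFT;
}

int main(void)
{
	uint32_t attr = 0x00000001;		/* KR2 enable bit set */

	attr = set_comp_code(attr, 0x10);	/* 10G-SR compliance bit */
	printf("attr=0x%08x comp=0x%02x\n", attr, get_comp_code(attr));
	return 0;
}
```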
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 53fb4fa61b40..549549eaf580 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | |||
@@ -154,15 +154,22 @@ typedef int (*read_sfp_module_eeprom_func_p)(struct bnx2x_phy *phy, | |||
154 | LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) | 154 | LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE) |
155 | 155 | ||
156 | #define SFP_EEPROM_CON_TYPE_ADDR 0x2 | 156 | #define SFP_EEPROM_CON_TYPE_ADDR 0x2 |
157 | #define SFP_EEPROM_CON_TYPE_VAL_UNKNOWN 0x0 | ||
157 | #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 | 158 | #define SFP_EEPROM_CON_TYPE_VAL_LC 0x7 |
158 | #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 | 159 | #define SFP_EEPROM_CON_TYPE_VAL_COPPER 0x21 |
159 | #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 | 160 | #define SFP_EEPROM_CON_TYPE_VAL_RJ45 0x22 |
160 | 161 | ||
161 | 162 | ||
162 | #define SFP_EEPROM_COMP_CODE_ADDR 0x3 | 163 | #define SFP_EEPROM_10G_COMP_CODE_ADDR 0x3 |
163 | #define SFP_EEPROM_COMP_CODE_SR_MASK (1<<4) | 164 | #define SFP_EEPROM_10G_COMP_CODE_SR_MASK (1<<4) |
164 | #define SFP_EEPROM_COMP_CODE_LR_MASK (1<<5) | 165 | #define SFP_EEPROM_10G_COMP_CODE_LR_MASK (1<<5) |
165 | #define SFP_EEPROM_COMP_CODE_LRM_MASK (1<<6) | 166 | #define SFP_EEPROM_10G_COMP_CODE_LRM_MASK (1<<6) |
167 | |||
168 | #define SFP_EEPROM_1G_COMP_CODE_ADDR 0x6 | ||
169 | #define SFP_EEPROM_1G_COMP_CODE_SX (1<<0) | ||
170 | #define SFP_EEPROM_1G_COMP_CODE_LX (1<<1) | ||
171 | #define SFP_EEPROM_1G_COMP_CODE_CX (1<<2) | ||
172 | #define SFP_EEPROM_1G_COMP_CODE_BASE_T (1<<3) | ||
166 | 173 | ||
167 | #define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 | 174 | #define SFP_EEPROM_FC_TX_TECH_ADDR 0x8 |
168 | #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 | 175 | #define SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE 0x4 |
@@ -3633,8 +3640,8 @@ static void bnx2x_warpcore_enable_AN_KR2(struct bnx2x_phy *phy, | |||
3633 | reg_set[i].val); | 3640 | reg_set[i].val); |
3634 | 3641 | ||
3635 | /* Start KR2 work-around timer which handles BCM8073 link-partner */ | 3642 | /* Start KR2 work-around timer which handles BCM8073 link-partner */ |
3636 | vars->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; | 3643 | params->link_attr_sync |= LINK_ATTR_SYNC_KR2_ENABLE; |
3637 | bnx2x_update_link_attr(params, vars->link_attr_sync); | 3644 | bnx2x_update_link_attr(params, params->link_attr_sync); |
3638 | } | 3645 | } |
3639 | 3646 | ||
3640 | static void bnx2x_disable_kr2(struct link_params *params, | 3647 | static void bnx2x_disable_kr2(struct link_params *params, |
@@ -3666,8 +3673,8 @@ static void bnx2x_disable_kr2(struct link_params *params, | |||
3666 | for (i = 0; i < ARRAY_SIZE(reg_set); i++) | 3673 | for (i = 0; i < ARRAY_SIZE(reg_set); i++) |
3667 | bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, | 3674 | bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, |
3668 | reg_set[i].val); | 3675 | reg_set[i].val); |
3669 | vars->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; | 3676 | params->link_attr_sync &= ~LINK_ATTR_SYNC_KR2_ENABLE; |
3670 | bnx2x_update_link_attr(params, vars->link_attr_sync); | 3677 | bnx2x_update_link_attr(params, params->link_attr_sync); |
3671 | 3678 | ||
3672 | vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; | 3679 | vars->check_kr2_recovery_cnt = CHECK_KR2_RECOVERY_CNT; |
3673 | } | 3680 | } |
@@ -4810,7 +4817,7 @@ void bnx2x_link_status_update(struct link_params *params, | |||
4810 | ~FEATURE_CONFIG_PFC_ENABLED; | 4817 | ~FEATURE_CONFIG_PFC_ENABLED; |
4811 | 4818 | ||
4812 | if (SHMEM2_HAS(bp, link_attr_sync)) | 4819 | if (SHMEM2_HAS(bp, link_attr_sync)) |
4813 | vars->link_attr_sync = SHMEM2_RD(bp, | 4820 | params->link_attr_sync = SHMEM2_RD(bp, |
4814 | link_attr_sync[params->port]); | 4821 | link_attr_sync[params->port]); |
4815 | 4822 | ||
4816 | DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", | 4823 | DP(NETIF_MSG_LINK, "link_status 0x%x phy_link_up %x int_mask 0x%x\n", |
@@ -8057,21 +8064,24 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8057 | { | 8064 | { |
8058 | struct bnx2x *bp = params->bp; | 8065 | struct bnx2x *bp = params->bp; |
8059 | u32 sync_offset = 0, phy_idx, media_types; | 8066 | u32 sync_offset = 0, phy_idx, media_types; |
8060 | u8 gport, val[2], check_limiting_mode = 0; | 8067 | u8 val[SFP_EEPROM_FC_TX_TECH_ADDR + 1], check_limiting_mode = 0; |
8061 | *edc_mode = EDC_MODE_LIMITING; | 8068 | *edc_mode = EDC_MODE_LIMITING; |
8062 | phy->media_type = ETH_PHY_UNSPECIFIED; | 8069 | phy->media_type = ETH_PHY_UNSPECIFIED; |
8063 | /* First check for copper cable */ | 8070 | /* First check for copper cable */ |
8064 | if (bnx2x_read_sfp_module_eeprom(phy, | 8071 | if (bnx2x_read_sfp_module_eeprom(phy, |
8065 | params, | 8072 | params, |
8066 | I2C_DEV_ADDR_A0, | 8073 | I2C_DEV_ADDR_A0, |
8067 | SFP_EEPROM_CON_TYPE_ADDR, | 8074 | 0, |
8068 | 2, | 8075 | SFP_EEPROM_FC_TX_TECH_ADDR + 1, |
8069 | (u8 *)val) != 0) { | 8076 | (u8 *)val) != 0) { |
8070 | DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); | 8077 | DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); |
8071 | return -EINVAL; | 8078 | return -EINVAL; |
8072 | } | 8079 | } |
8073 | 8080 | params->link_attr_sync &= ~LINK_SFP_EEPROM_COMP_CODE_MASK; | |
8074 | switch (val[0]) { | 8081 | params->link_attr_sync |= val[SFP_EEPROM_10G_COMP_CODE_ADDR] << |
8082 | LINK_SFP_EEPROM_COMP_CODE_SHIFT; | ||
8083 | bnx2x_update_link_attr(params, params->link_attr_sync); | ||
8084 | switch (val[SFP_EEPROM_CON_TYPE_ADDR]) { | ||
8075 | case SFP_EEPROM_CON_TYPE_VAL_COPPER: | 8085 | case SFP_EEPROM_CON_TYPE_VAL_COPPER: |
8076 | { | 8086 | { |
8077 | u8 copper_module_type; | 8087 | u8 copper_module_type; |
@@ -8079,17 +8089,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8079 | /* Check if it's an active cable (includes SFP+ module) | 8089 | /* Check if it's an active cable (includes SFP+ module) |
8080 | * or a passive cable | 8090 | * or a passive cable |
8081 | */ | 8091 | */ |
8082 | if (bnx2x_read_sfp_module_eeprom(phy, | 8092 | copper_module_type = val[SFP_EEPROM_FC_TX_TECH_ADDR]; |
8083 | params, | ||
8084 | I2C_DEV_ADDR_A0, | ||
8085 | SFP_EEPROM_FC_TX_TECH_ADDR, | ||
8086 | 1, | ||
8087 | &copper_module_type) != 0) { | ||
8088 | DP(NETIF_MSG_LINK, | ||
8089 | "Failed to read copper-cable-type" | ||
8090 | " from SFP+ EEPROM\n"); | ||
8091 | return -EINVAL; | ||
8092 | } | ||
8093 | 8093 | ||
8094 | if (copper_module_type & | 8094 | if (copper_module_type & |
8095 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { | 8095 | SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_ACTIVE) { |
@@ -8115,16 +8115,18 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8115 | } | 8115 | } |
8116 | break; | 8116 | break; |
8117 | } | 8117 | } |
8118 | case SFP_EEPROM_CON_TYPE_VAL_UNKNOWN: | ||
8118 | case SFP_EEPROM_CON_TYPE_VAL_LC: | 8119 | case SFP_EEPROM_CON_TYPE_VAL_LC: |
8119 | case SFP_EEPROM_CON_TYPE_VAL_RJ45: | 8120 | case SFP_EEPROM_CON_TYPE_VAL_RJ45: |
8120 | check_limiting_mode = 1; | 8121 | check_limiting_mode = 1; |
8121 | if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | | 8122 | if ((val[SFP_EEPROM_10G_COMP_CODE_ADDR] & |
8122 | SFP_EEPROM_COMP_CODE_LR_MASK | | 8123 | (SFP_EEPROM_10G_COMP_CODE_SR_MASK | |
8123 | SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { | 8124 | SFP_EEPROM_10G_COMP_CODE_LR_MASK | |
8125 | SFP_EEPROM_10G_COMP_CODE_LRM_MASK)) == 0) { | ||
8124 | DP(NETIF_MSG_LINK, "1G SFP module detected\n"); | 8126 | DP(NETIF_MSG_LINK, "1G SFP module detected\n"); |
8125 | gport = params->port; | ||
8126 | phy->media_type = ETH_PHY_SFP_1G_FIBER; | 8127 | phy->media_type = ETH_PHY_SFP_1G_FIBER; |
8127 | if (phy->req_line_speed != SPEED_1000) { | 8128 | if (phy->req_line_speed != SPEED_1000) { |
8129 | u8 gport = params->port; | ||
8128 | phy->req_line_speed = SPEED_1000; | 8130 | phy->req_line_speed = SPEED_1000; |
8129 | if (!CHIP_IS_E1x(bp)) { | 8131 | if (!CHIP_IS_E1x(bp)) { |
8130 | gport = BP_PATH(bp) + | 8132 | gport = BP_PATH(bp) + |
@@ -8134,6 +8136,12 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8134 | "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n", | 8136 | "Warning: Link speed was forced to 1000Mbps. Current SFP module in port %d is not compliant with 10G Ethernet\n", |
8135 | gport); | 8137 | gport); |
8136 | } | 8138 | } |
8139 | if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] & | ||
8140 | SFP_EEPROM_1G_COMP_CODE_BASE_T) { | ||
8141 | bnx2x_sfp_set_transmitter(params, phy, 0); | ||
8142 | msleep(40); | ||
8143 | bnx2x_sfp_set_transmitter(params, phy, 1); | ||
8144 | } | ||
8137 | } else { | 8145 | } else { |
8138 | int idx, cfg_idx = 0; | 8146 | int idx, cfg_idx = 0; |
8139 | DP(NETIF_MSG_LINK, "10G Optic module detected\n"); | 8147 | DP(NETIF_MSG_LINK, "10G Optic module detected\n"); |
@@ -8149,7 +8157,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, | |||
8149 | break; | 8157 | break; |
8150 | default: | 8158 | default: |
8151 | DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", | 8159 | DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", |
8152 | val[0]); | 8160 | val[SFP_EEPROM_CON_TYPE_ADDR]); |
8153 | return -EINVAL; | 8161 | return -EINVAL; |
8154 | } | 8162 | } |
8155 | sync_offset = params->shmem_base + | 8163 | sync_offset = params->shmem_base + |
@@ -13507,7 +13515,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13507 | 13515 | ||
13508 | sigdet = bnx2x_warpcore_get_sigdet(phy, params); | 13516 | sigdet = bnx2x_warpcore_get_sigdet(phy, params); |
13509 | if (!sigdet) { | 13517 | if (!sigdet) { |
13510 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | 13518 | if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { |
13511 | bnx2x_kr2_recovery(params, vars, phy); | 13519 | bnx2x_kr2_recovery(params, vars, phy); |
13512 | DP(NETIF_MSG_LINK, "No sigdet\n"); | 13520 | DP(NETIF_MSG_LINK, "No sigdet\n"); |
13513 | } | 13521 | } |
@@ -13525,7 +13533,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13525 | 13533 | ||
13526 | /* CL73 has not begun yet */ | 13534 | /* CL73 has not begun yet */ |
13527 | if (base_page == 0) { | 13535 | if (base_page == 0) { |
13528 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | 13536 | if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { |
13529 | bnx2x_kr2_recovery(params, vars, phy); | 13537 | bnx2x_kr2_recovery(params, vars, phy); |
13530 | DP(NETIF_MSG_LINK, "No BP\n"); | 13538 | DP(NETIF_MSG_LINK, "No BP\n"); |
13531 | } | 13539 | } |
@@ -13541,7 +13549,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params, | |||
13541 | ((next_page & 0xe0) == 0x20)))); | 13549 | ((next_page & 0xe0) == 0x20)))); |
13542 | 13550 | ||
13543 | /* In case KR2 is already disabled, check if we need to re-enable it */ | 13551 | /* In case KR2 is already disabled, check if we need to re-enable it */ |
13544 | if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { | 13552 | if (!(params->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) { |
13545 | if (!not_kr2_device) { | 13553 | if (!not_kr2_device) { |
13546 | DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, | 13554 | DP(NETIF_MSG_LINK, "BP=0x%x, NP=0x%x\n", base_page, |
13547 | next_page); | 13555 | next_page); |
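The bnx2x_get_edc_mode() rework above reads SFP EEPROM bytes 0 through SFP_EEPROM_FC_TX_TECH_ADDR (0x8) in a single transaction and then indexes the buffer by byte address, instead of issuing a second read just for the copper-cable type. Below is a user-space sketch of that decode; the canned EEPROM image and the eeprom_read() helper stand in for the real I2C access and are not the driver's API.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SFP_EEPROM_CON_TYPE_ADDR		0x2
#define SFP_EEPROM_CON_TYPE_VAL_LC		0x7
#define SFP_EEPROM_CON_TYPE_VAL_COPPER		0x21
#define SFP_EEPROM_10G_COMP_CODE_ADDR		0x3
#define SFP_EEPROM_10G_COMP_CODE_SR_MASK	(1 << 4)
#define SFP_EEPROM_1G_COMP_CODE_ADDR		0x6
#define SFP_EEPROM_1G_COMP_CODE_BASE_T		(1 << 3)
#define SFP_EEPROM_FC_TX_TECH_ADDR		0x8

/* Stand-in for the module EEPROM read: copy "cnt" bytes of a canned
 * image starting at byte "addr". */
static int eeprom_read(uint8_t addr, uint8_t cnt, uint8_t *buf)
{
	static const uint8_t image[16] = {
		[SFP_EEPROM_CON_TYPE_ADDR] = SFP_EEPROM_CON_TYPE_VAL_LC,
		[SFP_EEPROM_10G_COMP_CODE_ADDR] = SFP_EEPROM_10G_COMP_CODE_SR_MASK,
	};

	if (addr + cnt > sizeof(image))
		return -1;
	memcpy(buf, image + addr, cnt);
	return 0;
}

int main(void)
{
	uint8_t val[SFP_EEPROM_FC_TX_TECH_ADDR + 1];

	/* One read covering bytes 0..8, then decode by byte address. */
	if (eeprom_read(0, sizeof(val), val))
		return 1;

	if (val[SFP_EEPROM_CON_TYPE_ADDR] == SFP_EEPROM_CON_TYPE_VAL_COPPER)
		printf("copper module, tx tech 0x%02x\n",
		       val[SFP_EEPROM_FC_TX_TECH_ADDR]);
	else if (val[SFP_EEPROM_10G_COMP_CODE_ADDR] & SFP_EEPROM_10G_COMP_CODE_SR_MASK)
		printf("10G optical module\n");
	else if (val[SFP_EEPROM_1G_COMP_CODE_ADDR] & SFP_EEPROM_1G_COMP_CODE_BASE_T)
		printf("1G BASE-T module\n");
	else
		printf("1G optical module\n");
	return 0;
}
```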
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index 389f5f8cb0a3..d9cce4c3899b 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h | |||
@@ -323,6 +323,9 @@ struct link_params { | |||
323 | #define LINK_FLAGS_INT_DISABLED (1<<0) | 323 | #define LINK_FLAGS_INT_DISABLED (1<<0) |
324 | #define PHY_INITIALIZED (1<<1) | 324 | #define PHY_INITIALIZED (1<<1) |
325 | u32 lfa_base; | 325 | u32 lfa_base; |
326 | |||
327 | /* The same definitions as the shmem2 parameter */ | ||
328 | u32 link_attr_sync; | ||
326 | }; | 329 | }; |
327 | 330 | ||
328 | /* Output parameters */ | 331 | /* Output parameters */ |
@@ -364,8 +367,6 @@ struct link_vars { | |||
364 | u8 rx_tx_asic_rst; | 367 | u8 rx_tx_asic_rst; |
365 | u8 turn_to_run_wc_rt; | 368 | u8 turn_to_run_wc_rt; |
366 | u16 rsrv2; | 369 | u16 rsrv2; |
367 | /* The same definitions as the shmem2 parameter */ | ||
368 | u32 link_attr_sync; | ||
369 | }; | 370 | }; |
370 | 371 | ||
371 | /***********************************************************/ | 372 | /***********************************************************/ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 900cab420810..d1c093dcb054 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -6849,6 +6849,37 @@ static void bnx2x__common_init_phy(struct bnx2x *bp) | |||
6849 | bnx2x_release_phy_lock(bp); | 6849 | bnx2x_release_phy_lock(bp); |
6850 | } | 6850 | } |
6851 | 6851 | ||
6852 | static void bnx2x_config_endianity(struct bnx2x *bp, u32 val) | ||
6853 | { | ||
6854 | REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, val); | ||
6855 | REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, val); | ||
6856 | REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, val); | ||
6857 | REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, val); | ||
6858 | REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, val); | ||
6859 | |||
6860 | /* make sure this value is 0 */ | ||
6861 | REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); | ||
6862 | |||
6863 | REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, val); | ||
6864 | REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, val); | ||
6865 | REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, val); | ||
6866 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, val); | ||
6867 | } | ||
6868 | |||
6869 | static void bnx2x_set_endianity(struct bnx2x *bp) | ||
6870 | { | ||
6871 | #ifdef __BIG_ENDIAN | ||
6872 | bnx2x_config_endianity(bp, 1); | ||
6873 | #else | ||
6874 | bnx2x_config_endianity(bp, 0); | ||
6875 | #endif | ||
6876 | } | ||
6877 | |||
6878 | static void bnx2x_reset_endianity(struct bnx2x *bp) | ||
6879 | { | ||
6880 | bnx2x_config_endianity(bp, 0); | ||
6881 | } | ||
6882 | |||
6852 | /** | 6883 | /** |
6853 | * bnx2x_init_hw_common - initialize the HW at the COMMON phase. | 6884 | * bnx2x_init_hw_common - initialize the HW at the COMMON phase. |
6854 | * | 6885 | * |
@@ -6915,23 +6946,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp) | |||
6915 | 6946 | ||
6916 | bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); | 6947 | bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON); |
6917 | bnx2x_init_pxp(bp); | 6948 | bnx2x_init_pxp(bp); |
6918 | 6949 | bnx2x_set_endianity(bp); | |
6919 | #ifdef __BIG_ENDIAN | ||
6920 | REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1); | ||
6921 | REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1); | ||
6922 | REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1); | ||
6923 | REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1); | ||
6924 | REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1); | ||
6925 | /* make sure this value is 0 */ | ||
6926 | REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0); | ||
6927 | |||
6928 | /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */ | ||
6929 | REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1); | ||
6930 | REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1); | ||
6931 | REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1); | ||
6932 | REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1); | ||
6933 | #endif | ||
6934 | |||
6935 | bnx2x_ilt_init_page_size(bp, INITOP_SET); | 6950 | bnx2x_ilt_init_page_size(bp, INITOP_SET); |
6936 | 6951 | ||
6937 | if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) | 6952 | if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp)) |
@@ -13169,9 +13184,15 @@ static void __bnx2x_remove(struct pci_dev *pdev, | |||
13169 | bnx2x_iov_remove_one(bp); | 13184 | bnx2x_iov_remove_one(bp); |
13170 | 13185 | ||
13171 | /* Power on: we can't let PCI layer write to us while we are in D3 */ | 13186 | /* Power on: we can't let PCI layer write to us while we are in D3 */ |
13172 | if (IS_PF(bp)) | 13187 | if (IS_PF(bp)) { |
13173 | bnx2x_set_power_state(bp, PCI_D0); | 13188 | bnx2x_set_power_state(bp, PCI_D0); |
13174 | 13189 | ||
13190 | /* Set endianity registers to reset values in case the next driver | ||
13191 | * boots in a different endianity environment. | ||
13192 | */ | ||
13193 | bnx2x_reset_endianity(bp); | ||
13194 | } | ||
13195 | |||
13175 | /* Disable MSI/MSI-X */ | 13196 | /* Disable MSI/MSI-X */ |
13176 | bnx2x_disable_msi(bp); | 13197 | bnx2x_disable_msi(bp); |
13177 | 13198 | ||
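bnx2x_set_endianity()/bnx2x_reset_endianity() above fold the old #ifdef __BIG_ENDIAN register writes into one helper that writes 1 on big-endian hosts and 0 otherwise, and the remove path now restores the reset value so a later driver instance running with the other endianness still works. A hedged sketch of that pattern, with a plain array standing in for the PXP2 swap-mode registers:

```c
#include <stdio.h>

/* Stand-ins for the PXP2 endianness/swap-mode registers. */
enum { REG_QM_SWAP, REG_TM_SWAP, REG_SRC_SWAP, NR_SWAP_REGS };
static unsigned int swap_regs[NR_SWAP_REGS];

static void config_endianity(unsigned int val)
{
	for (int i = 0; i < NR_SWAP_REGS; i++)
		swap_regs[i] = val;
}

static void set_endianity(void)
{
	/* Compile-time choice, mirroring the #ifdef __BIG_ENDIAN split. */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	config_endianity(1);
#else
	config_endianity(0);
#endif
}

static void reset_endianity(void)
{
	/* Back to the reset value before handing the device on. */
	config_endianity(0);
}

int main(void)
{
	set_endianity();
	printf("swap mode after init:   %u\n", swap_regs[REG_QM_SWAP]);
	reset_endianity();
	printf("swap mode after remove: %u\n", swap_regs[REG_QM_SWAP]);
	return 0;
}
```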
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index 27861a6c7ca5..a6a9f284c8dd 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c | |||
@@ -31,7 +31,7 @@ | |||
31 | #include <linux/if_vlan.h> | 31 | #include <linux/if_vlan.h> |
32 | #include <linux/prefetch.h> | 32 | #include <linux/prefetch.h> |
33 | #include <linux/random.h> | 33 | #include <linux/random.h> |
34 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 34 | #if IS_ENABLED(CONFIG_VLAN_8021Q) |
35 | #define BCM_VLAN 1 | 35 | #define BCM_VLAN 1 |
36 | #endif | 36 | #endif |
37 | #include <net/ip.h> | 37 | #include <net/ip.h> |
@@ -3685,7 +3685,7 @@ static int cnic_get_v4_route(struct sockaddr_in *dst_addr, | |||
3685 | static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, | 3685 | static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr, |
3686 | struct dst_entry **dst) | 3686 | struct dst_entry **dst) |
3687 | { | 3687 | { |
3688 | #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE)) | 3688 | #if IS_ENABLED(CONFIG_IPV6) |
3689 | struct flowi6 fl6; | 3689 | struct flowi6 fl6; |
3690 | 3690 | ||
3691 | memset(&fl6, 0, sizeof(fl6)); | 3691 | memset(&fl6, 0, sizeof(fl6)); |
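The cnic.c hunks above replace the open-coded defined(CONFIG_FOO) || defined(CONFIG_FOO_MODULE) tests with IS_ENABLED(), which answers "built-in or module?" in one macro. The following is a simplified, self-contained sketch of how such a macro can be constructed; it mimics the kconfig.h trick (set options are defined to 1) but is not the kernel header itself, and the CONFIG_VLAN_8021Q_MODULE define is just a pretend kconfig result.

```c
#include <stdio.h>

/* Pretend kconfig output: VLAN support built as a module. */
#define CONFIG_VLAN_8021Q_MODULE 1

/* If "option" is defined to 1, __is_defined(option) expands to 1,
 * otherwise to 0 - usable both in #if and in ordinary C expressions. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x)		___is_defined(x)
#define ___is_defined(val)	____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg)	__take_second_arg(arg 1, 0, 0)

#define IS_BUILTIN(option)	__is_defined(option)
#define IS_MODULE(option)	__is_defined(option##_MODULE)
#define IS_ENABLED(option)	(IS_BUILTIN(option) || IS_MODULE(option))

int main(void)
{
#if IS_ENABLED(CONFIG_VLAN_8021Q)
	puts("VLAN support available (built-in or module)");
#else
	puts("VLAN support not available");
#endif
	printf("builtin=%d module=%d\n",
	       IS_BUILTIN(CONFIG_VLAN_8021Q), IS_MODULE(CONFIG_VLAN_8021Q));
	return 0;
}
```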
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 3ac5d23454a8..cb77ae93d89a 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -11617,6 +11617,12 @@ static int tg3_open(struct net_device *dev) | |||
11617 | struct tg3 *tp = netdev_priv(dev); | 11617 | struct tg3 *tp = netdev_priv(dev); |
11618 | int err; | 11618 | int err; |
11619 | 11619 | ||
11620 | if (tp->pcierr_recovery) { | ||
11621 | netdev_err(dev, "Failed to open device. PCI error recovery " | ||
11622 | "in progress\n"); | ||
11623 | return -EAGAIN; | ||
11624 | } | ||
11625 | |||
11620 | if (tp->fw_needed) { | 11626 | if (tp->fw_needed) { |
11621 | err = tg3_request_firmware(tp); | 11627 | err = tg3_request_firmware(tp); |
11622 | if (tg3_asic_rev(tp) == ASIC_REV_57766) { | 11628 | if (tg3_asic_rev(tp) == ASIC_REV_57766) { |
@@ -11674,6 +11680,12 @@ static int tg3_close(struct net_device *dev) | |||
11674 | { | 11680 | { |
11675 | struct tg3 *tp = netdev_priv(dev); | 11681 | struct tg3 *tp = netdev_priv(dev); |
11676 | 11682 | ||
11683 | if (tp->pcierr_recovery) { | ||
11684 | netdev_err(dev, "Failed to close device. PCI error recovery " | ||
11685 | "in progress\n"); | ||
11686 | return -EAGAIN; | ||
11687 | } | ||
11688 | |||
11677 | tg3_ptp_fini(tp); | 11689 | tg3_ptp_fini(tp); |
11678 | 11690 | ||
11679 | tg3_stop(tp); | 11691 | tg3_stop(tp); |
@@ -17561,6 +17573,7 @@ static int tg3_init_one(struct pci_dev *pdev, | |||
17561 | tp->rx_mode = TG3_DEF_RX_MODE; | 17573 | tp->rx_mode = TG3_DEF_RX_MODE; |
17562 | tp->tx_mode = TG3_DEF_TX_MODE; | 17574 | tp->tx_mode = TG3_DEF_TX_MODE; |
17563 | tp->irq_sync = 1; | 17575 | tp->irq_sync = 1; |
17576 | tp->pcierr_recovery = false; | ||
17564 | 17577 | ||
17565 | if (tg3_debug > 0) | 17578 | if (tg3_debug > 0) |
17566 | tp->msg_enable = tg3_debug; | 17579 | tp->msg_enable = tg3_debug; |
@@ -18071,6 +18084,8 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | |||
18071 | 18084 | ||
18072 | rtnl_lock(); | 18085 | rtnl_lock(); |
18073 | 18086 | ||
18087 | tp->pcierr_recovery = true; | ||
18088 | |||
18074 | /* We probably don't have netdev yet */ | 18089 | /* We probably don't have netdev yet */ |
18075 | if (!netdev || !netif_running(netdev)) | 18090 | if (!netdev || !netif_running(netdev)) |
18076 | goto done; | 18091 | goto done; |
@@ -18195,6 +18210,7 @@ static void tg3_io_resume(struct pci_dev *pdev) | |||
18195 | tg3_phy_start(tp); | 18210 | tg3_phy_start(tp); |
18196 | 18211 | ||
18197 | done: | 18212 | done: |
18213 | tp->pcierr_recovery = false; | ||
18198 | rtnl_unlock(); | 18214 | rtnl_unlock(); |
18199 | } | 18215 | } |
18200 | 18216 | ||
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h index 461accaf0aa4..31c9f8295953 100644 --- a/drivers/net/ethernet/broadcom/tg3.h +++ b/drivers/net/ethernet/broadcom/tg3.h | |||
@@ -3407,6 +3407,7 @@ struct tg3 { | |||
3407 | 3407 | ||
3408 | struct device *hwmon_dev; | 3408 | struct device *hwmon_dev; |
3409 | bool link_up; | 3409 | bool link_up; |
3410 | bool pcierr_recovery; | ||
3410 | }; | 3411 | }; |
3411 | 3412 | ||
3412 | /* Accessor macros for chip and asic attributes | 3413 | /* Accessor macros for chip and asic attributes |
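The tg3 hunks above add a pcierr_recovery flag that is raised in the PCI error_detected callback, cleared again in io_resume, and checked in the open/close paths so they back off with -EAGAIN while recovery is in flight. A minimal sketch of that guard, with a plain struct standing in for struct tg3 and no real locking:

```c
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct nic {
	bool pcierr_recovery;
};

static int nic_open(struct nic *tp)
{
	if (tp->pcierr_recovery) {
		fprintf(stderr, "open refused: PCI error recovery in progress\n");
		return -EAGAIN;
	}
	/* ... normal bring-up would go here ... */
	return 0;
}

static void nic_io_error_detected(struct nic *tp)
{
	tp->pcierr_recovery = true;	/* block open/close until resume */
}

static void nic_io_resume(struct nic *tp)
{
	tp->pcierr_recovery = false;
}

int main(void)
{
	struct nic tp = { .pcierr_recovery = false };

	nic_io_error_detected(&tp);
	printf("open during recovery -> %d\n", nic_open(&tp));
	nic_io_resume(&tp);
	printf("open after resume    -> %d\n", nic_open(&tp));
	return 0;
}
```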
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index ff8cae5e2535..ffc92a41d75b 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
@@ -2506,7 +2506,7 @@ bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb) | |||
2506 | * For TSO, the TCP checksum field is seeded with pseudo-header sum | 2506 | * For TSO, the TCP checksum field is seeded with pseudo-header sum |
2507 | * excluding the length field. | 2507 | * excluding the length field. |
2508 | */ | 2508 | */ |
2509 | if (skb->protocol == htons(ETH_P_IP)) { | 2509 | if (vlan_get_protocol(skb) == htons(ETH_P_IP)) { |
2510 | struct iphdr *iph = ip_hdr(skb); | 2510 | struct iphdr *iph = ip_hdr(skb); |
2511 | 2511 | ||
2512 | /* Do we really need these? */ | 2512 | /* Do we really need these? */ |
@@ -2870,12 +2870,13 @@ bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb, | |||
2870 | } | 2870 | } |
2871 | 2871 | ||
2872 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2872 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2873 | __be16 net_proto = vlan_get_protocol(skb); | ||
2873 | u8 proto = 0; | 2874 | u8 proto = 0; |
2874 | 2875 | ||
2875 | if (skb->protocol == htons(ETH_P_IP)) | 2876 | if (net_proto == htons(ETH_P_IP)) |
2876 | proto = ip_hdr(skb)->protocol; | 2877 | proto = ip_hdr(skb)->protocol; |
2877 | #ifdef NETIF_F_IPV6_CSUM | 2878 | #ifdef NETIF_F_IPV6_CSUM |
2878 | else if (skb->protocol == htons(ETH_P_IPV6)) { | 2879 | else if (net_proto == htons(ETH_P_IPV6)) { |
2879 | /* nexthdr may not be TCP immediately. */ | 2880 | /* nexthdr may not be TCP immediately. */ |
2880 | proto = ipv6_hdr(skb)->nexthdr; | 2881 | proto = ipv6_hdr(skb)->nexthdr; |
2881 | } | 2882 | } |
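The bnad.c change above keys the TSO and checksum-offload setup off vlan_get_protocol(skb) rather than skb->protocol, so 802.1Q-tagged frames still resolve to the inner IPv4/IPv6 ethertype. Below is a user-space sketch of that "skip the VLAN tag to find the real protocol" step; the hand-built frame and the inner_protocol() helper are illustrative only and are not the kernel's vlan_get_protocol().

```c
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD
#define ETH_P_8021Q	0x8100

/* Return the ethertype that actually describes the payload: if the
 * outer type is 802.1Q, look past the 4-byte VLAN tag. */
static uint16_t inner_protocol(const uint8_t *frame)
{
	uint16_t proto = (uint16_t)(frame[12] << 8) | frame[13];

	if (proto == ETH_P_8021Q)
		proto = (uint16_t)(frame[16] << 8) | frame[17];
	return proto;
}

int main(void)
{
	uint8_t tagged[32] = { 0 };

	/* dst/src MACs left zero; outer type 0x8100, inner type 0x0800 */
	tagged[12] = 0x81; tagged[13] = 0x00;
	tagged[16] = 0x08; tagged[17] = 0x00;

	printf("inner ethertype = 0x%04x (IPv4? %s)\n",
	       inner_protocol(tagged),
	       inner_protocol(tagged) == ETH_P_IP ? "yes" : "no");
	return 0;
}
```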
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig index 184a063bed5f..07d2201530d2 100644 --- a/drivers/net/ethernet/calxeda/Kconfig +++ b/drivers/net/ethernet/calxeda/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config NET_CALXEDA_XGMAC | 1 | config NET_CALXEDA_XGMAC |
2 | tristate "Calxeda 1G/10G XGMAC Ethernet driver" | 2 | tristate "Calxeda 1G/10G XGMAC Ethernet driver" |
3 | depends on HAS_IOMEM && HAS_DMA | 3 | depends on HAS_IOMEM && HAS_DMA |
4 | depends on ARCH_HIGHBANK || COMPILE_TEST | ||
4 | select CRC32 | 5 | select CRC32 |
5 | help | 6 | help |
6 | This is the driver for the XGMAC Ethernet IP block found on Calxeda | 7 | This is the driver for the XGMAC Ethernet IP block found on Calxeda |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 18fb9c61d7ba..8c34811a1128 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -1253,7 +1253,9 @@ freeout: t4_free_sge_resources(adap); | |||
1253 | goto freeout; | 1253 | goto freeout; |
1254 | } | 1254 | } |
1255 | 1255 | ||
1256 | t4_write_reg(adap, MPS_TRC_RSS_CONTROL, | 1256 | t4_write_reg(adap, is_t4(adap->params.chip) ? |
1257 | MPS_TRC_RSS_CONTROL : | ||
1258 | MPS_T5_TRC_RSS_CONTROL, | ||
1257 | RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | | 1259 | RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) | |
1258 | QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); | 1260 | QUEUENUMBER(s->ethrxq[0].rspq.abs_id)); |
1259 | return 0; | 1261 | return 0; |
@@ -1761,7 +1763,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
1761 | 0xd004, 0xd03c, | 1763 | 0xd004, 0xd03c, |
1762 | 0xdfc0, 0xdfe0, | 1764 | 0xdfc0, 0xdfe0, |
1763 | 0xe000, 0xea7c, | 1765 | 0xe000, 0xea7c, |
1764 | 0xf000, 0x11190, | 1766 | 0xf000, 0x11110, |
1767 | 0x11118, 0x11190, | ||
1765 | 0x19040, 0x1906c, | 1768 | 0x19040, 0x1906c, |
1766 | 0x19078, 0x19080, | 1769 | 0x19078, 0x19080, |
1767 | 0x1908c, 0x19124, | 1770 | 0x1908c, 0x19124, |
@@ -1968,7 +1971,8 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
1968 | 0xd004, 0xd03c, | 1971 | 0xd004, 0xd03c, |
1969 | 0xdfc0, 0xdfe0, | 1972 | 0xdfc0, 0xdfe0, |
1970 | 0xe000, 0x11088, | 1973 | 0xe000, 0x11088, |
1971 | 0x1109c, 0x1117c, | 1974 | 0x1109c, 0x11110, |
1975 | 0x11118, 0x1117c, | ||
1972 | 0x11190, 0x11204, | 1976 | 0x11190, 0x11204, |
1973 | 0x19040, 0x1906c, | 1977 | 0x19040, 0x1906c, |
1974 | 0x19078, 0x19080, | 1978 | 0x19078, 0x19080, |
@@ -5955,7 +5959,8 @@ static int adap_init0(struct adapter *adap) | |||
5955 | params[3] = FW_PARAM_PFVF(CQ_END); | 5959 | params[3] = FW_PARAM_PFVF(CQ_END); |
5956 | params[4] = FW_PARAM_PFVF(OCQ_START); | 5960 | params[4] = FW_PARAM_PFVF(OCQ_START); |
5957 | params[5] = FW_PARAM_PFVF(OCQ_END); | 5961 | params[5] = FW_PARAM_PFVF(OCQ_END); |
5958 | ret = t4_query_params(adap, 0, 0, 0, 6, params, val); | 5962 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, |
5963 | val); | ||
5959 | if (ret < 0) | 5964 | if (ret < 0) |
5960 | goto bye; | 5965 | goto bye; |
5961 | adap->vres.qp.start = val[0]; | 5966 | adap->vres.qp.start = val[0]; |
@@ -5967,7 +5972,8 @@ static int adap_init0(struct adapter *adap) | |||
5967 | 5972 | ||
5968 | params[0] = FW_PARAM_DEV(MAXORDIRD_QP); | 5973 | params[0] = FW_PARAM_DEV(MAXORDIRD_QP); |
5969 | params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); | 5974 | params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER); |
5970 | ret = t4_query_params(adap, 0, 0, 0, 2, params, val); | 5975 | ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, |
5976 | val); | ||
5971 | if (ret < 0) { | 5977 | if (ret < 0) { |
5972 | adap->params.max_ordird_qp = 8; | 5978 | adap->params.max_ordird_qp = 8; |
5973 | adap->params.max_ird_adapter = 32 * adap->tids.ntids; | 5979 | adap->params.max_ird_adapter = 32 * adap->tids.ntids; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index a853133d8db8..41d04462b72e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -168,6 +168,34 @@ void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val) | |||
168 | } | 168 | } |
169 | 169 | ||
170 | /* | 170 | /* |
171 | * t4_report_fw_error - report firmware error | ||
172 | * @adap: the adapter | ||
173 | * | ||
174 | * The adapter firmware can indicate error conditions to the host. | ||
175 | * If the firmware has indicated an error, print out the reason for | ||
176 | * the firmware error. | ||
177 | */ | ||
178 | static void t4_report_fw_error(struct adapter *adap) | ||
179 | { | ||
180 | static const char *const reason[] = { | ||
181 | "Crash", /* PCIE_FW_EVAL_CRASH */ | ||
182 | "During Device Preparation", /* PCIE_FW_EVAL_PREP */ | ||
183 | "During Device Configuration", /* PCIE_FW_EVAL_CONF */ | ||
184 | "During Device Initialization", /* PCIE_FW_EVAL_INIT */ | ||
185 | "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */ | ||
186 | "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */ | ||
187 | "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */ | ||
188 | "Reserved", /* reserved */ | ||
189 | }; | ||
190 | u32 pcie_fw; | ||
191 | |||
192 | pcie_fw = t4_read_reg(adap, MA_PCIE_FW); | ||
193 | if (pcie_fw & FW_PCIE_FW_ERR) | ||
194 | dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n", | ||
195 | reason[FW_PCIE_FW_EVAL_GET(pcie_fw)]); | ||
196 | } | ||
197 | |||
198 | /* | ||
171 | * Get the reply to a mailbox command and store it in @rpl in big-endian order. | 199 | * Get the reply to a mailbox command and store it in @rpl in big-endian order. |
172 | */ | 200 | */ |
173 | static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, | 201 | static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit, |
@@ -300,6 +328,7 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, | |||
300 | dump_mbox(adap, mbox, data_reg); | 328 | dump_mbox(adap, mbox, data_reg); |
301 | dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", | 329 | dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n", |
302 | *(const u8 *)cmd, mbox); | 330 | *(const u8 *)cmd, mbox); |
331 | t4_report_fw_error(adap); | ||
303 | return -ETIMEDOUT; | 332 | return -ETIMEDOUT; |
304 | } | 333 | } |
305 | 334 | ||
@@ -566,6 +595,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, | |||
566 | #define VPD_BASE 0x400 | 595 | #define VPD_BASE 0x400 |
567 | #define VPD_BASE_OLD 0 | 596 | #define VPD_BASE_OLD 0 |
568 | #define VPD_LEN 1024 | 597 | #define VPD_LEN 1024 |
598 | #define CHELSIO_VPD_UNIQUE_ID 0x82 | ||
569 | 599 | ||
570 | /** | 600 | /** |
571 | * t4_seeprom_wp - enable/disable EEPROM write protection | 601 | * t4_seeprom_wp - enable/disable EEPROM write protection |
@@ -603,7 +633,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | |||
603 | ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); | 633 | ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd); |
604 | if (ret < 0) | 634 | if (ret < 0) |
605 | goto out; | 635 | goto out; |
606 | addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD; | 636 | |
637 | /* The VPD shall have a unique identifier specified by the PCI SIG. | ||
638 | * For chelsio adapters, the identifier is 0x82. The first byte of a VPD | ||
639 | * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software | ||
640 | * is expected to automatically put this entry at the | ||
641 | * beginning of the VPD. | ||
642 | */ | ||
643 | addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD; | ||
607 | 644 | ||
608 | ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); | 645 | ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd); |
609 | if (ret < 0) | 646 | if (ret < 0) |
@@ -667,6 +704,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p) | |||
667 | i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); | 704 | i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE); |
668 | memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); | 705 | memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN)); |
669 | strim(p->sn); | 706 | strim(p->sn); |
707 | i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE); | ||
670 | memcpy(p->pn, vpd + pn, min(i, PN_LEN)); | 708 | memcpy(p->pn, vpd + pn, min(i, PN_LEN)); |
671 | strim(p->pn); | 709 | strim(p->pn); |
672 | 710 | ||
@@ -1394,15 +1432,18 @@ static void pcie_intr_handler(struct adapter *adapter) | |||
1394 | 1432 | ||
1395 | int fat; | 1433 | int fat; |
1396 | 1434 | ||
1397 | fat = t4_handle_intr_status(adapter, | 1435 | if (is_t4(adapter->params.chip)) |
1398 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, | 1436 | fat = t4_handle_intr_status(adapter, |
1399 | sysbus_intr_info) + | 1437 | PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS, |
1400 | t4_handle_intr_status(adapter, | 1438 | sysbus_intr_info) + |
1401 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, | 1439 | t4_handle_intr_status(adapter, |
1402 | pcie_port_intr_info) + | 1440 | PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS, |
1403 | t4_handle_intr_status(adapter, PCIE_INT_CAUSE, | 1441 | pcie_port_intr_info) + |
1404 | is_t4(adapter->params.chip) ? | 1442 | t4_handle_intr_status(adapter, PCIE_INT_CAUSE, |
1405 | pcie_intr_info : t5_pcie_intr_info); | 1443 | pcie_intr_info); |
1444 | else | ||
1445 | fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE, | ||
1446 | t5_pcie_intr_info); | ||
1406 | 1447 | ||
1407 | if (fat) | 1448 | if (fat) |
1408 | t4_fatal_err(adapter); | 1449 | t4_fatal_err(adapter); |
@@ -1521,6 +1562,9 @@ static void cim_intr_handler(struct adapter *adapter) | |||
1521 | 1562 | ||
1522 | int fat; | 1563 | int fat; |
1523 | 1564 | ||
1565 | if (t4_read_reg(adapter, MA_PCIE_FW) & FW_PCIE_FW_ERR) | ||
1566 | t4_report_fw_error(adapter); | ||
1567 | |||
1524 | fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, | 1568 | fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE, |
1525 | cim_intr_info) + | 1569 | cim_intr_info) + |
1526 | t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, | 1570 | t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE, |
@@ -1768,10 +1812,16 @@ static void ma_intr_handler(struct adapter *adap) | |||
1768 | { | 1812 | { |
1769 | u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); | 1813 | u32 v, status = t4_read_reg(adap, MA_INT_CAUSE); |
1770 | 1814 | ||
1771 | if (status & MEM_PERR_INT_CAUSE) | 1815 | if (status & MEM_PERR_INT_CAUSE) { |
1772 | dev_alert(adap->pdev_dev, | 1816 | dev_alert(adap->pdev_dev, |
1773 | "MA parity error, parity status %#x\n", | 1817 | "MA parity error, parity status %#x\n", |
1774 | t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); | 1818 | t4_read_reg(adap, MA_PARITY_ERROR_STATUS)); |
1819 | if (is_t5(adap->params.chip)) | ||
1820 | dev_alert(adap->pdev_dev, | ||
1821 | "MA parity error, parity status %#x\n", | ||
1822 | t4_read_reg(adap, | ||
1823 | MA_PARITY_ERROR_STATUS2)); | ||
1824 | } | ||
1775 | if (status & MEM_WRAP_INT_CAUSE) { | 1825 | if (status & MEM_WRAP_INT_CAUSE) { |
1776 | v = t4_read_reg(adap, MA_INT_WRAP_STATUS); | 1826 | v = t4_read_reg(adap, MA_INT_WRAP_STATUS); |
1777 | dev_alert(adap->pdev_dev, "MA address wrap-around error by " | 1827 | dev_alert(adap->pdev_dev, "MA address wrap-around error by " |
@@ -2733,12 +2783,16 @@ retry: | |||
2733 | /* | 2783 | /* |
2734 | * Issue the HELLO command to the firmware. If it's not successful | 2784 | * Issue the HELLO command to the firmware. If it's not successful |
2735 | * but indicates that we got a "busy" or "timeout" condition, retry | 2785 | * but indicates that we got a "busy" or "timeout" condition, retry |
2736 | * the HELLO until we exhaust our retry limit. | 2786 | * the HELLO until we exhaust our retry limit. If we do exceed our |
2787 | * retry limit, check to see if the firmware left us any error | ||
2788 | * information and report that if so. | ||
2737 | */ | 2789 | */ |
2738 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); | 2790 | ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c); |
2739 | if (ret < 0) { | 2791 | if (ret < 0) { |
2740 | if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) | 2792 | if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0) |
2741 | goto retry; | 2793 | goto retry; |
2794 | if (t4_read_reg(adap, MA_PCIE_FW) & FW_PCIE_FW_ERR) | ||
2795 | t4_report_fw_error(adap); | ||
2742 | return ret; | 2796 | return ret; |
2743 | } | 2797 | } |
2744 | 2798 | ||
@@ -3742,6 +3796,7 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl) | |||
3742 | lc->link_ok = link_ok; | 3796 | lc->link_ok = link_ok; |
3743 | lc->speed = speed; | 3797 | lc->speed = speed; |
3744 | lc->fc = fc; | 3798 | lc->fc = fc; |
3799 | lc->supported = be16_to_cpu(p->u.info.pcap); | ||
3745 | t4_os_link_changed(adap, port, link_ok); | 3800 | t4_os_link_changed(adap, port, link_ok); |
3746 | } | 3801 | } |
3747 | if (mod != pi->mod_type) { | 3802 | if (mod != pi->mod_type) { |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h index e3146e83df20..39fb325474f7 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | |||
@@ -511,6 +511,7 @@ | |||
511 | #define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) | 511 | #define MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT) |
512 | #define MA_PCIE_FW 0x30b8 | 512 | #define MA_PCIE_FW 0x30b8 |
513 | #define MA_PARITY_ERROR_STATUS 0x77f4 | 513 | #define MA_PARITY_ERROR_STATUS 0x77f4 |
514 | #define MA_PARITY_ERROR_STATUS2 0x7804 | ||
514 | 515 | ||
515 | #define MA_EXT_MEMORY1_BAR 0x7808 | 516 | #define MA_EXT_MEMORY1_BAR 0x7808 |
516 | #define EDC_0_BASE_ADDR 0x7900 | 517 | #define EDC_0_BASE_ADDR 0x7900 |
@@ -959,6 +960,7 @@ | |||
959 | #define TRCMULTIFILTER 0x00000001U | 960 | #define TRCMULTIFILTER 0x00000001U |
960 | 961 | ||
961 | #define MPS_TRC_RSS_CONTROL 0x9808 | 962 | #define MPS_TRC_RSS_CONTROL 0x9808 |
963 | #define MPS_T5_TRC_RSS_CONTROL 0xa00c | ||
962 | #define RSSCONTROL_MASK 0x00ff0000U | 964 | #define RSSCONTROL_MASK 0x00ff0000U |
963 | #define RSSCONTROL_SHIFT 16 | 965 | #define RSSCONTROL_SHIFT 16 |
964 | #define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) | 966 | #define RSSCONTROL(x) ((x) << RSSCONTROL_SHIFT) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 5f2729ebadbe..3409756a85b9 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | |||
@@ -2228,6 +2228,10 @@ struct fw_debug_cmd { | |||
2228 | #define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) | 2228 | #define FW_PCIE_FW_MASTER(x) ((x) << FW_PCIE_FW_MASTER_SHIFT) |
2229 | #define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \ | 2229 | #define FW_PCIE_FW_MASTER_GET(x) (((x) >> FW_PCIE_FW_MASTER_SHIFT) & \ |
2230 | FW_PCIE_FW_MASTER_MASK) | 2230 | FW_PCIE_FW_MASTER_MASK) |
2231 | #define FW_PCIE_FW_EVAL_MASK 0x7 | ||
2232 | #define FW_PCIE_FW_EVAL_SHIFT 24 | ||
2233 | #define FW_PCIE_FW_EVAL_GET(x) (((x) >> FW_PCIE_FW_EVAL_SHIFT) & \ | ||
2234 | FW_PCIE_FW_EVAL_MASK) | ||
2231 | 2235 | ||
2232 | struct fw_hdr { | 2236 | struct fw_hdr { |
2233 | u8 ver; | 2237 | u8 ver; |
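The FW_PCIE_FW_EVAL_* macros added above are what the new t4_report_fw_error() (in the t4_hw.c hunk earlier) uses to index its reason[] table. As a worked example with a hypothetical register value whose evaluation field (bits 26:24) holds 2 — the value itself is made up for illustration, not taken from the patch:

	u32 pcie_fw = 0x02000000;	/* hypothetical PCIE_FW value */

	FW_PCIE_FW_EVAL_GET(pcie_fw);	/* (0x02000000 >> 24) & 0x7 == 2 */
	/* ... which selects reason[2] == "During Device Configuration". */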
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index a0b418e007a0..566b17db135a 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
@@ -1994,7 +1994,7 @@ static void xmit_common(struct sk_buff *skb, struct ehea_swqe *swqe) | |||
1994 | { | 1994 | { |
1995 | swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC; | 1995 | swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT | EHEA_SWQE_CRC; |
1996 | 1996 | ||
1997 | if (skb->protocol != htons(ETH_P_IP)) | 1997 | if (vlan_get_protocol(skb) != htons(ETH_P_IP)) |
1998 | return; | 1998 | return; |
1999 | 1999 | ||
2000 | if (skb->ip_summed == CHECKSUM_PARTIAL) | 2000 | if (skb->ip_summed == CHECKSUM_PARTIAL) |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index cbc330b301cd..ad3d5d12173f 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c | |||
@@ -2674,7 +2674,8 @@ set_itr_now: | |||
2674 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 | 2674 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 |
2675 | 2675 | ||
2676 | static int e1000_tso(struct e1000_adapter *adapter, | 2676 | static int e1000_tso(struct e1000_adapter *adapter, |
2677 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb) | 2677 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb, |
2678 | __be16 protocol) | ||
2678 | { | 2679 | { |
2679 | struct e1000_context_desc *context_desc; | 2680 | struct e1000_context_desc *context_desc; |
2680 | struct e1000_buffer *buffer_info; | 2681 | struct e1000_buffer *buffer_info; |
@@ -2692,7 +2693,7 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
2692 | 2693 | ||
2693 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 2694 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
2694 | mss = skb_shinfo(skb)->gso_size; | 2695 | mss = skb_shinfo(skb)->gso_size; |
2695 | if (skb->protocol == htons(ETH_P_IP)) { | 2696 | if (protocol == htons(ETH_P_IP)) { |
2696 | struct iphdr *iph = ip_hdr(skb); | 2697 | struct iphdr *iph = ip_hdr(skb); |
2697 | iph->tot_len = 0; | 2698 | iph->tot_len = 0; |
2698 | iph->check = 0; | 2699 | iph->check = 0; |
@@ -2702,7 +2703,7 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
2702 | 0); | 2703 | 0); |
2703 | cmd_length = E1000_TXD_CMD_IP; | 2704 | cmd_length = E1000_TXD_CMD_IP; |
2704 | ipcse = skb_transport_offset(skb) - 1; | 2705 | ipcse = skb_transport_offset(skb) - 1; |
2705 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 2706 | } else if (skb_is_gso_v6(skb)) { |
2706 | ipv6_hdr(skb)->payload_len = 0; | 2707 | ipv6_hdr(skb)->payload_len = 0; |
2707 | tcp_hdr(skb)->check = | 2708 | tcp_hdr(skb)->check = |
2708 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 2709 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
@@ -2745,7 +2746,8 @@ static int e1000_tso(struct e1000_adapter *adapter, | |||
2745 | } | 2746 | } |
2746 | 2747 | ||
2747 | static bool e1000_tx_csum(struct e1000_adapter *adapter, | 2748 | static bool e1000_tx_csum(struct e1000_adapter *adapter, |
2748 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb) | 2749 | struct e1000_tx_ring *tx_ring, struct sk_buff *skb, |
2750 | __be16 protocol) | ||
2749 | { | 2751 | { |
2750 | struct e1000_context_desc *context_desc; | 2752 | struct e1000_context_desc *context_desc; |
2751 | struct e1000_buffer *buffer_info; | 2753 | struct e1000_buffer *buffer_info; |
@@ -2756,7 +2758,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, | |||
2756 | if (skb->ip_summed != CHECKSUM_PARTIAL) | 2758 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
2757 | return false; | 2759 | return false; |
2758 | 2760 | ||
2759 | switch (skb->protocol) { | 2761 | switch (protocol) { |
2760 | case cpu_to_be16(ETH_P_IP): | 2762 | case cpu_to_be16(ETH_P_IP): |
2761 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 2763 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
2762 | cmd_len |= E1000_TXD_CMD_TCP; | 2764 | cmd_len |= E1000_TXD_CMD_TCP; |
@@ -3097,6 +3099,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3097 | int count = 0; | 3099 | int count = 0; |
3098 | int tso; | 3100 | int tso; |
3099 | unsigned int f; | 3101 | unsigned int f; |
3102 | __be16 protocol = vlan_get_protocol(skb); | ||
3100 | 3103 | ||
3101 | /* This goes back to the question of how to logically map a Tx queue | 3104 | /* This goes back to the question of how to logically map a Tx queue |
3102 | * to a flow. Right now, performance is impacted slightly negatively | 3105 | * to a flow. Right now, performance is impacted slightly negatively |
@@ -3210,7 +3213,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3210 | 3213 | ||
3211 | first = tx_ring->next_to_use; | 3214 | first = tx_ring->next_to_use; |
3212 | 3215 | ||
3213 | tso = e1000_tso(adapter, tx_ring, skb); | 3216 | tso = e1000_tso(adapter, tx_ring, skb, protocol); |
3214 | if (tso < 0) { | 3217 | if (tso < 0) { |
3215 | dev_kfree_skb_any(skb); | 3218 | dev_kfree_skb_any(skb); |
3216 | return NETDEV_TX_OK; | 3219 | return NETDEV_TX_OK; |
@@ -3220,10 +3223,10 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
3220 | if (likely(hw->mac_type != e1000_82544)) | 3223 | if (likely(hw->mac_type != e1000_82544)) |
3221 | tx_ring->last_tx_tso = true; | 3224 | tx_ring->last_tx_tso = true; |
3222 | tx_flags |= E1000_TX_FLAGS_TSO; | 3225 | tx_flags |= E1000_TX_FLAGS_TSO; |
3223 | } else if (likely(e1000_tx_csum(adapter, tx_ring, skb))) | 3226 | } else if (likely(e1000_tx_csum(adapter, tx_ring, skb, protocol))) |
3224 | tx_flags |= E1000_TX_FLAGS_CSUM; | 3227 | tx_flags |= E1000_TX_FLAGS_CSUM; |
3225 | 3228 | ||
3226 | if (likely(skb->protocol == htons(ETH_P_IP))) | 3229 | if (protocol == htons(ETH_P_IP)) |
3227 | tx_flags |= E1000_TX_FLAGS_IPV4; | 3230 | tx_flags |= E1000_TX_FLAGS_IPV4; |
3228 | 3231 | ||
3229 | if (unlikely(skb->no_fcs)) | 3232 | if (unlikely(skb->no_fcs)) |
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 65c3aef2bd36..247335d2c7ec 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c | |||
@@ -5164,7 +5164,8 @@ link_up: | |||
5164 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 | 5164 | #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000 |
5165 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 | 5165 | #define E1000_TX_FLAGS_VLAN_SHIFT 16 |
5166 | 5166 | ||
5167 | static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) | 5167 | static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb, |
5168 | __be16 protocol) | ||
5168 | { | 5169 | { |
5169 | struct e1000_context_desc *context_desc; | 5170 | struct e1000_context_desc *context_desc; |
5170 | struct e1000_buffer *buffer_info; | 5171 | struct e1000_buffer *buffer_info; |
@@ -5183,7 +5184,7 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) | |||
5183 | 5184 | ||
5184 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); | 5185 | hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); |
5185 | mss = skb_shinfo(skb)->gso_size; | 5186 | mss = skb_shinfo(skb)->gso_size; |
5186 | if (skb->protocol == htons(ETH_P_IP)) { | 5187 | if (protocol == htons(ETH_P_IP)) { |
5187 | struct iphdr *iph = ip_hdr(skb); | 5188 | struct iphdr *iph = ip_hdr(skb); |
5188 | iph->tot_len = 0; | 5189 | iph->tot_len = 0; |
5189 | iph->check = 0; | 5190 | iph->check = 0; |
@@ -5231,7 +5232,8 @@ static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb) | |||
5231 | return 1; | 5232 | return 1; |
5232 | } | 5233 | } |
5233 | 5234 | ||
5234 | static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) | 5235 | static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb, |
5236 | __be16 protocol) | ||
5235 | { | 5237 | { |
5236 | struct e1000_adapter *adapter = tx_ring->adapter; | 5238 | struct e1000_adapter *adapter = tx_ring->adapter; |
5237 | struct e1000_context_desc *context_desc; | 5239 | struct e1000_context_desc *context_desc; |
@@ -5239,16 +5241,10 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) | |||
5239 | unsigned int i; | 5241 | unsigned int i; |
5240 | u8 css; | 5242 | u8 css; |
5241 | u32 cmd_len = E1000_TXD_CMD_DEXT; | 5243 | u32 cmd_len = E1000_TXD_CMD_DEXT; |
5242 | __be16 protocol; | ||
5243 | 5244 | ||
5244 | if (skb->ip_summed != CHECKSUM_PARTIAL) | 5245 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
5245 | return false; | 5246 | return false; |
5246 | 5247 | ||
5247 | if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) | ||
5248 | protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; | ||
5249 | else | ||
5250 | protocol = skb->protocol; | ||
5251 | |||
5252 | switch (protocol) { | 5248 | switch (protocol) { |
5253 | case cpu_to_be16(ETH_P_IP): | 5249 | case cpu_to_be16(ETH_P_IP): |
5254 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) | 5250 | if (ip_hdr(skb)->protocol == IPPROTO_TCP) |
@@ -5546,6 +5542,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
5546 | int count = 0; | 5542 | int count = 0; |
5547 | int tso; | 5543 | int tso; |
5548 | unsigned int f; | 5544 | unsigned int f; |
5545 | __be16 protocol = vlan_get_protocol(skb); | ||
5549 | 5546 | ||
5550 | if (test_bit(__E1000_DOWN, &adapter->state)) { | 5547 | if (test_bit(__E1000_DOWN, &adapter->state)) { |
5551 | dev_kfree_skb_any(skb); | 5548 | dev_kfree_skb_any(skb); |
@@ -5620,7 +5617,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
5620 | 5617 | ||
5621 | first = tx_ring->next_to_use; | 5618 | first = tx_ring->next_to_use; |
5622 | 5619 | ||
5623 | tso = e1000_tso(tx_ring, skb); | 5620 | tso = e1000_tso(tx_ring, skb, protocol); |
5624 | if (tso < 0) { | 5621 | if (tso < 0) { |
5625 | dev_kfree_skb_any(skb); | 5622 | dev_kfree_skb_any(skb); |
5626 | return NETDEV_TX_OK; | 5623 | return NETDEV_TX_OK; |
@@ -5628,14 +5625,14 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, | |||
5628 | 5625 | ||
5629 | if (tso) | 5626 | if (tso) |
5630 | tx_flags |= E1000_TX_FLAGS_TSO; | 5627 | tx_flags |= E1000_TX_FLAGS_TSO; |
5631 | else if (e1000_tx_csum(tx_ring, skb)) | 5628 | else if (e1000_tx_csum(tx_ring, skb, protocol)) |
5632 | tx_flags |= E1000_TX_FLAGS_CSUM; | 5629 | tx_flags |= E1000_TX_FLAGS_CSUM; |
5633 | 5630 | ||
5634 | /* Old method was to assume IPv4 packet by default if TSO was enabled. | 5631 | /* Old method was to assume IPv4 packet by default if TSO was enabled. |
5635 | * 82571 hardware supports TSO capabilities for IPv6 as well... | 5632 | * 82571 hardware supports TSO capabilities for IPv6 as well... |
5636 | * no longer assume, we must. | 5633 | * no longer assume, we must. |
5637 | */ | 5634 | */ |
5638 | if (skb->protocol == htons(ETH_P_IP)) | 5635 | if (protocol == htons(ETH_P_IP)) |
5639 | tx_flags |= E1000_TX_FLAGS_IPV4; | 5636 | tx_flags |= E1000_TX_FLAGS_IPV4; |
5640 | 5637 | ||
5641 | if (unlikely(skb->no_fcs)) | 5638 | if (unlikely(skb->no_fcs)) |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a51aa37b7b5a..369848e107f8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -2295,7 +2295,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
2295 | goto out_drop; | 2295 | goto out_drop; |
2296 | 2296 | ||
2297 | /* obtain protocol of skb */ | 2297 | /* obtain protocol of skb */ |
2298 | protocol = skb->protocol; | 2298 | protocol = vlan_get_protocol(skb); |
2299 | 2299 | ||
2300 | /* record the location of the first descriptor for this packet */ | 2300 | /* record the location of the first descriptor for this packet */ |
2301 | first = &tx_ring->tx_bi[tx_ring->next_to_use]; | 2301 | first = &tx_ring->tx_bi[tx_ring->next_to_use]; |
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 79bf96ca6489..95a3ec236b49 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c | |||
@@ -1597,7 +1597,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, | |||
1597 | goto out_drop; | 1597 | goto out_drop; |
1598 | 1598 | ||
1599 | /* obtain protocol of skb */ | 1599 | /* obtain protocol of skb */ |
1600 | protocol = skb->protocol; | 1600 | protocol = vlan_get_protocol(skb); |
1601 | 1601 | ||
1602 | /* record the location of the first descriptor for this packet */ | 1602 | /* record the location of the first descriptor for this packet */ |
1603 | first = &tx_ring->tx_bi[tx_ring->next_to_use]; | 1603 | first = &tx_ring->tx_bi[tx_ring->next_to_use]; |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index c9f1d1b7ef37..ade067de1689 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/mbus.h> | 20 | #include <linux/mbus.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/if_vlan.h> | ||
23 | #include <net/ip.h> | 24 | #include <net/ip.h> |
24 | #include <net/ipv6.h> | 25 | #include <net/ipv6.h> |
25 | #include <linux/io.h> | 26 | #include <linux/io.h> |
@@ -1371,15 +1372,16 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) | |||
1371 | { | 1372 | { |
1372 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 1373 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
1373 | int ip_hdr_len = 0; | 1374 | int ip_hdr_len = 0; |
1375 | __be16 l3_proto = vlan_get_protocol(skb); | ||
1374 | u8 l4_proto; | 1376 | u8 l4_proto; |
1375 | 1377 | ||
1376 | if (skb->protocol == htons(ETH_P_IP)) { | 1378 | if (l3_proto == htons(ETH_P_IP)) { |
1377 | struct iphdr *ip4h = ip_hdr(skb); | 1379 | struct iphdr *ip4h = ip_hdr(skb); |
1378 | 1380 | ||
1379 | /* Calculate IPv4 checksum and L4 checksum */ | 1381 | /* Calculate IPv4 checksum and L4 checksum */ |
1380 | ip_hdr_len = ip4h->ihl; | 1382 | ip_hdr_len = ip4h->ihl; |
1381 | l4_proto = ip4h->protocol; | 1383 | l4_proto = ip4h->protocol; |
1382 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 1384 | } else if (l3_proto == htons(ETH_P_IPV6)) { |
1383 | struct ipv6hdr *ip6h = ipv6_hdr(skb); | 1385 | struct ipv6hdr *ip6h = ipv6_hdr(skb); |
1384 | 1386 | ||
1385 | /* Read l4_protocol from one of IPv6 extra headers */ | 1387 | /* Read l4_protocol from one of IPv6 extra headers */ |
@@ -1390,7 +1392,7 @@ static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb) | |||
1390 | return MVNETA_TX_L4_CSUM_NOT; | 1392 | return MVNETA_TX_L4_CSUM_NOT; |
1391 | 1393 | ||
1392 | return mvneta_txq_desc_csum(skb_network_offset(skb), | 1394 | return mvneta_txq_desc_csum(skb_network_offset(skb), |
1393 | skb->protocol, ip_hdr_len, l4_proto); | 1395 | l3_proto, ip_hdr_len, l4_proto); |
1394 | } | 1396 | } |
1395 | 1397 | ||
1396 | return MVNETA_TX_L4_CSUM_NOT; | 1398 | return MVNETA_TX_L4_CSUM_NOT; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index bb536aa613f4..abddcf8c40aa 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -474,39 +474,12 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad | |||
474 | int qpn, u64 *reg_id) | 474 | int qpn, u64 *reg_id) |
475 | { | 475 | { |
476 | int err; | 476 | int err; |
477 | struct mlx4_spec_list spec_eth_outer = { {NULL} }; | ||
478 | struct mlx4_spec_list spec_vxlan = { {NULL} }; | ||
479 | struct mlx4_spec_list spec_eth_inner = { {NULL} }; | ||
480 | |||
481 | struct mlx4_net_trans_rule rule = { | ||
482 | .queue_mode = MLX4_NET_TRANS_Q_FIFO, | ||
483 | .exclusive = 0, | ||
484 | .allow_loopback = 1, | ||
485 | .promisc_mode = MLX4_FS_REGULAR, | ||
486 | .priority = MLX4_DOMAIN_NIC, | ||
487 | }; | ||
488 | |||
489 | __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); | ||
490 | 477 | ||
491 | if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) | 478 | if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) |
492 | return 0; /* do nothing */ | 479 | return 0; /* do nothing */ |
493 | 480 | ||
494 | rule.port = priv->port; | 481 | err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, |
495 | rule.qpn = qpn; | 482 | MLX4_DOMAIN_NIC, reg_id); |
496 | INIT_LIST_HEAD(&rule.list); | ||
497 | |||
498 | spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; | ||
499 | memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); | ||
500 | memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); | ||
501 | |||
502 | spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ | ||
503 | spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ | ||
504 | |||
505 | list_add_tail(&spec_eth_outer.list, &rule.list); | ||
506 | list_add_tail(&spec_vxlan.list, &rule.list); | ||
507 | list_add_tail(&spec_eth_inner.list, &rule.list); | ||
508 | |||
509 | err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id); | ||
510 | if (err) { | 483 | if (err) { |
511 | en_err(priv, "failed to add vxlan steering rule, err %d\n", err); | 484 | en_err(priv, "failed to add vxlan steering rule, err %d\n", err); |
512 | return err; | 485 | return err; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index d80e7a6fac74..ca0f98c95105 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c | |||
@@ -1020,6 +1020,44 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) | |||
1020 | } | 1020 | } |
1021 | EXPORT_SYMBOL_GPL(mlx4_flow_detach); | 1021 | EXPORT_SYMBOL_GPL(mlx4_flow_detach); |
1022 | 1022 | ||
1023 | int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, | ||
1024 | int port, int qpn, u16 prio, u64 *reg_id) | ||
1025 | { | ||
1026 | int err; | ||
1027 | struct mlx4_spec_list spec_eth_outer = { {NULL} }; | ||
1028 | struct mlx4_spec_list spec_vxlan = { {NULL} }; | ||
1029 | struct mlx4_spec_list spec_eth_inner = { {NULL} }; | ||
1030 | |||
1031 | struct mlx4_net_trans_rule rule = { | ||
1032 | .queue_mode = MLX4_NET_TRANS_Q_FIFO, | ||
1033 | .exclusive = 0, | ||
1034 | .allow_loopback = 1, | ||
1035 | .promisc_mode = MLX4_FS_REGULAR, | ||
1036 | }; | ||
1037 | |||
1038 | __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); | ||
1039 | |||
1040 | rule.port = port; | ||
1041 | rule.qpn = qpn; | ||
1042 | rule.priority = prio; | ||
1043 | INIT_LIST_HEAD(&rule.list); | ||
1044 | |||
1045 | spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH; | ||
1046 | memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN); | ||
1047 | memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN); | ||
1048 | |||
1049 | spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN; /* any vxlan header */ | ||
1050 | spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH; /* any inner eth header */ | ||
1051 | |||
1052 | list_add_tail(&spec_eth_outer.list, &rule.list); | ||
1053 | list_add_tail(&spec_vxlan.list, &rule.list); | ||
1054 | list_add_tail(&spec_eth_inner.list, &rule.list); | ||
1055 | |||
1056 | err = mlx4_flow_attach(dev, &rule, reg_id); | ||
1057 | return err; | ||
1058 | } | ||
1059 | EXPORT_SYMBOL(mlx4_tunnel_steer_add); | ||
1060 | |||
1023 | int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, | 1061 | int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn, |
1024 | u32 max_range_qpn) | 1062 | u32 max_range_qpn) |
1025 | { | 1063 | { |
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 5020fd47825d..2f12c88c66ab 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c | |||
@@ -206,7 +206,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) | |||
206 | int rx_head = priv->rx_head; | 206 | int rx_head = priv->rx_head; |
207 | int rx = 0; | 207 | int rx = 0; |
208 | 208 | ||
209 | while (1) { | 209 | while (rx < budget) { |
210 | desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); | 210 | desc = priv->rx_desc_base + (RX_REG_DESC_SIZE * rx_head); |
211 | desc0 = readl(desc + RX_REG_OFFSET_DESC0); | 211 | desc0 = readl(desc + RX_REG_OFFSET_DESC0); |
212 | 212 | ||
@@ -218,7 +218,7 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) | |||
218 | net_dbg_ratelimited("packet error\n"); | 218 | net_dbg_ratelimited("packet error\n"); |
219 | priv->stats.rx_dropped++; | 219 | priv->stats.rx_dropped++; |
220 | priv->stats.rx_errors++; | 220 | priv->stats.rx_errors++; |
221 | continue; | 221 | goto rx_next; |
222 | } | 222 | } |
223 | 223 | ||
224 | len = desc0 & RX_DESC0_FRAME_LEN_MASK; | 224 | len = desc0 & RX_DESC0_FRAME_LEN_MASK; |
@@ -226,13 +226,19 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) | |||
226 | if (len > RX_BUF_SIZE) | 226 | if (len > RX_BUF_SIZE) |
227 | len = RX_BUF_SIZE; | 227 | len = RX_BUF_SIZE; |
228 | 228 | ||
229 | skb = build_skb(priv->rx_buf[rx_head], priv->rx_buf_size); | 229 | dma_sync_single_for_cpu(&ndev->dev, |
230 | priv->rx_mapping[rx_head], | ||
231 | priv->rx_buf_size, DMA_FROM_DEVICE); | ||
232 | skb = netdev_alloc_skb_ip_align(ndev, len); | ||
233 | |||
230 | if (unlikely(!skb)) { | 234 | if (unlikely(!skb)) { |
231 | net_dbg_ratelimited("build_skb failed\n"); | 235 | net_dbg_ratelimited("netdev_alloc_skb_ip_align failed\n"); |
232 | priv->stats.rx_dropped++; | 236 | priv->stats.rx_dropped++; |
233 | priv->stats.rx_errors++; | 237 | priv->stats.rx_errors++; |
238 | goto rx_next; | ||
234 | } | 239 | } |
235 | 240 | ||
241 | memcpy(skb->data, priv->rx_buf[rx_head], len); | ||
236 | skb_put(skb, len); | 242 | skb_put(skb, len); |
237 | skb->protocol = eth_type_trans(skb, ndev); | 243 | skb->protocol = eth_type_trans(skb, ndev); |
238 | napi_gro_receive(&priv->napi, skb); | 244 | napi_gro_receive(&priv->napi, skb); |
@@ -244,18 +250,15 @@ static int moxart_rx_poll(struct napi_struct *napi, int budget) | |||
244 | if (desc0 & RX_DESC0_MULTICAST) | 250 | if (desc0 & RX_DESC0_MULTICAST) |
245 | priv->stats.multicast++; | 251 | priv->stats.multicast++; |
246 | 252 | ||
253 | rx_next: | ||
247 | writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); | 254 | writel(RX_DESC0_DMA_OWN, desc + RX_REG_OFFSET_DESC0); |
248 | 255 | ||
249 | rx_head = RX_NEXT(rx_head); | 256 | rx_head = RX_NEXT(rx_head); |
250 | priv->rx_head = rx_head; | 257 | priv->rx_head = rx_head; |
251 | |||
252 | if (rx >= budget) | ||
253 | break; | ||
254 | } | 258 | } |
255 | 259 | ||
256 | if (rx < budget) { | 260 | if (rx < budget) { |
257 | napi_gro_flush(napi, false); | 261 | napi_complete(napi); |
258 | __napi_complete(napi); | ||
259 | } | 262 | } |
260 | 263 | ||
261 | priv->reg_imr |= RPKT_FINISH_M; | 264 | priv->reg_imr |= RPKT_FINISH_M; |
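The poll rework above brings the handler in line with the usual NAPI contract: process at most budget frames, count dropped/errored descriptors against that budget, and complete NAPI (so interrupts can be re-enabled) only when the ring drained before the budget was exhausted. A generic sketch of that contract follows; it is not the moxart code, and ring_has_work(), process_one_rx_descriptor() and enable_rx_interrupts() are hypothetical placeholders for the driver-specific pieces:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		while (work_done < budget) {
			if (!ring_has_work())			/* hypothetical helper */
				break;
			process_one_rx_descriptor();		/* hypothetical helper */
			work_done++;
		}

		if (work_done < budget) {
			napi_complete(napi);
			enable_rx_interrupts();			/* hypothetical helper */
		}

		return work_done;
	}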
@@ -346,10 +349,12 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
346 | len = ETH_ZLEN; | 349 | len = ETH_ZLEN; |
347 | } | 350 | } |
348 | 351 | ||
349 | txdes1 = readl(desc + TX_REG_OFFSET_DESC1); | 352 | dma_sync_single_for_device(&ndev->dev, priv->tx_mapping[tx_head], |
350 | txdes1 |= TX_DESC1_LTS | TX_DESC1_FTS; | 353 | priv->tx_buf_size, DMA_TO_DEVICE); |
351 | txdes1 &= ~(TX_DESC1_FIFO_COMPLETE | TX_DESC1_INTR_COMPLETE); | 354 | |
352 | txdes1 |= (len & TX_DESC1_BUF_SIZE_MASK); | 355 | txdes1 = TX_DESC1_LTS | TX_DESC1_FTS | (len & TX_DESC1_BUF_SIZE_MASK); |
356 | if (tx_head == TX_DESC_NUM_MASK) | ||
357 | txdes1 |= TX_DESC1_END; | ||
353 | writel(txdes1, desc + TX_REG_OFFSET_DESC1); | 358 | writel(txdes1, desc + TX_REG_OFFSET_DESC1); |
354 | writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); | 359 | writel(TX_DESC0_DMA_OWN, desc + TX_REG_OFFSET_DESC0); |
355 | 360 | ||
@@ -465,8 +470,7 @@ static int moxart_mac_probe(struct platform_device *pdev) | |||
465 | spin_lock_init(&priv->txlock); | 470 | spin_lock_init(&priv->txlock); |
466 | 471 | ||
467 | priv->tx_buf_size = TX_BUF_SIZE; | 472 | priv->tx_buf_size = TX_BUF_SIZE; |
468 | priv->rx_buf_size = RX_BUF_SIZE + | 473 | priv->rx_buf_size = RX_BUF_SIZE; |
469 | SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); | ||
470 | 474 | ||
471 | priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * | 475 | priv->tx_desc_base = dma_alloc_coherent(NULL, TX_REG_DESC_SIZE * |
472 | TX_DESC_NUM, &priv->tx_base, | 476 | TX_DESC_NUM, &priv->tx_base, |
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 8706c0dbd0c3..a44a03c45014 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c | |||
@@ -1220,6 +1220,9 @@ static int lpc_eth_open(struct net_device *ndev) | |||
1220 | 1220 | ||
1221 | __lpc_eth_clock_enable(pldat, true); | 1221 | __lpc_eth_clock_enable(pldat, true); |
1222 | 1222 | ||
1223 | /* Suspended PHY makes LPC ethernet core block, so resume now */ | ||
1224 | phy_resume(pldat->phy_dev); | ||
1225 | |||
1223 | /* Reset and initialize */ | 1226 | /* Reset and initialize */ |
1224 | __lpc_eth_reset(pldat); | 1227 | __lpc_eth_reset(pldat); |
1225 | __lpc_eth_init(pldat); | 1228 | __lpc_eth_init(pldat); |
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 188626e2a861..3e96f269150d 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c | |||
@@ -2556,6 +2556,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) | |||
2556 | 2556 | ||
2557 | if (skb_is_gso(skb)) { | 2557 | if (skb_is_gso(skb)) { |
2558 | int err; | 2558 | int err; |
2559 | __be16 l3_proto = vlan_get_protocol(skb); | ||
2559 | 2560 | ||
2560 | err = skb_cow_head(skb, 0); | 2561 | err = skb_cow_head(skb, 0); |
2561 | if (err < 0) | 2562 | if (err < 0) |
@@ -2572,7 +2573,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) | |||
2572 | << OB_MAC_TRANSPORT_HDR_SHIFT); | 2573 | << OB_MAC_TRANSPORT_HDR_SHIFT); |
2573 | mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); | 2574 | mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size); |
2574 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; | 2575 | mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO; |
2575 | if (likely(skb->protocol == htons(ETH_P_IP))) { | 2576 | if (likely(l3_proto == htons(ETH_P_IP))) { |
2576 | struct iphdr *iph = ip_hdr(skb); | 2577 | struct iphdr *iph = ip_hdr(skb); |
2577 | iph->check = 0; | 2578 | iph->check = 0; |
2578 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; | 2579 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4; |
@@ -2580,7 +2581,7 @@ static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr) | |||
2580 | iph->daddr, 0, | 2581 | iph->daddr, 0, |
2581 | IPPROTO_TCP, | 2582 | IPPROTO_TCP, |
2582 | 0); | 2583 | 0); |
2583 | } else if (skb->protocol == htons(ETH_P_IPV6)) { | 2584 | } else if (l3_proto == htons(ETH_P_IPV6)) { |
2584 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; | 2585 | mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6; |
2585 | tcp_hdr(skb)->check = | 2586 | tcp_hdr(skb)->check = |
2586 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, | 2587 | ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, |
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 9e757c792d84..196e98a2d93b 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig | |||
@@ -5,6 +5,7 @@ | |||
5 | config SH_ETH | 5 | config SH_ETH |
6 | tristate "Renesas SuperH Ethernet support" | 6 | tristate "Renesas SuperH Ethernet support" |
7 | depends on HAS_DMA | 7 | depends on HAS_DMA |
8 | depends on ARCH_SHMOBILE || SUPERH || COMPILE_TEST | ||
8 | select CRC32 | 9 | select CRC32 |
9 | select MII | 10 | select MII |
10 | select MDIO_BITBANG | 11 | select MDIO_BITBANG |
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index c553f6b5a913..cf28daba4346 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | #include "stmmac.h" | 29 | #include "stmmac.h" |
30 | 30 | ||
31 | static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | 31 | static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) |
32 | { | 32 | { |
33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; | 33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; |
34 | unsigned int txsize = priv->dma_tx_size; | 34 | unsigned int txsize = priv->dma_tx_size; |
@@ -47,7 +47,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
47 | 47 | ||
48 | desc->des2 = dma_map_single(priv->device, skb->data, | 48 | desc->des2 = dma_map_single(priv->device, skb->data, |
49 | bmax, DMA_TO_DEVICE); | 49 | bmax, DMA_TO_DEVICE); |
50 | priv->tx_skbuff_dma[entry] = desc->des2; | 50 | if (dma_mapping_error(priv->device, desc->des2)) |
51 | return -1; | ||
52 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
51 | priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); | 53 | priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); |
52 | 54 | ||
53 | while (len != 0) { | 55 | while (len != 0) { |
@@ -59,7 +61,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
59 | desc->des2 = dma_map_single(priv->device, | 61 | desc->des2 = dma_map_single(priv->device, |
60 | (skb->data + bmax * i), | 62 | (skb->data + bmax * i), |
61 | bmax, DMA_TO_DEVICE); | 63 | bmax, DMA_TO_DEVICE); |
62 | priv->tx_skbuff_dma[entry] = desc->des2; | 64 | if (dma_mapping_error(priv->device, desc->des2)) |
65 | return -1; | ||
66 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
63 | priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, | 67 | priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, |
64 | STMMAC_CHAIN_MODE); | 68 | STMMAC_CHAIN_MODE); |
65 | priv->hw->desc->set_tx_owner(desc); | 69 | priv->hw->desc->set_tx_owner(desc); |
@@ -69,7 +73,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
69 | desc->des2 = dma_map_single(priv->device, | 73 | desc->des2 = dma_map_single(priv->device, |
70 | (skb->data + bmax * i), len, | 74 | (skb->data + bmax * i), len, |
71 | DMA_TO_DEVICE); | 75 | DMA_TO_DEVICE); |
72 | priv->tx_skbuff_dma[entry] = desc->des2; | 76 | if (dma_mapping_error(priv->device, desc->des2)) |
77 | return -1; | ||
78 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
73 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, | 79 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, |
74 | STMMAC_CHAIN_MODE); | 80 | STMMAC_CHAIN_MODE); |
75 | priv->hw->desc->set_tx_owner(desc); | 81 | priv->hw->desc->set_tx_owner(desc); |
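The chain_mode.c hunks above (and the matching ring_mode.c hunks further down) all add the same check: the handle returned by dma_map_single() must be validated with dma_mapping_error() before it is written into a TX descriptor, because a mapping can fail and the returned value is then not a usable bus address. A minimal sketch of the pattern as these hunks apply it:

	dma_addr_t buf;

	buf = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, buf))
		return -1;	/* caller treats this as "could not queue" */

	desc->des2 = buf;
	priv->tx_skbuff_dma[entry].buf = buf;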
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index de507c32036c..593e6c4144a7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
@@ -220,10 +220,10 @@ enum dma_irq_status { | |||
220 | handle_tx = 0x8, | 220 | handle_tx = 0x8, |
221 | }; | 221 | }; |
222 | 222 | ||
223 | #define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 1) | 223 | #define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0) |
224 | #define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 2) | 224 | #define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1) |
225 | #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 3) | 225 | #define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2) |
226 | #define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 4) | 226 | #define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3) |
227 | 227 | ||
228 | #define CORE_PCS_ANE_COMPLETE (1 << 5) | 228 | #define CORE_PCS_ANE_COMPLETE (1 << 5) |
229 | #define CORE_PCS_LINK_STATUS (1 << 6) | 229 | #define CORE_PCS_LINK_STATUS (1 << 6) |
@@ -287,7 +287,7 @@ struct dma_features { | |||
287 | 287 | ||
288 | /* Default LPI timers */ | 288 | /* Default LPI timers */ |
289 | #define STMMAC_DEFAULT_LIT_LS 0x3E8 | 289 | #define STMMAC_DEFAULT_LIT_LS 0x3E8 |
290 | #define STMMAC_DEFAULT_TWT_LS 0x0 | 290 | #define STMMAC_DEFAULT_TWT_LS 0x1E |
291 | 291 | ||
292 | #define STMMAC_CHAIN_MODE 0x1 | 292 | #define STMMAC_CHAIN_MODE 0x1 |
293 | #define STMMAC_RING_MODE 0x2 | 293 | #define STMMAC_RING_MODE 0x2 |
@@ -425,7 +425,7 @@ struct stmmac_mode_ops { | |||
425 | void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, | 425 | void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, |
426 | unsigned int extend_desc); | 426 | unsigned int extend_desc); |
427 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); | 427 | unsigned int (*is_jumbo_frm) (int len, int ehn_desc); |
428 | unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); | 428 | int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum); |
429 | int (*set_16kib_bfsize)(int mtu); | 429 | int (*set_16kib_bfsize)(int mtu); |
430 | void (*init_desc3)(struct dma_desc *p); | 430 | void (*init_desc3)(struct dma_desc *p); |
431 | void (*refill_desc3) (void *priv, struct dma_desc *p); | 431 | void (*refill_desc3) (void *priv, struct dma_desc *p); |
@@ -445,6 +445,7 @@ struct mac_device_info { | |||
445 | int multicast_filter_bins; | 445 | int multicast_filter_bins; |
446 | int unicast_filter_entries; | 446 | int unicast_filter_entries; |
447 | int mcast_bits_log2; | 447 | int mcast_bits_log2; |
448 | unsigned int rx_csum; | ||
448 | }; | 449 | }; |
449 | 450 | ||
450 | struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, | 451 | struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index 71b5419256c1..64d8f56a9c17 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | |||
@@ -153,7 +153,7 @@ enum inter_frame_gap { | |||
153 | #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ | 153 | #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ |
154 | 154 | ||
155 | #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ | 155 | #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | GMAC_CONTROL_ACS | \ |
156 | GMAC_CONTROL_BE) | 156 | GMAC_CONTROL_BE | GMAC_CONTROL_DCRS) |
157 | 157 | ||
158 | /* GMAC Frame Filter defines */ | 158 | /* GMAC Frame Filter defines */ |
159 | #define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ | 159 | #define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index d8ef18786a1c..5efe60ea6526 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | |||
@@ -58,7 +58,11 @@ static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw) | |||
58 | void __iomem *ioaddr = hw->pcsr; | 58 | void __iomem *ioaddr = hw->pcsr; |
59 | u32 value = readl(ioaddr + GMAC_CONTROL); | 59 | u32 value = readl(ioaddr + GMAC_CONTROL); |
60 | 60 | ||
61 | value |= GMAC_CONTROL_IPC; | 61 | if (hw->rx_csum) |
62 | value |= GMAC_CONTROL_IPC; | ||
63 | else | ||
64 | value &= ~GMAC_CONTROL_IPC; | ||
65 | |||
62 | writel(value, ioaddr + GMAC_CONTROL); | 66 | writel(value, ioaddr + GMAC_CONTROL); |
63 | 67 | ||
64 | value = readl(ioaddr + GMAC_CONTROL); | 68 | value = readl(ioaddr + GMAC_CONTROL); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h index 8607488cbcfc..192c2491330b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h | |||
@@ -68,7 +68,7 @@ struct stmmac_counters { | |||
68 | unsigned int mmc_rx_octetcount_g; | 68 | unsigned int mmc_rx_octetcount_g; |
69 | unsigned int mmc_rx_broadcastframe_g; | 69 | unsigned int mmc_rx_broadcastframe_g; |
70 | unsigned int mmc_rx_multicastframe_g; | 70 | unsigned int mmc_rx_multicastframe_g; |
71 | unsigned int mmc_rx_crc_errror; | 71 | unsigned int mmc_rx_crc_error; |
72 | unsigned int mmc_rx_align_error; | 72 | unsigned int mmc_rx_align_error; |
73 | unsigned int mmc_rx_run_error; | 73 | unsigned int mmc_rx_run_error; |
74 | unsigned int mmc_rx_jabber_error; | 74 | unsigned int mmc_rx_jabber_error; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index 50617c5a0bdb..08c483bd2ec7 100644 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c | |||
@@ -196,7 +196,7 @@ void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc) | |||
196 | mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); | 196 | mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); |
197 | mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); | 197 | mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); |
198 | mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); | 198 | mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); |
199 | mmc->mmc_rx_crc_errror += readl(ioaddr + MMC_RX_CRC_ERRROR); | 199 | mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR); |
200 | mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); | 200 | mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); |
201 | mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); | 201 | mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); |
202 | mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); | 202 | mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index 650a4be6bce5..5dd50c6cda5b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
@@ -28,7 +28,7 @@ | |||
28 | 28 | ||
29 | #include "stmmac.h" | 29 | #include "stmmac.h" |
30 | 30 | ||
31 | static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | 31 | static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) |
32 | { | 32 | { |
33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; | 33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; |
34 | unsigned int txsize = priv->dma_tx_size; | 34 | unsigned int txsize = priv->dma_tx_size; |
@@ -53,7 +53,10 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
53 | 53 | ||
54 | desc->des2 = dma_map_single(priv->device, skb->data, | 54 | desc->des2 = dma_map_single(priv->device, skb->data, |
55 | bmax, DMA_TO_DEVICE); | 55 | bmax, DMA_TO_DEVICE); |
56 | priv->tx_skbuff_dma[entry] = desc->des2; | 56 | if (dma_mapping_error(priv->device, desc->des2)) |
57 | return -1; | ||
58 | |||
59 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
57 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; | 60 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; |
58 | priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, | 61 | priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, |
59 | STMMAC_RING_MODE); | 62 | STMMAC_RING_MODE); |
@@ -68,7 +71,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
68 | 71 | ||
69 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, | 72 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, |
70 | len, DMA_TO_DEVICE); | 73 | len, DMA_TO_DEVICE); |
71 | priv->tx_skbuff_dma[entry] = desc->des2; | 74 | if (dma_mapping_error(priv->device, desc->des2)) |
75 | return -1; | ||
76 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
72 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; | 77 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; |
73 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, | 78 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, |
74 | STMMAC_RING_MODE); | 79 | STMMAC_RING_MODE); |
@@ -77,7 +82,9 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
77 | } else { | 82 | } else { |
78 | desc->des2 = dma_map_single(priv->device, skb->data, | 83 | desc->des2 = dma_map_single(priv->device, skb->data, |
79 | nopaged_len, DMA_TO_DEVICE); | 84 | nopaged_len, DMA_TO_DEVICE); |
80 | priv->tx_skbuff_dma[entry] = desc->des2; | 85 | if (dma_mapping_error(priv->device, desc->des2)) |
86 | return -1; | ||
87 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
81 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; | 88 | desc->des3 = desc->des2 + BUF_SIZE_4KiB; |
82 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, | 89 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, |
83 | STMMAC_RING_MODE); | 90 | STMMAC_RING_MODE); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index ca01035634a7..58097c0e2ad5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -34,6 +34,11 @@ | |||
34 | #include <linux/ptp_clock_kernel.h> | 34 | #include <linux/ptp_clock_kernel.h> |
35 | #include <linux/reset.h> | 35 | #include <linux/reset.h> |
36 | 36 | ||
37 | struct stmmac_tx_info { | ||
38 | dma_addr_t buf; | ||
39 | bool map_as_page; | ||
40 | }; | ||
41 | |||
37 | struct stmmac_priv { | 42 | struct stmmac_priv { |
38 | /* Frequently used values are kept adjacent for cache effect */ | 43 | /* Frequently used values are kept adjacent for cache effect */ |
39 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; | 44 | struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp; |
@@ -45,7 +50,7 @@ struct stmmac_priv { | |||
45 | u32 tx_count_frames; | 50 | u32 tx_count_frames; |
46 | u32 tx_coal_frames; | 51 | u32 tx_coal_frames; |
47 | u32 tx_coal_timer; | 52 | u32 tx_coal_timer; |
48 | dma_addr_t *tx_skbuff_dma; | 53 | struct stmmac_tx_info *tx_skbuff_dma; |
49 | dma_addr_t dma_tx_phy; | 54 | dma_addr_t dma_tx_phy; |
50 | int tx_coalesce; | 55 | int tx_coalesce; |
51 | int hwts_tx_en; | 56 | int hwts_tx_en; |
@@ -105,6 +110,8 @@ struct stmmac_priv { | |||
105 | struct ptp_clock *ptp_clock; | 110 | struct ptp_clock *ptp_clock; |
106 | struct ptp_clock_info ptp_clock_ops; | 111 | struct ptp_clock_info ptp_clock_ops; |
107 | unsigned int default_addend; | 112 | unsigned int default_addend; |
113 | struct clk *clk_ptp_ref; | ||
114 | unsigned int clk_ptp_rate; | ||
108 | u32 adv_ts; | 115 | u32 adv_ts; |
109 | int use_riwt; | 116 | int use_riwt; |
110 | int irq_wake; | 117 | int irq_wake; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 9af50bae4dde..cf4f38db1c0a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | |||
@@ -175,7 +175,7 @@ static const struct stmmac_stats stmmac_mmc[] = { | |||
175 | STMMAC_MMC_STAT(mmc_rx_octetcount_g), | 175 | STMMAC_MMC_STAT(mmc_rx_octetcount_g), |
176 | STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), | 176 | STMMAC_MMC_STAT(mmc_rx_broadcastframe_g), |
177 | STMMAC_MMC_STAT(mmc_rx_multicastframe_g), | 177 | STMMAC_MMC_STAT(mmc_rx_multicastframe_g), |
178 | STMMAC_MMC_STAT(mmc_rx_crc_errror), | 178 | STMMAC_MMC_STAT(mmc_rx_crc_error), |
179 | STMMAC_MMC_STAT(mmc_rx_align_error), | 179 | STMMAC_MMC_STAT(mmc_rx_align_error), |
180 | STMMAC_MMC_STAT(mmc_rx_run_error), | 180 | STMMAC_MMC_STAT(mmc_rx_run_error), |
181 | STMMAC_MMC_STAT(mmc_rx_jabber_error), | 181 | STMMAC_MMC_STAT(mmc_rx_jabber_error), |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 08addd653728..6e6ee226de04 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -275,6 +275,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg) | |||
275 | */ | 275 | */ |
276 | bool stmmac_eee_init(struct stmmac_priv *priv) | 276 | bool stmmac_eee_init(struct stmmac_priv *priv) |
277 | { | 277 | { |
278 | char *phy_bus_name = priv->plat->phy_bus_name; | ||
278 | bool ret = false; | 279 | bool ret = false; |
279 | 280 | ||
280 | /* Using PCS we cannot dial with the phy registers at this stage | 281 | /* Using PCS we cannot dial with the phy registers at this stage |
@@ -284,6 +285,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
284 | (priv->pcs == STMMAC_PCS_RTBI)) | 285 | (priv->pcs == STMMAC_PCS_RTBI)) |
285 | goto out; | 286 | goto out; |
286 | 287 | ||
288 | /* Never init EEE in case of a switch is attached */ | ||
289 | if (phy_bus_name && (!strcmp(phy_bus_name, "fixed"))) | ||
290 | goto out; | ||
291 | |||
287 | /* MAC core supports the EEE feature. */ | 292 | /* MAC core supports the EEE feature. */ |
288 | if (priv->dma_cap.eee) { | 293 | if (priv->dma_cap.eee) { |
289 | int tx_lpi_timer = priv->tx_lpi_timer; | 294 | int tx_lpi_timer = priv->tx_lpi_timer; |
@@ -316,10 +321,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv) | |||
316 | priv->hw->mac->set_eee_timer(priv->hw, | 321 | priv->hw->mac->set_eee_timer(priv->hw, |
317 | STMMAC_DEFAULT_LIT_LS, | 322 | STMMAC_DEFAULT_LIT_LS, |
318 | tx_lpi_timer); | 323 | tx_lpi_timer); |
319 | } else | 324 | } |
320 | /* Set HW EEE according to the speed */ | 325 | /* Set HW EEE according to the speed */ |
321 | priv->hw->mac->set_eee_pls(priv->hw, | 326 | priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); |
322 | priv->phydev->link); | ||
323 | 327 | ||
324 | pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); | 328 | pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); |
325 | 329 | ||
@@ -603,16 +607,16 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
603 | /* calculate default added value: | 607 | /* calculate default added value: |
604 | * formula is : | 608 | * formula is : |
605 | * addend = (2^32)/freq_div_ratio; | 609 | * addend = (2^32)/freq_div_ratio; |
606 | * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz | 610 | * where, freq_div_ratio = clk_ptp_ref_i/50MHz |
607 | * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK; | 611 | * hence, addend = ((2^32) * 50MHz)/clk_ptp_ref_i; |
608 | * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to | 612 | * NOTE: clk_ptp_ref_i should be >= 50MHz to |
609 | * achive 20ns accuracy. | 613 | * achive 20ns accuracy. |
610 | * | 614 | * |
611 | * 2^x * y == (y << x), hence | 615 | * 2^x * y == (y << x), hence |
612 | * 2^32 * 50000000 ==> (50000000 << 32) | 616 | * 2^32 * 50000000 ==> (50000000 << 32) |
613 | */ | 617 | */ |
614 | temp = (u64) (50000000ULL << 32); | 618 | temp = (u64) (50000000ULL << 32); |
615 | priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK); | 619 | priv->default_addend = div_u64(temp, priv->clk_ptp_rate); |
616 | priv->hw->ptp->config_addend(priv->ioaddr, | 620 | priv->hw->ptp->config_addend(priv->ioaddr, |
617 | priv->default_addend); | 621 | priv->default_addend); |
618 | 622 | ||
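For reference, the addend the new code computes works out as follows; a minimal stand-alone sketch (not part of the patch) that evaluates the formula the same way div_u64() does, using the old fixed 62.5 MHz STMMAC_SYSCLOCK that this series removes and a purely hypothetical 250 MHz reference clock:

    #include <stdint.h>
    #include <stdio.h>

    /* addend = (2^32 * 50 MHz) / clk_ptp_rate, mirroring div_u64(temp, rate) above */
    static uint32_t ptp_addend(uint64_t clk_ptp_rate)
    {
        uint64_t temp = 50000000ULL << 32;

        return (uint32_t)(temp / clk_ptp_rate);
    }

    int main(void)
    {
        printf("62.5 MHz -> addend 0x%08x\n", ptp_addend(62500000));   /* old STMMAC_SYSCLOCK: 0xcccccccc */
        printf("250 MHz  -> addend 0x%08x\n", ptp_addend(250000000));  /* hypothetical ref clock: 0x33333333 */
        return 0;
    }
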
@@ -638,6 +642,16 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) | |||
638 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) | 642 | if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) |
639 | return -EOPNOTSUPP; | 643 | return -EOPNOTSUPP; |
640 | 644 | ||
645 | /* Fall-back to main clock in case of no PTP ref is passed */ | ||
646 | priv->clk_ptp_ref = devm_clk_get(priv->device, "clk_ptp_ref"); | ||
647 | if (IS_ERR(priv->clk_ptp_ref)) { | ||
648 | priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk); | ||
649 | priv->clk_ptp_ref = NULL; | ||
650 | } else { | ||
651 | clk_prepare_enable(priv->clk_ptp_ref); | ||
652 | priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref); | ||
653 | } | ||
654 | |||
641 | priv->adv_ts = 0; | 655 | priv->adv_ts = 0; |
642 | if (priv->dma_cap.atime_stamp && priv->extend_desc) | 656 | if (priv->dma_cap.atime_stamp && priv->extend_desc) |
643 | priv->adv_ts = 1; | 657 | priv->adv_ts = 1; |
@@ -657,6 +671,8 @@ static int stmmac_init_ptp(struct stmmac_priv *priv) | |||
657 | 671 | ||
658 | static void stmmac_release_ptp(struct stmmac_priv *priv) | 672 | static void stmmac_release_ptp(struct stmmac_priv *priv) |
659 | { | 673 | { |
674 | if (priv->clk_ptp_ref) | ||
675 | clk_disable_unprepare(priv->clk_ptp_ref); | ||
660 | stmmac_ptp_unregister(priv); | 676 | stmmac_ptp_unregister(priv); |
661 | } | 677 | } |
662 | 678 | ||
@@ -1061,7 +1077,8 @@ static int init_dma_desc_rings(struct net_device *dev) | |||
1061 | else | 1077 | else |
1062 | p = priv->dma_tx + i; | 1078 | p = priv->dma_tx + i; |
1063 | p->des2 = 0; | 1079 | p->des2 = 0; |
1064 | priv->tx_skbuff_dma[i] = 0; | 1080 | priv->tx_skbuff_dma[i].buf = 0; |
1081 | priv->tx_skbuff_dma[i].map_as_page = false; | ||
1065 | priv->tx_skbuff[i] = NULL; | 1082 | priv->tx_skbuff[i] = NULL; |
1066 | } | 1083 | } |
1067 | 1084 | ||
@@ -1100,17 +1117,24 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv) | |||
1100 | else | 1117 | else |
1101 | p = priv->dma_tx + i; | 1118 | p = priv->dma_tx + i; |
1102 | 1119 | ||
1103 | if (priv->tx_skbuff_dma[i]) { | 1120 | if (priv->tx_skbuff_dma[i].buf) { |
1104 | dma_unmap_single(priv->device, | 1121 | if (priv->tx_skbuff_dma[i].map_as_page) |
1105 | priv->tx_skbuff_dma[i], | 1122 | dma_unmap_page(priv->device, |
1106 | priv->hw->desc->get_tx_len(p), | 1123 | priv->tx_skbuff_dma[i].buf, |
1107 | DMA_TO_DEVICE); | 1124 | priv->hw->desc->get_tx_len(p), |
1108 | priv->tx_skbuff_dma[i] = 0; | 1125 | DMA_TO_DEVICE); |
1126 | else | ||
1127 | dma_unmap_single(priv->device, | ||
1128 | priv->tx_skbuff_dma[i].buf, | ||
1129 | priv->hw->desc->get_tx_len(p), | ||
1130 | DMA_TO_DEVICE); | ||
1109 | } | 1131 | } |
1110 | 1132 | ||
1111 | if (priv->tx_skbuff[i] != NULL) { | 1133 | if (priv->tx_skbuff[i] != NULL) { |
1112 | dev_kfree_skb_any(priv->tx_skbuff[i]); | 1134 | dev_kfree_skb_any(priv->tx_skbuff[i]); |
1113 | priv->tx_skbuff[i] = NULL; | 1135 | priv->tx_skbuff[i] = NULL; |
1136 | priv->tx_skbuff_dma[i].buf = 0; | ||
1137 | priv->tx_skbuff_dma[i].map_as_page = false; | ||
1114 | } | 1138 | } |
1115 | } | 1139 | } |
1116 | } | 1140 | } |
@@ -1131,7 +1155,8 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv) | |||
1131 | if (!priv->rx_skbuff) | 1155 | if (!priv->rx_skbuff) |
1132 | goto err_rx_skbuff; | 1156 | goto err_rx_skbuff; |
1133 | 1157 | ||
1134 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), | 1158 | priv->tx_skbuff_dma = kmalloc_array(txsize, |
1159 | sizeof(*priv->tx_skbuff_dma), | ||
1135 | GFP_KERNEL); | 1160 | GFP_KERNEL); |
1136 | if (!priv->tx_skbuff_dma) | 1161 | if (!priv->tx_skbuff_dma) |
1137 | goto err_tx_skbuff_dma; | 1162 | goto err_tx_skbuff_dma; |
@@ -1293,12 +1318,19 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) | |||
1293 | pr_debug("%s: curr %d, dirty %d\n", __func__, | 1318 | pr_debug("%s: curr %d, dirty %d\n", __func__, |
1294 | priv->cur_tx, priv->dirty_tx); | 1319 | priv->cur_tx, priv->dirty_tx); |
1295 | 1320 | ||
1296 | if (likely(priv->tx_skbuff_dma[entry])) { | 1321 | if (likely(priv->tx_skbuff_dma[entry].buf)) { |
1297 | dma_unmap_single(priv->device, | 1322 | if (priv->tx_skbuff_dma[entry].map_as_page) |
1298 | priv->tx_skbuff_dma[entry], | 1323 | dma_unmap_page(priv->device, |
1299 | priv->hw->desc->get_tx_len(p), | 1324 | priv->tx_skbuff_dma[entry].buf, |
1300 | DMA_TO_DEVICE); | 1325 | priv->hw->desc->get_tx_len(p), |
1301 | priv->tx_skbuff_dma[entry] = 0; | 1326 | DMA_TO_DEVICE); |
1327 | else | ||
1328 | dma_unmap_single(priv->device, | ||
1329 | priv->tx_skbuff_dma[entry].buf, | ||
1330 | priv->hw->desc->get_tx_len(p), | ||
1331 | DMA_TO_DEVICE); | ||
1332 | priv->tx_skbuff_dma[entry].buf = 0; | ||
1333 | priv->tx_skbuff_dma[entry].map_as_page = false; | ||
1302 | } | 1334 | } |
1303 | priv->hw->mode->clean_desc3(priv, p); | 1335 | priv->hw->mode->clean_desc3(priv, p); |
1304 | 1336 | ||
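The same unmap-by-origin logic now appears in the ring init, free and clean paths above; a possible shared helper, sketched here only to show why map_as_page has to travel with the DMA address (this helper is not part of the patch):

    #include <linux/dma-mapping.h>

    /* Illustrative only: release one TX buffer mapping according to how it was
     * created, and clear the bookkeeping so the slot can be reused. */
    static void stmmac_unmap_tx_buf(struct device *dev,
                                    struct stmmac_tx_info *info, size_t len)
    {
        if (!info->buf)
            return;

        if (info->map_as_page)
            dma_unmap_page(dev, info->buf, len, DMA_TO_DEVICE);
        else
            dma_unmap_single(dev, info->buf, len, DMA_TO_DEVICE);

        info->buf = 0;
        info->map_as_page = false;
    }
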
@@ -1637,6 +1669,13 @@ static int stmmac_hw_setup(struct net_device *dev) | |||
1637 | /* Initialize the MAC Core */ | 1669 | /* Initialize the MAC Core */ |
1638 | priv->hw->mac->core_init(priv->hw, dev->mtu); | 1670 | priv->hw->mac->core_init(priv->hw, dev->mtu); |
1639 | 1671 | ||
1672 | ret = priv->hw->mac->rx_ipc(priv->hw); | ||
1673 | if (!ret) { | ||
1674 | pr_warn(" RX IPC Checksum Offload disabled\n"); | ||
1675 | priv->plat->rx_coe = STMMAC_RX_COE_NONE; | ||
1676 | priv->hw->rx_csum = 0; | ||
1677 | } | ||
1678 | |||
1640 | /* Enable the MAC Rx/Tx */ | 1679 | /* Enable the MAC Rx/Tx */ |
1641 | stmmac_set_mac(priv->ioaddr, true); | 1680 | stmmac_set_mac(priv->ioaddr, true); |
1642 | 1681 | ||
@@ -1887,12 +1926,16 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1887 | if (likely(!is_jumbo)) { | 1926 | if (likely(!is_jumbo)) { |
1888 | desc->des2 = dma_map_single(priv->device, skb->data, | 1927 | desc->des2 = dma_map_single(priv->device, skb->data, |
1889 | nopaged_len, DMA_TO_DEVICE); | 1928 | nopaged_len, DMA_TO_DEVICE); |
1890 | priv->tx_skbuff_dma[entry] = desc->des2; | 1929 | if (dma_mapping_error(priv->device, desc->des2)) |
1930 | goto dma_map_err; | ||
1931 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
1891 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, | 1932 | priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, |
1892 | csum_insertion, priv->mode); | 1933 | csum_insertion, priv->mode); |
1893 | } else { | 1934 | } else { |
1894 | desc = first; | 1935 | desc = first; |
1895 | entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); | 1936 | entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); |
1937 | if (unlikely(entry < 0)) | ||
1938 | goto dma_map_err; | ||
1896 | } | 1939 | } |
1897 | 1940 | ||
1898 | for (i = 0; i < nfrags; i++) { | 1941 | for (i = 0; i < nfrags; i++) { |
@@ -1908,7 +1951,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1908 | 1951 | ||
1909 | desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, | 1952 | desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len, |
1910 | DMA_TO_DEVICE); | 1953 | DMA_TO_DEVICE); |
1911 | priv->tx_skbuff_dma[entry] = desc->des2; | 1954 | if (dma_mapping_error(priv->device, desc->des2)) |
1955 | goto dma_map_err; /* should reuse desc w/o issues */ | ||
1956 | |||
1957 | priv->tx_skbuff_dma[entry].buf = desc->des2; | ||
1958 | priv->tx_skbuff_dma[entry].map_as_page = true; | ||
1912 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, | 1959 | priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion, |
1913 | priv->mode); | 1960 | priv->mode); |
1914 | wmb(); | 1961 | wmb(); |
@@ -1975,7 +2022,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1975 | priv->hw->dma->enable_dma_transmission(priv->ioaddr); | 2022 | priv->hw->dma->enable_dma_transmission(priv->ioaddr); |
1976 | 2023 | ||
1977 | spin_unlock(&priv->tx_lock); | 2024 | spin_unlock(&priv->tx_lock); |
2025 | return NETDEV_TX_OK; | ||
1978 | 2026 | ||
2027 | dma_map_err: | ||
2028 | dev_err(priv->device, "Tx dma map failed\n"); | ||
2029 | dev_kfree_skb(skb); | ||
2030 | priv->dev->stats.tx_dropped++; | ||
1979 | return NETDEV_TX_OK; | 2031 | return NETDEV_TX_OK; |
1980 | } | 2032 | } |
1981 | 2033 | ||
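The new dma_map_err path follows the usual rule that every dma_map_single()/skb_frag_dma_map() result must be checked with dma_mapping_error() before the address reaches a descriptor; a minimal sketch of that pattern in isolation (function and parameter names are illustrative, not from the driver):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    /* Illustrative only: map a linear buffer for transmit and fail cleanly when
     * the IOMMU/SWIOTLB cannot provide a usable bus address. */
    static int example_map_for_tx(struct device *dev, void *data, size_t len,
                                  dma_addr_t *addr)
    {
        *addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *addr))
            return -ENOMEM;    /* caller frees the skb and bumps tx_dropped */

        return 0;
    }
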
@@ -2028,7 +2080,12 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) | |||
2028 | priv->rx_skbuff_dma[entry] = | 2080 | priv->rx_skbuff_dma[entry] = |
2029 | dma_map_single(priv->device, skb->data, bfsize, | 2081 | dma_map_single(priv->device, skb->data, bfsize, |
2030 | DMA_FROM_DEVICE); | 2082 | DMA_FROM_DEVICE); |
2031 | | 2083 | if (dma_mapping_error(priv->device, |
2084 | priv->rx_skbuff_dma[entry])) { | ||
2085 | dev_err(priv->device, "Rx dma map failed\n"); | ||
2086 | dev_kfree_skb(skb); | ||
2087 | break; | ||
2088 | } | ||
2032 | p->des2 = priv->rx_skbuff_dma[entry]; | 2089 | p->des2 = priv->rx_skbuff_dma[entry]; |
2033 | 2090 | ||
2034 | priv->hw->mode->refill_desc3(priv, p); | 2091 | priv->hw->mode->refill_desc3(priv, p); |
@@ -2055,7 +2112,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) | |||
2055 | unsigned int entry = priv->cur_rx % rxsize; | 2112 | unsigned int entry = priv->cur_rx % rxsize; |
2056 | unsigned int next_entry; | 2113 | unsigned int next_entry; |
2057 | unsigned int count = 0; | 2114 | unsigned int count = 0; |
2058 | int coe = priv->plat->rx_coe; | 2115 | int coe = priv->hw->rx_csum; |
2059 | 2116 | ||
2060 | if (netif_msg_rx_status(priv)) { | 2117 | if (netif_msg_rx_status(priv)) { |
2061 | pr_debug("%s: descriptor ring:\n", __func__); | 2118 | pr_debug("%s: descriptor ring:\n", __func__); |
@@ -2276,8 +2333,7 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, | |||
2276 | 2333 | ||
2277 | if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) | 2334 | if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) |
2278 | features &= ~NETIF_F_RXCSUM; | 2335 | features &= ~NETIF_F_RXCSUM; |
2279 | else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1) | 2336 | |
2280 | features &= ~NETIF_F_IPV6_CSUM; | ||
2281 | if (!priv->plat->tx_coe) | 2337 | if (!priv->plat->tx_coe) |
2282 | features &= ~NETIF_F_ALL_CSUM; | 2338 | features &= ~NETIF_F_ALL_CSUM; |
2283 | 2339 | ||
@@ -2292,6 +2348,24 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev, | |||
2292 | return features; | 2348 | return features; |
2293 | } | 2349 | } |
2294 | 2350 | ||
2351 | static int stmmac_set_features(struct net_device *netdev, | ||
2352 | netdev_features_t features) | ||
2353 | { | ||
2354 | struct stmmac_priv *priv = netdev_priv(netdev); | ||
2355 | |||
2356 | /* Keep the COE Type in case of csum is supporting */ | ||
2357 | if (features & NETIF_F_RXCSUM) | ||
2358 | priv->hw->rx_csum = priv->plat->rx_coe; | ||
2359 | else | ||
2360 | priv->hw->rx_csum = 0; | ||
2361 | /* No check needed because rx_coe has been set before and it will be | ||
2362 | * fixed in case of issue. | ||
2363 | */ | ||
2364 | priv->hw->mac->rx_ipc(priv->hw); | ||
2365 | |||
2366 | return 0; | ||
2367 | } | ||
2368 | |||
2295 | /** | 2369 | /** |
2296 | * stmmac_interrupt - main ISR | 2370 | * stmmac_interrupt - main ISR |
2297 | * @irq: interrupt number. | 2371 | * @irq: interrupt number. |
@@ -2572,6 +2646,7 @@ static const struct net_device_ops stmmac_netdev_ops = { | |||
2572 | .ndo_stop = stmmac_release, | 2646 | .ndo_stop = stmmac_release, |
2573 | .ndo_change_mtu = stmmac_change_mtu, | 2647 | .ndo_change_mtu = stmmac_change_mtu, |
2574 | .ndo_fix_features = stmmac_fix_features, | 2648 | .ndo_fix_features = stmmac_fix_features, |
2649 | .ndo_set_features = stmmac_set_features, | ||
2575 | .ndo_set_rx_mode = stmmac_set_rx_mode, | 2650 | .ndo_set_rx_mode = stmmac_set_rx_mode, |
2576 | .ndo_tx_timeout = stmmac_tx_timeout, | 2651 | .ndo_tx_timeout = stmmac_tx_timeout, |
2577 | .ndo_do_ioctl = stmmac_ioctl, | 2652 | .ndo_do_ioctl = stmmac_ioctl, |
@@ -2592,7 +2667,6 @@ static const struct net_device_ops stmmac_netdev_ops = { | |||
2592 | */ | 2667 | */ |
2593 | static int stmmac_hw_init(struct stmmac_priv *priv) | 2668 | static int stmmac_hw_init(struct stmmac_priv *priv) |
2594 | { | 2669 | { |
2595 | int ret; | ||
2596 | struct mac_device_info *mac; | 2670 | struct mac_device_info *mac; |
2597 | 2671 | ||
2598 | /* Identify the MAC HW device */ | 2672 | /* Identify the MAC HW device */ |
@@ -2649,15 +2723,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv) | |||
2649 | /* To use alternate (extended) or normal descriptor structures */ | 2723 | /* To use alternate (extended) or normal descriptor structures */ |
2650 | stmmac_selec_desc_mode(priv); | 2724 | stmmac_selec_desc_mode(priv); |
2651 | 2725 | ||
2652 | ret = priv->hw->mac->rx_ipc(priv->hw); | 2726 | if (priv->plat->rx_coe) { |
2653 | if (!ret) { | 2727 | priv->hw->rx_csum = priv->plat->rx_coe; |
2654 | pr_warn(" RX IPC Checksum Offload not configured.\n"); | ||
2655 | priv->plat->rx_coe = STMMAC_RX_COE_NONE; | ||
2656 | } | ||
2657 | |||
2658 | if (priv->plat->rx_coe) | ||
2659 | pr_info(" RX Checksum Offload Engine supported (type %d)\n", | 2728 | pr_info(" RX Checksum Offload Engine supported (type %d)\n", |
2660 | priv->plat->rx_coe); | 2729 | priv->plat->rx_coe); |
2730 | } | ||
2661 | if (priv->plat->tx_coe) | 2731 | if (priv->plat->tx_coe) |
2662 | pr_info(" TX Checksum insertion supported\n"); | 2732 | pr_info(" TX Checksum insertion supported\n"); |
2663 | 2733 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index b7ad3565566c..c5ee79d8a8c5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | |||
@@ -206,6 +206,7 @@ void stmmac_ptp_unregister(struct stmmac_priv *priv) | |||
206 | { | 206 | { |
207 | if (priv->ptp_clock) { | 207 | if (priv->ptp_clock) { |
208 | ptp_clock_unregister(priv->ptp_clock); | 208 | ptp_clock_unregister(priv->ptp_clock); |
209 | priv->ptp_clock = NULL; | ||
209 | pr_debug("Removed PTP HW clock successfully on %s\n", | 210 | pr_debug("Removed PTP HW clock successfully on %s\n", |
210 | priv->dev->name); | 211 | priv->dev->name); |
211 | } | 212 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 3dbc047622fa..4535df37c227 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | |||
@@ -25,8 +25,6 @@ | |||
25 | #ifndef __STMMAC_PTP_H__ | 25 | #ifndef __STMMAC_PTP_H__ |
26 | #define __STMMAC_PTP_H__ | 26 | #define __STMMAC_PTP_H__ |
27 | 27 | ||
28 | #define STMMAC_SYSCLOCK 62500000 | ||
29 | |||
30 | /* IEEE 1588 PTP register offsets */ | 28 | /* IEEE 1588 PTP register offsets */ |
31 | #define PTP_TCR 0x0700 /* Timestamp Control Reg */ | 29 | #define PTP_TCR 0x0700 /* Timestamp Control Reg */ |
32 | #define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ | 30 | #define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ |
diff --git a/drivers/net/fddi/skfp/h/skfbi.h b/drivers/net/fddi/skfp/h/skfbi.h index c1ba26c06d73..3de2f0d15fe2 100644 --- a/drivers/net/fddi/skfp/h/skfbi.h +++ b/drivers/net/fddi/skfp/h/skfbi.h | |||
@@ -147,11 +147,6 @@ | |||
147 | #define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */ | 147 | #define PCI_MEM64BIT (2<<1) /* Base addr anywhere in 64 Bit range */ |
148 | #define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */ | 148 | #define PCI_MEMSPACE 0x00000001L /* Bit 0: Memory Space Indic. */ |
149 | 149 | ||
150 | /* PCI_BASE_2ND 32 bit 2nd Base address */ | ||
151 | #define PCI_IOBASE 0xffffff00L /* Bit 31..8: I/O Base address */ | ||
152 | #define PCI_IOSIZE 0x000000fcL /* Bit 7..2: I/O Size Requirements */ | ||
153 | #define PCI_IOSPACE 0x00000001L /* Bit 0: I/O Space Indicator */ | ||
154 | |||
155 | /* PCI_SUB_VID 16 bit Subsystem Vendor ID */ | 150 | /* PCI_SUB_VID 16 bit Subsystem Vendor ID */ |
156 | /* PCI_SUB_ID 16 bit Subsystem ID */ | 151 | /* PCI_SUB_ID 16 bit Subsystem ID */ |
157 | 152 | ||
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c94e2a27446a..a854d38c231d 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -1036,31 +1036,31 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
1036 | /* First check if the EEE ability is supported */ | 1036 | /* First check if the EEE ability is supported */ |
1037 | eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, | 1037 | eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE, |
1038 | MDIO_MMD_PCS, phydev->addr); | 1038 | MDIO_MMD_PCS, phydev->addr); |
1039 | if (eee_cap < 0) | 1039 | if (eee_cap <= 0) |
1040 | return eee_cap; | 1040 | goto eee_exit_err; |
1041 | 1041 | ||
1042 | cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap); | 1042 | cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap); |
1043 | if (!cap) | 1043 | if (!cap) |
1044 | return -EPROTONOSUPPORT; | 1044 | goto eee_exit_err; |
1045 | 1045 | ||
1046 | /* Check which link settings negotiated and verify it in | 1046 | /* Check which link settings negotiated and verify it in |
1047 | * the EEE advertising registers. | 1047 | * the EEE advertising registers. |
1048 | */ | 1048 | */ |
1049 | eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, | 1049 | eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE, |
1050 | MDIO_MMD_AN, phydev->addr); | 1050 | MDIO_MMD_AN, phydev->addr); |
1051 | if (eee_lp < 0) | 1051 | if (eee_lp <= 0) |
1052 | return eee_lp; | 1052 | goto eee_exit_err; |
1053 | 1053 | ||
1054 | eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, | 1054 | eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV, |
1055 | MDIO_MMD_AN, phydev->addr); | 1055 | MDIO_MMD_AN, phydev->addr); |
1056 | if (eee_adv < 0) | 1056 | if (eee_adv <= 0) |
1057 | return eee_adv; | 1057 | goto eee_exit_err; |
1058 | 1058 | ||
1059 | adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); | 1059 | adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); |
1060 | lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); | 1060 | lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); |
1061 | idx = phy_find_setting(phydev->speed, phydev->duplex); | 1061 | idx = phy_find_setting(phydev->speed, phydev->duplex); |
1062 | if (!(lp & adv & settings[idx].setting)) | 1062 | if (!(lp & adv & settings[idx].setting)) |
1063 | return -EPROTONOSUPPORT; | 1063 | goto eee_exit_err; |
1064 | 1064 | ||
1065 | if (clk_stop_enable) { | 1065 | if (clk_stop_enable) { |
1066 | /* Configure the PHY to stop receiving xMII | 1066 | /* Configure the PHY to stop receiving xMII |
@@ -1080,7 +1080,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) | |||
1080 | 1080 | ||
1081 | return 0; /* EEE supported */ | 1081 | return 0; /* EEE supported */ |
1082 | } | 1082 | } |
1083 | | 1083 | eee_exit_err: |
1084 | return -EPROTONOSUPPORT; | 1084 | return -EPROTONOSUPPORT; |
1085 | } | 1085 | } |
1086 | EXPORT_SYMBOL(phy_init_eee); | 1086 | EXPORT_SYMBOL(phy_init_eee); |
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index d6e90c72c257..6dfcbf523936 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c | |||
@@ -2056,7 +2056,6 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2056 | if (!netdev_mc_empty(netdev)) { | 2056 | if (!netdev_mc_empty(netdev)) { |
2057 | new_table = vmxnet3_copy_mc(netdev); | 2057 | new_table = vmxnet3_copy_mc(netdev); |
2058 | if (new_table) { | 2058 | if (new_table) { |
2059 | new_mode |= VMXNET3_RXM_MCAST; | ||
2060 | rxConf->mfTableLen = cpu_to_le16( | 2059 | rxConf->mfTableLen = cpu_to_le16( |
2061 | netdev_mc_count(netdev) * ETH_ALEN); | 2060 | netdev_mc_count(netdev) * ETH_ALEN); |
2062 | new_table_pa = dma_map_single( | 2061 | new_table_pa = dma_map_single( |
@@ -2064,15 +2063,18 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2064 | new_table, | 2063 | new_table, |
2065 | rxConf->mfTableLen, | 2064 | rxConf->mfTableLen, |
2066 | PCI_DMA_TODEVICE); | 2065 | PCI_DMA_TODEVICE); |
2066 | } | ||
2067 | |||
2068 | if (new_table_pa) { | ||
2069 | new_mode |= VMXNET3_RXM_MCAST; | ||
2067 | rxConf->mfTablePA = cpu_to_le64(new_table_pa); | 2070 | rxConf->mfTablePA = cpu_to_le64(new_table_pa); |
2068 | } else { | 2071 | } else { |
2069 | netdev_info(netdev, "failed to copy mcast list" | 2072 | netdev_info(netdev, |
2070 | ", setting ALL_MULTI\n"); | 2073 | "failed to copy mcast list, setting ALL_MULTI\n"); |
2071 | new_mode |= VMXNET3_RXM_ALL_MULTI; | 2074 | new_mode |= VMXNET3_RXM_ALL_MULTI; |
2072 | } | 2075 | } |
2073 | } | 2076 | } |
2074 | 2077 | ||
2075 | |||
2076 | if (!(new_mode & VMXNET3_RXM_MCAST)) { | 2078 | if (!(new_mode & VMXNET3_RXM_MCAST)) { |
2077 | rxConf->mfTableLen = 0; | 2079 | rxConf->mfTableLen = 0; |
2078 | rxConf->mfTablePA = 0; | 2080 | rxConf->mfTablePA = 0; |
@@ -2091,11 +2093,10 @@ vmxnet3_set_mc(struct net_device *netdev) | |||
2091 | VMXNET3_CMD_UPDATE_MAC_FILTERS); | 2093 | VMXNET3_CMD_UPDATE_MAC_FILTERS); |
2092 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); | 2094 | spin_unlock_irqrestore(&adapter->cmd_lock, flags); |
2093 | 2095 | ||
2094 | if (new_table) { | 2096 | if (new_table_pa) |
2095 | dma_unmap_single(&adapter->pdev->dev, new_table_pa, | 2097 | dma_unmap_single(&adapter->pdev->dev, new_table_pa, |
2096 | rxConf->mfTableLen, PCI_DMA_TODEVICE); | 2098 | rxConf->mfTableLen, PCI_DMA_TODEVICE); |
2097 | kfree(new_table); | 2099 | kfree(new_table); |
2098 | } | ||
2099 | } | 2100 | } |
2100 | 2101 | ||
2101 | void | 2102 | void |
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 29ee77f2c97f..3759479f959a 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h | |||
@@ -69,10 +69,10 @@ | |||
69 | /* | 69 | /* |
70 | * Version numbers | 70 | * Version numbers |
71 | */ | 71 | */ |
72 | #define VMXNET3_DRIVER_VERSION_STRING "1.2.0.0-k" | 72 | #define VMXNET3_DRIVER_VERSION_STRING "1.2.1.0-k" |
73 | 73 | ||
74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ | 74 | /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ |
75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01020000 | 75 | #define VMXNET3_DRIVER_VERSION_NUM 0x01020100 |
76 | 76 | ||
77 | #if defined(CONFIG_PCI_MSI) | 77 | #if defined(CONFIG_PCI_MSI) |
78 | /* RSS only makes sense if MSI-X is supported. */ | 78 | /* RSS only makes sense if MSI-X is supported. */ |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1fb7b37d1402..beb377b2d4b7 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1327,7 +1327,7 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb) | |||
1327 | } else if (vxlan->flags & VXLAN_F_L3MISS) { | 1327 | } else if (vxlan->flags & VXLAN_F_L3MISS) { |
1328 | union vxlan_addr ipa = { | 1328 | union vxlan_addr ipa = { |
1329 | .sin.sin_addr.s_addr = tip, | 1329 | .sin.sin_addr.s_addr = tip, |
1330 | .sa.sa_family = AF_INET, | 1330 | .sin.sin_family = AF_INET, |
1331 | }; | 1331 | }; |
1332 | 1332 | ||
1333 | vxlan_ip_miss(dev, &ipa); | 1333 | vxlan_ip_miss(dev, &ipa); |
@@ -1488,7 +1488,7 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb) | |||
1488 | } else if (vxlan->flags & VXLAN_F_L3MISS) { | 1488 | } else if (vxlan->flags & VXLAN_F_L3MISS) { |
1489 | union vxlan_addr ipa = { | 1489 | union vxlan_addr ipa = { |
1490 | .sin6.sin6_addr = msg->target, | 1490 | .sin6.sin6_addr = msg->target, |
1491 | .sa.sa_family = AF_INET6, | 1491 | .sin6.sin6_family = AF_INET6, |
1492 | }; | 1492 | }; |
1493 | 1493 | ||
1494 | vxlan_ip_miss(dev, &ipa); | 1494 | vxlan_ip_miss(dev, &ipa); |
@@ -1521,7 +1521,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) | |||
1521 | if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { | 1521 | if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { |
1522 | union vxlan_addr ipa = { | 1522 | union vxlan_addr ipa = { |
1523 | .sin.sin_addr.s_addr = pip->daddr, | 1523 | .sin.sin_addr.s_addr = pip->daddr, |
1524 | .sa.sa_family = AF_INET, | 1524 | .sin.sin_family = AF_INET, |
1525 | }; | 1525 | }; |
1526 | 1526 | ||
1527 | vxlan_ip_miss(dev, &ipa); | 1527 | vxlan_ip_miss(dev, &ipa); |
@@ -1542,7 +1542,7 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb) | |||
1542 | if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { | 1542 | if (!n && (vxlan->flags & VXLAN_F_L3MISS)) { |
1543 | union vxlan_addr ipa = { | 1543 | union vxlan_addr ipa = { |
1544 | .sin6.sin6_addr = pip6->daddr, | 1544 | .sin6.sin6_addr = pip6->daddr, |
1545 | .sa.sa_family = AF_INET6, | 1545 | .sin6.sin6_family = AF_INET6, |
1546 | }; | 1546 | }; |
1547 | 1547 | ||
1548 | vxlan_ip_miss(dev, &ipa); | 1548 | vxlan_ip_miss(dev, &ipa); |
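The four vxlan hunks above all fix the same C subtlety: when designated initializers name two different members of a union, the later designator re-initializes the whole union, so the earlier .sin/.sin6 address was being zeroed by the trailing .sa.sa_family assignment. A small stand-alone illustration of that behavior as seen with GCC (the union here is only shaped like the driver's vxlan_addr):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>
    #include <sys/socket.h>

    union demo_addr {
        struct sockaddr_in  sin;
        struct sockaddr_in6 sin6;
        struct sockaddr     sa;
    };

    int main(void)
    {
        /* Names two union members: the later .sa designator wins, sin_addr is lost. */
        union demo_addr broken = {
            .sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
            .sa.sa_family = AF_INET,
        };
        /* Staying within one member keeps both fields, which is what the patch does. */
        union demo_addr fixed = {
            .sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
            .sin.sin_family = AF_INET,
        };

        printf("broken: family=%d addr=0x%08x\n", broken.sin.sin_family,
               (unsigned)broken.sin.sin_addr.s_addr);    /* addr prints as 0 */
        printf("fixed : family=%d addr=0x%08x\n", fixed.sin.sin_family,
               (unsigned)fixed.sin.sin_addr.s_addr);
        return 0;
    }
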
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c index 334c2ece855a..da92bfa76b7c 100644 --- a/drivers/net/wireless/at76c50x-usb.c +++ b/drivers/net/wireless/at76c50x-usb.c | |||
@@ -2423,8 +2423,6 @@ static void at76_delete_device(struct at76_priv *priv) | |||
2423 | 2423 | ||
2424 | kfree_skb(priv->rx_skb); | 2424 | kfree_skb(priv->rx_skb); |
2425 | 2425 | ||
2426 | usb_put_dev(priv->udev); | ||
2427 | |||
2428 | at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw", | 2426 | at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw", |
2429 | __func__); | 2427 | __func__); |
2430 | ieee80211_free_hw(priv->hw); | 2428 | ieee80211_free_hw(priv->hw); |
@@ -2558,6 +2556,7 @@ static void at76_disconnect(struct usb_interface *interface) | |||
2558 | 2556 | ||
2559 | wiphy_info(priv->hw->wiphy, "disconnecting\n"); | 2557 | wiphy_info(priv->hw->wiphy, "disconnecting\n"); |
2560 | at76_delete_device(priv); | 2558 | at76_delete_device(priv); |
2559 | usb_put_dev(priv->udev); | ||
2561 | dev_info(&interface->dev, "disconnected\n"); | 2560 | dev_info(&interface->dev, "disconnected\n"); |
2562 | } | 2561 | } |
2563 | 2562 | ||
diff --git a/drivers/net/wireless/ath/ath9k/spectral.c b/drivers/net/wireless/ath/ath9k/spectral.c index 5fe29b9f8fa2..8f68426ca653 100644 --- a/drivers/net/wireless/ath/ath9k/spectral.c +++ b/drivers/net/wireless/ath/ath9k/spectral.c | |||
@@ -253,7 +253,7 @@ static ssize_t write_file_spec_scan_ctl(struct file *file, | |||
253 | 253 | ||
254 | if (strncmp("trigger", buf, 7) == 0) { | 254 | if (strncmp("trigger", buf, 7) == 0) { |
255 | ath9k_spectral_scan_trigger(sc->hw); | 255 | ath9k_spectral_scan_trigger(sc->hw); |
256 | } else if (strncmp("background", buf, 9) == 0) { | 256 | } else if (strncmp("background", buf, 10) == 0) { |
257 | ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND); | 257 | ath9k_spectral_scan_config(sc->hw, SPECTRAL_BACKGROUND); |
258 | ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n"); | 258 | ath_dbg(common, CONFIG, "spectral scan: background mode enabled\n"); |
259 | } else if (strncmp("chanscan", buf, 8) == 0) { | 259 | } else if (strncmp("chanscan", buf, 8) == 0) { |
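The ath9k change is a plain off-by-one in the compare length: "background" is ten characters, so comparing only nine lets any string sharing that prefix through. A tiny stand-alone check (the input string is made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        const char *buf = "backgrounX";    /* hypothetical (bogus) user input */

        printf("n=9 : %d\n", strncmp("background", buf, 9));   /* 0 - wrongly matches */
        printf("n=10: %d\n", strncmp("background", buf, 10));  /* nonzero - rejected */
        return 0;
    }
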
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 6451d2b6abcf..824f5e287783 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig | |||
@@ -51,7 +51,6 @@ config IWLWIFI_LEDS | |||
51 | 51 | ||
52 | config IWLDVM | 52 | config IWLDVM |
53 | tristate "Intel Wireless WiFi DVM Firmware support" | 53 | tristate "Intel Wireless WiFi DVM Firmware support" |
54 | depends on m | ||
55 | default IWLWIFI | 54 | default IWLWIFI |
56 | help | 55 | help |
57 | This is the driver that supports the DVM firmware which is | 56 | This is the driver that supports the DVM firmware which is |
@@ -60,7 +59,6 @@ config IWLDVM | |||
60 | 59 | ||
61 | config IWLMVM | 60 | config IWLMVM |
62 | tristate "Intel Wireless WiFi MVM Firmware support" | 61 | tristate "Intel Wireless WiFi MVM Firmware support" |
63 | depends on m | ||
64 | help | 62 | help |
65 | This is the driver that supports the MVM firmware which is | 63 | This is the driver that supports the MVM firmware which is |
66 | currently only available for 7260 and 3160 devices. | 64 | currently only available for 7260 and 3160 devices. |
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c index 6dc5dd3ced44..ed50de6362ed 100644 --- a/drivers/net/wireless/iwlwifi/dvm/rxon.c +++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c | |||
@@ -1068,6 +1068,13 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) | |||
1068 | /* recalculate basic rates */ | 1068 | /* recalculate basic rates */ |
1069 | iwl_calc_basic_rates(priv, ctx); | 1069 | iwl_calc_basic_rates(priv, ctx); |
1070 | 1070 | ||
1071 | /* | ||
1072 | * force CTS-to-self frames protection if RTS-CTS is not preferred | ||
1073 | * one aggregation protection method | ||
1074 | */ | ||
1075 | if (!priv->hw_params.use_rts_for_aggregation) | ||
1076 | ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; | ||
1077 | |||
1071 | if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || | 1078 | if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || |
1072 | !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) | 1079 | !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) |
1073 | ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; | 1080 | ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; |
@@ -1473,6 +1480,11 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw, | |||
1473 | else | 1480 | else |
1474 | ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; | 1481 | ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; |
1475 | 1482 | ||
1483 | if (bss_conf->use_cts_prot) | ||
1484 | ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; | ||
1485 | else | ||
1486 | ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; | ||
1487 | |||
1476 | memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); | 1488 | memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); |
1477 | 1489 | ||
1478 | if (vif->type == NL80211_IFTYPE_AP || | 1490 | if (vif->type == NL80211_IFTYPE_AP || |
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index 48730064da73..d67a37a786aa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c | |||
@@ -67,8 +67,8 @@ | |||
67 | #include "iwl-agn-hw.h" | 67 | #include "iwl-agn-hw.h" |
68 | 68 | ||
69 | /* Highest firmware API version supported */ | 69 | /* Highest firmware API version supported */ |
70 | #define IWL7260_UCODE_API_MAX 9 | 70 | #define IWL7260_UCODE_API_MAX 10 |
71 | #define IWL3160_UCODE_API_MAX 9 | 71 | #define IWL3160_UCODE_API_MAX 10 |
72 | 72 | ||
73 | /* Oldest version we won't warn about */ | 73 | /* Oldest version we won't warn about */ |
74 | #define IWL7260_UCODE_API_OK 9 | 74 | #define IWL7260_UCODE_API_OK 9 |
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index 44b19e015102..e93c6972290b 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c | |||
@@ -67,7 +67,7 @@ | |||
67 | #include "iwl-agn-hw.h" | 67 | #include "iwl-agn-hw.h" |
68 | 68 | ||
69 | /* Highest firmware API version supported */ | 69 | /* Highest firmware API version supported */ |
70 | #define IWL8000_UCODE_API_MAX 9 | 70 | #define IWL8000_UCODE_API_MAX 10 |
71 | 71 | ||
72 | /* Oldest version we won't warn about */ | 72 | /* Oldest version we won't warn about */ |
73 | #define IWL8000_UCODE_API_OK 8 | 73 | #define IWL8000_UCODE_API_OK 8 |
diff --git a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c index 33da3dfcfa4f..d4bd550f505c 100644 --- a/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/rtlwifi/btcoexist/halbtcoutsrc.c | |||
@@ -101,7 +101,7 @@ static bool halbtc_legacy(struct rtl_priv *adapter) | |||
101 | 101 | ||
102 | bool is_legacy = false; | 102 | bool is_legacy = false; |
103 | 103 | ||
104 | if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_B)) | 104 | if ((mac->mode == WIRELESS_MODE_B) || (mac->mode == WIRELESS_MODE_G)) |
105 | is_legacy = true; | 105 | is_legacy = true; |
106 | 106 | ||
107 | return is_legacy; | 107 | return is_legacy; |
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 361435f8608a..1ac6383e7947 100644 --- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | |||
@@ -317,6 +317,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { | |||
317 | {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ | 317 | {RTL_USB_DEVICE(0x0bda, 0x5088, rtl92cu_hal_cfg)}, /*Thinkware-CC&C*/ |
318 | {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ | 318 | {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ |
319 | {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ | 319 | {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/ |
320 | {RTL_USB_DEVICE(0x0df6, 0x0070, rtl92cu_hal_cfg)}, /*Sitecom - 150N */ | ||
320 | {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/ | 321 | {RTL_USB_DEVICE(0x0df6, 0x0077, rtl92cu_hal_cfg)}, /*Sitecom-WLA2100V2*/ |
321 | {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ | 322 | {RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/ |
322 | {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/ | 323 | {RTL_USB_DEVICE(0x4856, 0x0091, rtl92cu_hal_cfg)}, /*NetweeN - Feixun*/ |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index e29e15dca86e..f379689dde30 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -576,6 +576,9 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, | |||
576 | init_waitqueue_head(&queue->dealloc_wq); | 576 | init_waitqueue_head(&queue->dealloc_wq); |
577 | atomic_set(&queue->inflight_packets, 0); | 577 | atomic_set(&queue->inflight_packets, 0); |
578 | 578 | ||
579 | netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, | ||
580 | XENVIF_NAPI_WEIGHT); | ||
581 | |||
579 | if (tx_evtchn == rx_evtchn) { | 582 | if (tx_evtchn == rx_evtchn) { |
580 | /* feature-split-event-channels == 0 */ | 583 | /* feature-split-event-channels == 0 */ |
581 | err = bind_interdomain_evtchn_to_irqhandler( | 584 | err = bind_interdomain_evtchn_to_irqhandler( |
@@ -629,9 +632,6 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref, | |||
629 | wake_up_process(queue->task); | 632 | wake_up_process(queue->task); |
630 | wake_up_process(queue->dealloc_task); | 633 | wake_up_process(queue->dealloc_task); |
631 | 634 | ||
632 | netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll, | ||
633 | XENVIF_NAPI_WEIGHT); | ||
634 | |||
635 | return 0; | 635 | return 0; |
636 | 636 | ||
637 | err_rx_unbind: | 637 | err_rx_unbind: |
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 9dd63b822025..e9bf2f47b61a 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c | |||
@@ -510,7 +510,7 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, | |||
510 | 510 | ||
511 | WARN_ON(nt->mw[mw_num].virt_addr == NULL); | 511 | WARN_ON(nt->mw[mw_num].virt_addr == NULL); |
512 | 512 | ||
513 | if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max) | 513 | if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max) |
514 | num_qps_mw = nt->max_qps / mw_max + 1; | 514 | num_qps_mw = nt->max_qps / mw_max + 1; |
515 | else | 515 | else |
516 | num_qps_mw = nt->max_qps / mw_max; | 516 | num_qps_mw = nt->max_qps / mw_max; |
@@ -576,6 +576,19 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) | |||
576 | return -ENOMEM; | 576 | return -ENOMEM; |
577 | } | 577 | } |
578 | 578 | ||
579 | /* | ||
580 | * we must ensure that the memory address allocated is BAR size | ||
581 | * aligned in order for the XLAT register to take the value. This | ||
582 | * is a requirement of the hardware. It is recommended to setup CMA | ||
583 | * for BAR sizes equal or greater than 4MB. | ||
584 | */ | ||
585 | if (!IS_ALIGNED(mw->dma_addr, mw->size)) { | ||
586 | dev_err(&pdev->dev, "DMA memory %pad not aligned to BAR size\n", | ||
587 | &mw->dma_addr); | ||
588 | ntb_free_mw(nt, num_mw); | ||
589 | return -ENOMEM; | ||
590 | } | ||
591 | |||
579 | /* Notify HW the memory location of the receive buffer */ | 592 | /* Notify HW the memory location of the receive buffer */ |
580 | ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr); | 593 | ntb_set_mw_addr(nt->ndev, num_mw, mw->dma_addr); |
581 | 594 | ||
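The alignment rule spelled out in the new comment is the standard power-of-two mask test that the kernel's IS_ALIGNED() macro performs on the XLAT base; a tiny stand-alone illustration (the addresses and the 4 MB BAR size are made up):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same test as IS_ALIGNED(addr, size) for a power-of-two size. */
    static bool is_aligned(uint64_t addr, uint64_t size)
    {
        return (addr & (size - 1)) == 0;
    }

    int main(void)
    {
        uint64_t bar_size = 4 << 20;    /* hypothetical 4 MB BAR */

        printf("%d\n", is_aligned(0x80400000ULL, bar_size));    /* 1: acceptable XLAT base */
        printf("%d\n", is_aligned(0x80500000ULL, bar_size));    /* 0: would be rejected with -ENOMEM */
        return 0;
    }
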
@@ -856,7 +869,7 @@ static int ntb_transport_init_queue(struct ntb_transport *nt, | |||
856 | qp->client_ready = NTB_LINK_DOWN; | 869 | qp->client_ready = NTB_LINK_DOWN; |
857 | qp->event_handler = NULL; | 870 | qp->event_handler = NULL; |
858 | 871 | ||
859 | if (nt->max_qps % mw_max && mw_num < nt->max_qps % mw_max) | 872 | if (nt->max_qps % mw_max && mw_num + 1 < nt->max_qps / mw_max) |
860 | num_qps_mw = nt->max_qps / mw_max + 1; | 873 | num_qps_mw = nt->max_qps / mw_max + 1; |
861 | else | 874 | else |
862 | num_qps_mw = nt->max_qps / mw_max; | 875 | num_qps_mw = nt->max_qps / mw_max; |
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c index 9eae9834bcc7..a0580afe1713 100644 --- a/drivers/parisc/dino.c +++ b/drivers/parisc/dino.c | |||
@@ -913,7 +913,7 @@ static int __init dino_probe(struct parisc_device *dev) | |||
913 | printk("%s version %s found at 0x%lx\n", name, version, hpa); | 913 | printk("%s version %s found at 0x%lx\n", name, version, hpa); |
914 | 914 | ||
915 | if (!request_mem_region(hpa, PAGE_SIZE, name)) { | 915 | if (!request_mem_region(hpa, PAGE_SIZE, name)) { |
916 | printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%ld)!\n", | 916 | printk(KERN_ERR "DINO: Hey! Someone took my MMIO space (0x%lx)!\n", |
917 | hpa); | 917 | hpa); |
918 | return 1; | 918 | return 1; |
919 | } | 919 | } |
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig index 8922c376456a..90f5ccacce4b 100644 --- a/drivers/pci/host/Kconfig +++ b/drivers/pci/host/Kconfig | |||
@@ -56,7 +56,7 @@ config PCI_HOST_GENERIC | |||
56 | controller, such as the one emulated by kvmtool. | 56 | controller, such as the one emulated by kvmtool. |
57 | 57 | ||
58 | config PCIE_SPEAR13XX | 58 | config PCIE_SPEAR13XX |
59 | tristate "STMicroelectronics SPEAr PCIe controller" | 59 | bool "STMicroelectronics SPEAr PCIe controller" |
60 | depends on ARCH_SPEAR13XX | 60 | depends on ARCH_SPEAR13XX |
61 | select PCIEPORTBUS | 61 | select PCIEPORTBUS |
62 | select PCIE_DW | 62 | select PCIE_DW |
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index 0dd742719154..4ff8cbb620d3 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig | |||
@@ -41,9 +41,9 @@ config PHY_MVEBU_SATA | |||
41 | config PHY_MIPHY365X | 41 | config PHY_MIPHY365X |
42 | tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series" | 42 | tristate "STMicroelectronics MIPHY365X PHY driver for STiH41x series" |
43 | depends on ARCH_STI | 43 | depends on ARCH_STI |
44 | depends on GENERIC_PHY | ||
45 | depends on HAS_IOMEM | 44 | depends on HAS_IOMEM |
46 | depends on OF | 45 | depends on OF |
46 | select GENERIC_PHY | ||
47 | help | 47 | help |
48 | Enable this to support the miphy transceiver (for SATA/PCIE) | 48 | Enable this to support the miphy transceiver (for SATA/PCIE) |
49 | that is part of STMicroelectronics STiH41x SoC series. | 49 | that is part of STMicroelectronics STiH41x SoC series. |
diff --git a/drivers/phy/phy-exynos5-usbdrd.c b/drivers/phy/phy-exynos5-usbdrd.c index b05302b09c9f..392101c8d6b0 100644 --- a/drivers/phy/phy-exynos5-usbdrd.c +++ b/drivers/phy/phy-exynos5-usbdrd.c | |||
@@ -542,6 +542,7 @@ static const struct of_device_id exynos5_usbdrd_phy_of_match[] = { | |||
542 | }, | 542 | }, |
543 | { }, | 543 | { }, |
544 | }; | 544 | }; |
545 | MODULE_DEVICE_TABLE(of, exynos5_usbdrd_phy_of_match); | ||
545 | 546 | ||
546 | static int exynos5_usbdrd_phy_probe(struct platform_device *pdev) | 547 | static int exynos5_usbdrd_phy_probe(struct platform_device *pdev) |
547 | { | 548 | { |
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index e1a6623d4696..9cd33a4bcfb1 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/delay.h> | 34 | #include <linux/delay.h> |
35 | #include <linux/usb/otg.h> | 35 | #include <linux/usb/otg.h> |
36 | #include <linux/phy/phy.h> | 36 | #include <linux/phy/phy.h> |
37 | #include <linux/pm_runtime.h> | ||
37 | #include <linux/usb/musb-omap.h> | 38 | #include <linux/usb/musb-omap.h> |
38 | #include <linux/usb/ulpi.h> | 39 | #include <linux/usb/ulpi.h> |
39 | #include <linux/i2c/twl.h> | 40 | #include <linux/i2c/twl.h> |
@@ -422,37 +423,55 @@ static void twl4030_phy_power(struct twl4030_usb *twl, int on) | |||
422 | } | 423 | } |
423 | } | 424 | } |
424 | 425 | ||
425 | static int twl4030_phy_power_off(struct phy *phy) | 426 | static int twl4030_usb_runtime_suspend(struct device *dev) |
426 | { | 427 | { |
427 | struct twl4030_usb *twl = phy_get_drvdata(phy); | 428 | struct twl4030_usb *twl = dev_get_drvdata(dev); |
428 | 429 | ||
430 | dev_dbg(twl->dev, "%s\n", __func__); | ||
429 | if (twl->asleep) | 431 | if (twl->asleep) |
430 | return 0; | 432 | return 0; |
431 | 433 | ||
432 | twl4030_phy_power(twl, 0); | 434 | twl4030_phy_power(twl, 0); |
433 | twl->asleep = 1; | 435 | twl->asleep = 1; |
434 | dev_dbg(twl->dev, "%s\n", __func__); | 436 | |
435 | return 0; | 437 | return 0; |
436 | } | 438 | } |
437 | 439 | ||
438 | static void __twl4030_phy_power_on(struct twl4030_usb *twl) | 440 | static int twl4030_usb_runtime_resume(struct device *dev) |
439 | { | 441 | { |
442 | struct twl4030_usb *twl = dev_get_drvdata(dev); | ||
443 | |||
444 | dev_dbg(twl->dev, "%s\n", __func__); | ||
445 | if (!twl->asleep) | ||
446 | return 0; | ||
447 | |||
440 | twl4030_phy_power(twl, 1); | 448 | twl4030_phy_power(twl, 1); |
441 | twl4030_i2c_access(twl, 1); | 449 | twl->asleep = 0; |
442 | twl4030_usb_set_mode(twl, twl->usb_mode); | 450 | |
443 | if (twl->usb_mode == T2_USB_MODE_ULPI) | 451 | return 0; |
444 | twl4030_i2c_access(twl, 0); | 452 | } |
453 | |||
454 | static int twl4030_phy_power_off(struct phy *phy) | ||
455 | { | ||
456 | struct twl4030_usb *twl = phy_get_drvdata(phy); | ||
457 | |||
458 | dev_dbg(twl->dev, "%s\n", __func__); | ||
459 | pm_runtime_mark_last_busy(twl->dev); | ||
460 | pm_runtime_put_autosuspend(twl->dev); | ||
461 | |||
462 | return 0; | ||
445 | } | 463 | } |
446 | 464 | ||
447 | static int twl4030_phy_power_on(struct phy *phy) | 465 | static int twl4030_phy_power_on(struct phy *phy) |
448 | { | 466 | { |
449 | struct twl4030_usb *twl = phy_get_drvdata(phy); | 467 | struct twl4030_usb *twl = phy_get_drvdata(phy); |
450 | 468 | ||
451 | if (!twl->asleep) | ||
452 | return 0; | ||
453 | __twl4030_phy_power_on(twl); | ||
454 | twl->asleep = 0; | ||
455 | dev_dbg(twl->dev, "%s\n", __func__); | 469 | dev_dbg(twl->dev, "%s\n", __func__); |
470 | pm_runtime_get_sync(twl->dev); | ||
471 | twl4030_i2c_access(twl, 1); | ||
472 | twl4030_usb_set_mode(twl, twl->usb_mode); | ||
473 | if (twl->usb_mode == T2_USB_MODE_ULPI) | ||
474 | twl4030_i2c_access(twl, 0); | ||
456 | 475 | ||
457 | /* | 476 | /* |
458 | * XXX When VBUS gets driven after musb goes to A mode, | 477 | * XXX When VBUS gets driven after musb goes to A mode, |
@@ -558,9 +577,27 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl) | |||
558 | * USB_LINK_VBUS state. musb_hdrc won't care until it | 577 | * USB_LINK_VBUS state. musb_hdrc won't care until it |
559 | * starts to handle softconnect right. | 578 | * starts to handle softconnect right. |
560 | */ | 579 | */ |
580 | if ((status == OMAP_MUSB_VBUS_VALID) || | ||
581 | (status == OMAP_MUSB_ID_GROUND)) { | ||
582 | if (twl->asleep) | ||
583 | pm_runtime_get_sync(twl->dev); | ||
584 | } else { | ||
585 | if (!twl->asleep) { | ||
586 | pm_runtime_mark_last_busy(twl->dev); | ||
587 | pm_runtime_put_autosuspend(twl->dev); | ||
588 | } | ||
589 | } | ||
561 | omap_musb_mailbox(status); | 590 | omap_musb_mailbox(status); |
562 | } | 591 | } |
563 | sysfs_notify(&twl->dev->kobj, NULL, "vbus"); | 592 | |
593 | /* don't schedule during sleep - irq works right then */ | ||
594 | if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) { | ||
595 | cancel_delayed_work(&twl->id_workaround_work); | ||
596 | schedule_delayed_work(&twl->id_workaround_work, HZ); | ||
597 | } | ||
598 | |||
599 | if (irq) | ||
600 | sysfs_notify(&twl->dev->kobj, NULL, "vbus"); | ||
564 | 601 | ||
565 | return IRQ_HANDLED; | 602 | return IRQ_HANDLED; |
566 | } | 603 | } |
@@ -569,29 +606,8 @@ static void twl4030_id_workaround_work(struct work_struct *work) | |||
569 | { | 606 | { |
570 | struct twl4030_usb *twl = container_of(work, struct twl4030_usb, | 607 | struct twl4030_usb *twl = container_of(work, struct twl4030_usb, |
571 | id_workaround_work.work); | 608 | id_workaround_work.work); |
572 | enum omap_musb_vbus_id_status status; | ||
573 | bool status_changed = false; | ||
574 | |||
575 | status = twl4030_usb_linkstat(twl); | ||
576 | |||
577 | spin_lock_irq(&twl->lock); | ||
578 | if (status >= 0 && status != twl->linkstat) { | ||
579 | twl->linkstat = status; | ||
580 | status_changed = true; | ||
581 | } | ||
582 | spin_unlock_irq(&twl->lock); | ||
583 | |||
584 | if (status_changed) { | ||
585 | dev_dbg(twl->dev, "handle missing status change to %d\n", | ||
586 | status); | ||
587 | omap_musb_mailbox(status); | ||
588 | } | ||
589 | 609 | ||
590 | /* don't schedule during sleep - irq works right then */ | 610 | twl4030_usb_irq(0, twl); |
591 | if (status == OMAP_MUSB_ID_GROUND && !twl->asleep) { | ||
592 | cancel_delayed_work(&twl->id_workaround_work); | ||
593 | schedule_delayed_work(&twl->id_workaround_work, HZ); | ||
594 | } | ||
595 | } | 611 | } |
596 | 612 | ||
597 | static int twl4030_phy_init(struct phy *phy) | 613 | static int twl4030_phy_init(struct phy *phy) |
@@ -599,22 +615,17 @@ static int twl4030_phy_init(struct phy *phy) | |||
599 | struct twl4030_usb *twl = phy_get_drvdata(phy); | 615 | struct twl4030_usb *twl = phy_get_drvdata(phy); |
600 | enum omap_musb_vbus_id_status status; | 616 | enum omap_musb_vbus_id_status status; |
601 | 617 | ||
602 | /* | 618 | pm_runtime_get_sync(twl->dev); |
603 | * Start in sleep state, we'll get called through set_suspend() | ||
604 | * callback when musb is runtime resumed and it's time to start. | ||
605 | */ | ||
606 | __twl4030_phy_power(twl, 0); | ||
607 | twl->asleep = 1; | ||
608 | |||
609 | status = twl4030_usb_linkstat(twl); | 619 | status = twl4030_usb_linkstat(twl); |
610 | twl->linkstat = status; | 620 | twl->linkstat = status; |
611 | 621 | ||
612 | if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) { | 622 | if (status == OMAP_MUSB_ID_GROUND || status == OMAP_MUSB_VBUS_VALID) |
613 | omap_musb_mailbox(twl->linkstat); | 623 | omap_musb_mailbox(twl->linkstat); |
614 | twl4030_phy_power_on(phy); | ||
615 | } | ||
616 | 624 | ||
617 | sysfs_notify(&twl->dev->kobj, NULL, "vbus"); | 625 | sysfs_notify(&twl->dev->kobj, NULL, "vbus"); |
626 | pm_runtime_mark_last_busy(twl->dev); | ||
627 | pm_runtime_put_autosuspend(twl->dev); | ||
628 | |||
618 | return 0; | 629 | return 0; |
619 | } | 630 | } |
620 | 631 | ||
@@ -650,6 +661,11 @@ static const struct phy_ops ops = { | |||
650 | .owner = THIS_MODULE, | 661 | .owner = THIS_MODULE, |
651 | }; | 662 | }; |
652 | 663 | ||
664 | static const struct dev_pm_ops twl4030_usb_pm_ops = { | ||
665 | SET_RUNTIME_PM_OPS(twl4030_usb_runtime_suspend, | ||
666 | twl4030_usb_runtime_resume, NULL) | ||
667 | }; | ||
668 | |||
653 | static int twl4030_usb_probe(struct platform_device *pdev) | 669 | static int twl4030_usb_probe(struct platform_device *pdev) |
654 | { | 670 | { |
655 | struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev); | 671 | struct twl4030_usb_data *pdata = dev_get_platdata(&pdev->dev); |
@@ -726,6 +742,11 @@ static int twl4030_usb_probe(struct platform_device *pdev) | |||
726 | 742 | ||
727 | ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier); | 743 | ATOMIC_INIT_NOTIFIER_HEAD(&twl->phy.notifier); |
728 | 744 | ||
745 | pm_runtime_use_autosuspend(&pdev->dev); | ||
746 | pm_runtime_set_autosuspend_delay(&pdev->dev, 2000); | ||
747 | pm_runtime_enable(&pdev->dev); | ||
748 | pm_runtime_get_sync(&pdev->dev); | ||
749 | |||
729 | /* Our job is to use irqs and status from the power module | 750 | /* Our job is to use irqs and status from the power module |
730 | * to keep the transceiver disabled when nothing's connected. | 751 | * to keep the transceiver disabled when nothing's connected. |
731 | * | 752 | * |
@@ -744,6 +765,9 @@ static int twl4030_usb_probe(struct platform_device *pdev) | |||
744 | return status; | 765 | return status; |
745 | } | 766 | } |
746 | 767 | ||
768 | pm_runtime_mark_last_busy(&pdev->dev); | ||
769 | pm_runtime_put_autosuspend(twl->dev); | ||
770 | |||
747 | dev_info(&pdev->dev, "Initialized TWL4030 USB module\n"); | 771 | dev_info(&pdev->dev, "Initialized TWL4030 USB module\n"); |
748 | return 0; | 772 | return 0; |
749 | } | 773 | } |
@@ -753,6 +777,7 @@ static int twl4030_usb_remove(struct platform_device *pdev) | |||
753 | struct twl4030_usb *twl = platform_get_drvdata(pdev); | 777 | struct twl4030_usb *twl = platform_get_drvdata(pdev); |
754 | int val; | 778 | int val; |
755 | 779 | ||
780 | pm_runtime_get_sync(twl->dev); | ||
756 | cancel_delayed_work(&twl->id_workaround_work); | 781 | cancel_delayed_work(&twl->id_workaround_work); |
757 | device_remove_file(twl->dev, &dev_attr_vbus); | 782 | device_remove_file(twl->dev, &dev_attr_vbus); |
758 | 783 | ||
@@ -772,9 +797,8 @@ static int twl4030_usb_remove(struct platform_device *pdev) | |||
772 | 797 | ||
773 | /* disable complete OTG block */ | 798 | /* disable complete OTG block */ |
774 | twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB); | 799 | twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB); |
775 | | 800 | pm_runtime_mark_last_busy(twl->dev); |
776 | if (!twl->asleep) | 801 | pm_runtime_put(twl->dev); |
777 | twl4030_phy_power(twl, 0); | ||
778 | 802 | ||
779 | return 0; | 803 | return 0; |
780 | } | 804 | } |
@@ -792,6 +816,7 @@ static struct platform_driver twl4030_usb_driver = { | |||
792 | .remove = twl4030_usb_remove, | 816 | .remove = twl4030_usb_remove, |
793 | .driver = { | 817 | .driver = { |
794 | .name = "twl4030_usb", | 818 | .name = "twl4030_usb", |
819 | .pm = &twl4030_usb_pm_ops, | ||
795 | .owner = THIS_MODULE, | 820 | .owner = THIS_MODULE, |
796 | .of_match_table = of_match_ptr(twl4030_usb_id_table), | 821 | .of_match_table = of_match_ptr(twl4030_usb_id_table), |
797 | }, | 822 | }, |
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c index 9ca59a018743..e12e5b07f6d7 100644 --- a/drivers/pinctrl/pinctrl-baytrail.c +++ b/drivers/pinctrl/pinctrl-baytrail.c | |||
@@ -461,6 +461,7 @@ static struct irq_chip byt_irqchip = { | |||
461 | .irq_mask = byt_irq_mask, | 461 | .irq_mask = byt_irq_mask, |
462 | .irq_unmask = byt_irq_unmask, | 462 | .irq_unmask = byt_irq_unmask, |
463 | .irq_set_type = byt_irq_type, | 463 | .irq_set_type = byt_irq_type, |
464 | .flags = IRQCHIP_SKIP_SET_WAKE, | ||
464 | }; | 465 | }; |
465 | 466 | ||
466 | static void byt_gpio_irq_init_hw(struct byt_gpio *vg) | 467 | static void byt_gpio_irq_init_hw(struct byt_gpio *vg) |
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index fc468a3d95ce..02152de135b5 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c | |||
@@ -88,7 +88,6 @@ struct ideapad_private { | |||
88 | struct dentry *debug; | 88 | struct dentry *debug; |
89 | unsigned long cfg; | 89 | unsigned long cfg; |
90 | bool has_hw_rfkill_switch; | 90 | bool has_hw_rfkill_switch; |
91 | bool has_touchpad_control; | ||
92 | }; | 91 | }; |
93 | 92 | ||
94 | static bool no_bt_rfkill; | 93 | static bool no_bt_rfkill; |
@@ -456,7 +455,7 @@ struct ideapad_rfk_data { | |||
456 | int type; | 455 | int type; |
457 | }; | 456 | }; |
458 | 457 | ||
459 | const const struct ideapad_rfk_data ideapad_rfk_data[] = { | 458 | static const struct ideapad_rfk_data ideapad_rfk_data[] = { |
460 | { "ideapad_wlan", CFG_WIFI_BIT, VPCCMD_W_WIFI, RFKILL_TYPE_WLAN }, | 459 | { "ideapad_wlan", CFG_WIFI_BIT, VPCCMD_W_WIFI, RFKILL_TYPE_WLAN }, |
461 | { "ideapad_bluetooth", CFG_BT_BIT, VPCCMD_W_BT, RFKILL_TYPE_BLUETOOTH }, | 460 | { "ideapad_bluetooth", CFG_BT_BIT, VPCCMD_W_BT, RFKILL_TYPE_BLUETOOTH }, |
462 | { "ideapad_3g", CFG_3G_BIT, VPCCMD_W_3G, RFKILL_TYPE_WWAN }, | 461 | { "ideapad_3g", CFG_3G_BIT, VPCCMD_W_3G, RFKILL_TYPE_WWAN }, |
@@ -767,9 +766,6 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv) | |||
767 | { | 766 | { |
768 | unsigned long value; | 767 | unsigned long value; |
769 | 768 | ||
770 | if (!priv->has_touchpad_control) | ||
771 | return; | ||
772 | |||
773 | /* Without reading from EC touchpad LED doesn't switch state */ | 769 | /* Without reading from EC touchpad LED doesn't switch state */ |
774 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) { | 770 | if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) { |
775 | /* Some IdeaPads don't really turn off touchpad - they only | 771 | /* Some IdeaPads don't really turn off touchpad - they only |
@@ -833,29 +829,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data) | |||
833 | * always results in 0 on these models, causing ideapad_laptop to wrongly | 829 | * always results in 0 on these models, causing ideapad_laptop to wrongly |
834 | * report all radios as hardware-blocked. | 830 | * report all radios as hardware-blocked. |
835 | */ | 831 | */ |
836 | static struct dmi_system_id no_hw_rfkill_list[] = { | 832 | static const struct dmi_system_id no_hw_rfkill_list[] = { |
837 | { | ||
838 | .ident = "Lenovo Yoga 2 11 / 13 / Pro", | ||
839 | .matches = { | ||
840 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
841 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"), | ||
842 | }, | ||
843 | }, | ||
844 | {} | ||
845 | }; | ||
846 | |||
847 | /* | ||
848 | * Some models don't offer touchpad ctrl through the ideapad interface, causing | ||
849 | * ideapad_sync_touchpad_state to send wrong touchpad enable/disable events. | ||
850 | */ | ||
851 | static struct dmi_system_id no_touchpad_ctrl_list[] = { | ||
852 | { | ||
853 | .ident = "Lenovo Yoga 1 series", | ||
854 | .matches = { | ||
855 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
856 | DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga"), | ||
857 | }, | ||
858 | }, | ||
859 | { | 833 | { |
860 | .ident = "Lenovo Yoga 2 11 / 13 / Pro", | 834 | .ident = "Lenovo Yoga 2 11 / 13 / Pro", |
861 | .matches = { | 835 | .matches = { |
@@ -889,7 +863,6 @@ static int ideapad_acpi_add(struct platform_device *pdev) | |||
889 | priv->adev = adev; | 863 | priv->adev = adev; |
890 | priv->platform_device = pdev; | 864 | priv->platform_device = pdev; |
891 | priv->has_hw_rfkill_switch = !dmi_check_system(no_hw_rfkill_list); | 865 | priv->has_hw_rfkill_switch = !dmi_check_system(no_hw_rfkill_list); |
892 | priv->has_touchpad_control = !dmi_check_system(no_touchpad_ctrl_list); | ||
893 | 866 | ||
894 | ret = ideapad_sysfs_init(priv); | 867 | ret = ideapad_sysfs_init(priv); |
895 | if (ret) | 868 | if (ret) |
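Note: the ideapad-laptop changes above drop the separate no_touchpad_ctrl_list quirk and keep a single, now const, no_hw_rfkill DMI list whose result is cached at probe time. A minimal sketch of that DMI-quirk pattern follows; the table entries and identifiers are made up for illustration, they are not a real blacklist.

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/types.h>

/* Illustrative quirk table; vendor/product strings are placeholders. */
static const struct dmi_system_id example_quirk_list[] = {
	{
		.ident = "Example vendor laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Example Model"),
		},
	},
	{ }	/* terminating entry */
};

static bool example_has_quirk;

static int __init example_init(void)
{
	/* dmi_check_system() returns the number of matching entries */
	example_has_quirk = dmi_check_system(example_quirk_list);
	return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");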
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index b062d3d7b373..d0dce734b2ed 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c | |||
@@ -1255,10 +1255,15 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, | |||
1255 | const char *buf, size_t count) | 1255 | const char *buf, size_t count) |
1256 | { | 1256 | { |
1257 | struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); | 1257 | struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); |
1258 | int mode = -1; | 1258 | int mode; |
1259 | int time = -1; | 1259 | int time; |
1260 | int ret; | ||
1260 | 1261 | ||
1261 | if (sscanf(buf, "%i", &mode) != 1 && (mode != 2 || mode != 1)) | 1262 | |
1263 | ret = kstrtoint(buf, 0, &mode); | ||
1264 | if (ret) | ||
1265 | return ret; | ||
1266 | if (mode != SCI_KBD_MODE_FNZ && mode != SCI_KBD_MODE_AUTO) | ||
1262 | return -EINVAL; | 1267 | return -EINVAL; |
1263 | 1268 | ||
1264 | /* Set the Keyboard Backlight Mode where: | 1269 | /* Set the Keyboard Backlight Mode where: |
@@ -1266,11 +1271,12 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, | |||
1266 | * Auto - KBD backlight turns off automatically in given time | 1271 | * Auto - KBD backlight turns off automatically in given time |
1267 | * FN-Z - KBD backlight "toggles" when hotkey pressed | 1272 | * FN-Z - KBD backlight "toggles" when hotkey pressed |
1268 | */ | 1273 | */ |
1269 | if (mode != -1 && toshiba->kbd_mode != mode) { | 1274 | if (toshiba->kbd_mode != mode) { |
1270 | time = toshiba->kbd_time << HCI_MISC_SHIFT; | 1275 | time = toshiba->kbd_time << HCI_MISC_SHIFT; |
1271 | time = time + toshiba->kbd_mode; | 1276 | time = time + toshiba->kbd_mode; |
1272 | if (toshiba_kbd_illum_status_set(toshiba, time) < 0) | 1277 | ret = toshiba_kbd_illum_status_set(toshiba, time); |
1273 | return -EIO; | 1278 | if (ret) |
1279 | return ret; | ||
1274 | toshiba->kbd_mode = mode; | 1280 | toshiba->kbd_mode = mode; |
1275 | } | 1281 | } |
1276 | 1282 | ||
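Note: the toshiba_acpi hunk above is worth a closer look. The removed sscanf() check was always true (mode != 2 || mode != 1 holds for every value), so invalid input slipped through; the replacement parses with kstrtoint(), compares against the two valid modes, and forwards the real error code from the HCI write instead of a blanket -EIO. A rough sketch of a sysfs store callback following the same pattern, with a hypothetical attribute and limits:

#include <linux/device.h>
#include <linux/kernel.h>

/* Hypothetical store callback: strict integer parsing plus range check. */
static ssize_t example_mode_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	int mode, ret;

	ret = kstrtoint(buf, 0, &mode);	/* rejects trailing garbage, unlike sscanf */
	if (ret)
		return ret;
	if (mode != 1 && mode != 2)	/* only two modes are valid in this sketch */
		return -EINVAL;

	/* program the new mode into the hardware here, propagating its error code */

	return count;
}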
@@ -1857,9 +1863,16 @@ static int toshiba_acpi_resume(struct device *device) | |||
1857 | { | 1863 | { |
1858 | struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device)); | 1864 | struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device)); |
1859 | u32 result; | 1865 | u32 result; |
1866 | acpi_status status; | ||
1867 | |||
1868 | if (dev->hotkey_dev) { | ||
1869 | status = acpi_evaluate_object(dev->acpi_dev->handle, "ENAB", | ||
1870 | NULL, NULL); | ||
1871 | if (ACPI_FAILURE(status)) | ||
1872 | pr_info("Unable to re-enable hotkeys\n"); | ||
1860 | 1873 | ||
1861 | if (dev->hotkey_dev) | ||
1862 | hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE, &result); | 1874 | hci_write1(dev, HCI_HOTKEY_EVENT, HCI_HOTKEY_ENABLE, &result); |
1875 | } | ||
1863 | 1876 | ||
1864 | return 0; | 1877 | return 0; |
1865 | } | 1878 | } |
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index b1cda6ffdbcc..45e05b32f9b6 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c | |||
@@ -953,6 +953,7 @@ static const struct x86_cpu_id rapl_ids[] = { | |||
953 | { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */ | 953 | { X86_VENDOR_INTEL, 6, 0x3a},/* Ivy Bridge */ |
954 | { X86_VENDOR_INTEL, 6, 0x3c},/* Haswell */ | 954 | { X86_VENDOR_INTEL, 6, 0x3c},/* Haswell */ |
955 | { X86_VENDOR_INTEL, 6, 0x3d},/* Broadwell */ | 955 | { X86_VENDOR_INTEL, 6, 0x3d},/* Broadwell */ |
956 | { X86_VENDOR_INTEL, 6, 0x3f},/* Haswell */ | ||
956 | { X86_VENDOR_INTEL, 6, 0x45},/* Haswell ULT */ | 957 | { X86_VENDOR_INTEL, 6, 0x45},/* Haswell ULT */ |
957 | /* TODO: Add more CPU IDs after testing */ | 958 | /* TODO: Add more CPU IDs after testing */ |
958 | {} | 959 | {} |
@@ -1166,11 +1167,10 @@ static int rapl_detect_domains(struct rapl_package *rp, int cpu) | |||
1166 | 1167 | ||
1167 | for (i = 0; i < RAPL_DOMAIN_MAX; i++) { | 1168 | for (i = 0; i < RAPL_DOMAIN_MAX; i++) { |
1168 | /* use physical package id to read counters */ | 1169 | /* use physical package id to read counters */ |
1169 | if (!rapl_check_domain(cpu, i)) | 1170 | if (!rapl_check_domain(cpu, i)) { |
1170 | rp->domain_map |= 1 << i; | 1171 | rp->domain_map |= 1 << i; |
1171 | else | 1172 | pr_info("Found RAPL domain %s\n", rapl_domain_names[i]); |
1172 | pr_warn("RAPL domain %s detection failed\n", | 1173 | } |
1173 | rapl_domain_names[i]); | ||
1174 | } | 1174 | } |
1175 | rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); | 1175 | rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); |
1176 | if (!rp->nr_domains) { | 1176 | if (!rp->nr_domains) { |
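Note: in the intel_rapl hunk above, an absent domain is no longer treated as a warning-worthy failure; only domains that probe successfully are recorded in domain_map, and nr_domains is simply the population count of that mask. A tiny sketch of the counting step, with invented names:

#include <linux/bitmap.h>

#define EXAMPLE_DOMAIN_MAX 4	/* placeholder limit */

/* Count how many domains were detected in the bitmask. */
static int example_count_domains(unsigned long domain_map)
{
	return bitmap_weight(&domain_map, EXAMPLE_DOMAIN_MAX);
}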
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 2ead7e78c456..14ba80bfa571 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c | |||
@@ -77,7 +77,7 @@ EXPORT_SYMBOL_GPL(dasd_nofcx); | |||
77 | * strings when running as a module. | 77 | * strings when running as a module. |
78 | */ | 78 | */ |
79 | static char *dasd[256]; | 79 | static char *dasd[256]; |
80 | module_param_array(dasd, charp, NULL, 0); | 80 | module_param_array(dasd, charp, NULL, S_IRUGO); |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * Single spinlock to protect devmap and servermap structures and lists. | 83 | * Single spinlock to protect devmap and servermap structures and lists. |
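Note: the dasd_devmap change is a one-liner, but its effect is user-visible: giving the module parameter array S_IRUGO instead of 0 makes the current values readable under /sys/module/<module>/parameters/. A minimal sketch of a readable parameter array (names and size are placeholders):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

/* Illustrative parameter array, readable from sysfs but not writable. */
static char *example_devices[8];
module_param_array(example_devices, charp, NULL, S_IRUGO);
MODULE_PARM_DESC(example_devices, "example device specification strings");

MODULE_LICENSE("GPL");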
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 97ef37b51068..e7646ce3d659 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -889,6 +889,7 @@ extern const struct attribute_group *qeth_generic_attr_groups[]; | |||
889 | extern const struct attribute_group *qeth_osn_attr_groups[]; | 889 | extern const struct attribute_group *qeth_osn_attr_groups[]; |
890 | extern struct workqueue_struct *qeth_wq; | 890 | extern struct workqueue_struct *qeth_wq; |
891 | 891 | ||
892 | int qeth_card_hw_is_reachable(struct qeth_card *); | ||
892 | const char *qeth_get_cardname_short(struct qeth_card *); | 893 | const char *qeth_get_cardname_short(struct qeth_card *); |
893 | int qeth_realloc_buffer_pool(struct qeth_card *, int); | 894 | int qeth_realloc_buffer_pool(struct qeth_card *, int); |
894 | int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); | 895 | int qeth_core_load_discipline(struct qeth_card *, enum qeth_discipline_id); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index c0d6ba8655c7..fd22c811cbe1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -73,6 +73,13 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); | |||
73 | struct workqueue_struct *qeth_wq; | 73 | struct workqueue_struct *qeth_wq; |
74 | EXPORT_SYMBOL_GPL(qeth_wq); | 74 | EXPORT_SYMBOL_GPL(qeth_wq); |
75 | 75 | ||
76 | int qeth_card_hw_is_reachable(struct qeth_card *card) | ||
77 | { | ||
78 | return (card->state == CARD_STATE_SOFTSETUP) || | ||
79 | (card->state == CARD_STATE_UP); | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(qeth_card_hw_is_reachable); | ||
82 | |||
76 | static void qeth_close_dev_handler(struct work_struct *work) | 83 | static void qeth_close_dev_handler(struct work_struct *work) |
77 | { | 84 | { |
78 | struct qeth_card *card; | 85 | struct qeth_card *card; |
@@ -5790,6 +5797,7 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev, | |||
5790 | struct qeth_card *card = netdev->ml_priv; | 5797 | struct qeth_card *card = netdev->ml_priv; |
5791 | enum qeth_link_types link_type; | 5798 | enum qeth_link_types link_type; |
5792 | struct carrier_info carrier_info; | 5799 | struct carrier_info carrier_info; |
5800 | int rc; | ||
5793 | u32 speed; | 5801 | u32 speed; |
5794 | 5802 | ||
5795 | if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) | 5803 | if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) |
@@ -5832,8 +5840,14 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev, | |||
5832 | /* Check if we can obtain more accurate information. */ | 5840 | /* Check if we can obtain more accurate information. */ |
5833 | /* If QUERY_CARD_INFO command is not supported or fails, */ | 5841 | /* If QUERY_CARD_INFO command is not supported or fails, */ |
5834 | /* just return the heuristics that was filled above. */ | 5842 | /* just return the heuristics that was filled above. */ |
5835 | if (qeth_query_card_info(card, &carrier_info) != 0) | 5843 | if (!qeth_card_hw_is_reachable(card)) |
5844 | return -ENODEV; | ||
5845 | rc = qeth_query_card_info(card, &carrier_info); | ||
5846 | if (rc == -EOPNOTSUPP) /* for old hardware, return heuristic */ | ||
5836 | return 0; | 5847 | return 0; |
5848 | if (rc) /* report error from the hardware operation */ | ||
5849 | return rc; | ||
5850 | /* on success, fill in the information got from the hardware */ | ||
5837 | 5851 | ||
5838 | netdev_dbg(netdev, | 5852 | netdev_dbg(netdev, |
5839 | "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", | 5853 | "card info: card_type=0x%02x, port_mode=0x%04x, port_speed=0x%08x\n", |
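Note: the qeth hunks move qeth_card_hw_is_reachable() into the core (exported with EXPORT_SYMBOL_GPL so the l2 sysfs code below can drop its private copy) and make the ethtool path distinguish three outcomes: card unreachable (-ENODEV), query unsupported on old hardware (-EOPNOTSUPP, silently keep the heuristic values), and a genuine failure (propagate the error). A compressed, self-contained sketch of that shape with invented types and helpers:

#include <linux/errno.h>
#include <linux/types.h>

struct example_card { bool reachable; };
struct example_info { int speed; };

static bool example_card_is_reachable(const struct example_card *card)
{
	return card->reachable;
}

/* Stub for the optional hardware query; real code would talk to firmware. */
static int example_query_card_info(struct example_card *card,
				   struct example_info *info)
{
	return -EOPNOTSUPP;
}

static int example_fill_settings(struct example_card *card,
				 struct example_info *info)
{
	int rc;

	if (!example_card_is_reachable(card))
		return -ENODEV;

	rc = example_query_card_info(card, info);
	if (rc == -EOPNOTSUPP)	/* unsupported: keep the heuristic defaults */
		return 0;
	if (rc)			/* genuine failure: propagate it */
		return rc;

	info->speed = 10000;	/* otherwise fill in the queried values */
	return 0;
}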
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c index ae1bc04b8653..59e3aa538b4d 100644 --- a/drivers/s390/net/qeth_l2_sys.c +++ b/drivers/s390/net/qeth_l2_sys.c | |||
@@ -5,17 +5,12 @@ | |||
5 | 5 | ||
6 | #include <linux/slab.h> | 6 | #include <linux/slab.h> |
7 | #include <asm/ebcdic.h> | 7 | #include <asm/ebcdic.h> |
8 | #include "qeth_core.h" | ||
8 | #include "qeth_l2.h" | 9 | #include "qeth_l2.h" |
9 | 10 | ||
10 | #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ | 11 | #define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \ |
11 | struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) | 12 | struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store) |
12 | 13 | ||
13 | static int qeth_card_hw_is_reachable(struct qeth_card *card) | ||
14 | { | ||
15 | return (card->state == CARD_STATE_SOFTSETUP) || | ||
16 | (card->state == CARD_STATE_UP); | ||
17 | } | ||
18 | |||
19 | static ssize_t qeth_bridge_port_role_state_show(struct device *dev, | 14 | static ssize_t qeth_bridge_port_role_state_show(struct device *dev, |
20 | struct device_attribute *attr, char *buf, | 15 | struct device_attribute *attr, char *buf, |
21 | int show_state) | 16 | int show_state) |
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c index 19396dc4ee47..bed2fedeb057 100644 --- a/drivers/ssb/b43_pci_bridge.c +++ b/drivers/ssb/b43_pci_bridge.c | |||
@@ -38,6 +38,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = { | |||
38 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) }, | 38 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432b) }, |
39 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) }, | 39 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x432c) }, |
40 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) }, | 40 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4350) }, |
41 | { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4351) }, | ||
41 | { 0, }, | 42 | { 0, }, |
42 | }; | 43 | }; |
43 | MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl); | 44 | MODULE_DEVICE_TABLE(pci, b43_pci_bridge_tbl); |
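Note: the ssb change simply adds PCI device 14e4:4351 to the b43 bridge's match table. For context, such a table is a zero-terminated array handed to MODULE_DEVICE_TABLE() so the module autoloads when a matching device appears; a stripped-down sketch with placeholder IDs:

#include <linux/module.h>
#include <linux/pci.h>

/* Illustrative ID table; 0x1234/0x5678 are placeholder vendor/device IDs. */
static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return pci_enable_device(pdev);
}

static void example_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_driver example_pci_driver = {
	.name     = "example-pci",
	.id_table = example_pci_tbl,
	.probe    = example_probe,
	.remove   = example_remove,
};
module_pci_driver(example_pci_driver);
MODULE_LICENSE("GPL");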
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c index e7b2e0234196..69139ce7420d 100644 --- a/drivers/staging/android/sync.c +++ b/drivers/staging/android/sync.c | |||
@@ -199,7 +199,6 @@ struct sync_fence *sync_fence_create(const char *name, struct sync_pt *pt) | |||
199 | fence->num_fences = 1; | 199 | fence->num_fences = 1; |
200 | atomic_set(&fence->status, 1); | 200 | atomic_set(&fence->status, 1); |
201 | 201 | ||
202 | fence_get(&pt->base); | ||
203 | fence->cbs[0].sync_pt = &pt->base; | 202 | fence->cbs[0].sync_pt = &pt->base; |
204 | fence->cbs[0].fence = fence; | 203 | fence->cbs[0].fence = fence; |
205 | if (fence_add_callback(&pt->base, &fence->cbs[0].cb, | 204 | if (fence_add_callback(&pt->base, &fence->cbs[0].cb, |
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/staging/imx-drm/imx-ldb.c index 7e3f019d7e72..4662e00b456a 100644 --- a/drivers/staging/imx-drm/imx-ldb.c +++ b/drivers/staging/imx-drm/imx-ldb.c | |||
@@ -574,6 +574,9 @@ static void imx_ldb_unbind(struct device *dev, struct device *master, | |||
574 | for (i = 0; i < 2; i++) { | 574 | for (i = 0; i < 2; i++) { |
575 | struct imx_ldb_channel *channel = &imx_ldb->channel[i]; | 575 | struct imx_ldb_channel *channel = &imx_ldb->channel[i]; |
576 | 576 | ||
577 | if (!channel->connector.funcs) | ||
578 | continue; | ||
579 | |||
577 | channel->connector.funcs->destroy(&channel->connector); | 580 | channel->connector.funcs->destroy(&channel->connector); |
578 | channel->encoder.funcs->destroy(&channel->encoder); | 581 | channel->encoder.funcs->destroy(&channel->encoder); |
579 | } | 582 | } |
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/staging/imx-drm/ipuv3-plane.c index 6f393a11f44d..50de10a550e9 100644 --- a/drivers/staging/imx-drm/ipuv3-plane.c +++ b/drivers/staging/imx-drm/ipuv3-plane.c | |||
@@ -281,7 +281,8 @@ static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode) | |||
281 | 281 | ||
282 | ipu_idmac_put(ipu_plane->ipu_ch); | 282 | ipu_idmac_put(ipu_plane->ipu_ch); |
283 | ipu_dmfc_put(ipu_plane->dmfc); | 283 | ipu_dmfc_put(ipu_plane->dmfc); |
284 | ipu_dp_put(ipu_plane->dp); | 284 | if (ipu_plane->dp) |
285 | ipu_dp_put(ipu_plane->dp); | ||
285 | } | 286 | } |
286 | } | 287 | } |
287 | 288 | ||
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c index 0367f5a2cfe4..0c59e26c0805 100644 --- a/drivers/staging/lustre/lustre/llite/llite_lib.c +++ b/drivers/staging/lustre/lustre/llite/llite_lib.c | |||
@@ -568,7 +568,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt, | |||
568 | if (sb->s_root == NULL) { | 568 | if (sb->s_root == NULL) { |
569 | CERROR("%s: can't make root dentry\n", | 569 | CERROR("%s: can't make root dentry\n", |
570 | ll_get_fsname(sb, NULL, 0)); | 570 | ll_get_fsname(sb, NULL, 0)); |
571 | GOTO(out_root, err = -ENOMEM); | 571 | GOTO(out_lock_cn_cb, err = -ENOMEM); |
572 | } | 572 | } |
573 | 573 | ||
574 | sbi->ll_sdev_orig = sb->s_dev; | 574 | sbi->ll_sdev_orig = sb->s_dev; |
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c index 4db7987ec225..57d9df84ce5d 100644 --- a/drivers/tty/serial/8250/8250_dw.c +++ b/drivers/tty/serial/8250/8250_dw.c | |||
@@ -540,6 +540,7 @@ static const struct acpi_device_id dw8250_acpi_match[] = { | |||
540 | { "INT3434", 0 }, | 540 | { "INT3434", 0 }, |
541 | { "INT3435", 0 }, | 541 | { "INT3435", 0 }, |
542 | { "80860F0A", 0 }, | 542 | { "80860F0A", 0 }, |
543 | { "8086228A", 0 }, | ||
543 | { }, | 544 | { }, |
544 | }; | 545 | }; |
545 | MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); | 546 | MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); |
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c index 7b63677475c1..d7d4584549a5 100644 --- a/drivers/tty/serial/atmel_serial.c +++ b/drivers/tty/serial/atmel_serial.c | |||
@@ -527,6 +527,45 @@ static void atmel_enable_ms(struct uart_port *port) | |||
527 | } | 527 | } |
528 | 528 | ||
529 | /* | 529 | /* |
530 | * Disable modem status interrupts | ||
531 | */ | ||
532 | static void atmel_disable_ms(struct uart_port *port) | ||
533 | { | ||
534 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
535 | uint32_t idr = 0; | ||
536 | |||
537 | /* | ||
538 | * Interrupt should not be disabled twice | ||
539 | */ | ||
540 | if (!atmel_port->ms_irq_enabled) | ||
541 | return; | ||
542 | |||
543 | atmel_port->ms_irq_enabled = false; | ||
544 | |||
545 | if (atmel_port->gpio_irq[UART_GPIO_CTS] >= 0) | ||
546 | disable_irq(atmel_port->gpio_irq[UART_GPIO_CTS]); | ||
547 | else | ||
548 | idr |= ATMEL_US_CTSIC; | ||
549 | |||
550 | if (atmel_port->gpio_irq[UART_GPIO_DSR] >= 0) | ||
551 | disable_irq(atmel_port->gpio_irq[UART_GPIO_DSR]); | ||
552 | else | ||
553 | idr |= ATMEL_US_DSRIC; | ||
554 | |||
555 | if (atmel_port->gpio_irq[UART_GPIO_RI] >= 0) | ||
556 | disable_irq(atmel_port->gpio_irq[UART_GPIO_RI]); | ||
557 | else | ||
558 | idr |= ATMEL_US_RIIC; | ||
559 | |||
560 | if (atmel_port->gpio_irq[UART_GPIO_DCD] >= 0) | ||
561 | disable_irq(atmel_port->gpio_irq[UART_GPIO_DCD]); | ||
562 | else | ||
563 | idr |= ATMEL_US_DCDIC; | ||
564 | |||
565 | UART_PUT_IDR(port, idr); | ||
566 | } | ||
567 | |||
568 | /* | ||
530 | * Control the transmission of a break signal | 569 | * Control the transmission of a break signal |
531 | */ | 570 | */ |
532 | static void atmel_break_ctl(struct uart_port *port, int break_state) | 571 | static void atmel_break_ctl(struct uart_port *port, int break_state) |
@@ -1993,7 +2032,9 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, | |||
1993 | 2032 | ||
1994 | /* CTS flow-control and modem-status interrupts */ | 2033 | /* CTS flow-control and modem-status interrupts */ |
1995 | if (UART_ENABLE_MS(port, termios->c_cflag)) | 2034 | if (UART_ENABLE_MS(port, termios->c_cflag)) |
1996 | port->ops->enable_ms(port); | 2035 | atmel_enable_ms(port); |
2036 | else | ||
2037 | atmel_disable_ms(port); | ||
1997 | 2038 | ||
1998 | spin_unlock_irqrestore(&port->lock, flags); | 2039 | spin_unlock_irqrestore(&port->lock, flags); |
1999 | } | 2040 | } |
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c index c3c252db8b61..7f8027f27ab9 100644 --- a/drivers/tty/serial/xilinx_uartps.c +++ b/drivers/tty/serial/xilinx_uartps.c | |||
@@ -581,7 +581,7 @@ static unsigned int cdns_uart_tx_empty(struct uart_port *port) | |||
581 | { | 581 | { |
582 | unsigned int status; | 582 | unsigned int status; |
583 | 583 | ||
584 | status = cdns_uart_readl(CDNS_UART_ISR_OFFSET) & CDNS_UART_IXR_TXEMPTY; | 584 | status = cdns_uart_readl(CDNS_UART_SR_OFFSET) & CDNS_UART_SR_TXEMPTY; |
585 | return status ? TIOCSER_TEMT : 0; | 585 | return status ? TIOCSER_TEMT : 0; |
586 | } | 586 | } |
587 | 587 | ||
diff --git a/drivers/usb/chipidea/ci_hdrc_msm.c b/drivers/usb/chipidea/ci_hdrc_msm.c index d72b9d2de2c5..4935ac38fd00 100644 --- a/drivers/usb/chipidea/ci_hdrc_msm.c +++ b/drivers/usb/chipidea/ci_hdrc_msm.c | |||
@@ -20,13 +20,13 @@ | |||
20 | static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) | 20 | static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) |
21 | { | 21 | { |
22 | struct device *dev = ci->gadget.dev.parent; | 22 | struct device *dev = ci->gadget.dev.parent; |
23 | int val; | ||
24 | 23 | ||
25 | switch (event) { | 24 | switch (event) { |
26 | case CI_HDRC_CONTROLLER_RESET_EVENT: | 25 | case CI_HDRC_CONTROLLER_RESET_EVENT: |
27 | dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n"); | 26 | dev_dbg(dev, "CI_HDRC_CONTROLLER_RESET_EVENT received\n"); |
28 | writel(0, USB_AHBBURST); | 27 | writel(0, USB_AHBBURST); |
29 | writel(0, USB_AHBMODE); | 28 | writel(0, USB_AHBMODE); |
29 | usb_phy_init(ci->transceiver); | ||
30 | break; | 30 | break; |
31 | case CI_HDRC_CONTROLLER_STOPPED_EVENT: | 31 | case CI_HDRC_CONTROLLER_STOPPED_EVENT: |
32 | dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n"); | 32 | dev_dbg(dev, "CI_HDRC_CONTROLLER_STOPPED_EVENT received\n"); |
@@ -34,10 +34,7 @@ static void ci_hdrc_msm_notify_event(struct ci_hdrc *ci, unsigned event) | |||
34 | * Put the transceiver in non-driving mode. Otherwise host | 34 | * Put the transceiver in non-driving mode. Otherwise host |
35 | * may not detect soft-disconnection. | 35 | * may not detect soft-disconnection. |
36 | */ | 36 | */ |
37 | val = usb_phy_io_read(ci->transceiver, ULPI_FUNC_CTRL); | 37 | usb_phy_notify_disconnect(ci->transceiver, USB_SPEED_UNKNOWN); |
38 | val &= ~ULPI_FUNC_CTRL_OPMODE_MASK; | ||
39 | val |= ULPI_FUNC_CTRL_OPMODE_NONDRIVING; | ||
40 | usb_phy_io_write(ci->transceiver, val, ULPI_FUNC_CTRL); | ||
41 | break; | 38 | break; |
42 | default: | 39 | default: |
43 | dev_dbg(dev, "unknown ci_hdrc event\n"); | 40 | dev_dbg(dev, "unknown ci_hdrc event\n"); |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 46f5161c7891..d481c99a20d7 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -5024,9 +5024,10 @@ static void hub_events(void) | |||
5024 | 5024 | ||
5025 | hub = list_entry(tmp, struct usb_hub, event_list); | 5025 | hub = list_entry(tmp, struct usb_hub, event_list); |
5026 | kref_get(&hub->kref); | 5026 | kref_get(&hub->kref); |
5027 | hdev = hub->hdev; | ||
5028 | usb_get_dev(hdev); | ||
5027 | spin_unlock_irq(&hub_event_lock); | 5029 | spin_unlock_irq(&hub_event_lock); |
5028 | 5030 | ||
5029 | hdev = hub->hdev; | ||
5030 | hub_dev = hub->intfdev; | 5031 | hub_dev = hub->intfdev; |
5031 | intf = to_usb_interface(hub_dev); | 5032 | intf = to_usb_interface(hub_dev); |
5032 | dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n", | 5033 | dev_dbg(hub_dev, "state %d ports %d chg %04x evt %04x\n", |
@@ -5139,6 +5140,7 @@ static void hub_events(void) | |||
5139 | usb_autopm_put_interface(intf); | 5140 | usb_autopm_put_interface(intf); |
5140 | loop_disconnected: | 5141 | loop_disconnected: |
5141 | usb_unlock_device(hdev); | 5142 | usb_unlock_device(hdev); |
5143 | usb_put_dev(hdev); | ||
5142 | kref_put(&hub->kref, hub_release); | 5144 | kref_put(&hub->kref, hub_release); |
5143 | 5145 | ||
5144 | } /* end while (1) */ | 5146 | } /* end while (1) */ |
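Note: the hub.c hunk pins the hub's usb_device with usb_get_dev() while hub_event_lock is still held and drops the reference with usb_put_dev() only after the event has been fully handled, so the device cannot be freed while hub_events() works on it without the lock. The same take-a-reference-before-dropping-the-lock idiom, reduced to a generic kref-based sketch (the object and field names are invented):

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative refcounted object; not taken from hub.c. */
struct example_obj {
	struct kref kref;	/* assumed kref_init()ed at allocation time */
	spinlock_t lock;
	int state;
};

static void example_release(struct kref *kref)
{
	kfree(container_of(kref, struct example_obj, kref));
}

static void example_handle_event(struct example_obj *obj)
{
	spin_lock(&obj->lock);
	kref_get(&obj->kref);		/* pin the object ... */
	spin_unlock(&obj->lock);	/* ... before dropping the lock */

	/* long-running event processing runs here without the lock held */

	kref_put(&obj->kref, example_release);	/* paired put when done */
}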
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 7c9618e916e2..ce6071d65d51 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c | |||
@@ -1649,6 +1649,7 @@ static void s3c_hsotg_txfifo_flush(struct s3c_hsotg *hsotg, unsigned int idx) | |||
1649 | dev_err(hsotg->dev, | 1649 | dev_err(hsotg->dev, |
1650 | "%s: timeout flushing fifo (GRSTCTL=%08x)\n", | 1650 | "%s: timeout flushing fifo (GRSTCTL=%08x)\n", |
1651 | __func__, val); | 1651 | __func__, val); |
1652 | break; | ||
1652 | } | 1653 | } |
1653 | 1654 | ||
1654 | udelay(1); | 1655 | udelay(1); |
@@ -2747,13 +2748,14 @@ static void s3c_hsotg_phy_enable(struct s3c_hsotg *hsotg) | |||
2747 | 2748 | ||
2748 | dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev); | 2749 | dev_dbg(hsotg->dev, "pdev 0x%p\n", pdev); |
2749 | 2750 | ||
2750 | if (hsotg->phy) { | 2751 | if (hsotg->uphy) |
2751 | phy_init(hsotg->phy); | ||
2752 | phy_power_on(hsotg->phy); | ||
2753 | } else if (hsotg->uphy) | ||
2754 | usb_phy_init(hsotg->uphy); | 2752 | usb_phy_init(hsotg->uphy); |
2755 | else if (hsotg->plat->phy_init) | 2753 | else if (hsotg->plat && hsotg->plat->phy_init) |
2756 | hsotg->plat->phy_init(pdev, hsotg->plat->phy_type); | 2754 | hsotg->plat->phy_init(pdev, hsotg->plat->phy_type); |
2755 | else { | ||
2756 | phy_init(hsotg->phy); | ||
2757 | phy_power_on(hsotg->phy); | ||
2758 | } | ||
2757 | } | 2759 | } |
2758 | 2760 | ||
2759 | /** | 2761 | /** |
@@ -2767,13 +2769,14 @@ static void s3c_hsotg_phy_disable(struct s3c_hsotg *hsotg) | |||
2767 | { | 2769 | { |
2768 | struct platform_device *pdev = to_platform_device(hsotg->dev); | 2770 | struct platform_device *pdev = to_platform_device(hsotg->dev); |
2769 | 2771 | ||
2770 | if (hsotg->phy) { | 2772 | if (hsotg->uphy) |
2771 | phy_power_off(hsotg->phy); | ||
2772 | phy_exit(hsotg->phy); | ||
2773 | } else if (hsotg->uphy) | ||
2774 | usb_phy_shutdown(hsotg->uphy); | 2773 | usb_phy_shutdown(hsotg->uphy); |
2775 | else if (hsotg->plat->phy_exit) | 2774 | else if (hsotg->plat && hsotg->plat->phy_exit) |
2776 | hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type); | 2775 | hsotg->plat->phy_exit(pdev, hsotg->plat->phy_type); |
2776 | else { | ||
2777 | phy_power_off(hsotg->phy); | ||
2778 | phy_exit(hsotg->phy); | ||
2779 | } | ||
2777 | } | 2780 | } |
2778 | 2781 | ||
2779 | /** | 2782 | /** |
@@ -2892,13 +2895,11 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget, | |||
2892 | return -ENODEV; | 2895 | return -ENODEV; |
2893 | 2896 | ||
2894 | /* all endpoints should be shutdown */ | 2897 | /* all endpoints should be shutdown */ |
2895 | for (ep = 0; ep < hsotg->num_of_eps; ep++) | 2898 | for (ep = 1; ep < hsotg->num_of_eps; ep++) |
2896 | s3c_hsotg_ep_disable(&hsotg->eps[ep].ep); | 2899 | s3c_hsotg_ep_disable(&hsotg->eps[ep].ep); |
2897 | 2900 | ||
2898 | spin_lock_irqsave(&hsotg->lock, flags); | 2901 | spin_lock_irqsave(&hsotg->lock, flags); |
2899 | 2902 | ||
2900 | s3c_hsotg_phy_disable(hsotg); | ||
2901 | |||
2902 | if (!driver) | 2903 | if (!driver) |
2903 | hsotg->driver = NULL; | 2904 | hsotg->driver = NULL; |
2904 | 2905 | ||
@@ -2941,7 +2942,6 @@ static int s3c_hsotg_pullup(struct usb_gadget *gadget, int is_on) | |||
2941 | s3c_hsotg_phy_enable(hsotg); | 2942 | s3c_hsotg_phy_enable(hsotg); |
2942 | s3c_hsotg_core_init(hsotg); | 2943 | s3c_hsotg_core_init(hsotg); |
2943 | } else { | 2944 | } else { |
2944 | s3c_hsotg_disconnect(hsotg); | ||
2945 | s3c_hsotg_phy_disable(hsotg); | 2945 | s3c_hsotg_phy_disable(hsotg); |
2946 | } | 2946 | } |
2947 | 2947 | ||
@@ -3441,13 +3441,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev) | |||
3441 | 3441 | ||
3442 | hsotg->irq = ret; | 3442 | hsotg->irq = ret; |
3443 | 3443 | ||
3444 | ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0, | ||
3445 | dev_name(dev), hsotg); | ||
3446 | if (ret < 0) { | ||
3447 | dev_err(dev, "cannot claim IRQ\n"); | ||
3448 | goto err_clk; | ||
3449 | } | ||
3450 | |||
3451 | dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq); | 3444 | dev_info(dev, "regs %p, irq %d\n", hsotg->regs, hsotg->irq); |
3452 | 3445 | ||
3453 | hsotg->gadget.max_speed = USB_SPEED_HIGH; | 3446 | hsotg->gadget.max_speed = USB_SPEED_HIGH; |
@@ -3488,9 +3481,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev) | |||
3488 | if (hsotg->phy && (phy_get_bus_width(phy) == 8)) | 3481 | if (hsotg->phy && (phy_get_bus_width(phy) == 8)) |
3489 | hsotg->phyif = GUSBCFG_PHYIF8; | 3482 | hsotg->phyif = GUSBCFG_PHYIF8; |
3490 | 3483 | ||
3491 | if (hsotg->phy) | ||
3492 | phy_init(hsotg->phy); | ||
3493 | |||
3494 | /* usb phy enable */ | 3484 | /* usb phy enable */ |
3495 | s3c_hsotg_phy_enable(hsotg); | 3485 | s3c_hsotg_phy_enable(hsotg); |
3496 | 3486 | ||
@@ -3498,6 +3488,17 @@ static int s3c_hsotg_probe(struct platform_device *pdev) | |||
3498 | s3c_hsotg_init(hsotg); | 3488 | s3c_hsotg_init(hsotg); |
3499 | s3c_hsotg_hw_cfg(hsotg); | 3489 | s3c_hsotg_hw_cfg(hsotg); |
3500 | 3490 | ||
3491 | ret = devm_request_irq(&pdev->dev, hsotg->irq, s3c_hsotg_irq, 0, | ||
3492 | dev_name(dev), hsotg); | ||
3493 | if (ret < 0) { | ||
3494 | s3c_hsotg_phy_disable(hsotg); | ||
3495 | clk_disable_unprepare(hsotg->clk); | ||
3496 | regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), | ||
3497 | hsotg->supplies); | ||
3498 | dev_err(dev, "cannot claim IRQ\n"); | ||
3499 | goto err_clk; | ||
3500 | } | ||
3501 | |||
3501 | /* hsotg->num_of_eps holds number of EPs other than ep0 */ | 3502 | /* hsotg->num_of_eps holds number of EPs other than ep0 */ |
3502 | 3503 | ||
3503 | if (hsotg->num_of_eps == 0) { | 3504 | if (hsotg->num_of_eps == 0) { |
@@ -3582,9 +3583,6 @@ static int s3c_hsotg_remove(struct platform_device *pdev) | |||
3582 | usb_gadget_unregister_driver(hsotg->driver); | 3583 | usb_gadget_unregister_driver(hsotg->driver); |
3583 | } | 3584 | } |
3584 | 3585 | ||
3585 | s3c_hsotg_phy_disable(hsotg); | ||
3586 | if (hsotg->phy) | ||
3587 | phy_exit(hsotg->phy); | ||
3588 | clk_disable_unprepare(hsotg->clk); | 3586 | clk_disable_unprepare(hsotg->clk); |
3589 | 3587 | ||
3590 | return 0; | 3588 | return 0; |
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index b769c1faaf03..9069984fe5cf 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c | |||
@@ -799,20 +799,21 @@ static int dwc3_remove(struct platform_device *pdev) | |||
799 | { | 799 | { |
800 | struct dwc3 *dwc = platform_get_drvdata(pdev); | 800 | struct dwc3 *dwc = platform_get_drvdata(pdev); |
801 | 801 | ||
802 | dwc3_debugfs_exit(dwc); | ||
803 | dwc3_core_exit_mode(dwc); | ||
804 | dwc3_event_buffers_cleanup(dwc); | ||
805 | dwc3_free_event_buffers(dwc); | ||
806 | |||
802 | usb_phy_set_suspend(dwc->usb2_phy, 1); | 807 | usb_phy_set_suspend(dwc->usb2_phy, 1); |
803 | usb_phy_set_suspend(dwc->usb3_phy, 1); | 808 | usb_phy_set_suspend(dwc->usb3_phy, 1); |
804 | phy_power_off(dwc->usb2_generic_phy); | 809 | phy_power_off(dwc->usb2_generic_phy); |
805 | phy_power_off(dwc->usb3_generic_phy); | 810 | phy_power_off(dwc->usb3_generic_phy); |
806 | 811 | ||
812 | dwc3_core_exit(dwc); | ||
813 | |||
807 | pm_runtime_put_sync(&pdev->dev); | 814 | pm_runtime_put_sync(&pdev->dev); |
808 | pm_runtime_disable(&pdev->dev); | 815 | pm_runtime_disable(&pdev->dev); |
809 | 816 | ||
810 | dwc3_debugfs_exit(dwc); | ||
811 | dwc3_core_exit_mode(dwc); | ||
812 | dwc3_event_buffers_cleanup(dwc); | ||
813 | dwc3_free_event_buffers(dwc); | ||
814 | dwc3_core_exit(dwc); | ||
815 | |||
816 | return 0; | 817 | return 0; |
817 | } | 818 | } |
818 | 819 | ||
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 9dcfbe7cd5f5..fc0de3753648 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c | |||
@@ -576,9 +576,9 @@ static int dwc3_omap_remove(struct platform_device *pdev) | |||
576 | if (omap->extcon_id_dev.edev) | 576 | if (omap->extcon_id_dev.edev) |
577 | extcon_unregister_interest(&omap->extcon_id_dev); | 577 | extcon_unregister_interest(&omap->extcon_id_dev); |
578 | dwc3_omap_disable_irqs(omap); | 578 | dwc3_omap_disable_irqs(omap); |
579 | device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core); | ||
579 | pm_runtime_put_sync(&pdev->dev); | 580 | pm_runtime_put_sync(&pdev->dev); |
580 | pm_runtime_disable(&pdev->dev); | 581 | pm_runtime_disable(&pdev->dev); |
581 | device_for_each_child(&pdev->dev, NULL, dwc3_omap_remove_core); | ||
582 | 582 | ||
583 | return 0; | 583 | return 0; |
584 | } | 584 | } |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 349cacc577d8..490a6ca00733 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -527,7 +527,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep, | |||
527 | dep->stream_capable = true; | 527 | dep->stream_capable = true; |
528 | } | 528 | } |
529 | 529 | ||
530 | if (usb_endpoint_xfer_isoc(desc)) | 530 | if (!usb_endpoint_xfer_control(desc)) |
531 | params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; | 531 | params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN; |
532 | 532 | ||
533 | /* | 533 | /* |
@@ -1225,16 +1225,17 @@ static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request, | |||
1225 | 1225 | ||
1226 | int ret; | 1226 | int ret; |
1227 | 1227 | ||
1228 | spin_lock_irqsave(&dwc->lock, flags); | ||
1228 | if (!dep->endpoint.desc) { | 1229 | if (!dep->endpoint.desc) { |
1229 | dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", | 1230 | dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n", |
1230 | request, ep->name); | 1231 | request, ep->name); |
1232 | spin_unlock_irqrestore(&dwc->lock, flags); | ||
1231 | return -ESHUTDOWN; | 1233 | return -ESHUTDOWN; |
1232 | } | 1234 | } |
1233 | 1235 | ||
1234 | dev_vdbg(dwc->dev, "queing request %p to %s length %d\n", | 1236 | dev_vdbg(dwc->dev, "queing request %p to %s length %d\n", |
1235 | request, ep->name, request->length); | 1237 | request, ep->name, request->length); |
1236 | 1238 | ||
1237 | spin_lock_irqsave(&dwc->lock, flags); | ||
1238 | ret = __dwc3_gadget_ep_queue(dep, req); | 1239 | ret = __dwc3_gadget_ep_queue(dep, req); |
1239 | spin_unlock_irqrestore(&dwc->lock, flags); | 1240 | spin_unlock_irqrestore(&dwc->lock, flags); |
1240 | 1241 | ||
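Note: the dwc3 hunk above moves spin_lock_irqsave() above the dep->endpoint.desc test, so the "is this endpoint still enabled?" check and the actual queueing happen under the same lock acquisition instead of racing with endpoint disable. The general check-under-lock shape, with invented names:

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_ep {
	spinlock_t lock;
	bool enabled;
	int queued;
};

/* Check state and act on it within a single lock acquisition. */
static int example_ep_queue(struct example_ep *ep)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ep->lock, flags);
	if (!ep->enabled) {			/* re-checked under the lock */
		spin_unlock_irqrestore(&ep->lock, flags);
		return -ESHUTDOWN;
	}
	ep->queued++;
	ret = 0;
	spin_unlock_irqrestore(&ep->lock, flags);

	return ret;
}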
@@ -2041,12 +2042,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, | |||
2041 | dwc3_endpoint_transfer_complete(dwc, dep, event); | 2042 | dwc3_endpoint_transfer_complete(dwc, dep, event); |
2042 | break; | 2043 | break; |
2043 | case DWC3_DEPEVT_XFERINPROGRESS: | 2044 | case DWC3_DEPEVT_XFERINPROGRESS: |
2044 | if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) { | ||
2045 | dev_dbg(dwc->dev, "%s is not an Isochronous endpoint\n", | ||
2046 | dep->name); | ||
2047 | return; | ||
2048 | } | ||
2049 | |||
2050 | dwc3_endpoint_transfer_complete(dwc, dep, event); | 2045 | dwc3_endpoint_transfer_complete(dwc, dep, event); |
2051 | break; | 2046 | break; |
2052 | case DWC3_DEPEVT_XFERNOTREADY: | 2047 | case DWC3_DEPEVT_XFERNOTREADY: |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index dc30adf15a01..0dc3552d1360 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -155,6 +155,12 @@ struct ffs_io_data { | |||
155 | struct usb_request *req; | 155 | struct usb_request *req; |
156 | }; | 156 | }; |
157 | 157 | ||
158 | struct ffs_desc_helper { | ||
159 | struct ffs_data *ffs; | ||
160 | unsigned interfaces_count; | ||
161 | unsigned eps_count; | ||
162 | }; | ||
163 | |||
158 | static int __must_check ffs_epfiles_create(struct ffs_data *ffs); | 164 | static int __must_check ffs_epfiles_create(struct ffs_data *ffs); |
159 | static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count); | 165 | static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count); |
160 | 166 | ||
@@ -1830,7 +1836,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type, | |||
1830 | u8 *valuep, struct usb_descriptor_header *desc, | 1836 | u8 *valuep, struct usb_descriptor_header *desc, |
1831 | void *priv) | 1837 | void *priv) |
1832 | { | 1838 | { |
1833 | struct ffs_data *ffs = priv; | 1839 | struct ffs_desc_helper *helper = priv; |
1840 | struct usb_endpoint_descriptor *d; | ||
1834 | 1841 | ||
1835 | ENTER(); | 1842 | ENTER(); |
1836 | 1843 | ||
@@ -1844,8 +1851,8 @@ static int __ffs_data_do_entity(enum ffs_entity_type type, | |||
1844 | * encountered interface "n" then there are at least | 1851 | * encountered interface "n" then there are at least |
1845 | * "n+1" interfaces. | 1852 | * "n+1" interfaces. |
1846 | */ | 1853 | */ |
1847 | if (*valuep >= ffs->interfaces_count) | 1854 | if (*valuep >= helper->interfaces_count) |
1848 | ffs->interfaces_count = *valuep + 1; | 1855 | helper->interfaces_count = *valuep + 1; |
1849 | break; | 1856 | break; |
1850 | 1857 | ||
1851 | case FFS_STRING: | 1858 | case FFS_STRING: |
@@ -1853,14 +1860,22 @@ static int __ffs_data_do_entity(enum ffs_entity_type type, | |||
1853 | * Strings are indexed from 1 (0 is magic ;) reserved | 1860 | * Strings are indexed from 1 (0 is magic ;) reserved |
1854 | * for languages list or some such) | 1861 | * for languages list or some such) |
1855 | */ | 1862 | */ |
1856 | if (*valuep > ffs->strings_count) | 1863 | if (*valuep > helper->ffs->strings_count) |
1857 | ffs->strings_count = *valuep; | 1864 | helper->ffs->strings_count = *valuep; |
1858 | break; | 1865 | break; |
1859 | 1866 | ||
1860 | case FFS_ENDPOINT: | 1867 | case FFS_ENDPOINT: |
1861 | /* Endpoints are indexed from 1 as well. */ | 1868 | d = (void *)desc; |
1862 | if ((*valuep & USB_ENDPOINT_NUMBER_MASK) > ffs->eps_count) | 1869 | helper->eps_count++; |
1863 | ffs->eps_count = (*valuep & USB_ENDPOINT_NUMBER_MASK); | 1870 | if (helper->eps_count >= 15) |
1871 | return -EINVAL; | ||
1872 | /* Check if descriptors for any speed were already parsed */ | ||
1873 | if (!helper->ffs->eps_count && !helper->ffs->interfaces_count) | ||
1874 | helper->ffs->eps_addrmap[helper->eps_count] = | ||
1875 | d->bEndpointAddress; | ||
1876 | else if (helper->ffs->eps_addrmap[helper->eps_count] != | ||
1877 | d->bEndpointAddress) | ||
1878 | return -EINVAL; | ||
1864 | break; | 1879 | break; |
1865 | } | 1880 | } |
1866 | 1881 | ||
@@ -2053,6 +2068,7 @@ static int __ffs_data_got_descs(struct ffs_data *ffs, | |||
2053 | char *data = _data, *raw_descs; | 2068 | char *data = _data, *raw_descs; |
2054 | unsigned os_descs_count = 0, counts[3], flags; | 2069 | unsigned os_descs_count = 0, counts[3], flags; |
2055 | int ret = -EINVAL, i; | 2070 | int ret = -EINVAL, i; |
2071 | struct ffs_desc_helper helper; | ||
2056 | 2072 | ||
2057 | ENTER(); | 2073 | ENTER(); |
2058 | 2074 | ||
@@ -2101,13 +2117,29 @@ static int __ffs_data_got_descs(struct ffs_data *ffs, | |||
2101 | 2117 | ||
2102 | /* Read descriptors */ | 2118 | /* Read descriptors */ |
2103 | raw_descs = data; | 2119 | raw_descs = data; |
2120 | helper.ffs = ffs; | ||
2104 | for (i = 0; i < 3; ++i) { | 2121 | for (i = 0; i < 3; ++i) { |
2105 | if (!counts[i]) | 2122 | if (!counts[i]) |
2106 | continue; | 2123 | continue; |
2124 | helper.interfaces_count = 0; | ||
2125 | helper.eps_count = 0; | ||
2107 | ret = ffs_do_descs(counts[i], data, len, | 2126 | ret = ffs_do_descs(counts[i], data, len, |
2108 | __ffs_data_do_entity, ffs); | 2127 | __ffs_data_do_entity, &helper); |
2109 | if (ret < 0) | 2128 | if (ret < 0) |
2110 | goto error; | 2129 | goto error; |
2130 | if (!ffs->eps_count && !ffs->interfaces_count) { | ||
2131 | ffs->eps_count = helper.eps_count; | ||
2132 | ffs->interfaces_count = helper.interfaces_count; | ||
2133 | } else { | ||
2134 | if (ffs->eps_count != helper.eps_count) { | ||
2135 | ret = -EINVAL; | ||
2136 | goto error; | ||
2137 | } | ||
2138 | if (ffs->interfaces_count != helper.interfaces_count) { | ||
2139 | ret = -EINVAL; | ||
2140 | goto error; | ||
2141 | } | ||
2142 | } | ||
2111 | data += ret; | 2143 | data += ret; |
2112 | len -= ret; | 2144 | len -= ret; |
2113 | } | 2145 | } |
@@ -2342,9 +2374,18 @@ static void ffs_event_add(struct ffs_data *ffs, | |||
2342 | spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); | 2374 | spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags); |
2343 | } | 2375 | } |
2344 | 2376 | ||
2345 | |||
2346 | /* Bind/unbind USB function hooks *******************************************/ | 2377 | /* Bind/unbind USB function hooks *******************************************/ |
2347 | 2378 | ||
2379 | static int ffs_ep_addr2idx(struct ffs_data *ffs, u8 endpoint_address) | ||
2380 | { | ||
2381 | int i; | ||
2382 | |||
2383 | for (i = 1; i < ARRAY_SIZE(ffs->eps_addrmap); ++i) | ||
2384 | if (ffs->eps_addrmap[i] == endpoint_address) | ||
2385 | return i; | ||
2386 | return -ENOENT; | ||
2387 | } | ||
2388 | |||
2348 | static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, | 2389 | static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, |
2349 | struct usb_descriptor_header *desc, | 2390 | struct usb_descriptor_header *desc, |
2350 | void *priv) | 2391 | void *priv) |
@@ -2378,7 +2419,10 @@ static int __ffs_func_bind_do_descs(enum ffs_entity_type type, u8 *valuep, | |||
2378 | if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) | 2419 | if (!desc || desc->bDescriptorType != USB_DT_ENDPOINT) |
2379 | return 0; | 2420 | return 0; |
2380 | 2421 | ||
2381 | idx = (ds->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK) - 1; | 2422 | idx = ffs_ep_addr2idx(func->ffs, ds->bEndpointAddress) - 1; |
2423 | if (idx < 0) | ||
2424 | return idx; | ||
2425 | |||
2382 | ffs_ep = func->eps + idx; | 2426 | ffs_ep = func->eps + idx; |
2383 | 2427 | ||
2384 | if (unlikely(ffs_ep->descs[ep_desc_id])) { | 2428 | if (unlikely(ffs_ep->descs[ep_desc_id])) { |
diff --git a/drivers/usb/gadget/function/u_fs.h b/drivers/usb/gadget/function/u_fs.h index 63d6e71569c1..d48897e8ffeb 100644 --- a/drivers/usb/gadget/function/u_fs.h +++ b/drivers/usb/gadget/function/u_fs.h | |||
@@ -224,6 +224,8 @@ struct ffs_data { | |||
224 | void *ms_os_descs_ext_prop_name_avail; | 224 | void *ms_os_descs_ext_prop_name_avail; |
225 | void *ms_os_descs_ext_prop_data_avail; | 225 | void *ms_os_descs_ext_prop_data_avail; |
226 | 226 | ||
227 | u8 eps_addrmap[15]; | ||
228 | |||
227 | unsigned short strings_count; | 229 | unsigned short strings_count; |
228 | unsigned short interfaces_count; | 230 | unsigned short interfaces_count; |
229 | unsigned short eps_count; | 231 | unsigned short eps_count; |
diff --git a/drivers/usb/gadget/udc/fusb300_udc.h b/drivers/usb/gadget/udc/fusb300_udc.h index ae811d8d38b4..ad39f892d200 100644 --- a/drivers/usb/gadget/udc/fusb300_udc.h +++ b/drivers/usb/gadget/udc/fusb300_udc.h | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | 13 | ||
14 | #ifndef __FUSB300_UDC_H__ | 14 | #ifndef __FUSB300_UDC_H__ |
15 | #define __FUSB300_UDC_H_ | 15 | #define __FUSB300_UDC_H__ |
16 | 16 | ||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | 18 | ||
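Note: the fusb300_udc.h fix corrects a mismatched include guard: the header tested __FUSB300_UDC_H__ but defined __FUSB300_UDC_H_ (one underscore short), so the guard never became true and the header could be pulled in twice. The correct shape, with a placeholder name:

/* example_header.h - #ifndef, #define and #endif must all use the same token */
#ifndef __EXAMPLE_HEADER_H__
#define __EXAMPLE_HEADER_H__

/* declarations go here */

#endif /* __EXAMPLE_HEADER_H__ */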
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index f4eac113690e..2e95715b50c0 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c | |||
@@ -3320,7 +3320,7 @@ static void handle_stat1_irqs(struct net2280 *dev, u32 stat) | |||
3320 | if (stat & tmp) { | 3320 | if (stat & tmp) { |
3321 | writel(tmp, &dev->regs->irqstat1); | 3321 | writel(tmp, &dev->regs->irqstat1); |
3322 | if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) && | 3322 | if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) && |
3323 | (readl(&dev->usb->usbstat) & mask)) || | 3323 | ((readl(&dev->usb->usbstat) & mask) == 0)) || |
3324 | ((readl(&dev->usb->usbctl) & | 3324 | ((readl(&dev->usb->usbctl) & |
3325 | BIT(VBUS_PIN)) == 0)) && | 3325 | BIT(VBUS_PIN)) == 0)) && |
3326 | (dev->gadget.speed != USB_SPEED_UNKNOWN)) { | 3326 | (dev->gadget.speed != USB_SPEED_UNKNOWN)) { |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index aa79e8749040..69aece31143a 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -468,7 +468,8 @@ static void xhci_hub_report_usb2_link_state(u32 *status, u32 status_reg) | |||
468 | } | 468 | } |
469 | 469 | ||
470 | /* Updates Link Status for super Speed port */ | 470 | /* Updates Link Status for super Speed port */ |
471 | static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg) | 471 | static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, |
472 | u32 *status, u32 status_reg) | ||
472 | { | 473 | { |
473 | u32 pls = status_reg & PORT_PLS_MASK; | 474 | u32 pls = status_reg & PORT_PLS_MASK; |
474 | 475 | ||
@@ -507,7 +508,8 @@ static void xhci_hub_report_usb3_link_state(u32 *status, u32 status_reg) | |||
507 | * in which sometimes the port enters compliance mode | 508 | * in which sometimes the port enters compliance mode |
508 | * caused by a delay on the host-device negotiation. | 509 | * caused by a delay on the host-device negotiation. |
509 | */ | 510 | */ |
510 | if (pls == USB_SS_PORT_LS_COMP_MOD) | 511 | if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && |
512 | (pls == USB_SS_PORT_LS_COMP_MOD)) | ||
511 | pls |= USB_PORT_STAT_CONNECTION; | 513 | pls |= USB_PORT_STAT_CONNECTION; |
512 | } | 514 | } |
513 | 515 | ||
@@ -666,7 +668,7 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, | |||
666 | } | 668 | } |
667 | /* Update Port Link State */ | 669 | /* Update Port Link State */ |
668 | if (hcd->speed == HCD_USB3) { | 670 | if (hcd->speed == HCD_USB3) { |
669 | xhci_hub_report_usb3_link_state(&status, raw_port_status); | 671 | xhci_hub_report_usb3_link_state(xhci, &status, raw_port_status); |
670 | /* | 672 | /* |
671 | * Verify if all USB3 Ports Have entered U0 already. | 673 | * Verify if all USB3 Ports Have entered U0 already. |
672 | * Delete Compliance Mode Timer if so. | 674 | * Delete Compliance Mode Timer if so. |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 8056d90690ee..8936211b161d 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -1812,6 +1812,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
1812 | 1812 | ||
1813 | if (xhci->lpm_command) | 1813 | if (xhci->lpm_command) |
1814 | xhci_free_command(xhci, xhci->lpm_command); | 1814 | xhci_free_command(xhci, xhci->lpm_command); |
1815 | xhci->lpm_command = NULL; | ||
1815 | if (xhci->cmd_ring) | 1816 | if (xhci->cmd_ring) |
1816 | xhci_ring_free(xhci, xhci->cmd_ring); | 1817 | xhci_ring_free(xhci, xhci->cmd_ring); |
1817 | xhci->cmd_ring = NULL; | 1818 | xhci->cmd_ring = NULL; |
@@ -1819,7 +1820,7 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) | |||
1819 | xhci_cleanup_command_queue(xhci); | 1820 | xhci_cleanup_command_queue(xhci); |
1820 | 1821 | ||
1821 | num_ports = HCS_MAX_PORTS(xhci->hcs_params1); | 1822 | num_ports = HCS_MAX_PORTS(xhci->hcs_params1); |
1822 | for (i = 0; i < num_ports; i++) { | 1823 | for (i = 0; i < num_ports && xhci->rh_bw; i++) { |
1823 | struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; | 1824 | struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table; |
1824 | for (j = 0; j < XHCI_MAX_INTERVAL; j++) { | 1825 | for (j = 0; j < XHCI_MAX_INTERVAL; j++) { |
1825 | struct list_head *ep = &bwt->interval_bw[j].endpoints; | 1826 | struct list_head *ep = &bwt->interval_bw[j].endpoints; |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index c020b094fe7d..c4a8fca8ae93 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -3971,13 +3971,21 @@ static int __maybe_unused xhci_change_max_exit_latency(struct xhci_hcd *xhci, | |||
3971 | int ret; | 3971 | int ret; |
3972 | 3972 | ||
3973 | spin_lock_irqsave(&xhci->lock, flags); | 3973 | spin_lock_irqsave(&xhci->lock, flags); |
3974 | if (max_exit_latency == xhci->devs[udev->slot_id]->current_mel) { | 3974 | |
3975 | virt_dev = xhci->devs[udev->slot_id]; | ||
3976 | |||
3977 | /* | ||
3978 | * virt_dev might not exists yet if xHC resumed from hibernate (S4) and | ||
3979 | * xHC was re-initialized. Exit latency will be set later after | ||
3980 | * hub_port_finish_reset() is done and xhci->devs[] are re-allocated | ||
3981 | */ | ||
3982 | |||
3983 | if (!virt_dev || max_exit_latency == virt_dev->current_mel) { | ||
3975 | spin_unlock_irqrestore(&xhci->lock, flags); | 3984 | spin_unlock_irqrestore(&xhci->lock, flags); |
3976 | return 0; | 3985 | return 0; |
3977 | } | 3986 | } |
3978 | 3987 | ||
3979 | /* Attempt to issue an Evaluate Context command to change the MEL. */ | 3988 | /* Attempt to issue an Evaluate Context command to change the MEL. */ |
3980 | virt_dev = xhci->devs[udev->slot_id]; | ||
3981 | command = xhci->lpm_command; | 3989 | command = xhci->lpm_command; |
3982 | ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); | 3990 | ctrl_ctx = xhci_get_input_control_ctx(xhci, command->in_ctx); |
3983 | if (!ctrl_ctx) { | 3991 | if (!ctrl_ctx) { |
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index 47ae6455d073..3ee133f675ab 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c | |||
@@ -39,6 +39,7 @@ struct cppi41_dma_channel { | |||
39 | u32 transferred; | 39 | u32 transferred; |
40 | u32 packet_sz; | 40 | u32 packet_sz; |
41 | struct list_head tx_check; | 41 | struct list_head tx_check; |
42 | int tx_zlp; | ||
42 | }; | 43 | }; |
43 | 44 | ||
44 | #define MUSB_DMA_NUM_CHANNELS 15 | 45 | #define MUSB_DMA_NUM_CHANNELS 15 |
@@ -122,6 +123,8 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel) | |||
122 | { | 123 | { |
123 | struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; | 124 | struct musb_hw_ep *hw_ep = cppi41_channel->hw_ep; |
124 | struct musb *musb = hw_ep->musb; | 125 | struct musb *musb = hw_ep->musb; |
126 | void __iomem *epio = hw_ep->regs; | ||
127 | u16 csr; | ||
125 | 128 | ||
126 | if (!cppi41_channel->prog_len || | 129 | if (!cppi41_channel->prog_len || |
127 | (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) { | 130 | (cppi41_channel->channel.status == MUSB_DMA_STATUS_FREE)) { |
@@ -131,15 +134,24 @@ static void cppi41_trans_done(struct cppi41_dma_channel *cppi41_channel) | |||
131 | cppi41_channel->transferred; | 134 | cppi41_channel->transferred; |
132 | cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE; | 135 | cppi41_channel->channel.status = MUSB_DMA_STATUS_FREE; |
133 | cppi41_channel->channel.rx_packet_done = true; | 136 | cppi41_channel->channel.rx_packet_done = true; |
137 | |||
138 | /* | ||
139 | * transmit ZLP using PIO mode for transfers which size is | ||
140 | * multiple of EP packet size. | ||
141 | */ | ||
142 | if (cppi41_channel->tx_zlp && (cppi41_channel->transferred % | ||
143 | cppi41_channel->packet_sz) == 0) { | ||
144 | musb_ep_select(musb->mregs, hw_ep->epnum); | ||
145 | csr = MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY; | ||
146 | musb_writew(epio, MUSB_TXCSR, csr); | ||
147 | } | ||
134 | musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx); | 148 | musb_dma_completion(musb, hw_ep->epnum, cppi41_channel->is_tx); |
135 | } else { | 149 | } else { |
136 | /* next iteration, reload */ | 150 | /* next iteration, reload */ |
137 | struct dma_chan *dc = cppi41_channel->dc; | 151 | struct dma_chan *dc = cppi41_channel->dc; |
138 | struct dma_async_tx_descriptor *dma_desc; | 152 | struct dma_async_tx_descriptor *dma_desc; |
139 | enum dma_transfer_direction direction; | 153 | enum dma_transfer_direction direction; |
140 | u16 csr; | ||
141 | u32 remain_bytes; | 154 | u32 remain_bytes; |
142 | void __iomem *epio = cppi41_channel->hw_ep->regs; | ||
143 | 155 | ||
144 | cppi41_channel->buf_addr += cppi41_channel->packet_sz; | 156 | cppi41_channel->buf_addr += cppi41_channel->packet_sz; |
145 | 157 | ||
@@ -363,6 +375,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel, | |||
363 | cppi41_channel->total_len = len; | 375 | cppi41_channel->total_len = len; |
364 | cppi41_channel->transferred = 0; | 376 | cppi41_channel->transferred = 0; |
365 | cppi41_channel->packet_sz = packet_sz; | 377 | cppi41_channel->packet_sz = packet_sz; |
378 | cppi41_channel->tx_zlp = (cppi41_channel->is_tx && mode) ? 1 : 0; | ||
366 | 379 | ||
367 | /* | 380 | /* |
368 | * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more | 381 | * Due to AM335x' Advisory 1.0.13 we are not allowed to transfer more |
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c index c42bdf0c4a1f..00972eca04e7 100644 --- a/drivers/usb/phy/phy-mxs-usb.c +++ b/drivers/usb/phy/phy-mxs-usb.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2012-2013 Freescale Semiconductor, Inc. | 2 | * Copyright 2012-2014 Freescale Semiconductor, Inc. |
3 | * Copyright (C) 2012 Marek Vasut <marex@denx.de> | 3 | * Copyright (C) 2012 Marek Vasut <marex@denx.de> |
4 | * on behalf of DENX Software Engineering GmbH | 4 | * on behalf of DENX Software Engineering GmbH |
5 | * | 5 | * |
@@ -125,7 +125,13 @@ static const struct mxs_phy_data imx6sl_phy_data = { | |||
125 | MXS_PHY_NEED_IP_FIX, | 125 | MXS_PHY_NEED_IP_FIX, |
126 | }; | 126 | }; |
127 | 127 | ||
128 | static const struct mxs_phy_data imx6sx_phy_data = { | ||
129 | .flags = MXS_PHY_DISCONNECT_LINE_WITHOUT_VBUS | | ||
130 | MXS_PHY_NEED_IP_FIX, | ||
131 | }; | ||
132 | |||
128 | static const struct of_device_id mxs_phy_dt_ids[] = { | 133 | static const struct of_device_id mxs_phy_dt_ids[] = { |
134 | { .compatible = "fsl,imx6sx-usbphy", .data = &imx6sx_phy_data, }, | ||
129 | { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, }, | 135 | { .compatible = "fsl,imx6sl-usbphy", .data = &imx6sl_phy_data, }, |
130 | { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, }, | 136 | { .compatible = "fsl,imx6q-usbphy", .data = &imx6q_phy_data, }, |
131 | { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, }, | 137 | { .compatible = "fsl,imx23-usbphy", .data = &imx23_phy_data, }, |
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 13b4fa287da8..886f1807a67b 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c | |||
@@ -878,8 +878,8 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy, | |||
878 | return -ENOMEM; | 878 | return -ENOMEM; |
879 | } | 879 | } |
880 | 880 | ||
881 | tegra_phy->config = devm_kzalloc(&pdev->dev, | 881 | tegra_phy->config = devm_kzalloc(&pdev->dev, sizeof(*config), |
882 | sizeof(*tegra_phy->config), GFP_KERNEL); | 882 | GFP_KERNEL); |
883 | if (!tegra_phy->config) { | 883 | if (!tegra_phy->config) { |
884 | dev_err(&pdev->dev, | 884 | dev_err(&pdev->dev, |
885 | "unable to allocate memory for USB UTMIP config\n"); | 885 | "unable to allocate memory for USB UTMIP config\n"); |
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 4fd36530bfa3..b0c97a3f1bfe 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -108,19 +108,45 @@ static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe) | |||
108 | return list_first_entry(&pipe->list, struct usbhs_pkt, node); | 108 | return list_first_entry(&pipe->list, struct usbhs_pkt, node); |
109 | } | 109 | } |
110 | 110 | ||
111 | static void usbhsf_fifo_clear(struct usbhs_pipe *pipe, | ||
112 | struct usbhs_fifo *fifo); | ||
113 | static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe, | ||
114 | struct usbhs_fifo *fifo); | ||
115 | static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo, | ||
116 | struct usbhs_pkt *pkt); | ||
117 | #define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) | ||
118 | #define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) | ||
119 | static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map); | ||
111 | struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) | 120 | struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) |
112 | { | 121 | { |
113 | struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); | 122 | struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe); |
123 | struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe); | ||
114 | unsigned long flags; | 124 | unsigned long flags; |
115 | 125 | ||
116 | /******************** spin lock ********************/ | 126 | /******************** spin lock ********************/ |
117 | usbhs_lock(priv, flags); | 127 | usbhs_lock(priv, flags); |
118 | 128 | ||
129 | usbhs_pipe_disable(pipe); | ||
130 | |||
119 | if (!pkt) | 131 | if (!pkt) |
120 | pkt = __usbhsf_pkt_get(pipe); | 132 | pkt = __usbhsf_pkt_get(pipe); |
121 | 133 | ||
122 | if (pkt) | 134 | if (pkt) { |
135 | struct dma_chan *chan = NULL; | ||
136 | |||
137 | if (fifo) | ||
138 | chan = usbhsf_dma_chan_get(fifo, pkt); | ||
139 | if (chan) { | ||
140 | dmaengine_terminate_all(chan); | ||
141 | usbhsf_fifo_clear(pipe, fifo); | ||
142 | usbhsf_dma_unmap(pkt); | ||
143 | } | ||
144 | |||
123 | __usbhsf_pkt_del(pkt); | 145 | __usbhsf_pkt_del(pkt); |
146 | } | ||
147 | |||
148 | if (fifo) | ||
149 | usbhsf_fifo_unselect(pipe, fifo); | ||
124 | 150 | ||
125 | usbhs_unlock(priv, flags); | 151 | usbhs_unlock(priv, flags); |
126 | /******************** spin unlock ******************/ | 152 | /******************** spin unlock ******************/ |
@@ -544,6 +570,7 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done) | |||
544 | usbhsf_send_terminator(pipe, fifo); | 570 | usbhsf_send_terminator(pipe, fifo); |
545 | 571 | ||
546 | usbhsf_tx_irq_ctrl(pipe, !*is_done); | 572 | usbhsf_tx_irq_ctrl(pipe, !*is_done); |
573 | usbhs_pipe_running(pipe, !*is_done); | ||
547 | usbhs_pipe_enable(pipe); | 574 | usbhs_pipe_enable(pipe); |
548 | 575 | ||
549 | dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n", | 576 | dev_dbg(dev, " send %d (%d/ %d/ %d/ %d)\n", |
@@ -570,12 +597,21 @@ usbhs_fifo_write_busy: | |||
570 | * retry in interrupt | 597 | * retry in interrupt |
571 | */ | 598 | */ |
572 | usbhsf_tx_irq_ctrl(pipe, 1); | 599 | usbhsf_tx_irq_ctrl(pipe, 1); |
600 | usbhs_pipe_running(pipe, 1); | ||
573 | 601 | ||
574 | return ret; | 602 | return ret; |
575 | } | 603 | } |
576 | 604 | ||
605 | static int usbhsf_pio_prepare_push(struct usbhs_pkt *pkt, int *is_done) | ||
606 | { | ||
607 | if (usbhs_pipe_is_running(pkt->pipe)) | ||
608 | return 0; | ||
609 | |||
610 | return usbhsf_pio_try_push(pkt, is_done); | ||
611 | } | ||
612 | |||
577 | struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = { | 613 | struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = { |
578 | .prepare = usbhsf_pio_try_push, | 614 | .prepare = usbhsf_pio_prepare_push, |
579 | .try_run = usbhsf_pio_try_push, | 615 | .try_run = usbhsf_pio_try_push, |
580 | }; | 616 | }; |
581 | 617 | ||
@@ -589,6 +625,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) | |||
589 | if (usbhs_pipe_is_busy(pipe)) | 625 | if (usbhs_pipe_is_busy(pipe)) |
590 | return 0; | 626 | return 0; |
591 | 627 | ||
628 | if (usbhs_pipe_is_running(pipe)) | ||
629 | return 0; | ||
630 | |||
592 | /* | 631 | /* |
593 | * pipe enable to prepare packet receive | 632 | * pipe enable to prepare packet receive |
594 | */ | 633 | */ |
@@ -597,6 +636,7 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done) | |||
597 | 636 | ||
598 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length); | 637 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length); |
599 | usbhs_pipe_enable(pipe); | 638 | usbhs_pipe_enable(pipe); |
639 | usbhs_pipe_running(pipe, 1); | ||
600 | usbhsf_rx_irq_ctrl(pipe, 1); | 640 | usbhsf_rx_irq_ctrl(pipe, 1); |
601 | 641 | ||
602 | return 0; | 642 | return 0; |
@@ -642,6 +682,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done) | |||
642 | (total_len < maxp)) { /* short packet */ | 682 | (total_len < maxp)) { /* short packet */ |
643 | *is_done = 1; | 683 | *is_done = 1; |
644 | usbhsf_rx_irq_ctrl(pipe, 0); | 684 | usbhsf_rx_irq_ctrl(pipe, 0); |
685 | usbhs_pipe_running(pipe, 0); | ||
645 | usbhs_pipe_disable(pipe); /* disable pipe first */ | 686 | usbhs_pipe_disable(pipe); /* disable pipe first */ |
646 | } | 687 | } |
647 | 688 | ||
@@ -763,8 +804,6 @@ static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe, | |||
763 | usbhs_bset(priv, fifo->sel, DREQE, dreqe); | 804 | usbhs_bset(priv, fifo->sel, DREQE, dreqe); |
764 | } | 805 | } |
765 | 806 | ||
766 | #define usbhsf_dma_map(p) __usbhsf_dma_map_ctrl(p, 1) | ||
767 | #define usbhsf_dma_unmap(p) __usbhsf_dma_map_ctrl(p, 0) | ||
768 | static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) | 807 | static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map) |
769 | { | 808 | { |
770 | struct usbhs_pipe *pipe = pkt->pipe; | 809 | struct usbhs_pipe *pipe = pkt->pipe; |
@@ -805,6 +844,7 @@ static void xfer_work(struct work_struct *work) | |||
805 | dev_dbg(dev, " %s %d (%d/ %d)\n", | 844 | dev_dbg(dev, " %s %d (%d/ %d)\n", |
806 | fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); | 845 | fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); |
807 | 846 | ||
847 | usbhs_pipe_running(pipe, 1); | ||
808 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); | 848 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); |
809 | usbhs_pipe_enable(pipe); | 849 | usbhs_pipe_enable(pipe); |
810 | usbhsf_dma_start(pipe, fifo); | 850 | usbhsf_dma_start(pipe, fifo); |
@@ -836,6 +876,10 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) | |||
836 | if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ | 876 | if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ |
837 | goto usbhsf_pio_prepare_push; | 877 | goto usbhsf_pio_prepare_push; |
838 | 878 | ||
879 | /* return at this time if the pipe is running */ | ||
880 | if (usbhs_pipe_is_running(pipe)) | ||
881 | return 0; | ||
882 | |||
839 | /* get enable DMA fifo */ | 883 | /* get enable DMA fifo */ |
840 | fifo = usbhsf_get_dma_fifo(priv, pkt); | 884 | fifo = usbhsf_get_dma_fifo(priv, pkt); |
841 | if (!fifo) | 885 | if (!fifo) |
@@ -869,15 +913,29 @@ usbhsf_pio_prepare_push: | |||
869 | static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done) | 913 | static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done) |
870 | { | 914 | { |
871 | struct usbhs_pipe *pipe = pkt->pipe; | 915 | struct usbhs_pipe *pipe = pkt->pipe; |
916 | int is_short = pkt->trans % usbhs_pipe_get_maxpacket(pipe); | ||
917 | |||
918 | pkt->actual += pkt->trans; | ||
872 | 919 | ||
873 | pkt->actual = pkt->trans; | 920 | if (pkt->actual < pkt->length) |
921 | *is_done = 0; /* there are remainder data */ | ||
922 | else if (is_short) | ||
923 | *is_done = 1; /* short packet */ | ||
924 | else | ||
925 | *is_done = !pkt->zero; /* send zero packet? */ | ||
874 | 926 | ||
875 | *is_done = !pkt->zero; /* send zero packet ? */ | 927 | usbhs_pipe_running(pipe, !*is_done); |
876 | 928 | ||
877 | usbhsf_dma_stop(pipe, pipe->fifo); | 929 | usbhsf_dma_stop(pipe, pipe->fifo); |
878 | usbhsf_dma_unmap(pkt); | 930 | usbhsf_dma_unmap(pkt); |
879 | usbhsf_fifo_unselect(pipe, pipe->fifo); | 931 | usbhsf_fifo_unselect(pipe, pipe->fifo); |
880 | 932 | ||
933 | if (!*is_done) { | ||
934 | /* change handler to PIO */ | ||
935 | pkt->handler = &usbhs_fifo_pio_push_handler; | ||
936 | return pkt->handler->try_run(pkt, is_done); | ||
937 | } | ||
938 | |||
881 | return 0; | 939 | return 0; |
882 | } | 940 | } |
883 | 941 | ||
@@ -972,8 +1030,10 @@ static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done) | |||
972 | if ((pkt->actual == pkt->length) || /* receive all data */ | 1030 | if ((pkt->actual == pkt->length) || /* receive all data */ |
973 | (pkt->trans < maxp)) { /* short packet */ | 1031 | (pkt->trans < maxp)) { /* short packet */ |
974 | *is_done = 1; | 1032 | *is_done = 1; |
1033 | usbhs_pipe_running(pipe, 0); | ||
975 | } else { | 1034 | } else { |
976 | /* re-enable */ | 1035 | /* re-enable */ |
1036 | usbhs_pipe_running(pipe, 0); | ||
977 | usbhsf_prepare_pop(pkt, is_done); | 1037 | usbhsf_prepare_pop(pkt, is_done); |
978 | } | 1038 | } |
979 | 1039 | ||
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c index 6a030b931a3b..9a705b15b3a1 100644 --- a/drivers/usb/renesas_usbhs/mod.c +++ b/drivers/usb/renesas_usbhs/mod.c | |||
@@ -213,7 +213,10 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv, | |||
213 | { | 213 | { |
214 | struct usbhs_mod *mod = usbhs_mod_get_current(priv); | 214 | struct usbhs_mod *mod = usbhs_mod_get_current(priv); |
215 | u16 intenb0, intenb1; | 215 | u16 intenb0, intenb1; |
216 | unsigned long flags; | ||
216 | 217 | ||
218 | /******************** spin lock ********************/ | ||
219 | usbhs_lock(priv, flags); | ||
217 | state->intsts0 = usbhs_read(priv, INTSTS0); | 220 | state->intsts0 = usbhs_read(priv, INTSTS0); |
218 | state->intsts1 = usbhs_read(priv, INTSTS1); | 221 | state->intsts1 = usbhs_read(priv, INTSTS1); |
219 | 222 | ||
@@ -229,6 +232,8 @@ static int usbhs_status_get_each_irq(struct usbhs_priv *priv, | |||
229 | state->bempsts &= mod->irq_bempsts; | 232 | state->bempsts &= mod->irq_bempsts; |
230 | state->brdysts &= mod->irq_brdysts; | 233 | state->brdysts &= mod->irq_brdysts; |
231 | } | 234 | } |
235 | usbhs_unlock(priv, flags); | ||
236 | /******************** spin unlock ******************/ | ||
232 | 237 | ||
233 | /* | 238 | /* |
234 | * Check whether the irq enable registers and the irq status are set | 239 | * Check whether the irq enable registers and the irq status are set |
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c index 75fbcf6b102e..040bcefcb040 100644 --- a/drivers/usb/renesas_usbhs/pipe.c +++ b/drivers/usb/renesas_usbhs/pipe.c | |||
@@ -578,6 +578,19 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe) | |||
578 | return usbhsp_flags_has(pipe, IS_DIR_HOST); | 578 | return usbhsp_flags_has(pipe, IS_DIR_HOST); |
579 | } | 579 | } |
580 | 580 | ||
581 | int usbhs_pipe_is_running(struct usbhs_pipe *pipe) | ||
582 | { | ||
583 | return usbhsp_flags_has(pipe, IS_RUNNING); | ||
584 | } | ||
585 | |||
586 | void usbhs_pipe_running(struct usbhs_pipe *pipe, int running) | ||
587 | { | ||
588 | if (running) | ||
589 | usbhsp_flags_set(pipe, IS_RUNNING); | ||
590 | else | ||
591 | usbhsp_flags_clr(pipe, IS_RUNNING); | ||
592 | } | ||
593 | |||
581 | void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence) | 594 | void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence) |
582 | { | 595 | { |
583 | u16 mask = (SQCLR | SQSET); | 596 | u16 mask = (SQCLR | SQSET); |
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h index 406f36d050e4..d24a05972370 100644 --- a/drivers/usb/renesas_usbhs/pipe.h +++ b/drivers/usb/renesas_usbhs/pipe.h | |||
@@ -36,6 +36,7 @@ struct usbhs_pipe { | |||
36 | #define USBHS_PIPE_FLAGS_IS_USED (1 << 0) | 36 | #define USBHS_PIPE_FLAGS_IS_USED (1 << 0) |
37 | #define USBHS_PIPE_FLAGS_IS_DIR_IN (1 << 1) | 37 | #define USBHS_PIPE_FLAGS_IS_DIR_IN (1 << 1) |
38 | #define USBHS_PIPE_FLAGS_IS_DIR_HOST (1 << 2) | 38 | #define USBHS_PIPE_FLAGS_IS_DIR_HOST (1 << 2) |
39 | #define USBHS_PIPE_FLAGS_IS_RUNNING (1 << 3) | ||
39 | 40 | ||
40 | struct usbhs_pkt_handle *handler; | 41 | struct usbhs_pkt_handle *handler; |
41 | 42 | ||
@@ -80,6 +81,9 @@ int usbhs_pipe_probe(struct usbhs_priv *priv); | |||
80 | void usbhs_pipe_remove(struct usbhs_priv *priv); | 81 | void usbhs_pipe_remove(struct usbhs_priv *priv); |
81 | int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe); | 82 | int usbhs_pipe_is_dir_in(struct usbhs_pipe *pipe); |
82 | int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe); | 83 | int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe); |
84 | int usbhs_pipe_is_running(struct usbhs_pipe *pipe); | ||
85 | void usbhs_pipe_running(struct usbhs_pipe *pipe, int running); | ||
86 | |||
83 | void usbhs_pipe_init(struct usbhs_priv *priv, | 87 | void usbhs_pipe_init(struct usbhs_priv *priv, |
84 | int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map)); | 88 | int (*dma_map_ctrl)(struct usbhs_pkt *pkt, int map)); |
85 | int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe); | 89 | int usbhs_pipe_get_maxpacket(struct usbhs_pipe *pipe); |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 824ea5e7ec8b..dc72b924c399 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -728,6 +728,7 @@ static const struct usb_device_id id_table_combined[] = { | |||
728 | { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), | 728 | { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), |
729 | .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, | 729 | .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, |
730 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, | 730 | { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, |
731 | { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) }, | ||
731 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, | 732 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, |
732 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, | 733 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, |
733 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, | 734 | { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, |
@@ -939,6 +940,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
939 | { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, | 940 | { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) }, |
940 | /* Infineon Devices */ | 941 | /* Infineon Devices */ |
941 | { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, | 942 | { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) }, |
943 | /* GE Healthcare devices */ | ||
944 | { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) }, | ||
942 | { } /* Terminating entry */ | 945 | { } /* Terminating entry */ |
943 | }; | 946 | }; |
944 | 947 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 70b0b1d88ae9..5937b2d242f2 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -837,6 +837,12 @@ | |||
837 | #define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */ | 837 | #define TELLDUS_TELLSTICK_PID 0x0C30 /* RF control dongle 433 MHz using FT232RL */ |
838 | 838 | ||
839 | /* | 839 | /* |
840 | * NOVITUS printers | ||
841 | */ | ||
842 | #define NOVITUS_VID 0x1a28 | ||
843 | #define NOVITUS_BONO_E_PID 0x6010 | ||
844 | |||
845 | /* | ||
840 | * RT Systems programming cables for various ham radios | 846 | * RT Systems programming cables for various ham radios |
841 | */ | 847 | */ |
842 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ | 848 | #define RTSYSTEMS_VID 0x2100 /* Vendor ID */ |
@@ -1385,3 +1391,9 @@ | |||
1385 | * ekey biometric systems GmbH (http://ekey.net/) | 1391 | * ekey biometric systems GmbH (http://ekey.net/) |
1386 | */ | 1392 | */ |
1387 | #define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */ | 1393 | #define FTDI_EKEY_CONV_USB_PID 0xCB08 /* Converter USB */ |
1394 | |||
1395 | /* | ||
1396 | * GE Healthcare devices | ||
1397 | */ | ||
1398 | #define GE_HEALTHCARE_VID 0x1901 | ||
1399 | #define GE_HEALTHCARE_NEMO_TRACKER_PID 0x0015 | ||
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 6f7f01eb556a..46179a0828eb 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -282,14 +282,19 @@ static const struct usb_device_id id_table[] = { | |||
282 | /* Sierra Wireless HSPA Non-Composite Device */ | 282 | /* Sierra Wireless HSPA Non-Composite Device */ |
283 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, | 283 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, |
284 | { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ | 284 | { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ |
285 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ | 285 | /* Sierra Wireless Direct IP modems */ |
286 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68A3, 0xFF, 0xFF, 0xFF), | ||
287 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | ||
288 | }, | ||
289 | { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF), | ||
286 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 290 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
287 | }, | 291 | }, |
288 | /* AT&T Direct IP LTE modems */ | 292 | /* AT&T Direct IP LTE modems */ |
289 | { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), | 293 | { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), |
290 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 294 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
291 | }, | 295 | }, |
292 | { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ | 296 | /* Airprime/Sierra Wireless Direct IP modems */ |
297 | { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68A3, 0xFF, 0xFF, 0xFF), | ||
293 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 298 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
294 | }, | 299 | }, |
295 | 300 | ||
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c index 1a132e9e947a..c9bb107d5e5c 100644 --- a/drivers/usb/serial/zte_ev.c +++ b/drivers/usb/serial/zte_ev.c | |||
@@ -272,6 +272,14 @@ static void zte_ev_usb_serial_close(struct usb_serial_port *port) | |||
272 | } | 272 | } |
273 | 273 | ||
274 | static const struct usb_device_id id_table[] = { | 274 | static const struct usb_device_id id_table[] = { |
275 | { USB_DEVICE(0x19d2, 0xffec) }, | ||
276 | { USB_DEVICE(0x19d2, 0xffee) }, | ||
277 | { USB_DEVICE(0x19d2, 0xfff6) }, | ||
278 | { USB_DEVICE(0x19d2, 0xfff7) }, | ||
279 | { USB_DEVICE(0x19d2, 0xfff8) }, | ||
280 | { USB_DEVICE(0x19d2, 0xfff9) }, | ||
281 | { USB_DEVICE(0x19d2, 0xfffb) }, | ||
282 | { USB_DEVICE(0x19d2, 0xfffc) }, | ||
275 | /* MG880 */ | 283 | /* MG880 */ |
276 | { USB_DEVICE(0x19d2, 0xfffd) }, | 284 | { USB_DEVICE(0x19d2, 0xfffd) }, |
277 | { }, | 285 | { }, |
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index 503ac5c8d80f..8a6f371ed6e7 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h | |||
@@ -59,10 +59,6 @@ static int uas_use_uas_driver(struct usb_interface *intf, | |||
59 | unsigned long flags = id->driver_info; | 59 | unsigned long flags = id->driver_info; |
60 | int r, alt; | 60 | int r, alt; |
61 | 61 | ||
62 | usb_stor_adjust_quirks(udev, &flags); | ||
63 | |||
64 | if (flags & US_FL_IGNORE_UAS) | ||
65 | return 0; | ||
66 | 62 | ||
67 | alt = uas_find_uas_alt_setting(intf); | 63 | alt = uas_find_uas_alt_setting(intf); |
68 | if (alt < 0) | 64 | if (alt < 0) |
@@ -72,6 +68,29 @@ static int uas_use_uas_driver(struct usb_interface *intf, | |||
72 | if (r < 0) | 68 | if (r < 0) |
73 | return 0; | 69 | return 0; |
74 | 70 | ||
71 | /* | ||
72 | * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is | ||
73 | * broken on the ASM1051, use the number of streams to differentiate. | ||
74 | * New ASM1053-s also support 32 streams, but have a different prod-id. | ||
75 | */ | ||
76 | if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c && | ||
77 | le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) { | ||
78 | if (udev->speed < USB_SPEED_SUPER) { | ||
79 | /* No streams info, assume ASM1051 */ | ||
80 | flags |= US_FL_IGNORE_UAS; | ||
81 | } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { | ||
82 | flags |= US_FL_IGNORE_UAS; | ||
83 | } | ||
84 | } | ||
85 | |||
86 | usb_stor_adjust_quirks(udev, &flags); | ||
87 | |||
88 | if (flags & US_FL_IGNORE_UAS) { | ||
89 | dev_warn(&udev->dev, | ||
90 | "UAS is blacklisted for this device, using usb-storage instead\n"); | ||
91 | return 0; | ||
92 | } | ||
93 | |||
75 | if (udev->bus->sg_tablesize == 0) { | 94 | if (udev->bus->sg_tablesize == 0) { |
76 | dev_warn(&udev->dev, | 95 | dev_warn(&udev->dev, |
77 | "The driver for the USB controller %s does not support scatter-gather which is\n", | 96 | "The driver for the USB controller %s does not support scatter-gather which is\n", |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 7ef99b2f3aaf..60cfcbc78552 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -741,6 +741,12 @@ UNUSUAL_DEV( 0x059b, 0x0001, 0x0100, 0x0100, | |||
741 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 741 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
742 | US_FL_SINGLE_LUN ), | 742 | US_FL_SINGLE_LUN ), |
743 | 743 | ||
744 | UNUSUAL_DEV( 0x059b, 0x0040, 0x0100, 0x0100, | ||
745 | "Iomega", | ||
746 | "Jaz USB Adapter", | ||
747 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
748 | US_FL_SINGLE_LUN ), | ||
749 | |||
744 | /* Reported by <Hendryk.Pfeiffer@gmx.de> */ | 750 | /* Reported by <Hendryk.Pfeiffer@gmx.de> */ |
745 | UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000, | 751 | UNUSUAL_DEV( 0x059f, 0x0643, 0x0000, 0x0000, |
746 | "LaCie", | 752 | "LaCie", |
diff --git a/drivers/uwb/lc-dev.c b/drivers/uwb/lc-dev.c index 80079b8fed15..d0303f0dbe15 100644 --- a/drivers/uwb/lc-dev.c +++ b/drivers/uwb/lc-dev.c | |||
@@ -431,16 +431,19 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) | |||
431 | uwb_dev->mac_addr = *bce->mac_addr; | 431 | uwb_dev->mac_addr = *bce->mac_addr; |
432 | uwb_dev->dev_addr = bce->dev_addr; | 432 | uwb_dev->dev_addr = bce->dev_addr; |
433 | dev_set_name(&uwb_dev->dev, "%s", macbuf); | 433 | dev_set_name(&uwb_dev->dev, "%s", macbuf); |
434 | |||
435 | /* plug the beacon cache */ | ||
436 | bce->uwb_dev = uwb_dev; | ||
437 | uwb_dev->bce = bce; | ||
438 | uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ | ||
439 | |||
434 | result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); | 440 | result = uwb_dev_add(uwb_dev, &rc->uwb_dev.dev, rc); |
435 | if (result < 0) { | 441 | if (result < 0) { |
436 | dev_err(dev, "new device %s: cannot instantiate device\n", | 442 | dev_err(dev, "new device %s: cannot instantiate device\n", |
437 | macbuf); | 443 | macbuf); |
438 | goto error_dev_add; | 444 | goto error_dev_add; |
439 | } | 445 | } |
440 | /* plug the beacon cache */ | 446 | |
441 | bce->uwb_dev = uwb_dev; | ||
442 | uwb_dev->bce = bce; | ||
443 | uwb_bce_get(bce); /* released in uwb_dev_sys_release() */ | ||
444 | dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n", | 447 | dev_info(dev, "uwb device (mac %s dev %s) connected to %s %s\n", |
445 | macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name, | 448 | macbuf, devbuf, rc->uwb_dev.dev.parent->bus->name, |
446 | dev_name(rc->uwb_dev.dev.parent)); | 449 | dev_name(rc->uwb_dev.dev.parent)); |
@@ -448,6 +451,8 @@ void uwbd_dev_onair(struct uwb_rc *rc, struct uwb_beca_e *bce) | |||
448 | return; | 451 | return; |
449 | 452 | ||
450 | error_dev_add: | 453 | error_dev_add: |
454 | bce->uwb_dev = NULL; | ||
455 | uwb_bce_put(bce); | ||
451 | kfree(uwb_dev); | 456 | kfree(uwb_dev); |
452 | return; | 457 | return; |
453 | } | 458 | } |
diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index a7b6217ac87b..6ad23bd3523a 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c | |||
@@ -639,9 +639,7 @@ static int clcdfb_of_init_tft_panel(struct clcd_fb *fb, u32 r0, u32 g0, u32 b0) | |||
639 | if (g0 != panels[i].g0) | 639 | if (g0 != panels[i].g0) |
640 | continue; | 640 | continue; |
641 | if (r0 == panels[i].r0 && b0 == panels[i].b0) | 641 | if (r0 == panels[i].r0 && b0 == panels[i].b0) |
642 | fb->panel->caps = panels[i].caps & CLCD_CAP_RGB; | 642 | fb->panel->caps = panels[i].caps; |
643 | if (r0 == panels[i].b0 && b0 == panels[i].r0) | ||
644 | fb->panel->caps = panels[i].caps & CLCD_CAP_BGR; | ||
645 | } | 643 | } |
646 | 644 | ||
647 | return fb->panel->caps ? 0 : -EINVAL; | 645 | return fb->panel->caps ? 0 : -EINVAL; |
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 5c660c77f03b..1e0a317d3dcd 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -230,8 +230,8 @@ static enum bp_state reserve_additional_memory(long credit) | |||
230 | rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); | 230 | rc = add_memory(nid, hotplug_start_paddr, balloon_hotplug << PAGE_SHIFT); |
231 | 231 | ||
232 | if (rc) { | 232 | if (rc) { |
233 | pr_info("%s: add_memory() failed: %i\n", __func__, rc); | 233 | pr_warn("Cannot add additional memory (%i)\n", rc); |
234 | return BP_EAGAIN; | 234 | return BP_ECANCELED; |
235 | } | 235 | } |
236 | 236 | ||
237 | balloon_hotplug -= credit; | 237 | balloon_hotplug -= credit; |
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c index 787d17945418..e53fe191738c 100644 --- a/drivers/xen/gntalloc.c +++ b/drivers/xen/gntalloc.c | |||
@@ -124,7 +124,7 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, | |||
124 | int i, rc, readonly; | 124 | int i, rc, readonly; |
125 | LIST_HEAD(queue_gref); | 125 | LIST_HEAD(queue_gref); |
126 | LIST_HEAD(queue_file); | 126 | LIST_HEAD(queue_file); |
127 | struct gntalloc_gref *gref; | 127 | struct gntalloc_gref *gref, *next; |
128 | 128 | ||
129 | readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE); | 129 | readonly = !(op->flags & GNTALLOC_FLAG_WRITABLE); |
130 | rc = -ENOMEM; | 130 | rc = -ENOMEM; |
@@ -141,13 +141,11 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op, | |||
141 | goto undo; | 141 | goto undo; |
142 | 142 | ||
143 | /* Grant foreign access to the page. */ | 143 | /* Grant foreign access to the page. */ |
144 | gref->gref_id = gnttab_grant_foreign_access(op->domid, | 144 | rc = gnttab_grant_foreign_access(op->domid, |
145 | pfn_to_mfn(page_to_pfn(gref->page)), readonly); | 145 | pfn_to_mfn(page_to_pfn(gref->page)), readonly); |
146 | if ((int)gref->gref_id < 0) { | 146 | if (rc < 0) |
147 | rc = gref->gref_id; | ||
148 | goto undo; | 147 | goto undo; |
149 | } | 148 | gref_ids[i] = gref->gref_id = rc; |
150 | gref_ids[i] = gref->gref_id; | ||
151 | } | 149 | } |
152 | 150 | ||
153 | /* Add to gref lists. */ | 151 | /* Add to gref lists. */ |
@@ -162,8 +160,8 @@ undo: | |||
162 | mutex_lock(&gref_mutex); | 160 | mutex_lock(&gref_mutex); |
163 | gref_size -= (op->count - i); | 161 | gref_size -= (op->count - i); |
164 | 162 | ||
165 | list_for_each_entry(gref, &queue_file, next_file) { | 163 | list_for_each_entry_safe(gref, next, &queue_file, next_file) { |
166 | /* __del_gref does not remove from queue_file */ | 164 | list_del(&gref->next_file); |
167 | __del_gref(gref); | 165 | __del_gref(gref); |
168 | } | 166 | } |
169 | 167 | ||
@@ -193,7 +191,7 @@ static void __del_gref(struct gntalloc_gref *gref) | |||
193 | 191 | ||
194 | gref->notify.flags = 0; | 192 | gref->notify.flags = 0; |
195 | 193 | ||
196 | if (gref->gref_id > 0) { | 194 | if (gref->gref_id) { |
197 | if (gnttab_query_foreign_access(gref->gref_id)) | 195 | if (gnttab_query_foreign_access(gref->gref_id)) |
198 | return; | 196 | return; |
199 | 197 | ||
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index 5f1e1f3cd186..f8bb36f9d9ce 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -103,16 +103,11 @@ static void do_suspend(void) | |||
103 | 103 | ||
104 | shutting_down = SHUTDOWN_SUSPEND; | 104 | shutting_down = SHUTDOWN_SUSPEND; |
105 | 105 | ||
106 | #ifdef CONFIG_PREEMPT | ||
107 | /* If the kernel is preemptible, we need to freeze all the processes | ||
108 | to prevent them from being in the middle of a pagetable update | ||
109 | during suspend. */ | ||
110 | err = freeze_processes(); | 106 | err = freeze_processes(); |
111 | if (err) { | 107 | if (err) { |
112 | pr_err("%s: freeze failed %d\n", __func__, err); | 108 | pr_err("%s: freeze failed %d\n", __func__, err); |
113 | goto out; | 109 | goto out; |
114 | } | 110 | } |
115 | #endif | ||
116 | 111 | ||
117 | err = dpm_suspend_start(PMSG_FREEZE); | 112 | err = dpm_suspend_start(PMSG_FREEZE); |
118 | if (err) { | 113 | if (err) { |
@@ -157,10 +152,8 @@ out_resume: | |||
157 | dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); | 152 | dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE); |
158 | 153 | ||
159 | out_thaw: | 154 | out_thaw: |
160 | #ifdef CONFIG_PREEMPT | ||
161 | thaw_processes(); | 155 | thaw_processes(); |
162 | out: | 156 | out: |
163 | #endif | ||
164 | shutting_down = SHUTDOWN_INVALID; | 157 | shutting_down = SHUTDOWN_INVALID; |
165 | } | 158 | } |
166 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ | 159 | #endif /* CONFIG_HIBERNATE_CALLBACKS */ |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -793,6 +793,8 @@ void exit_aio(struct mm_struct *mm) | |||
793 | 793 | ||
794 | for (i = 0; i < table->nr; ++i) { | 794 | for (i = 0; i < table->nr; ++i) { |
795 | struct kioctx *ctx = table->table[i]; | 795 | struct kioctx *ctx = table->table[i]; |
796 | struct completion requests_done = | ||
797 | COMPLETION_INITIALIZER_ONSTACK(requests_done); | ||
796 | 798 | ||
797 | if (!ctx) | 799 | if (!ctx) |
798 | continue; | 800 | continue; |
@@ -804,7 +806,10 @@ void exit_aio(struct mm_struct *mm) | |||
804 | * that it needs to unmap the area, just set it to 0. | 806 | * that it needs to unmap the area, just set it to 0. |
805 | */ | 807 | */ |
806 | ctx->mmap_size = 0; | 808 | ctx->mmap_size = 0; |
807 | kill_ioctx(mm, ctx, NULL); | 809 | kill_ioctx(mm, ctx, &requests_done); |
810 | |||
811 | /* Wait until all IO for the context are done. */ | ||
812 | wait_for_completion(&requests_done); | ||
808 | } | 813 | } |
809 | 814 | ||
810 | RCU_INIT_POINTER(mm->ioctx_table, NULL); | 815 | RCU_INIT_POINTER(mm->ioctx_table, NULL); |
@@ -1111,6 +1116,12 @@ static long aio_read_events_ring(struct kioctx *ctx, | |||
1111 | tail = ring->tail; | 1116 | tail = ring->tail; |
1112 | kunmap_atomic(ring); | 1117 | kunmap_atomic(ring); |
1113 | 1118 | ||
1119 | /* | ||
1120 | * Ensure that once we've read the current tail pointer, that | ||
1121 | * we also see the events that were stored up to the tail. | ||
1122 | */ | ||
1123 | smp_rmb(); | ||
1124 | |||
1114 | pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); | 1125 | pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); |
1115 | 1126 | ||
1116 | if (head == tail) | 1127 | if (head == tail) |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 36861b7a6757..ff1cc0399b9a 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -1966,7 +1966,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
1966 | 1966 | ||
1967 | btrfs_init_log_ctx(&ctx); | 1967 | btrfs_init_log_ctx(&ctx); |
1968 | 1968 | ||
1969 | ret = btrfs_log_dentry_safe(trans, root, dentry, &ctx); | 1969 | ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx); |
1970 | if (ret < 0) { | 1970 | if (ret < 0) { |
1971 | /* Fallthrough and commit/free transaction. */ | 1971 | /* Fallthrough and commit/free transaction. */ |
1972 | ret = 1; | 1972 | ret = 1; |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 9c194bd74d6e..016c403bfe7e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -778,8 +778,12 @@ retry: | |||
778 | ins.offset, | 778 | ins.offset, |
779 | BTRFS_ORDERED_COMPRESSED, | 779 | BTRFS_ORDERED_COMPRESSED, |
780 | async_extent->compress_type); | 780 | async_extent->compress_type); |
781 | if (ret) | 781 | if (ret) { |
782 | btrfs_drop_extent_cache(inode, async_extent->start, | ||
783 | async_extent->start + | ||
784 | async_extent->ram_size - 1, 0); | ||
782 | goto out_free_reserve; | 785 | goto out_free_reserve; |
786 | } | ||
783 | 787 | ||
784 | /* | 788 | /* |
785 | * clear dirty, set writeback and unlock the pages. | 789 | * clear dirty, set writeback and unlock the pages. |
@@ -971,14 +975,14 @@ static noinline int cow_file_range(struct inode *inode, | |||
971 | ret = btrfs_add_ordered_extent(inode, start, ins.objectid, | 975 | ret = btrfs_add_ordered_extent(inode, start, ins.objectid, |
972 | ram_size, cur_alloc_size, 0); | 976 | ram_size, cur_alloc_size, 0); |
973 | if (ret) | 977 | if (ret) |
974 | goto out_reserve; | 978 | goto out_drop_extent_cache; |
975 | 979 | ||
976 | if (root->root_key.objectid == | 980 | if (root->root_key.objectid == |
977 | BTRFS_DATA_RELOC_TREE_OBJECTID) { | 981 | BTRFS_DATA_RELOC_TREE_OBJECTID) { |
978 | ret = btrfs_reloc_clone_csums(inode, start, | 982 | ret = btrfs_reloc_clone_csums(inode, start, |
979 | cur_alloc_size); | 983 | cur_alloc_size); |
980 | if (ret) | 984 | if (ret) |
981 | goto out_reserve; | 985 | goto out_drop_extent_cache; |
982 | } | 986 | } |
983 | 987 | ||
984 | if (disk_num_bytes < cur_alloc_size) | 988 | if (disk_num_bytes < cur_alloc_size) |
@@ -1006,6 +1010,8 @@ static noinline int cow_file_range(struct inode *inode, | |||
1006 | out: | 1010 | out: |
1007 | return ret; | 1011 | return ret; |
1008 | 1012 | ||
1013 | out_drop_extent_cache: | ||
1014 | btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); | ||
1009 | out_reserve: | 1015 | out_reserve: |
1010 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); | 1016 | btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); |
1011 | out_unlock: | 1017 | out_unlock: |
@@ -4242,7 +4248,8 @@ out: | |||
4242 | btrfs_abort_transaction(trans, root, ret); | 4248 | btrfs_abort_transaction(trans, root, ret); |
4243 | } | 4249 | } |
4244 | error: | 4250 | error: |
4245 | if (last_size != (u64)-1) | 4251 | if (last_size != (u64)-1 && |
4252 | root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) | ||
4246 | btrfs_ordered_update_i_size(inode, last_size, NULL); | 4253 | btrfs_ordered_update_i_size(inode, last_size, NULL); |
4247 | btrfs_free_path(path); | 4254 | btrfs_free_path(path); |
4248 | return err; | 4255 | return err; |
@@ -5627,6 +5634,17 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) | |||
5627 | return ret; | 5634 | return ret; |
5628 | } | 5635 | } |
5629 | 5636 | ||
5637 | static int btrfs_insert_inode_locked(struct inode *inode) | ||
5638 | { | ||
5639 | struct btrfs_iget_args args; | ||
5640 | args.location = &BTRFS_I(inode)->location; | ||
5641 | args.root = BTRFS_I(inode)->root; | ||
5642 | |||
5643 | return insert_inode_locked4(inode, | ||
5644 | btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root), | ||
5645 | btrfs_find_actor, &args); | ||
5646 | } | ||
5647 | |||
5630 | static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | 5648 | static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, |
5631 | struct btrfs_root *root, | 5649 | struct btrfs_root *root, |
5632 | struct inode *dir, | 5650 | struct inode *dir, |
@@ -5719,10 +5737,19 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
5719 | sizes[1] = name_len + sizeof(*ref); | 5737 | sizes[1] = name_len + sizeof(*ref); |
5720 | } | 5738 | } |
5721 | 5739 | ||
5740 | location = &BTRFS_I(inode)->location; | ||
5741 | location->objectid = objectid; | ||
5742 | location->offset = 0; | ||
5743 | btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY); | ||
5744 | |||
5745 | ret = btrfs_insert_inode_locked(inode); | ||
5746 | if (ret < 0) | ||
5747 | goto fail; | ||
5748 | |||
5722 | path->leave_spinning = 1; | 5749 | path->leave_spinning = 1; |
5723 | ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems); | 5750 | ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems); |
5724 | if (ret != 0) | 5751 | if (ret != 0) |
5725 | goto fail; | 5752 | goto fail_unlock; |
5726 | 5753 | ||
5727 | inode_init_owner(inode, dir, mode); | 5754 | inode_init_owner(inode, dir, mode); |
5728 | inode_set_bytes(inode, 0); | 5755 | inode_set_bytes(inode, 0); |
@@ -5745,11 +5772,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
5745 | btrfs_mark_buffer_dirty(path->nodes[0]); | 5772 | btrfs_mark_buffer_dirty(path->nodes[0]); |
5746 | btrfs_free_path(path); | 5773 | btrfs_free_path(path); |
5747 | 5774 | ||
5748 | location = &BTRFS_I(inode)->location; | ||
5749 | location->objectid = objectid; | ||
5750 | location->offset = 0; | ||
5751 | btrfs_set_key_type(location, BTRFS_INODE_ITEM_KEY); | ||
5752 | |||
5753 | btrfs_inherit_iflags(inode, dir); | 5775 | btrfs_inherit_iflags(inode, dir); |
5754 | 5776 | ||
5755 | if (S_ISREG(mode)) { | 5777 | if (S_ISREG(mode)) { |
@@ -5760,7 +5782,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
5760 | BTRFS_INODE_NODATASUM; | 5782 | BTRFS_INODE_NODATASUM; |
5761 | } | 5783 | } |
5762 | 5784 | ||
5763 | btrfs_insert_inode_hash(inode); | ||
5764 | inode_tree_add(inode); | 5785 | inode_tree_add(inode); |
5765 | 5786 | ||
5766 | trace_btrfs_inode_new(inode); | 5787 | trace_btrfs_inode_new(inode); |
@@ -5775,6 +5796,9 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
5775 | btrfs_ino(inode), root->root_key.objectid, ret); | 5796 | btrfs_ino(inode), root->root_key.objectid, ret); |
5776 | 5797 | ||
5777 | return inode; | 5798 | return inode; |
5799 | |||
5800 | fail_unlock: | ||
5801 | unlock_new_inode(inode); | ||
5778 | fail: | 5802 | fail: |
5779 | if (dir && name) | 5803 | if (dir && name) |
5780 | BTRFS_I(dir)->index_cnt--; | 5804 | BTRFS_I(dir)->index_cnt--; |
@@ -5909,28 +5933,28 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, | |||
5909 | goto out_unlock; | 5933 | goto out_unlock; |
5910 | } | 5934 | } |
5911 | 5935 | ||
5912 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | ||
5913 | if (err) { | ||
5914 | drop_inode = 1; | ||
5915 | goto out_unlock; | ||
5916 | } | ||
5917 | |||
5918 | /* | 5936 | /* |
5919 | * If the active LSM wants to access the inode during | 5937 | * If the active LSM wants to access the inode during |
5920 | * d_instantiate it needs these. Smack checks to see | 5938 | * d_instantiate it needs these. Smack checks to see |
5921 | * if the filesystem supports xattrs by looking at the | 5939 | * if the filesystem supports xattrs by looking at the |
5922 | * ops vector. | 5940 | * ops vector. |
5923 | */ | 5941 | */ |
5924 | |||
5925 | inode->i_op = &btrfs_special_inode_operations; | 5942 | inode->i_op = &btrfs_special_inode_operations; |
5926 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); | 5943 | init_special_inode(inode, inode->i_mode, rdev); |
5944 | |||
5945 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | ||
5927 | if (err) | 5946 | if (err) |
5928 | drop_inode = 1; | 5947 | goto out_unlock_inode; |
5929 | else { | 5948 | |
5930 | init_special_inode(inode, inode->i_mode, rdev); | 5949 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); |
5950 | if (err) { | ||
5951 | goto out_unlock_inode; | ||
5952 | } else { | ||
5931 | btrfs_update_inode(trans, root, inode); | 5953 | btrfs_update_inode(trans, root, inode); |
5954 | unlock_new_inode(inode); | ||
5932 | d_instantiate(dentry, inode); | 5955 | d_instantiate(dentry, inode); |
5933 | } | 5956 | } |
5957 | |||
5934 | out_unlock: | 5958 | out_unlock: |
5935 | btrfs_end_transaction(trans, root); | 5959 | btrfs_end_transaction(trans, root); |
5936 | btrfs_balance_delayed_items(root); | 5960 | btrfs_balance_delayed_items(root); |
@@ -5940,6 +5964,12 @@ out_unlock: | |||
5940 | iput(inode); | 5964 | iput(inode); |
5941 | } | 5965 | } |
5942 | return err; | 5966 | return err; |
5967 | |||
5968 | out_unlock_inode: | ||
5969 | drop_inode = 1; | ||
5970 | unlock_new_inode(inode); | ||
5971 | goto out_unlock; | ||
5972 | |||
5943 | } | 5973 | } |
5944 | 5974 | ||
5945 | static int btrfs_create(struct inode *dir, struct dentry *dentry, | 5975 | static int btrfs_create(struct inode *dir, struct dentry *dentry, |
@@ -5974,15 +6004,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, | |||
5974 | goto out_unlock; | 6004 | goto out_unlock; |
5975 | } | 6005 | } |
5976 | drop_inode_on_err = 1; | 6006 | drop_inode_on_err = 1; |
5977 | |||
5978 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | ||
5979 | if (err) | ||
5980 | goto out_unlock; | ||
5981 | |||
5982 | err = btrfs_update_inode(trans, root, inode); | ||
5983 | if (err) | ||
5984 | goto out_unlock; | ||
5985 | |||
5986 | /* | 6007 | /* |
5987 | * If the active LSM wants to access the inode during | 6008 | * If the active LSM wants to access the inode during |
5988 | * d_instantiate it needs these. Smack checks to see | 6009 | * d_instantiate it needs these. Smack checks to see |
@@ -5991,14 +6012,23 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, | |||
5991 | */ | 6012 | */ |
5992 | inode->i_fop = &btrfs_file_operations; | 6013 | inode->i_fop = &btrfs_file_operations; |
5993 | inode->i_op = &btrfs_file_inode_operations; | 6014 | inode->i_op = &btrfs_file_inode_operations; |
6015 | inode->i_mapping->a_ops = &btrfs_aops; | ||
6016 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | ||
6017 | |||
6018 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | ||
6019 | if (err) | ||
6020 | goto out_unlock_inode; | ||
6021 | |||
6022 | err = btrfs_update_inode(trans, root, inode); | ||
6023 | if (err) | ||
6024 | goto out_unlock_inode; | ||
5994 | 6025 | ||
5995 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); | 6026 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); |
5996 | if (err) | 6027 | if (err) |
5997 | goto out_unlock; | 6028 | goto out_unlock_inode; |
5998 | 6029 | ||
5999 | inode->i_mapping->a_ops = &btrfs_aops; | ||
6000 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | ||
6001 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | 6030 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
6031 | unlock_new_inode(inode); | ||
6002 | d_instantiate(dentry, inode); | 6032 | d_instantiate(dentry, inode); |
6003 | 6033 | ||
6004 | out_unlock: | 6034 | out_unlock: |
@@ -6010,6 +6040,11 @@ out_unlock: | |||
6010 | btrfs_balance_delayed_items(root); | 6040 | btrfs_balance_delayed_items(root); |
6011 | btrfs_btree_balance_dirty(root); | 6041 | btrfs_btree_balance_dirty(root); |
6012 | return err; | 6042 | return err; |
6043 | |||
6044 | out_unlock_inode: | ||
6045 | unlock_new_inode(inode); | ||
6046 | goto out_unlock; | ||
6047 | |||
6013 | } | 6048 | } |
6014 | 6049 | ||
6015 | static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | 6050 | static int btrfs_link(struct dentry *old_dentry, struct inode *dir, |
@@ -6117,25 +6152,30 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
6117 | } | 6152 | } |
6118 | 6153 | ||
6119 | drop_on_err = 1; | 6154 | drop_on_err = 1; |
6155 | /* these must be set before we unlock the inode */ | ||
6156 | inode->i_op = &btrfs_dir_inode_operations; | ||
6157 | inode->i_fop = &btrfs_dir_file_operations; | ||
6120 | 6158 | ||
6121 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | 6159 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); |
6122 | if (err) | 6160 | if (err) |
6123 | goto out_fail; | 6161 | goto out_fail_inode; |
6124 | |||
6125 | inode->i_op = &btrfs_dir_inode_operations; | ||
6126 | inode->i_fop = &btrfs_dir_file_operations; | ||
6127 | 6162 | ||
6128 | btrfs_i_size_write(inode, 0); | 6163 | btrfs_i_size_write(inode, 0); |
6129 | err = btrfs_update_inode(trans, root, inode); | 6164 | err = btrfs_update_inode(trans, root, inode); |
6130 | if (err) | 6165 | if (err) |
6131 | goto out_fail; | 6166 | goto out_fail_inode; |
6132 | 6167 | ||
6133 | err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, | 6168 | err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, |
6134 | dentry->d_name.len, 0, index); | 6169 | dentry->d_name.len, 0, index); |
6135 | if (err) | 6170 | if (err) |
6136 | goto out_fail; | 6171 | goto out_fail_inode; |
6137 | 6172 | ||
6138 | d_instantiate(dentry, inode); | 6173 | d_instantiate(dentry, inode); |
6174 | /* | ||
6175 | * mkdir is special. We're unlocking after we call d_instantiate | ||
6176 | * to avoid a race with nfsd calling d_instantiate. | ||
6177 | */ | ||
6178 | unlock_new_inode(inode); | ||
6139 | drop_on_err = 0; | 6179 | drop_on_err = 0; |
6140 | 6180 | ||
6141 | out_fail: | 6181 | out_fail: |
@@ -6145,6 +6185,10 @@ out_fail: | |||
6145 | btrfs_balance_delayed_items(root); | 6185 | btrfs_balance_delayed_items(root); |
6146 | btrfs_btree_balance_dirty(root); | 6186 | btrfs_btree_balance_dirty(root); |
6147 | return err; | 6187 | return err; |
6188 | |||
6189 | out_fail_inode: | ||
6190 | unlock_new_inode(inode); | ||
6191 | goto out_fail; | ||
6148 | } | 6192 | } |
6149 | 6193 | ||
6150 | /* helper for btfs_get_extent. Given an existing extent in the tree, | 6194 | /* helper for btfs_get_extent. Given an existing extent in the tree, |
@@ -8100,6 +8144,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, | |||
8100 | 8144 | ||
8101 | set_nlink(inode, 1); | 8145 | set_nlink(inode, 1); |
8102 | btrfs_i_size_write(inode, 0); | 8146 | btrfs_i_size_write(inode, 0); |
8147 | unlock_new_inode(inode); | ||
8103 | 8148 | ||
8104 | err = btrfs_subvol_inherit_props(trans, new_root, parent_root); | 8149 | err = btrfs_subvol_inherit_props(trans, new_root, parent_root); |
8105 | if (err) | 8150 | if (err) |
@@ -8760,12 +8805,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
8760 | goto out_unlock; | 8805 | goto out_unlock; |
8761 | } | 8806 | } |
8762 | 8807 | ||
8763 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | ||
8764 | if (err) { | ||
8765 | drop_inode = 1; | ||
8766 | goto out_unlock; | ||
8767 | } | ||
8768 | |||
8769 | /* | 8808 | /* |
8770 | * If the active LSM wants to access the inode during | 8809 | * If the active LSM wants to access the inode during |
8771 | * d_instantiate it needs these. Smack checks to see | 8810 | * d_instantiate it needs these. Smack checks to see |
@@ -8774,23 +8813,22 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
8774 | */ | 8813 | */ |
8775 | inode->i_fop = &btrfs_file_operations; | 8814 | inode->i_fop = &btrfs_file_operations; |
8776 | inode->i_op = &btrfs_file_inode_operations; | 8815 | inode->i_op = &btrfs_file_inode_operations; |
8816 | inode->i_mapping->a_ops = &btrfs_aops; | ||
8817 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | ||
8818 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | ||
8819 | |||
8820 | err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name); | ||
8821 | if (err) | ||
8822 | goto out_unlock_inode; | ||
8777 | 8823 | ||
8778 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); | 8824 | err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); |
8779 | if (err) | 8825 | if (err) |
8780 | drop_inode = 1; | 8826 | goto out_unlock_inode; |
8781 | else { | ||
8782 | inode->i_mapping->a_ops = &btrfs_aops; | ||
8783 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | ||
8784 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | ||
8785 | } | ||
8786 | if (drop_inode) | ||
8787 | goto out_unlock; | ||
8788 | 8827 | ||
8789 | path = btrfs_alloc_path(); | 8828 | path = btrfs_alloc_path(); |
8790 | if (!path) { | 8829 | if (!path) { |
8791 | err = -ENOMEM; | 8830 | err = -ENOMEM; |
8792 | drop_inode = 1; | 8831 | goto out_unlock_inode; |
8793 | goto out_unlock; | ||
8794 | } | 8832 | } |
8795 | key.objectid = btrfs_ino(inode); | 8833 | key.objectid = btrfs_ino(inode); |
8796 | key.offset = 0; | 8834 | key.offset = 0; |
@@ -8799,9 +8837,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
8799 | err = btrfs_insert_empty_item(trans, root, path, &key, | 8837 | err = btrfs_insert_empty_item(trans, root, path, &key, |
8800 | datasize); | 8838 | datasize); |
8801 | if (err) { | 8839 | if (err) { |
8802 | drop_inode = 1; | ||
8803 | btrfs_free_path(path); | 8840 | btrfs_free_path(path); |
8804 | goto out_unlock; | 8841 | goto out_unlock_inode; |
8805 | } | 8842 | } |
8806 | leaf = path->nodes[0]; | 8843 | leaf = path->nodes[0]; |
8807 | ei = btrfs_item_ptr(leaf, path->slots[0], | 8844 | ei = btrfs_item_ptr(leaf, path->slots[0], |
@@ -8825,12 +8862,15 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
8825 | inode_set_bytes(inode, name_len); | 8862 | inode_set_bytes(inode, name_len); |
8826 | btrfs_i_size_write(inode, name_len); | 8863 | btrfs_i_size_write(inode, name_len); |
8827 | err = btrfs_update_inode(trans, root, inode); | 8864 | err = btrfs_update_inode(trans, root, inode); |
8828 | if (err) | 8865 | if (err) { |
8829 | drop_inode = 1; | 8866 | drop_inode = 1; |
8867 | goto out_unlock_inode; | ||
8868 | } | ||
8869 | |||
8870 | unlock_new_inode(inode); | ||
8871 | d_instantiate(dentry, inode); | ||
8830 | 8872 | ||
8831 | out_unlock: | 8873 | out_unlock: |
8832 | if (!err) | ||
8833 | d_instantiate(dentry, inode); | ||
8834 | btrfs_end_transaction(trans, root); | 8874 | btrfs_end_transaction(trans, root); |
8835 | if (drop_inode) { | 8875 | if (drop_inode) { |
8836 | inode_dec_link_count(inode); | 8876 | inode_dec_link_count(inode); |
@@ -8838,6 +8878,11 @@ out_unlock: | |||
8838 | } | 8878 | } |
8839 | btrfs_btree_balance_dirty(root); | 8879 | btrfs_btree_balance_dirty(root); |
8840 | return err; | 8880 | return err; |
8881 | |||
8882 | out_unlock_inode: | ||
8883 | drop_inode = 1; | ||
8884 | unlock_new_inode(inode); | ||
8885 | goto out_unlock; | ||
8841 | } | 8886 | } |
8842 | 8887 | ||
8843 | static int __btrfs_prealloc_file_range(struct inode *inode, int mode, | 8888 | static int __btrfs_prealloc_file_range(struct inode *inode, int mode, |
@@ -9021,14 +9066,6 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
9021 | goto out; | 9066 | goto out; |
9022 | } | 9067 | } |
9023 | 9068 | ||
9024 | ret = btrfs_init_inode_security(trans, inode, dir, NULL); | ||
9025 | if (ret) | ||
9026 | goto out; | ||
9027 | |||
9028 | ret = btrfs_update_inode(trans, root, inode); | ||
9029 | if (ret) | ||
9030 | goto out; | ||
9031 | |||
9032 | inode->i_fop = &btrfs_file_operations; | 9069 | inode->i_fop = &btrfs_file_operations; |
9033 | inode->i_op = &btrfs_file_inode_operations; | 9070 | inode->i_op = &btrfs_file_inode_operations; |
9034 | 9071 | ||
@@ -9036,9 +9073,16 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
9036 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; | 9073 | inode->i_mapping->backing_dev_info = &root->fs_info->bdi; |
9037 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; | 9074 | BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; |
9038 | 9075 | ||
9076 | ret = btrfs_init_inode_security(trans, inode, dir, NULL); | ||
9077 | if (ret) | ||
9078 | goto out_inode; | ||
9079 | |||
9080 | ret = btrfs_update_inode(trans, root, inode); | ||
9081 | if (ret) | ||
9082 | goto out_inode; | ||
9039 | ret = btrfs_orphan_add(trans, inode); | 9083 | ret = btrfs_orphan_add(trans, inode); |
9040 | if (ret) | 9084 | if (ret) |
9041 | goto out; | 9085 | goto out_inode; |
9042 | 9086 | ||
9043 | /* | 9087 | /* |
9044 | * We set number of links to 0 in btrfs_new_inode(), and here we set | 9088 | * We set number of links to 0 in btrfs_new_inode(), and here we set |
@@ -9048,6 +9092,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
9048 | * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() | 9092 | * d_tmpfile() -> inode_dec_link_count() -> drop_nlink() |
9049 | */ | 9093 | */ |
9050 | set_nlink(inode, 1); | 9094 | set_nlink(inode, 1); |
9095 | unlock_new_inode(inode); | ||
9051 | d_tmpfile(dentry, inode); | 9096 | d_tmpfile(dentry, inode); |
9052 | mark_inode_dirty(inode); | 9097 | mark_inode_dirty(inode); |
9053 | 9098 | ||
@@ -9057,8 +9102,12 @@ out: | |||
9057 | iput(inode); | 9102 | iput(inode); |
9058 | btrfs_balance_delayed_items(root); | 9103 | btrfs_balance_delayed_items(root); |
9059 | btrfs_btree_balance_dirty(root); | 9104 | btrfs_btree_balance_dirty(root); |
9060 | |||
9061 | return ret; | 9105 | return ret; |
9106 | |||
9107 | out_inode: | ||
9108 | unlock_new_inode(inode); | ||
9109 | goto out; | ||
9110 | |||
9062 | } | 9111 | } |
9063 | 9112 | ||
9064 | static const struct inode_operations btrfs_dir_inode_operations = { | 9113 | static const struct inode_operations btrfs_dir_inode_operations = { |
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index fce6fd0e3f50..8a8e29878c34 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -1019,8 +1019,10 @@ static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em) | |||
1019 | return false; | 1019 | return false; |
1020 | 1020 | ||
1021 | next = defrag_lookup_extent(inode, em->start + em->len); | 1021 | next = defrag_lookup_extent(inode, em->start + em->len); |
1022 | if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE || | 1022 | if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE) |
1023 | (em->block_start + em->block_len == next->block_start)) | 1023 | ret = false; |
1024 | else if ((em->block_start + em->block_len == next->block_start) && | ||
1025 | (em->block_len > 128 * 1024 && next->block_len > 128 * 1024)) | ||
1024 | ret = false; | 1026 | ret = false; |
1025 | 1027 | ||
1026 | free_extent_map(next); | 1028 | free_extent_map(next); |
@@ -1055,7 +1057,6 @@ static int should_defrag_range(struct inode *inode, u64 start, int thresh, | |||
1055 | } | 1057 | } |
1056 | 1058 | ||
1057 | next_mergeable = defrag_check_next_extent(inode, em); | 1059 | next_mergeable = defrag_check_next_extent(inode, em); |
1058 | |||
1059 | /* | 1060 | /* |
1060 | * we hit a real extent, if it is big or the next extent is not a | 1061 | * we hit a real extent, if it is big or the next extent is not a |
1061 | * real extent, don't bother defragging it | 1062 | * real extent, don't bother defragging it |
@@ -1702,7 +1703,7 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, | |||
1702 | ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY | | 1703 | ~(BTRFS_SUBVOL_CREATE_ASYNC | BTRFS_SUBVOL_RDONLY | |
1703 | BTRFS_SUBVOL_QGROUP_INHERIT)) { | 1704 | BTRFS_SUBVOL_QGROUP_INHERIT)) { |
1704 | ret = -EOPNOTSUPP; | 1705 | ret = -EOPNOTSUPP; |
1705 | goto out; | 1706 | goto free_args; |
1706 | } | 1707 | } |
1707 | 1708 | ||
1708 | if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) | 1709 | if (vol_args->flags & BTRFS_SUBVOL_CREATE_ASYNC) |
@@ -1712,27 +1713,31 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file, | |||
1712 | if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { | 1713 | if (vol_args->flags & BTRFS_SUBVOL_QGROUP_INHERIT) { |
1713 | if (vol_args->size > PAGE_CACHE_SIZE) { | 1714 | if (vol_args->size > PAGE_CACHE_SIZE) { |
1714 | ret = -EINVAL; | 1715 | ret = -EINVAL; |
1715 | goto out; | 1716 | goto free_args; |
1716 | } | 1717 | } |
1717 | inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size); | 1718 | inherit = memdup_user(vol_args->qgroup_inherit, vol_args->size); |
1718 | if (IS_ERR(inherit)) { | 1719 | if (IS_ERR(inherit)) { |
1719 | ret = PTR_ERR(inherit); | 1720 | ret = PTR_ERR(inherit); |
1720 | goto out; | 1721 | goto free_args; |
1721 | } | 1722 | } |
1722 | } | 1723 | } |
1723 | 1724 | ||
1724 | ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, | 1725 | ret = btrfs_ioctl_snap_create_transid(file, vol_args->name, |
1725 | vol_args->fd, subvol, ptr, | 1726 | vol_args->fd, subvol, ptr, |
1726 | readonly, inherit); | 1727 | readonly, inherit); |
1728 | if (ret) | ||
1729 | goto free_inherit; | ||
1727 | 1730 | ||
1728 | if (ret == 0 && ptr && | 1731 | if (ptr && copy_to_user(arg + |
1729 | copy_to_user(arg + | 1732 | offsetof(struct btrfs_ioctl_vol_args_v2, |
1730 | offsetof(struct btrfs_ioctl_vol_args_v2, | 1733 | transid), |
1731 | transid), ptr, sizeof(*ptr))) | 1734 | ptr, sizeof(*ptr))) |
1732 | ret = -EFAULT; | 1735 | ret = -EFAULT; |
1733 | out: | 1736 | |
1734 | kfree(vol_args); | 1737 | free_inherit: |
1735 | kfree(inherit); | 1738 | kfree(inherit); |
1739 | free_args: | ||
1740 | kfree(vol_args); | ||
1736 | return ret; | 1741 | return ret; |
1737 | } | 1742 | } |
1738 | 1743 | ||
@@ -2652,7 +2657,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) | |||
2652 | vol_args = memdup_user(arg, sizeof(*vol_args)); | 2657 | vol_args = memdup_user(arg, sizeof(*vol_args)); |
2653 | if (IS_ERR(vol_args)) { | 2658 | if (IS_ERR(vol_args)) { |
2654 | ret = PTR_ERR(vol_args); | 2659 | ret = PTR_ERR(vol_args); |
2655 | goto out; | 2660 | goto err_drop; |
2656 | } | 2661 | } |
2657 | 2662 | ||
2658 | vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; | 2663 | vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; |
@@ -2670,6 +2675,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) | |||
2670 | 2675 | ||
2671 | out: | 2676 | out: |
2672 | kfree(vol_args); | 2677 | kfree(vol_args); |
2678 | err_drop: | ||
2673 | mnt_drop_write_file(file); | 2679 | mnt_drop_write_file(file); |
2674 | return ret; | 2680 | return ret; |
2675 | } | 2681 | } |
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 7e0e6e3029dd..d296efe2d3e7 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -94,8 +94,10 @@ | |||
94 | #define LOG_WALK_REPLAY_ALL 3 | 94 | #define LOG_WALK_REPLAY_ALL 3 |
95 | 95 | ||
96 | static int btrfs_log_inode(struct btrfs_trans_handle *trans, | 96 | static int btrfs_log_inode(struct btrfs_trans_handle *trans, |
97 | struct btrfs_root *root, struct inode *inode, | 97 | struct btrfs_root *root, struct inode *inode, |
98 | int inode_only); | 98 | int inode_only, |
99 | const loff_t start, | ||
100 | const loff_t end); | ||
99 | static int link_to_fixup_dir(struct btrfs_trans_handle *trans, | 101 | static int link_to_fixup_dir(struct btrfs_trans_handle *trans, |
100 | struct btrfs_root *root, | 102 | struct btrfs_root *root, |
101 | struct btrfs_path *path, u64 objectid); | 103 | struct btrfs_path *path, u64 objectid); |
@@ -3858,8 +3860,10 @@ process: | |||
3858 | * This handles both files and directories. | 3860 | * This handles both files and directories. |
3859 | */ | 3861 | */ |
3860 | static int btrfs_log_inode(struct btrfs_trans_handle *trans, | 3862 | static int btrfs_log_inode(struct btrfs_trans_handle *trans, |
3861 | struct btrfs_root *root, struct inode *inode, | 3863 | struct btrfs_root *root, struct inode *inode, |
3862 | int inode_only) | 3864 | int inode_only, |
3865 | const loff_t start, | ||
3866 | const loff_t end) | ||
3863 | { | 3867 | { |
3864 | struct btrfs_path *path; | 3868 | struct btrfs_path *path; |
3865 | struct btrfs_path *dst_path; | 3869 | struct btrfs_path *dst_path; |
@@ -3876,6 +3880,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
3876 | int ins_nr; | 3880 | int ins_nr; |
3877 | bool fast_search = false; | 3881 | bool fast_search = false; |
3878 | u64 ino = btrfs_ino(inode); | 3882 | u64 ino = btrfs_ino(inode); |
3883 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
3879 | 3884 | ||
3880 | path = btrfs_alloc_path(); | 3885 | path = btrfs_alloc_path(); |
3881 | if (!path) | 3886 | if (!path) |
@@ -4049,13 +4054,35 @@ log_extents: | |||
4049 | goto out_unlock; | 4054 | goto out_unlock; |
4050 | } | 4055 | } |
4051 | } else if (inode_only == LOG_INODE_ALL) { | 4056 | } else if (inode_only == LOG_INODE_ALL) { |
4052 | struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; | ||
4053 | struct extent_map *em, *n; | 4057 | struct extent_map *em, *n; |
4054 | 4058 | ||
4055 | write_lock(&tree->lock); | 4059 | write_lock(&em_tree->lock); |
4056 | list_for_each_entry_safe(em, n, &tree->modified_extents, list) | 4060 | /* |
4057 | list_del_init(&em->list); | 4061 | * We can't just remove every em if we're called for a ranged |
4058 | write_unlock(&tree->lock); | 4062 | * fsync - that is, one that doesn't cover the whole possible |
4063 | * file range (0 to LLONG_MAX). This is because we can have | ||
4064 | * em's that fall outside the range we're logging and therefore | ||
4065 | * their ordered operations haven't completed yet | ||
4066 | * (btrfs_finish_ordered_io() not invoked yet). This means we | ||
4067 | * didn't get their respective file extent item in the fs/subvol | ||
4068 | * tree yet, and need to let the next fast fsync (one which | ||
4069 | * consults the list of modified extent maps) find the em so | ||
4070 | * that it logs a matching file extent item and waits for the | ||
4071 | * respective ordered operation to complete (if it's still | ||
4072 | * running). | ||
4073 | * | ||
4074 | * Removing every em outside the range we're logging would make | ||
4075 | * the next fast fsync not log their matching file extent items, | ||
4076 | * therefore making us lose data after a log replay. | ||
4077 | */ | ||
4078 | list_for_each_entry_safe(em, n, &em_tree->modified_extents, | ||
4079 | list) { | ||
4080 | const u64 mod_end = em->mod_start + em->mod_len - 1; | ||
4081 | |||
4082 | if (em->mod_start >= start && mod_end <= end) | ||
4083 | list_del_init(&em->list); | ||
4084 | } | ||
4085 | write_unlock(&em_tree->lock); | ||
4059 | } | 4086 | } |
4060 | 4087 | ||
4061 | if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { | 4088 | if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { |
@@ -4065,8 +4092,19 @@ log_extents: | |||
4065 | goto out_unlock; | 4092 | goto out_unlock; |
4066 | } | 4093 | } |
4067 | } | 4094 | } |
4068 | BTRFS_I(inode)->logged_trans = trans->transid; | 4095 | |
4069 | BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans; | 4096 | write_lock(&em_tree->lock); |
4097 | /* | ||
4098 | * If we're doing a ranged fsync and there are still modified extents | ||
4099 | * in the list, we must run on the next fsync call as it might cover | ||
4100 | * those extents (a full fsync or an fsync for other range). | ||
4101 | */ | ||
4102 | if (list_empty(&em_tree->modified_extents)) { | ||
4103 | BTRFS_I(inode)->logged_trans = trans->transid; | ||
4104 | BTRFS_I(inode)->last_log_commit = | ||
4105 | BTRFS_I(inode)->last_sub_trans; | ||
4106 | } | ||
4107 | write_unlock(&em_tree->lock); | ||
4070 | out_unlock: | 4108 | out_unlock: |
4071 | if (unlikely(err)) | 4109 | if (unlikely(err)) |
4072 | btrfs_put_logged_extents(&logged_list); | 4110 | btrfs_put_logged_extents(&logged_list); |
@@ -4161,7 +4199,10 @@ out: | |||
4161 | */ | 4199 | */ |
4162 | static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, | 4200 | static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, |
4163 | struct btrfs_root *root, struct inode *inode, | 4201 | struct btrfs_root *root, struct inode *inode, |
4164 | struct dentry *parent, int exists_only, | 4202 | struct dentry *parent, |
4203 | const loff_t start, | ||
4204 | const loff_t end, | ||
4205 | int exists_only, | ||
4165 | struct btrfs_log_ctx *ctx) | 4206 | struct btrfs_log_ctx *ctx) |
4166 | { | 4207 | { |
4167 | int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL; | 4208 | int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL; |
@@ -4207,7 +4248,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, | |||
4207 | if (ret) | 4248 | if (ret) |
4208 | goto end_no_trans; | 4249 | goto end_no_trans; |
4209 | 4250 | ||
4210 | ret = btrfs_log_inode(trans, root, inode, inode_only); | 4251 | ret = btrfs_log_inode(trans, root, inode, inode_only, start, end); |
4211 | if (ret) | 4252 | if (ret) |
4212 | goto end_trans; | 4253 | goto end_trans; |
4213 | 4254 | ||
@@ -4235,7 +4276,8 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, | |||
4235 | 4276 | ||
4236 | if (BTRFS_I(inode)->generation > | 4277 | if (BTRFS_I(inode)->generation > |
4237 | root->fs_info->last_trans_committed) { | 4278 | root->fs_info->last_trans_committed) { |
4238 | ret = btrfs_log_inode(trans, root, inode, inode_only); | 4279 | ret = btrfs_log_inode(trans, root, inode, inode_only, |
4280 | 0, LLONG_MAX); | ||
4239 | if (ret) | 4281 | if (ret) |
4240 | goto end_trans; | 4282 | goto end_trans; |
4241 | } | 4283 | } |
@@ -4269,13 +4311,15 @@ end_no_trans: | |||
4269 | */ | 4311 | */ |
4270 | int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, | 4312 | int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, |
4271 | struct btrfs_root *root, struct dentry *dentry, | 4313 | struct btrfs_root *root, struct dentry *dentry, |
4314 | const loff_t start, | ||
4315 | const loff_t end, | ||
4272 | struct btrfs_log_ctx *ctx) | 4316 | struct btrfs_log_ctx *ctx) |
4273 | { | 4317 | { |
4274 | struct dentry *parent = dget_parent(dentry); | 4318 | struct dentry *parent = dget_parent(dentry); |
4275 | int ret; | 4319 | int ret; |
4276 | 4320 | ||
4277 | ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, | 4321 | ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, |
4278 | 0, ctx); | 4322 | start, end, 0, ctx); |
4279 | dput(parent); | 4323 | dput(parent); |
4280 | 4324 | ||
4281 | return ret; | 4325 | return ret; |
@@ -4512,6 +4556,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans, | |||
4512 | root->fs_info->last_trans_committed)) | 4556 | root->fs_info->last_trans_committed)) |
4513 | return 0; | 4557 | return 0; |
4514 | 4558 | ||
4515 | return btrfs_log_inode_parent(trans, root, inode, parent, 1, NULL); | 4559 | return btrfs_log_inode_parent(trans, root, inode, parent, 0, |
4560 | LLONG_MAX, 1, NULL); | ||
4516 | } | 4561 | } |
4517 | 4562 | ||
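
The fs/btrfs/tree-log.c changes above thread a byte range (start, end) through the logging path so that a ranged fsync only drops the modified extent maps lying entirely inside the range it actually logged; extents outside it stay queued, since their ordered I/O may not have completed and a later fsync must still be able to find them. A condensed illustration of that filtering step, using a plain singly linked list in place of the btrfs extent map tree:

#include <stdio.h>

struct em {
    unsigned long long mod_start;
    unsigned long long mod_len;
    struct em *next;
};

/*
 * Unlink only the entries whose modified range is fully covered by
 * [start, end]; everything else stays on the list so a later fsync can
 * still log it once its ordered I/O has completed.
 */
static void drop_fully_logged(struct em **head,
                              unsigned long long start,
                              unsigned long long end)
{
    struct em **pp = head;

    while (*pp) {
        struct em *e = *pp;
        unsigned long long mod_end = e->mod_start + e->mod_len - 1;

        if (e->mod_start >= start && mod_end <= end)
            *pp = e->next;    /* caller still owns e */
        else
            pp = &e->next;
    }
}

int main(void)
{
    struct em outside = { 100, 50, 0 };        /* past the logged range */
    struct em inside = { 0, 10, &outside };    /* fully inside it */
    struct em *head = &inside;

    drop_fully_logged(&head, 0, 63);
    printf("%llu\n", head->mod_start);    /* 100: only 'outside' is left */
    return 0;
}
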
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h index 7f5b41bd5373..e2e798ae7cd7 100644 --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h | |||
@@ -59,6 +59,8 @@ int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, | |||
59 | int btrfs_recover_log_trees(struct btrfs_root *tree_root); | 59 | int btrfs_recover_log_trees(struct btrfs_root *tree_root); |
60 | int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, | 60 | int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, |
61 | struct btrfs_root *root, struct dentry *dentry, | 61 | struct btrfs_root *root, struct dentry *dentry, |
62 | const loff_t start, | ||
63 | const loff_t end, | ||
62 | struct btrfs_log_ctx *ctx); | 64 | struct btrfs_log_ctx *ctx); |
63 | int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, | 65 | int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, |
64 | struct btrfs_root *root, | 66 | struct btrfs_root *root, |
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 603f18a65c12..a2172f3f69e3 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -22,6 +22,11 @@ config CIFS | |||
22 | support for OS/2 and Windows ME and similar servers is provided as | 22 | support for OS/2 and Windows ME and similar servers is provided as |
23 | well. | 23 | well. |
24 | 24 | ||
25 | The module also provides optional support for the followon | ||
26 | protocols for CIFS including SMB3, which enables | ||
27 | useful performance and security features (see the description | ||
28 | of CONFIG_CIFS_SMB2). | ||
29 | |||
25 | The cifs module provides an advanced network file system | 30 | The cifs module provides an advanced network file system |
26 | client for mounting to CIFS compliant servers. It includes | 31 | client for mounting to CIFS compliant servers. It includes |
27 | support for DFS (hierarchical name space), secure per-user | 32 | support for DFS (hierarchical name space), secure per-user |
@@ -121,7 +126,8 @@ config CIFS_ACL | |||
121 | depends on CIFS_XATTR && KEYS | 126 | depends on CIFS_XATTR && KEYS |
122 | help | 127 | help |
123 | Allows fetching CIFS/NTFS ACL from the server. The DACL blob | 128 | Allows fetching CIFS/NTFS ACL from the server. The DACL blob |
124 | is handed over to the application/caller. | 129 | is handed over to the application/caller. See the man |
130 | page for getcifsacl for more information. | ||
125 | 131 | ||
126 | config CIFS_DEBUG | 132 | config CIFS_DEBUG |
127 | bool "Enable CIFS debugging routines" | 133 | bool "Enable CIFS debugging routines" |
@@ -162,7 +168,7 @@ config CIFS_NFSD_EXPORT | |||
162 | Allows NFS server to export a CIFS mounted share (nfsd over cifs) | 168 | Allows NFS server to export a CIFS mounted share (nfsd over cifs) |
163 | 169 | ||
164 | config CIFS_SMB2 | 170 | config CIFS_SMB2 |
165 | bool "SMB2 network file system support" | 171 | bool "SMB2 and SMB3 network file system support" |
166 | depends on CIFS && INET | 172 | depends on CIFS && INET |
167 | select NLS | 173 | select NLS |
168 | select KEYS | 174 | select KEYS |
@@ -170,16 +176,21 @@ config CIFS_SMB2 | |||
170 | select DNS_RESOLVER | 176 | select DNS_RESOLVER |
171 | 177 | ||
172 | help | 178 | help |
173 | This enables experimental support for the SMB2 (Server Message Block | 179 | This enables support for the Server Message Block version 2 |
174 | version 2) protocol. The SMB2 protocol is the successor to the | 180 | family of protocols, including SMB3. SMB3 support is |
175 | popular CIFS and SMB network file sharing protocols. SMB2 is the | 181 | enabled on mount by specifying "vers=3.0" in the mount |
176 | native file sharing mechanism for recent versions of Windows | 182 | options. These protocols are the successors to the popular |
177 | operating systems (since Vista). SMB2 enablement will eventually | 183 | CIFS and SMB network file sharing protocols. SMB3 is the |
178 | allow users better performance, security and features, than would be | 184 | native file sharing mechanism for the more recent |
179 | possible with cifs. Note that smb2 mount options also are simpler | 185 | versions of Windows (Windows 8 and Windows 2012 and |
180 | (compared to cifs) due to protocol improvements. | 186 | later) and Samba server and many others support SMB3 well. |
181 | 187 | In general SMB3 enables better performance, security | |
182 | Unless you are a developer or tester, say N. | 188 | and features, than would be possible with CIFS (Note that |
189 | when mounting to Samba, due to the CIFS POSIX extensions, | ||
190 | CIFS mounts can provide slightly better POSIX compatibility | ||
191 | than SMB3 mounts do though). Note that SMB2/SMB3 mount | ||
192 | options are also slightly simpler (compared to CIFS) due | ||
193 | to protocol improvements. | ||
183 | 194 | ||
184 | config CIFS_FSCACHE | 195 | config CIFS_FSCACHE |
185 | bool "Provide CIFS client caching support" | 196 | bool "Provide CIFS client caching support" |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index dfc731b02aa9..25b8392bfdd2 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -70,11 +70,6 @@ | |||
70 | #define SERVER_NAME_LENGTH 40 | 70 | #define SERVER_NAME_LENGTH 40 |
71 | #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1) | 71 | #define SERVER_NAME_LEN_WITH_NULL (SERVER_NAME_LENGTH + 1) |
72 | 72 | ||
73 | /* used to define string lengths for reversing unicode strings */ | ||
74 | /* (256+1)*2 = 514 */ | ||
75 | /* (max path length + 1 for null) * 2 for unicode */ | ||
76 | #define MAX_NAME 514 | ||
77 | |||
78 | /* SMB echo "timeout" -- FIXME: tunable? */ | 73 | /* SMB echo "timeout" -- FIXME: tunable? */ |
79 | #define SMB_ECHO_INTERVAL (60 * HZ) | 74 | #define SMB_ECHO_INTERVAL (60 * HZ) |
80 | 75 | ||
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 03ed8a09581c..8a9fded7c135 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -837,7 +837,6 @@ cifs_demultiplex_thread(void *p) | |||
837 | struct TCP_Server_Info *server = p; | 837 | struct TCP_Server_Info *server = p; |
838 | unsigned int pdu_length; | 838 | unsigned int pdu_length; |
839 | char *buf = NULL; | 839 | char *buf = NULL; |
840 | struct task_struct *task_to_wake = NULL; | ||
841 | struct mid_q_entry *mid_entry; | 840 | struct mid_q_entry *mid_entry; |
842 | 841 | ||
843 | current->flags |= PF_MEMALLOC; | 842 | current->flags |= PF_MEMALLOC; |
@@ -928,19 +927,7 @@ cifs_demultiplex_thread(void *p) | |||
928 | if (server->smallbuf) /* no sense logging a debug message if NULL */ | 927 | if (server->smallbuf) /* no sense logging a debug message if NULL */ |
929 | cifs_small_buf_release(server->smallbuf); | 928 | cifs_small_buf_release(server->smallbuf); |
930 | 929 | ||
931 | task_to_wake = xchg(&server->tsk, NULL); | ||
932 | clean_demultiplex_info(server); | 930 | clean_demultiplex_info(server); |
933 | |||
934 | /* if server->tsk was NULL then wait for a signal before exiting */ | ||
935 | if (!task_to_wake) { | ||
936 | set_current_state(TASK_INTERRUPTIBLE); | ||
937 | while (!signal_pending(current)) { | ||
938 | schedule(); | ||
939 | set_current_state(TASK_INTERRUPTIBLE); | ||
940 | } | ||
941 | set_current_state(TASK_RUNNING); | ||
942 | } | ||
943 | |||
944 | module_put_and_exit(0); | 931 | module_put_and_exit(0); |
945 | } | 932 | } |
946 | 933 | ||
@@ -1600,6 +1587,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1600 | tmp_end++; | 1587 | tmp_end++; |
1601 | if (!(tmp_end < end && tmp_end[1] == delim)) { | 1588 | if (!(tmp_end < end && tmp_end[1] == delim)) { |
1602 | /* No it is not. Set the password to NULL */ | 1589 | /* No it is not. Set the password to NULL */ |
1590 | kfree(vol->password); | ||
1603 | vol->password = NULL; | 1591 | vol->password = NULL; |
1604 | break; | 1592 | break; |
1605 | } | 1593 | } |
@@ -1637,6 +1625,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1637 | options = end; | 1625 | options = end; |
1638 | } | 1626 | } |
1639 | 1627 | ||
1628 | kfree(vol->password); | ||
1640 | /* Now build new password string */ | 1629 | /* Now build new password string */ |
1641 | temp_len = strlen(value); | 1630 | temp_len = strlen(value); |
1642 | vol->password = kzalloc(temp_len+1, GFP_KERNEL); | 1631 | vol->password = kzalloc(temp_len+1, GFP_KERNEL); |
@@ -2061,8 +2050,6 @@ cifs_find_tcp_session(struct smb_vol *vol) | |||
2061 | static void | 2050 | static void |
2062 | cifs_put_tcp_session(struct TCP_Server_Info *server) | 2051 | cifs_put_tcp_session(struct TCP_Server_Info *server) |
2063 | { | 2052 | { |
2064 | struct task_struct *task; | ||
2065 | |||
2066 | spin_lock(&cifs_tcp_ses_lock); | 2053 | spin_lock(&cifs_tcp_ses_lock); |
2067 | if (--server->srv_count > 0) { | 2054 | if (--server->srv_count > 0) { |
2068 | spin_unlock(&cifs_tcp_ses_lock); | 2055 | spin_unlock(&cifs_tcp_ses_lock); |
@@ -2086,10 +2073,6 @@ cifs_put_tcp_session(struct TCP_Server_Info *server) | |||
2086 | kfree(server->session_key.response); | 2073 | kfree(server->session_key.response); |
2087 | server->session_key.response = NULL; | 2074 | server->session_key.response = NULL; |
2088 | server->session_key.len = 0; | 2075 | server->session_key.len = 0; |
2089 | |||
2090 | task = xchg(&server->tsk, NULL); | ||
2091 | if (task) | ||
2092 | force_sig(SIGKILL, task); | ||
2093 | } | 2076 | } |
2094 | 2077 | ||
2095 | static struct TCP_Server_Info * | 2078 | static struct TCP_Server_Info * |
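
Two of the fs/cifs/connect.c hunks above plug the same leak: when the password mount option is parsed, any previously allocated copy is now freed before the pointer is overwritten or cleared. A small sketch of the free-before-replace idiom, with illustrative names:

#define _POSIX_C_SOURCE 200809L
#include <stdlib.h>
#include <string.h>

struct vol { char *password; };

/*
 * Free any previous allocation before installing a new one, so a
 * repeated "pass=" option (or a later path that clears the field)
 * does not leak the earlier copy.
 */
static int set_password(struct vol *vol, const char *value)
{
    char *copy = strdup(value);

    if (!copy)
        return -1;
    free(vol->password);
    vol->password = copy;
    return 0;
}

int main(void)
{
    struct vol v = { 0 };

    set_password(&v, "first");
    set_password(&v, "second");    /* old copy freed, not leaked */
    free(v.password);
    return 0;
}
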
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 3db0c5fd9a11..6cbd9c688cfe 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -497,6 +497,14 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry, | |||
497 | goto out; | 497 | goto out; |
498 | } | 498 | } |
499 | 499 | ||
500 | if (file->f_flags & O_DIRECT && | ||
501 | CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { | ||
502 | if (CIFS_SB(inode->i_sb)->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | ||
503 | file->f_op = &cifs_file_direct_nobrl_ops; | ||
504 | else | ||
505 | file->f_op = &cifs_file_direct_ops; | ||
506 | } | ||
507 | |||
500 | file_info = cifs_new_fileinfo(&fid, file, tlink, oplock); | 508 | file_info = cifs_new_fileinfo(&fid, file, tlink, oplock); |
501 | if (file_info == NULL) { | 509 | if (file_info == NULL) { |
502 | if (server->ops->close) | 510 | if (server->ops->close) |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d5fec92e0360..7c018a1c52f7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -467,6 +467,14 @@ int cifs_open(struct inode *inode, struct file *file) | |||
467 | cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n", | 467 | cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n", |
468 | inode, file->f_flags, full_path); | 468 | inode, file->f_flags, full_path); |
469 | 469 | ||
470 | if (file->f_flags & O_DIRECT && | ||
471 | cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) { | ||
472 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL) | ||
473 | file->f_op = &cifs_file_direct_nobrl_ops; | ||
474 | else | ||
475 | file->f_op = &cifs_file_direct_ops; | ||
476 | } | ||
477 | |||
470 | if (server->oplocks) | 478 | if (server->oplocks) |
471 | oplock = REQ_OPLOCK; | 479 | oplock = REQ_OPLOCK; |
472 | else | 480 | else |
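
The fs/cifs/dir.c and fs/cifs/file.c hunks above add the same open-time check: on a strict-cache mount, a file opened with O_DIRECT is switched to the uncached file operations. A minimal illustration of picking an operations table from the open flags; the structures and tables below are stand-ins, not the cifs ones, and the no-brlock variant is elided:

#define _GNU_SOURCE    /* for O_DIRECT on glibc */
#include <stdio.h>
#include <fcntl.h>

/* Stand-ins for the cached and uncached operation tables. */
struct file_ops { const char *name; };

static const struct file_ops cached_ops = { "cached" };
static const struct file_ops direct_ops = { "direct" };

/*
 * Same shape as the two cifs hunks: only a strict-cache mount honours
 * O_DIRECT by swapping in the uncached operations.
 */
static const struct file_ops *pick_ops(int open_flags, int strict_io_mount)
{
    if ((open_flags & O_DIRECT) && strict_io_mount)
        return &direct_ops;
    return &cached_ops;
}

int main(void)
{
    printf("%s\n", pick_ops(O_DIRECT, 1)->name);    /* direct */
    printf("%s\n", pick_ops(O_DIRECT, 0)->name);    /* cached */
    return 0;
}
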
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 949ec909ec9a..7899a40465b3 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -1720,7 +1720,10 @@ cifs_rename2(struct inode *source_dir, struct dentry *source_dentry, | |||
1720 | unlink_target: | 1720 | unlink_target: |
1721 | /* Try unlinking the target dentry if it's not negative */ | 1721 | /* Try unlinking the target dentry if it's not negative */ |
1722 | if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) { | 1722 | if (target_dentry->d_inode && (rc == -EACCES || rc == -EEXIST)) { |
1723 | tmprc = cifs_unlink(target_dir, target_dentry); | 1723 | if (d_is_dir(target_dentry)) |
1724 | tmprc = cifs_rmdir(target_dir, target_dentry); | ||
1725 | else | ||
1726 | tmprc = cifs_unlink(target_dir, target_dentry); | ||
1724 | if (tmprc) | 1727 | if (tmprc) |
1725 | goto cifs_rename_exit; | 1728 | goto cifs_rename_exit; |
1726 | rc = cifs_do_rename(xid, source_dentry, from_name, | 1729 | rc = cifs_do_rename(xid, source_dentry, from_name, |
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index 798c80a41c88..b334a89d6a66 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -596,8 +596,8 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos, | |||
596 | if (server->ops->dir_needs_close(cfile)) { | 596 | if (server->ops->dir_needs_close(cfile)) { |
597 | cfile->invalidHandle = true; | 597 | cfile->invalidHandle = true; |
598 | spin_unlock(&cifs_file_list_lock); | 598 | spin_unlock(&cifs_file_list_lock); |
599 | if (server->ops->close) | 599 | if (server->ops->close_dir) |
600 | server->ops->close(xid, tcon, &cfile->fid); | 600 | server->ops->close_dir(xid, tcon, &cfile->fid); |
601 | } else | 601 | } else |
602 | spin_unlock(&cifs_file_list_lock); | 602 | spin_unlock(&cifs_file_list_lock); |
603 | if (cfile->srch_inf.ntwrk_buf_start) { | 603 | if (cfile->srch_inf.ntwrk_buf_start) { |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 39ee32688eac..3a5e83317683 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -243,10 +243,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft, | |||
243 | kfree(ses->serverOS); | 243 | kfree(ses->serverOS); |
244 | 244 | ||
245 | ses->serverOS = kzalloc(len + 1, GFP_KERNEL); | 245 | ses->serverOS = kzalloc(len + 1, GFP_KERNEL); |
246 | if (ses->serverOS) | 246 | if (ses->serverOS) { |
247 | strncpy(ses->serverOS, bcc_ptr, len); | 247 | strncpy(ses->serverOS, bcc_ptr, len); |
248 | if (strncmp(ses->serverOS, "OS/2", 4) == 0) | 248 | if (strncmp(ses->serverOS, "OS/2", 4) == 0) |
249 | cifs_dbg(FYI, "OS/2 server\n"); | 249 | cifs_dbg(FYI, "OS/2 server\n"); |
250 | } | ||
250 | 251 | ||
251 | bcc_ptr += len + 1; | 252 | bcc_ptr += len + 1; |
252 | bleft -= len + 1; | 253 | bleft -= len + 1; |
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c index 3f17b4550831..45992944e238 100644 --- a/fs/cifs/smb2file.c +++ b/fs/cifs/smb2file.c | |||
@@ -50,7 +50,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, | |||
50 | goto out; | 50 | goto out; |
51 | } | 51 | } |
52 | 52 | ||
53 | smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2, | 53 | smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2, |
54 | GFP_KERNEL); | 54 | GFP_KERNEL); |
55 | if (smb2_data == NULL) { | 55 | if (smb2_data == NULL) { |
56 | rc = -ENOMEM; | 56 | rc = -ENOMEM; |
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 0150182a4494..899bbc86f73e 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c | |||
@@ -131,7 +131,7 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
131 | *adjust_tz = false; | 131 | *adjust_tz = false; |
132 | *symlink = false; | 132 | *symlink = false; |
133 | 133 | ||
134 | smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2, | 134 | smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2, |
135 | GFP_KERNEL); | 135 | GFP_KERNEL); |
136 | if (smb2_data == NULL) | 136 | if (smb2_data == NULL) |
137 | return -ENOMEM; | 137 | return -ENOMEM; |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 5a48aa290dfe..f522193b7184 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -389,7 +389,7 @@ smb2_query_file_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
389 | int rc; | 389 | int rc; |
390 | struct smb2_file_all_info *smb2_data; | 390 | struct smb2_file_all_info *smb2_data; |
391 | 391 | ||
392 | smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + MAX_NAME * 2, | 392 | smb2_data = kzalloc(sizeof(struct smb2_file_all_info) + PATH_MAX * 2, |
393 | GFP_KERNEL); | 393 | GFP_KERNEL); |
394 | if (smb2_data == NULL) | 394 | if (smb2_data == NULL) |
395 | return -ENOMEM; | 395 | return -ENOMEM; |
@@ -1035,7 +1035,7 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon, | |||
1035 | if (keep_size == false) | 1035 | if (keep_size == false) |
1036 | return -EOPNOTSUPP; | 1036 | return -EOPNOTSUPP; |
1037 | 1037 | ||
1038 | /* | 1038 | /* |
1039 | * Must check if file sparse since fallocate -z (zero range) assumes | 1039 | * Must check if file sparse since fallocate -z (zero range) assumes |
1040 | * non-sparse allocation | 1040 | * non-sparse allocation |
1041 | */ | 1041 | */ |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index fa0dd044213b..74b3a6684383 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -530,7 +530,7 @@ SMB2_sess_setup(const unsigned int xid, struct cifs_ses *ses, | |||
530 | struct smb2_sess_setup_rsp *rsp = NULL; | 530 | struct smb2_sess_setup_rsp *rsp = NULL; |
531 | struct kvec iov[2]; | 531 | struct kvec iov[2]; |
532 | int rc = 0; | 532 | int rc = 0; |
533 | int resp_buftype; | 533 | int resp_buftype = CIFS_NO_BUFFER; |
534 | __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ | 534 | __le32 phase = NtLmNegotiate; /* NTLMSSP, if needed, is multistage */ |
535 | struct TCP_Server_Info *server = ses->server; | 535 | struct TCP_Server_Info *server = ses->server; |
536 | u16 blob_length = 0; | 536 | u16 blob_length = 0; |
@@ -1403,8 +1403,7 @@ SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, | |||
1403 | rsp = (struct smb2_close_rsp *)iov[0].iov_base; | 1403 | rsp = (struct smb2_close_rsp *)iov[0].iov_base; |
1404 | 1404 | ||
1405 | if (rc != 0) { | 1405 | if (rc != 0) { |
1406 | if (tcon) | 1406 | cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); |
1407 | cifs_stats_fail_inc(tcon, SMB2_CLOSE_HE); | ||
1408 | goto close_exit; | 1407 | goto close_exit; |
1409 | } | 1408 | } |
1410 | 1409 | ||
@@ -1533,7 +1532,7 @@ SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
1533 | { | 1532 | { |
1534 | return query_info(xid, tcon, persistent_fid, volatile_fid, | 1533 | return query_info(xid, tcon, persistent_fid, volatile_fid, |
1535 | FILE_ALL_INFORMATION, | 1534 | FILE_ALL_INFORMATION, |
1536 | sizeof(struct smb2_file_all_info) + MAX_NAME * 2, | 1535 | sizeof(struct smb2_file_all_info) + PATH_MAX * 2, |
1537 | sizeof(struct smb2_file_all_info), data); | 1536 | sizeof(struct smb2_file_all_info), data); |
1538 | } | 1537 | } |
1539 | 1538 | ||
diff --git a/fs/dcache.c b/fs/dcache.c index d30ce699ae4b..7a5b51440afa 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -106,8 +106,7 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent, | |||
106 | unsigned int hash) | 106 | unsigned int hash) |
107 | { | 107 | { |
108 | hash += (unsigned long) parent / L1_CACHE_BYTES; | 108 | hash += (unsigned long) parent / L1_CACHE_BYTES; |
109 | hash = hash + (hash >> d_hash_shift); | 109 | return dentry_hashtable + hash_32(hash, d_hash_shift); |
110 | return dentry_hashtable + (hash & d_hash_mask); | ||
111 | } | 110 | } |
112 | 111 | ||
113 | /* Statistics gathering. */ | 112 | /* Statistics gathering. */ |
@@ -2656,6 +2655,12 @@ static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon) | |||
2656 | dentry->d_parent = dentry; | 2655 | dentry->d_parent = dentry; |
2657 | list_del_init(&dentry->d_u.d_child); | 2656 | list_del_init(&dentry->d_u.d_child); |
2658 | anon->d_parent = dparent; | 2657 | anon->d_parent = dparent; |
2658 | if (likely(!d_unhashed(anon))) { | ||
2659 | hlist_bl_lock(&anon->d_sb->s_anon); | ||
2660 | __hlist_bl_del(&anon->d_hash); | ||
2661 | anon->d_hash.pprev = NULL; | ||
2662 | hlist_bl_unlock(&anon->d_sb->s_anon); | ||
2663 | } | ||
2659 | list_move(&anon->d_u.d_child, &dparent->d_subdirs); | 2664 | list_move(&anon->d_u.d_child, &dparent->d_subdirs); |
2660 | 2665 | ||
2661 | write_seqcount_end(&dentry->d_seq); | 2666 | write_seqcount_end(&dentry->d_seq); |
@@ -2714,7 +2719,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) | |||
2714 | write_seqlock(&rename_lock); | 2719 | write_seqlock(&rename_lock); |
2715 | __d_materialise_dentry(dentry, new); | 2720 | __d_materialise_dentry(dentry, new); |
2716 | write_sequnlock(&rename_lock); | 2721 | write_sequnlock(&rename_lock); |
2717 | __d_drop(new); | ||
2718 | _d_rehash(new); | 2722 | _d_rehash(new); |
2719 | spin_unlock(&new->d_lock); | 2723 | spin_unlock(&new->d_lock); |
2720 | spin_unlock(&inode->i_lock); | 2724 | spin_unlock(&inode->i_lock); |
@@ -2778,7 +2782,6 @@ struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode) | |||
2778 | * could splice into our tree? */ | 2782 | * could splice into our tree? */ |
2779 | __d_materialise_dentry(dentry, alias); | 2783 | __d_materialise_dentry(dentry, alias); |
2780 | write_sequnlock(&rename_lock); | 2784 | write_sequnlock(&rename_lock); |
2781 | __d_drop(alias); | ||
2782 | goto found; | 2785 | goto found; |
2783 | } else { | 2786 | } else { |
2784 | /* Nope, but we must(!) avoid directory | 2787 | /* Nope, but we must(!) avoid directory |
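
The d_hash() change above replaces a hand-rolled fold-and-mask with hash_32(), which multiplies by a large odd constant and keeps the top d_hash_shift bits, spreading nearby keys across the hash buckets. A rough sketch of that multiplicative-hash idea; the constant below is illustrative rather than the kernel's:

#include <stdint.h>
#include <stdio.h>

/*
 * Multiplicative hashing in the style of hash_32(): multiply by a large
 * odd constant and keep the top `bits` bits so every input bit influences
 * the bucket index.
 */
static inline uint32_t hash32_sketch(uint32_t val, unsigned int bits)
{
    return (uint32_t)(val * 0x9e370001u) >> (32 - bits);
}

int main(void)
{
    unsigned int shift = 10;    /* 1024 buckets */

    printf("%u\n", hash32_sketch(12345u, shift));
    printf("%u\n", hash32_sketch(12346u, shift));    /* nearby keys spread out */
    return 0;
}
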
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index b10b48c2a7af..7bcfff900f05 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -1852,7 +1852,8 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
1852 | goto error_tgt_fput; | 1852 | goto error_tgt_fput; |
1853 | 1853 | ||
1854 | /* Check if EPOLLWAKEUP is allowed */ | 1854 | /* Check if EPOLLWAKEUP is allowed */ |
1855 | ep_take_care_of_epollwakeup(&epds); | 1855 | if (ep_op_has_event(op)) |
1856 | ep_take_care_of_epollwakeup(&epds); | ||
1856 | 1857 | ||
1857 | /* | 1858 | /* |
1858 | * We have to check that the file structure underneath the file descriptor | 1859 | * We have to check that the file structure underneath the file descriptor |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 90a3cdca3f88..603e4ebbd0ac 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -3240,6 +3240,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
3240 | &new.de, &new.inlined); | 3240 | &new.de, &new.inlined); |
3241 | if (IS_ERR(new.bh)) { | 3241 | if (IS_ERR(new.bh)) { |
3242 | retval = PTR_ERR(new.bh); | 3242 | retval = PTR_ERR(new.bh); |
3243 | new.bh = NULL; | ||
3243 | goto end_rename; | 3244 | goto end_rename; |
3244 | } | 3245 | } |
3245 | if (new.bh) { | 3246 | if (new.bh) { |
@@ -3386,6 +3387,7 @@ static int ext4_cross_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
3386 | &new.de, &new.inlined); | 3387 | &new.de, &new.inlined); |
3387 | if (IS_ERR(new.bh)) { | 3388 | if (IS_ERR(new.bh)) { |
3388 | retval = PTR_ERR(new.bh); | 3389 | retval = PTR_ERR(new.bh); |
3390 | new.bh = NULL; | ||
3389 | goto end_rename; | 3391 | goto end_rename; |
3390 | } | 3392 | } |
3391 | 3393 | ||
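
The fs/ext4/namei.c hunks above (and the matching ones in resize.c just below) apply the same one-line fix: after PTR_ERR() is extracted from a failed lookup, the pointer is cleared so the shared cleanup label never operates on an error-encoded value. A simplified, self-contained sketch of the idiom; the ERR_PTR/IS_ERR macros here are stand-ins, not the kernel's definitions:

#include <stdio.h>

/* Simplified stand-ins for the kernel's ERR_PTR/IS_ERR helpers. */
#define MAX_ERRNO    4095
#define ERR_PTR(err) ((void *)(long)(err))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr) ((long)(ptr))

static void release(void *bh)
{
    if (bh)    /* must never be handed an error-encoded pointer */
        printf("releasing %p\n", bh);
}

static long lookup_and_use(void *maybe_err)
{
    void *bh = maybe_err;
    long retval = 0;

    if (IS_ERR(bh)) {
        retval = PTR_ERR(bh);
        bh = NULL;    /* the one-line fix: keep the cleanup path safe */
        goto end;
    }
    /* ... use bh ... */
end:
    release(bh);
    return retval;
}

int main(void)
{
    int dummy;

    lookup_and_use(ERR_PTR(-2));    /* error path: release() is skipped */
    lookup_and_use(&dummy);         /* normal path: release() runs */
    return 0;
}
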
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index bb0e80f03e2e..1e43b905ff98 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c | |||
@@ -575,6 +575,7 @@ handle_bb: | |||
575 | bh = bclean(handle, sb, block); | 575 | bh = bclean(handle, sb, block); |
576 | if (IS_ERR(bh)) { | 576 | if (IS_ERR(bh)) { |
577 | err = PTR_ERR(bh); | 577 | err = PTR_ERR(bh); |
578 | bh = NULL; | ||
578 | goto out; | 579 | goto out; |
579 | } | 580 | } |
580 | overhead = ext4_group_overhead_blocks(sb, group); | 581 | overhead = ext4_group_overhead_blocks(sb, group); |
@@ -603,6 +604,7 @@ handle_ib: | |||
603 | bh = bclean(handle, sb, block); | 604 | bh = bclean(handle, sb, block); |
604 | if (IS_ERR(bh)) { | 605 | if (IS_ERR(bh)) { |
605 | err = PTR_ERR(bh); | 606 | err = PTR_ERR(bh); |
607 | bh = NULL; | ||
606 | goto out; | 608 | goto out; |
607 | } | 609 | } |
608 | 610 | ||
diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig index 214fe1054fce..736a348509f7 100644 --- a/fs/f2fs/Kconfig +++ b/fs/f2fs/Kconfig | |||
@@ -23,7 +23,7 @@ config F2FS_STAT_FS | |||
23 | mounted as f2fs. Each file shows the whole f2fs information. | 23 | mounted as f2fs. Each file shows the whole f2fs information. |
24 | 24 | ||
25 | /sys/kernel/debug/f2fs/status includes: | 25 | /sys/kernel/debug/f2fs/status includes: |
26 | - major file system information managed by f2fs currently | 26 | - major filesystem information managed by f2fs currently |
27 | - average SIT information about whole segments | 27 | - average SIT information about whole segments |
28 | - current memory footprint consumed by f2fs. | 28 | - current memory footprint consumed by f2fs. |
29 | 29 | ||
@@ -68,6 +68,6 @@ config F2FS_CHECK_FS | |||
68 | bool "F2FS consistency checking feature" | 68 | bool "F2FS consistency checking feature" |
69 | depends on F2FS_FS | 69 | depends on F2FS_FS |
70 | help | 70 | help |
71 | Enables BUG_ONs which check the file system consistency in runtime. | 71 | Enables BUG_ONs which check the filesystem consistency in runtime. |
72 | 72 | ||
73 | If you want to improve the performance, say N. | 73 | If you want to improve the performance, say N. |
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 6aeed5bada52..ec3b7a5381fa 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c | |||
@@ -160,14 +160,11 @@ static int f2fs_write_meta_page(struct page *page, | |||
160 | goto redirty_out; | 160 | goto redirty_out; |
161 | if (wbc->for_reclaim) | 161 | if (wbc->for_reclaim) |
162 | goto redirty_out; | 162 | goto redirty_out; |
163 | 163 | if (unlikely(f2fs_cp_error(sbi))) | |
164 | /* Should not write any meta pages, if any IO error was occurred */ | 164 | goto redirty_out; |
165 | if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG))) | ||
166 | goto no_write; | ||
167 | 165 | ||
168 | f2fs_wait_on_page_writeback(page, META); | 166 | f2fs_wait_on_page_writeback(page, META); |
169 | write_meta_page(sbi, page); | 167 | write_meta_page(sbi, page); |
170 | no_write: | ||
171 | dec_page_count(sbi, F2FS_DIRTY_META); | 168 | dec_page_count(sbi, F2FS_DIRTY_META); |
172 | unlock_page(page); | 169 | unlock_page(page); |
173 | return 0; | 170 | return 0; |
@@ -348,7 +345,7 @@ bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode) | |||
348 | return e ? true : false; | 345 | return e ? true : false; |
349 | } | 346 | } |
350 | 347 | ||
351 | static void release_dirty_inode(struct f2fs_sb_info *sbi) | 348 | void release_dirty_inode(struct f2fs_sb_info *sbi) |
352 | { | 349 | { |
353 | struct ino_entry *e, *tmp; | 350 | struct ino_entry *e, *tmp; |
354 | int i; | 351 | int i; |
@@ -446,8 +443,8 @@ static void write_orphan_inodes(struct f2fs_sb_info *sbi, block_t start_blk) | |||
446 | struct f2fs_orphan_block *orphan_blk = NULL; | 443 | struct f2fs_orphan_block *orphan_blk = NULL; |
447 | unsigned int nentries = 0; | 444 | unsigned int nentries = 0; |
448 | unsigned short index; | 445 | unsigned short index; |
449 | unsigned short orphan_blocks = (unsigned short)((sbi->n_orphans + | 446 | unsigned short orphan_blocks = |
450 | (F2FS_ORPHANS_PER_BLOCK - 1)) / F2FS_ORPHANS_PER_BLOCK); | 447 | (unsigned short)GET_ORPHAN_BLOCKS(sbi->n_orphans); |
451 | struct page *page = NULL; | 448 | struct page *page = NULL; |
452 | struct ino_entry *orphan = NULL; | 449 | struct ino_entry *orphan = NULL; |
453 | 450 | ||
@@ -737,7 +734,7 @@ retry: | |||
737 | /* | 734 | /* |
738 | * Freeze all the FS-operations for checkpoint. | 735 | * Freeze all the FS-operations for checkpoint. |
739 | */ | 736 | */ |
740 | static void block_operations(struct f2fs_sb_info *sbi) | 737 | static int block_operations(struct f2fs_sb_info *sbi) |
741 | { | 738 | { |
742 | struct writeback_control wbc = { | 739 | struct writeback_control wbc = { |
743 | .sync_mode = WB_SYNC_ALL, | 740 | .sync_mode = WB_SYNC_ALL, |
@@ -745,6 +742,7 @@ static void block_operations(struct f2fs_sb_info *sbi) | |||
745 | .for_reclaim = 0, | 742 | .for_reclaim = 0, |
746 | }; | 743 | }; |
747 | struct blk_plug plug; | 744 | struct blk_plug plug; |
745 | int err = 0; | ||
748 | 746 | ||
749 | blk_start_plug(&plug); | 747 | blk_start_plug(&plug); |
750 | 748 | ||
@@ -754,11 +752,15 @@ retry_flush_dents: | |||
754 | if (get_pages(sbi, F2FS_DIRTY_DENTS)) { | 752 | if (get_pages(sbi, F2FS_DIRTY_DENTS)) { |
755 | f2fs_unlock_all(sbi); | 753 | f2fs_unlock_all(sbi); |
756 | sync_dirty_dir_inodes(sbi); | 754 | sync_dirty_dir_inodes(sbi); |
755 | if (unlikely(f2fs_cp_error(sbi))) { | ||
756 | err = -EIO; | ||
757 | goto out; | ||
758 | } | ||
757 | goto retry_flush_dents; | 759 | goto retry_flush_dents; |
758 | } | 760 | } |
759 | 761 | ||
760 | /* | 762 | /* |
761 | * POR: we should ensure that there is no dirty node pages | 763 | * POR: we should ensure that there are no dirty node pages |
762 | * until finishing nat/sit flush. | 764 | * until finishing nat/sit flush. |
763 | */ | 765 | */ |
764 | retry_flush_nodes: | 766 | retry_flush_nodes: |
@@ -767,9 +769,16 @@ retry_flush_nodes: | |||
767 | if (get_pages(sbi, F2FS_DIRTY_NODES)) { | 769 | if (get_pages(sbi, F2FS_DIRTY_NODES)) { |
768 | up_write(&sbi->node_write); | 770 | up_write(&sbi->node_write); |
769 | sync_node_pages(sbi, 0, &wbc); | 771 | sync_node_pages(sbi, 0, &wbc); |
772 | if (unlikely(f2fs_cp_error(sbi))) { | ||
773 | f2fs_unlock_all(sbi); | ||
774 | err = -EIO; | ||
775 | goto out; | ||
776 | } | ||
770 | goto retry_flush_nodes; | 777 | goto retry_flush_nodes; |
771 | } | 778 | } |
779 | out: | ||
772 | blk_finish_plug(&plug); | 780 | blk_finish_plug(&plug); |
781 | return err; | ||
773 | } | 782 | } |
774 | 783 | ||
775 | static void unblock_operations(struct f2fs_sb_info *sbi) | 784 | static void unblock_operations(struct f2fs_sb_info *sbi) |
@@ -813,8 +822,11 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) | |||
813 | discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg)); | 822 | discard_next_dnode(sbi, NEXT_FREE_BLKADDR(sbi, curseg)); |
814 | 823 | ||
815 | /* Flush all the NAT/SIT pages */ | 824 | /* Flush all the NAT/SIT pages */ |
816 | while (get_pages(sbi, F2FS_DIRTY_META)) | 825 | while (get_pages(sbi, F2FS_DIRTY_META)) { |
817 | sync_meta_pages(sbi, META, LONG_MAX); | 826 | sync_meta_pages(sbi, META, LONG_MAX); |
827 | if (unlikely(f2fs_cp_error(sbi))) | ||
828 | return; | ||
829 | } | ||
818 | 830 | ||
819 | next_free_nid(sbi, &last_nid); | 831 | next_free_nid(sbi, &last_nid); |
820 | 832 | ||
@@ -825,7 +837,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) | |||
825 | ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi)); | 837 | ckpt->elapsed_time = cpu_to_le64(get_mtime(sbi)); |
826 | ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); | 838 | ckpt->valid_block_count = cpu_to_le64(valid_user_blocks(sbi)); |
827 | ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); | 839 | ckpt->free_segment_count = cpu_to_le32(free_segments(sbi)); |
828 | for (i = 0; i < 3; i++) { | 840 | for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) { |
829 | ckpt->cur_node_segno[i] = | 841 | ckpt->cur_node_segno[i] = |
830 | cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE)); | 842 | cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_NODE)); |
831 | ckpt->cur_node_blkoff[i] = | 843 | ckpt->cur_node_blkoff[i] = |
@@ -833,7 +845,7 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) | |||
833 | ckpt->alloc_type[i + CURSEG_HOT_NODE] = | 845 | ckpt->alloc_type[i + CURSEG_HOT_NODE] = |
834 | curseg_alloc_type(sbi, i + CURSEG_HOT_NODE); | 846 | curseg_alloc_type(sbi, i + CURSEG_HOT_NODE); |
835 | } | 847 | } |
836 | for (i = 0; i < 3; i++) { | 848 | for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) { |
837 | ckpt->cur_data_segno[i] = | 849 | ckpt->cur_data_segno[i] = |
838 | cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA)); | 850 | cpu_to_le32(curseg_segno(sbi, i + CURSEG_HOT_DATA)); |
839 | ckpt->cur_data_blkoff[i] = | 851 | ckpt->cur_data_blkoff[i] = |
@@ -848,24 +860,23 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) | |||
848 | 860 | ||
849 | /* 2 cp + n data seg summary + orphan inode blocks */ | 861 | /* 2 cp + n data seg summary + orphan inode blocks */ |
850 | data_sum_blocks = npages_for_summary_flush(sbi); | 862 | data_sum_blocks = npages_for_summary_flush(sbi); |
851 | if (data_sum_blocks < 3) | 863 | if (data_sum_blocks < NR_CURSEG_DATA_TYPE) |
852 | set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); | 864 | set_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); |
853 | else | 865 | else |
854 | clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); | 866 | clear_ckpt_flags(ckpt, CP_COMPACT_SUM_FLAG); |
855 | 867 | ||
856 | orphan_blocks = (sbi->n_orphans + F2FS_ORPHANS_PER_BLOCK - 1) | 868 | orphan_blocks = GET_ORPHAN_BLOCKS(sbi->n_orphans); |
857 | / F2FS_ORPHANS_PER_BLOCK; | ||
858 | ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks + | 869 | ckpt->cp_pack_start_sum = cpu_to_le32(1 + cp_payload_blks + |
859 | orphan_blocks); | 870 | orphan_blocks); |
860 | 871 | ||
861 | if (is_umount) { | 872 | if (is_umount) { |
862 | set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); | 873 | set_ckpt_flags(ckpt, CP_UMOUNT_FLAG); |
863 | ckpt->cp_pack_total_block_count = cpu_to_le32(2 + | 874 | ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS+ |
864 | cp_payload_blks + data_sum_blocks + | 875 | cp_payload_blks + data_sum_blocks + |
865 | orphan_blocks + NR_CURSEG_NODE_TYPE); | 876 | orphan_blocks + NR_CURSEG_NODE_TYPE); |
866 | } else { | 877 | } else { |
867 | clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG); | 878 | clear_ckpt_flags(ckpt, CP_UMOUNT_FLAG); |
868 | ckpt->cp_pack_total_block_count = cpu_to_le32(2 + | 879 | ckpt->cp_pack_total_block_count = cpu_to_le32(F2FS_CP_PACKS + |
869 | cp_payload_blks + data_sum_blocks + | 880 | cp_payload_blks + data_sum_blocks + |
870 | orphan_blocks); | 881 | orphan_blocks); |
871 | } | 882 | } |
@@ -924,6 +935,9 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) | |||
924 | /* wait for previous submitted node/meta pages writeback */ | 935 | /* wait for previous submitted node/meta pages writeback */ |
925 | wait_on_all_pages_writeback(sbi); | 936 | wait_on_all_pages_writeback(sbi); |
926 | 937 | ||
938 | if (unlikely(f2fs_cp_error(sbi))) | ||
939 | return; | ||
940 | |||
927 | filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX); | 941 | filemap_fdatawait_range(NODE_MAPPING(sbi), 0, LONG_MAX); |
928 | filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX); | 942 | filemap_fdatawait_range(META_MAPPING(sbi), 0, LONG_MAX); |
929 | 943 | ||
@@ -934,15 +948,17 @@ static void do_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) | |||
934 | /* Here, we only have one bio having CP pack */ | 948 | /* Here, we only have one bio having CP pack */ |
935 | sync_meta_pages(sbi, META_FLUSH, LONG_MAX); | 949 | sync_meta_pages(sbi, META_FLUSH, LONG_MAX); |
936 | 950 | ||
937 | if (!is_set_ckpt_flags(ckpt, CP_ERROR_FLAG)) { | 951 | release_dirty_inode(sbi); |
938 | clear_prefree_segments(sbi); | 952 | |
939 | release_dirty_inode(sbi); | 953 | if (unlikely(f2fs_cp_error(sbi))) |
940 | F2FS_RESET_SB_DIRT(sbi); | 954 | return; |
941 | } | 955 | |
956 | clear_prefree_segments(sbi); | ||
957 | F2FS_RESET_SB_DIRT(sbi); | ||
942 | } | 958 | } |
943 | 959 | ||
944 | /* | 960 | /* |
945 | * We guarantee that this checkpoint procedure should not fail. | 961 | * We guarantee that this checkpoint procedure will not fail. |
946 | */ | 962 | */ |
947 | void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) | 963 | void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) |
948 | { | 964 | { |
@@ -952,7 +968,13 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) | |||
952 | trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops"); | 968 | trace_f2fs_write_checkpoint(sbi->sb, is_umount, "start block_ops"); |
953 | 969 | ||
954 | mutex_lock(&sbi->cp_mutex); | 970 | mutex_lock(&sbi->cp_mutex); |
955 | block_operations(sbi); | 971 | |
972 | if (!sbi->s_dirty) | ||
973 | goto out; | ||
974 | if (unlikely(f2fs_cp_error(sbi))) | ||
975 | goto out; | ||
976 | if (block_operations(sbi)) | ||
977 | goto out; | ||
956 | 978 | ||
957 | trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops"); | 979 | trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish block_ops"); |
958 | 980 | ||
@@ -976,9 +998,9 @@ void write_checkpoint(struct f2fs_sb_info *sbi, bool is_umount) | |||
976 | do_checkpoint(sbi, is_umount); | 998 | do_checkpoint(sbi, is_umount); |
977 | 999 | ||
978 | unblock_operations(sbi); | 1000 | unblock_operations(sbi); |
979 | mutex_unlock(&sbi->cp_mutex); | ||
980 | |||
981 | stat_inc_cp_count(sbi->stat_info); | 1001 | stat_inc_cp_count(sbi->stat_info); |
1002 | out: | ||
1003 | mutex_unlock(&sbi->cp_mutex); | ||
982 | trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint"); | 1004 | trace_f2fs_write_checkpoint(sbi->sb, is_umount, "finish checkpoint"); |
983 | } | 1005 | } |
984 | 1006 | ||
@@ -999,8 +1021,8 @@ void init_ino_entry_info(struct f2fs_sb_info *sbi) | |||
999 | * for cp pack we can have max 1020*504 orphan entries | 1021 | * for cp pack we can have max 1020*504 orphan entries |
1000 | */ | 1022 | */ |
1001 | sbi->n_orphans = 0; | 1023 | sbi->n_orphans = 0; |
1002 | sbi->max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE) | 1024 | sbi->max_orphans = (sbi->blocks_per_seg - F2FS_CP_PACKS - |
1003 | * F2FS_ORPHANS_PER_BLOCK; | 1025 | NR_CURSEG_TYPE) * F2FS_ORPHANS_PER_BLOCK; |
1004 | } | 1026 | } |
1005 | 1027 | ||
1006 | int __init create_checkpoint_caches(void) | 1028 | int __init create_checkpoint_caches(void) |
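
The fs/f2fs/checkpoint.c changes above stop the checkpoint path from spinning forever once the checkpoint error flag is set: block_operations() now returns an error, the meta-flush loop re-checks the flag on every pass, and write_checkpoint() returns early when nothing is dirty or an error is already pending. A condensed sketch of that retry-loop-with-escape-hatch shape; the struct and helpers are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Invented state: an error flag plus two dirty-page counters. */
struct fs {
    bool cp_error;    /* set once an unrecoverable I/O error hits */
    int dirty_dents;
    int dirty_nodes;
};

static void flush_dents(struct fs *fs) { fs->dirty_dents = 0; }
static void flush_nodes(struct fs *fs) { fs->dirty_nodes = 0; }

/*
 * Same shape as the reworked block_operations(): keep retrying the
 * flush loops, but return -EIO as soon as the error flag is observed so
 * a dead device can no longer wedge the checkpoint path forever.
 */
static int block_operations_sketch(struct fs *fs)
{
retry_dents:
    if (fs->dirty_dents) {
        flush_dents(fs);
        if (fs->cp_error)
            return -5;    /* -EIO */
        goto retry_dents;
    }
retry_nodes:
    if (fs->dirty_nodes) {
        flush_nodes(fs);
        if (fs->cp_error)
            return -5;
        goto retry_nodes;
    }
    return 0;
}

int main(void)
{
    struct fs fs = { false, 1, 1 };

    printf("%d\n", block_operations_sketch(&fs));    /* 0: flushed cleanly */
    fs.cp_error = true;
    fs.dirty_nodes = 1;
    printf("%d\n", block_operations_sketch(&fs));    /* -5: bails out */
    return 0;
}
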
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 03313099c51c..76de83e25a89 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c | |||
@@ -53,7 +53,7 @@ static void f2fs_write_end_io(struct bio *bio, int err) | |||
53 | struct page *page = bvec->bv_page; | 53 | struct page *page = bvec->bv_page; |
54 | 54 | ||
55 | if (unlikely(err)) { | 55 | if (unlikely(err)) { |
56 | SetPageError(page); | 56 | set_page_dirty(page); |
57 | set_bit(AS_EIO, &page->mapping->flags); | 57 | set_bit(AS_EIO, &page->mapping->flags); |
58 | f2fs_stop_checkpoint(sbi); | 58 | f2fs_stop_checkpoint(sbi); |
59 | } | 59 | } |
@@ -691,7 +691,7 @@ get_next: | |||
691 | allocated = true; | 691 | allocated = true; |
692 | blkaddr = dn.data_blkaddr; | 692 | blkaddr = dn.data_blkaddr; |
693 | } | 693 | } |
694 | /* Give more consecutive addresses for the read ahead */ | 694 | /* Give more consecutive addresses for the readahead */ |
695 | if (blkaddr == (bh_result->b_blocknr + ofs)) { | 695 | if (blkaddr == (bh_result->b_blocknr + ofs)) { |
696 | ofs++; | 696 | ofs++; |
697 | dn.ofs_in_node++; | 697 | dn.ofs_in_node++; |
@@ -739,7 +739,7 @@ static int f2fs_read_data_page(struct file *file, struct page *page) | |||
739 | 739 | ||
740 | trace_f2fs_readpage(page, DATA); | 740 | trace_f2fs_readpage(page, DATA); |
741 | 741 | ||
742 | /* If the file has inline data, try to read it directlly */ | 742 | /* If the file has inline data, try to read it directly */ |
743 | if (f2fs_has_inline_data(inode)) | 743 | if (f2fs_has_inline_data(inode)) |
744 | ret = f2fs_read_inline_data(inode, page); | 744 | ret = f2fs_read_inline_data(inode, page); |
745 | else | 745 | else |
@@ -836,10 +836,19 @@ write: | |||
836 | 836 | ||
837 | /* Dentry blocks are controlled by checkpoint */ | 837 | /* Dentry blocks are controlled by checkpoint */ |
838 | if (S_ISDIR(inode->i_mode)) { | 838 | if (S_ISDIR(inode->i_mode)) { |
839 | if (unlikely(f2fs_cp_error(sbi))) | ||
840 | goto redirty_out; | ||
839 | err = do_write_data_page(page, &fio); | 841 | err = do_write_data_page(page, &fio); |
840 | goto done; | 842 | goto done; |
841 | } | 843 | } |
842 | 844 | ||
845 | /* we should bypass data pages to let the kworker jobs proceed */ | ||
846 | if (unlikely(f2fs_cp_error(sbi))) { | ||
847 | SetPageError(page); | ||
848 | unlock_page(page); | ||
849 | return 0; | ||
850 | } | ||
851 | |||
843 | if (!wbc->for_reclaim) | 852 | if (!wbc->for_reclaim) |
844 | need_balance_fs = true; | 853 | need_balance_fs = true; |
845 | else if (has_not_enough_free_secs(sbi, 0)) | 854 | else if (has_not_enough_free_secs(sbi, 0)) |
@@ -927,7 +936,7 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to) | |||
927 | 936 | ||
928 | if (to > inode->i_size) { | 937 | if (to > inode->i_size) { |
929 | truncate_pagecache(inode, inode->i_size); | 938 | truncate_pagecache(inode, inode->i_size); |
930 | truncate_blocks(inode, inode->i_size); | 939 | truncate_blocks(inode, inode->i_size, true); |
931 | } | 940 | } |
932 | } | 941 | } |
933 | 942 | ||
@@ -946,7 +955,7 @@ static int f2fs_write_begin(struct file *file, struct address_space *mapping, | |||
946 | 955 | ||
947 | f2fs_balance_fs(sbi); | 956 | f2fs_balance_fs(sbi); |
948 | repeat: | 957 | repeat: |
949 | err = f2fs_convert_inline_data(inode, pos + len); | 958 | err = f2fs_convert_inline_data(inode, pos + len, NULL); |
950 | if (err) | 959 | if (err) |
951 | goto fail; | 960 | goto fail; |
952 | 961 | ||
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index a441ba33be11..fecebdbfd781 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c | |||
@@ -32,7 +32,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) | |||
32 | struct f2fs_stat_info *si = F2FS_STAT(sbi); | 32 | struct f2fs_stat_info *si = F2FS_STAT(sbi); |
33 | int i; | 33 | int i; |
34 | 34 | ||
35 | /* valid check of the segment numbers */ | 35 | /* validation check of the segment numbers */ |
36 | si->hit_ext = sbi->read_hit_ext; | 36 | si->hit_ext = sbi->read_hit_ext; |
37 | si->total_ext = sbi->total_hit_ext; | 37 | si->total_ext = sbi->total_hit_ext; |
38 | si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES); | 38 | si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES); |
@@ -152,7 +152,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi) | |||
152 | si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi)); | 152 | si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(TOTAL_SEGS(sbi)); |
153 | si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi)); | 153 | si->base_mem += f2fs_bitmap_size(TOTAL_SECS(sbi)); |
154 | 154 | ||
155 | /* buld nm */ | 155 | /* build nm */ |
156 | si->base_mem += sizeof(struct f2fs_nm_info); | 156 | si->base_mem += sizeof(struct f2fs_nm_info); |
157 | si->base_mem += __bitmap_size(sbi, NAT_BITMAP); | 157 | si->base_mem += __bitmap_size(sbi, NAT_BITMAP); |
158 | 158 | ||
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index bcf893c3d903..155fb056b7f1 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c | |||
@@ -124,7 +124,7 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page, | |||
124 | 124 | ||
125 | /* | 125 | /* |
126 | * For the most part, it should be a bug when name_len is zero. | 126 | * For the most part, it should be a bug when name_len is zero. |
127 | * We stop here for figuring out where the bugs are occurred. | 127 | * We stop here for figuring out where the bug has occurred. |
128 | */ | 128 | */ |
129 | f2fs_bug_on(!de->name_len); | 129 | f2fs_bug_on(!de->name_len); |
130 | 130 | ||
@@ -391,7 +391,7 @@ put_error: | |||
391 | error: | 391 | error: |
392 | /* once the failed inode becomes a bad inode, i_mode is S_IFREG */ | 392 | /* once the failed inode becomes a bad inode, i_mode is S_IFREG */ |
393 | truncate_inode_pages(&inode->i_data, 0); | 393 | truncate_inode_pages(&inode->i_data, 0); |
394 | truncate_blocks(inode, 0); | 394 | truncate_blocks(inode, 0, false); |
395 | remove_dirty_dir_inode(inode); | 395 | remove_dirty_dir_inode(inode); |
396 | remove_inode_page(inode); | 396 | remove_inode_page(inode); |
397 | return ERR_PTR(err); | 397 | return ERR_PTR(err); |
@@ -563,7 +563,7 @@ fail: | |||
563 | } | 563 | } |
564 | 564 | ||
565 | /* | 565 | /* |
566 | * It only removes the dentry from the dentry page,corresponding name | 566 | * It only removes the dentry from the dentry page, corresponding name |
567 | * entry in name page does not need to be touched during deletion. | 567 | * entry in name page does not need to be touched during deletion. |
568 | */ | 568 | */ |
569 | void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, | 569 | void f2fs_delete_entry(struct f2fs_dir_entry *dentry, struct page *page, |
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 4dab5338a97a..e921242186f6 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -24,7 +24,7 @@ | |||
24 | #define f2fs_bug_on(condition) BUG_ON(condition) | 24 | #define f2fs_bug_on(condition) BUG_ON(condition) |
25 | #define f2fs_down_write(x, y) down_write_nest_lock(x, y) | 25 | #define f2fs_down_write(x, y) down_write_nest_lock(x, y) |
26 | #else | 26 | #else |
27 | #define f2fs_bug_on(condition) | 27 | #define f2fs_bug_on(condition) WARN_ON(condition) |
28 | #define f2fs_down_write(x, y) down_write(x) | 28 | #define f2fs_down_write(x, y) down_write(x) |
29 | #endif | 29 | #endif |
30 | 30 | ||
@@ -395,7 +395,7 @@ enum count_type { | |||
395 | }; | 395 | }; |
396 | 396 | ||
397 | /* | 397 | /* |
398 | * The below are the page types of bios used in submti_bio(). | 398 | * The below are the page types of bios used in submit_bio(). |
399 | * The available types are: | 399 | * The available types are: |
400 | * DATA User data pages. It operates as async mode. | 400 | * DATA User data pages. It operates as async mode. |
401 | * NODE Node pages. It operates as async mode. | 401 | * NODE Node pages. It operates as async mode. |
@@ -470,7 +470,7 @@ struct f2fs_sb_info { | |||
470 | struct list_head dir_inode_list; /* dir inode list */ | 470 | struct list_head dir_inode_list; /* dir inode list */ |
471 | spinlock_t dir_inode_lock; /* for dir inode list lock */ | 471 | spinlock_t dir_inode_lock; /* for dir inode list lock */ |
472 | 472 | ||
473 | /* basic file system units */ | 473 | /* basic filesystem units */ |
474 | unsigned int log_sectors_per_block; /* log2 sectors per block */ | 474 | unsigned int log_sectors_per_block; /* log2 sectors per block */ |
475 | unsigned int log_blocksize; /* log2 block size */ | 475 | unsigned int log_blocksize; /* log2 block size */ |
476 | unsigned int blocksize; /* block size */ | 476 | unsigned int blocksize; /* block size */ |
@@ -799,7 +799,7 @@ static inline block_t __start_cp_addr(struct f2fs_sb_info *sbi) | |||
799 | 799 | ||
800 | /* | 800 | /* |
801 | * odd numbered checkpoint should at cp segment 0 | 801 | * odd numbered checkpoint should at cp segment 0 |
802 | * and even segent must be at cp segment 1 | 802 | * and even segment must be at cp segment 1 |
803 | */ | 803 | */ |
804 | if (!(ckpt_version & 1)) | 804 | if (!(ckpt_version & 1)) |
805 | start_addr += sbi->blocks_per_seg; | 805 | start_addr += sbi->blocks_per_seg; |
@@ -1096,6 +1096,11 @@ static inline int f2fs_readonly(struct super_block *sb) | |||
1096 | return sb->s_flags & MS_RDONLY; | 1096 | return sb->s_flags & MS_RDONLY; |
1097 | } | 1097 | } |
1098 | 1098 | ||
1099 | static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) | ||
1100 | { | ||
1101 | return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); | ||
1102 | } | ||
1103 | |||
1099 | static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi) | 1104 | static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi) |
1100 | { | 1105 | { |
1101 | set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); | 1106 | set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); |
@@ -1117,7 +1122,7 @@ static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi) | |||
1117 | */ | 1122 | */ |
1118 | int f2fs_sync_file(struct file *, loff_t, loff_t, int); | 1123 | int f2fs_sync_file(struct file *, loff_t, loff_t, int); |
1119 | void truncate_data_blocks(struct dnode_of_data *); | 1124 | void truncate_data_blocks(struct dnode_of_data *); |
1120 | int truncate_blocks(struct inode *, u64); | 1125 | int truncate_blocks(struct inode *, u64, bool); |
1121 | void f2fs_truncate(struct inode *); | 1126 | void f2fs_truncate(struct inode *); |
1122 | int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 1127 | int f2fs_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
1123 | int f2fs_setattr(struct dentry *, struct iattr *); | 1128 | int f2fs_setattr(struct dentry *, struct iattr *); |
@@ -1202,10 +1207,8 @@ int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *); | |||
1202 | bool alloc_nid(struct f2fs_sb_info *, nid_t *); | 1207 | bool alloc_nid(struct f2fs_sb_info *, nid_t *); |
1203 | void alloc_nid_done(struct f2fs_sb_info *, nid_t); | 1208 | void alloc_nid_done(struct f2fs_sb_info *, nid_t); |
1204 | void alloc_nid_failed(struct f2fs_sb_info *, nid_t); | 1209 | void alloc_nid_failed(struct f2fs_sb_info *, nid_t); |
1205 | void recover_node_page(struct f2fs_sb_info *, struct page *, | ||
1206 | struct f2fs_summary *, struct node_info *, block_t); | ||
1207 | void recover_inline_xattr(struct inode *, struct page *); | 1210 | void recover_inline_xattr(struct inode *, struct page *); |
1208 | bool recover_xattr_data(struct inode *, struct page *, block_t); | 1211 | void recover_xattr_data(struct inode *, struct page *, block_t); |
1209 | int recover_inode_page(struct f2fs_sb_info *, struct page *); | 1212 | int recover_inode_page(struct f2fs_sb_info *, struct page *); |
1210 | int restore_node_summary(struct f2fs_sb_info *, unsigned int, | 1213 | int restore_node_summary(struct f2fs_sb_info *, unsigned int, |
1211 | struct f2fs_summary_block *); | 1214 | struct f2fs_summary_block *); |
@@ -1238,8 +1241,6 @@ void write_data_page(struct page *, struct dnode_of_data *, block_t *, | |||
1238 | void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *); | 1241 | void rewrite_data_page(struct page *, block_t, struct f2fs_io_info *); |
1239 | void recover_data_page(struct f2fs_sb_info *, struct page *, | 1242 | void recover_data_page(struct f2fs_sb_info *, struct page *, |
1240 | struct f2fs_summary *, block_t, block_t); | 1243 | struct f2fs_summary *, block_t, block_t); |
1241 | void rewrite_node_page(struct f2fs_sb_info *, struct page *, | ||
1242 | struct f2fs_summary *, block_t, block_t); | ||
1243 | void allocate_data_block(struct f2fs_sb_info *, struct page *, | 1244 | void allocate_data_block(struct f2fs_sb_info *, struct page *, |
1244 | block_t, block_t *, struct f2fs_summary *, int); | 1245 | block_t, block_t *, struct f2fs_summary *, int); |
1245 | void f2fs_wait_on_page_writeback(struct page *, enum page_type); | 1246 | void f2fs_wait_on_page_writeback(struct page *, enum page_type); |
@@ -1262,6 +1263,7 @@ int ra_meta_pages(struct f2fs_sb_info *, int, int, int); | |||
1262 | long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long); | 1263 | long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long); |
1263 | void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type); | 1264 | void add_dirty_inode(struct f2fs_sb_info *, nid_t, int type); |
1264 | void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type); | 1265 | void remove_dirty_inode(struct f2fs_sb_info *, nid_t, int type); |
1266 | void release_dirty_inode(struct f2fs_sb_info *); | ||
1265 | bool exist_written_data(struct f2fs_sb_info *, nid_t, int); | 1267 | bool exist_written_data(struct f2fs_sb_info *, nid_t, int); |
1266 | int acquire_orphan_inode(struct f2fs_sb_info *); | 1268 | int acquire_orphan_inode(struct f2fs_sb_info *); |
1267 | void release_orphan_inode(struct f2fs_sb_info *); | 1269 | void release_orphan_inode(struct f2fs_sb_info *); |
@@ -1439,8 +1441,8 @@ extern const struct inode_operations f2fs_special_inode_operations; | |||
1439 | */ | 1441 | */ |
1440 | bool f2fs_may_inline(struct inode *); | 1442 | bool f2fs_may_inline(struct inode *); |
1441 | int f2fs_read_inline_data(struct inode *, struct page *); | 1443 | int f2fs_read_inline_data(struct inode *, struct page *); |
1442 | int f2fs_convert_inline_data(struct inode *, pgoff_t); | 1444 | int f2fs_convert_inline_data(struct inode *, pgoff_t, struct page *); |
1443 | int f2fs_write_inline_data(struct inode *, struct page *, unsigned int); | 1445 | int f2fs_write_inline_data(struct inode *, struct page *, unsigned int); |
1444 | void truncate_inline_data(struct inode *, u64); | 1446 | void truncate_inline_data(struct inode *, u64); |
1445 | int recover_inline_data(struct inode *, struct page *); | 1447 | bool recover_inline_data(struct inode *, struct page *); |
1446 | #endif | 1448 | #endif |
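A brief aside on the f2fs.h changes above: the newly declared f2fs_cp_error() helper just wraps is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG), and later hunks in gc.c, node.c and super.c switch their open-coded flag checks over to it. A minimal caller sketch under that assumption (example_write_path is a hypothetical name, not part of the patch):

static int example_write_path(struct f2fs_sb_info *sbi)
{
	/* bail out early once the checkpoint is known to be broken */
	if (unlikely(f2fs_cp_error(sbi)))
		return -EIO;

	/* ... continue with the normal write-out path ... */
	return 0;
}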
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 208f1a9bd569..060aee65aee8 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c | |||
@@ -41,6 +41,11 @@ static int f2fs_vm_page_mkwrite(struct vm_area_struct *vma, | |||
41 | 41 | ||
42 | sb_start_pagefault(inode->i_sb); | 42 | sb_start_pagefault(inode->i_sb); |
43 | 43 | ||
44 | /* force conversion to normal data indices */ ||
45 | err = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, page); | ||
46 | if (err) | ||
47 | goto out; | ||
48 | |||
44 | /* block allocation */ | 49 | /* block allocation */ |
45 | f2fs_lock_op(sbi); | 50 | f2fs_lock_op(sbi); |
46 | set_new_dnode(&dn, inode, NULL, NULL, 0); | 51 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
@@ -110,6 +115,25 @@ static int get_parent_ino(struct inode *inode, nid_t *pino) | |||
110 | return 1; | 115 | return 1; |
111 | } | 116 | } |
112 | 117 | ||
118 | static inline bool need_do_checkpoint(struct inode *inode) | ||
119 | { | ||
120 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | ||
121 | bool need_cp = false; | ||
122 | |||
123 | if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1) | ||
124 | need_cp = true; | ||
125 | else if (file_wrong_pino(inode)) | ||
126 | need_cp = true; | ||
127 | else if (!space_for_roll_forward(sbi)) | ||
128 | need_cp = true; | ||
129 | else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) | ||
130 | need_cp = true; | ||
131 | else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi))) | ||
132 | need_cp = true; | ||
133 | |||
134 | return need_cp; | ||
135 | } | ||
136 | |||
113 | int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | 137 | int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) |
114 | { | 138 | { |
115 | struct inode *inode = file->f_mapping->host; | 139 | struct inode *inode = file->f_mapping->host; |
@@ -154,23 +178,12 @@ int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
154 | /* guarantee free sections for fsync */ | 178 | /* guarantee free sections for fsync */ |
155 | f2fs_balance_fs(sbi); | 179 | f2fs_balance_fs(sbi); |
156 | 180 | ||
157 | down_read(&fi->i_sem); | ||
158 | |||
159 | /* | 181 | /* |
160 | * Both of fdatasync() and fsync() are able to be recovered from | 182 | * Both of fdatasync() and fsync() are able to be recovered from |
161 | * sudden-power-off. | 183 | * sudden-power-off. |
162 | */ | 184 | */ |
163 | if (!S_ISREG(inode->i_mode) || inode->i_nlink != 1) | 185 | down_read(&fi->i_sem); |
164 | need_cp = true; | 186 | need_cp = need_do_checkpoint(inode); |
165 | else if (file_wrong_pino(inode)) | ||
166 | need_cp = true; | ||
167 | else if (!space_for_roll_forward(sbi)) | ||
168 | need_cp = true; | ||
169 | else if (!is_checkpointed_node(sbi, F2FS_I(inode)->i_pino)) | ||
170 | need_cp = true; | ||
171 | else if (F2FS_I(inode)->xattr_ver == cur_cp_version(F2FS_CKPT(sbi))) | ||
172 | need_cp = true; | ||
173 | |||
174 | up_read(&fi->i_sem); | 187 | up_read(&fi->i_sem); |
175 | 188 | ||
176 | if (need_cp) { | 189 | if (need_cp) { |
@@ -288,7 +301,7 @@ static loff_t f2fs_seek_block(struct file *file, loff_t offset, int whence) | |||
288 | if (err && err != -ENOENT) { | 301 | if (err && err != -ENOENT) { |
289 | goto fail; | 302 | goto fail; |
290 | } else if (err == -ENOENT) { | 303 | } else if (err == -ENOENT) { |
291 | /* direct node is not exist */ | 304 | /* direct node does not exist */ |
292 | if (whence == SEEK_DATA) { | 305 | if (whence == SEEK_DATA) { |
293 | pgofs = PGOFS_OF_NEXT_DNODE(pgofs, | 306 | pgofs = PGOFS_OF_NEXT_DNODE(pgofs, |
294 | F2FS_I(inode)); | 307 | F2FS_I(inode)); |
@@ -417,7 +430,7 @@ out: | |||
417 | f2fs_put_page(page, 1); | 430 | f2fs_put_page(page, 1); |
418 | } | 431 | } |
419 | 432 | ||
420 | int truncate_blocks(struct inode *inode, u64 from) | 433 | int truncate_blocks(struct inode *inode, u64 from, bool lock) |
421 | { | 434 | { |
422 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | 435 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); |
423 | unsigned int blocksize = inode->i_sb->s_blocksize; | 436 | unsigned int blocksize = inode->i_sb->s_blocksize; |
@@ -433,14 +446,16 @@ int truncate_blocks(struct inode *inode, u64 from) | |||
433 | free_from = (pgoff_t) | 446 | free_from = (pgoff_t) |
434 | ((from + blocksize - 1) >> (sbi->log_blocksize)); | 447 | ((from + blocksize - 1) >> (sbi->log_blocksize)); |
435 | 448 | ||
436 | f2fs_lock_op(sbi); | 449 | if (lock) |
450 | f2fs_lock_op(sbi); | ||
437 | 451 | ||
438 | set_new_dnode(&dn, inode, NULL, NULL, 0); | 452 | set_new_dnode(&dn, inode, NULL, NULL, 0); |
439 | err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE); | 453 | err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE); |
440 | if (err) { | 454 | if (err) { |
441 | if (err == -ENOENT) | 455 | if (err == -ENOENT) |
442 | goto free_next; | 456 | goto free_next; |
443 | f2fs_unlock_op(sbi); | 457 | if (lock) |
458 | f2fs_unlock_op(sbi); | ||
444 | trace_f2fs_truncate_blocks_exit(inode, err); | 459 | trace_f2fs_truncate_blocks_exit(inode, err); |
445 | return err; | 460 | return err; |
446 | } | 461 | } |
@@ -458,7 +473,8 @@ int truncate_blocks(struct inode *inode, u64 from) | |||
458 | f2fs_put_dnode(&dn); | 473 | f2fs_put_dnode(&dn); |
459 | free_next: | 474 | free_next: |
460 | err = truncate_inode_blocks(inode, free_from); | 475 | err = truncate_inode_blocks(inode, free_from); |
461 | f2fs_unlock_op(sbi); | 476 | if (lock) |
477 | f2fs_unlock_op(sbi); | ||
462 | done: | 478 | done: |
463 | /* lastly zero out the first data page */ | 479 | /* lastly zero out the first data page */ |
464 | truncate_partial_data_page(inode, from); | 480 | truncate_partial_data_page(inode, from); |
@@ -475,7 +491,7 @@ void f2fs_truncate(struct inode *inode) | |||
475 | 491 | ||
476 | trace_f2fs_truncate(inode); | 492 | trace_f2fs_truncate(inode); |
477 | 493 | ||
478 | if (!truncate_blocks(inode, i_size_read(inode))) { | 494 | if (!truncate_blocks(inode, i_size_read(inode), true)) { |
479 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 495 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
480 | mark_inode_dirty(inode); | 496 | mark_inode_dirty(inode); |
481 | } | 497 | } |
@@ -533,7 +549,7 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) | |||
533 | 549 | ||
534 | if ((attr->ia_valid & ATTR_SIZE) && | 550 | if ((attr->ia_valid & ATTR_SIZE) && |
535 | attr->ia_size != i_size_read(inode)) { | 551 | attr->ia_size != i_size_read(inode)) { |
536 | err = f2fs_convert_inline_data(inode, attr->ia_size); | 552 | err = f2fs_convert_inline_data(inode, attr->ia_size, NULL); |
537 | if (err) | 553 | if (err) |
538 | return err; | 554 | return err; |
539 | 555 | ||
@@ -622,7 +638,7 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) | |||
622 | loff_t off_start, off_end; | 638 | loff_t off_start, off_end; |
623 | int ret = 0; | 639 | int ret = 0; |
624 | 640 | ||
625 | ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1); | 641 | ret = f2fs_convert_inline_data(inode, MAX_INLINE_DATA + 1, NULL); |
626 | if (ret) | 642 | if (ret) |
627 | return ret; | 643 | return ret; |
628 | 644 | ||
@@ -678,7 +694,7 @@ static int expand_inode_data(struct inode *inode, loff_t offset, | |||
678 | if (ret) | 694 | if (ret) |
679 | return ret; | 695 | return ret; |
680 | 696 | ||
681 | ret = f2fs_convert_inline_data(inode, offset + len); | 697 | ret = f2fs_convert_inline_data(inode, offset + len, NULL); |
682 | if (ret) | 698 | if (ret) |
683 | return ret; | 699 | return ret; |
684 | 700 | ||
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index d7947d90ccc3..943a31db7cc3 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c | |||
@@ -58,7 +58,7 @@ static int gc_thread_func(void *data) | |||
58 | * 3. IO subsystem is idle by checking the # of requests in | 58 | * 3. IO subsystem is idle by checking the # of requests in |
59 | * bdev's request list. | 59 | * bdev's request list. |
60 | * | 60 | * |
61 | * Note) We have to avoid triggering GCs too much frequently. | 61 | * Note) We have to avoid triggering GCs frequently. |
62 | * Because it is possible that some segments can be | 62 | * Because it is possible that some segments can be |
63 | * invalidated soon after by user update or deletion. | 63 | * invalidated soon after by user update or deletion. |
64 | * So, I'd like to wait some time to collect dirty segments. | 64 | * So, I'd like to wait some time to collect dirty segments. |
@@ -222,7 +222,7 @@ static unsigned int get_cb_cost(struct f2fs_sb_info *sbi, unsigned int segno) | |||
222 | 222 | ||
223 | u = (vblocks * 100) >> sbi->log_blocks_per_seg; | 223 | u = (vblocks * 100) >> sbi->log_blocks_per_seg; |
224 | 224 | ||
225 | /* Handle if the system time is changed by user */ | 225 | /* Handle if the system time has been changed by the user */ |
226 | if (mtime < sit_i->min_mtime) | 226 | if (mtime < sit_i->min_mtime) |
227 | sit_i->min_mtime = mtime; | 227 | sit_i->min_mtime = mtime; |
228 | if (mtime > sit_i->max_mtime) | 228 | if (mtime > sit_i->max_mtime) |
@@ -593,7 +593,7 @@ next_step: | |||
593 | 593 | ||
594 | if (phase == 2) { | 594 | if (phase == 2) { |
595 | inode = f2fs_iget(sb, dni.ino); | 595 | inode = f2fs_iget(sb, dni.ino); |
596 | if (IS_ERR(inode)) | 596 | if (IS_ERR(inode) || is_bad_inode(inode)) |
597 | continue; | 597 | continue; |
598 | 598 | ||
599 | start_bidx = start_bidx_of_node(nofs, F2FS_I(inode)); | 599 | start_bidx = start_bidx_of_node(nofs, F2FS_I(inode)); |
@@ -693,7 +693,7 @@ int f2fs_gc(struct f2fs_sb_info *sbi) | |||
693 | gc_more: | 693 | gc_more: |
694 | if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) | 694 | if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) |
695 | goto stop; | 695 | goto stop; |
696 | if (unlikely(is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG))) | 696 | if (unlikely(f2fs_cp_error(sbi))) |
697 | goto stop; | 697 | goto stop; |
698 | 698 | ||
699 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) { | 699 | if (gc_type == BG_GC && has_not_enough_free_secs(sbi, nfree)) { |
diff --git a/fs/f2fs/gc.h b/fs/f2fs/gc.h index 5d5eb6047bf4..16f0b2b22999 100644 --- a/fs/f2fs/gc.h +++ b/fs/f2fs/gc.h | |||
@@ -91,7 +91,7 @@ static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi) | |||
91 | block_t invalid_user_blocks = sbi->user_block_count - | 91 | block_t invalid_user_blocks = sbi->user_block_count - |
92 | written_block_count(sbi); | 92 | written_block_count(sbi); |
93 | /* | 93 | /* |
94 | * Background GC is triggered with the following condition. | 94 | * Background GC is triggered with the following conditions. |
95 | * 1. There are a number of invalid blocks. | 95 | * 1. There are a number of invalid blocks. |
96 | * 2. There is not enough free space. | 96 | * 2. There is not enough free space. |
97 | */ | 97 | */ |
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index 948d17bf7281..a844fcfb9a8d 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c | |||
@@ -42,7 +42,8 @@ static void TEA_transform(unsigned int buf[4], unsigned int const in[]) | |||
42 | buf[1] += b1; | 42 | buf[1] += b1; |
43 | } | 43 | } |
44 | 44 | ||
45 | static void str2hashbuf(const char *msg, size_t len, unsigned int *buf, int num) | 45 | static void str2hashbuf(const unsigned char *msg, size_t len, |
46 | unsigned int *buf, int num) | ||
46 | { | 47 | { |
47 | unsigned pad, val; | 48 | unsigned pad, val; |
48 | int i; | 49 | int i; |
@@ -73,9 +74,9 @@ f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info) | |||
73 | { | 74 | { |
74 | __u32 hash; | 75 | __u32 hash; |
75 | f2fs_hash_t f2fs_hash; | 76 | f2fs_hash_t f2fs_hash; |
76 | const char *p; | 77 | const unsigned char *p; |
77 | __u32 in[8], buf[4]; | 78 | __u32 in[8], buf[4]; |
78 | const char *name = name_info->name; | 79 | const unsigned char *name = name_info->name; |
79 | size_t len = name_info->len; | 80 | size_t len = name_info->len; |
80 | 81 | ||
81 | if ((len <= 2) && (name[0] == '.') && | 82 | if ((len <= 2) && (name[0] == '.') && |
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 5beeccef9ae1..3e8ecdf3742b 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c | |||
@@ -68,7 +68,7 @@ out: | |||
68 | 68 | ||
69 | static int __f2fs_convert_inline_data(struct inode *inode, struct page *page) | 69 | static int __f2fs_convert_inline_data(struct inode *inode, struct page *page) |
70 | { | 70 | { |
71 | int err; | 71 | int err = 0; |
72 | struct page *ipage; | 72 | struct page *ipage; |
73 | struct dnode_of_data dn; | 73 | struct dnode_of_data dn; |
74 | void *src_addr, *dst_addr; | 74 | void *src_addr, *dst_addr; |
@@ -86,6 +86,10 @@ static int __f2fs_convert_inline_data(struct inode *inode, struct page *page) | |||
86 | goto out; | 86 | goto out; |
87 | } | 87 | } |
88 | 88 | ||
89 | /* someone else converted inline_data already */ | ||
90 | if (!f2fs_has_inline_data(inode)) | ||
91 | goto out; | ||
92 | |||
89 | /* | 93 | /* |
90 | * i_addr[0] is not used for inline data, | 94 | * i_addr[0] is not used for inline data, |
91 | * so reserving new block will not destroy inline data | 95 | * so reserving new block will not destroy inline data |
@@ -124,9 +128,10 @@ out: | |||
124 | return err; | 128 | return err; |
125 | } | 129 | } |
126 | 130 | ||
127 | int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size) | 131 | int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size, |
132 | struct page *page) | ||
128 | { | 133 | { |
129 | struct page *page; | 134 | struct page *new_page = page; |
130 | int err; | 135 | int err; |
131 | 136 | ||
132 | if (!f2fs_has_inline_data(inode)) | 137 | if (!f2fs_has_inline_data(inode)) |
@@ -134,17 +139,20 @@ int f2fs_convert_inline_data(struct inode *inode, pgoff_t to_size) | |||
134 | else if (to_size <= MAX_INLINE_DATA) | 139 | else if (to_size <= MAX_INLINE_DATA) |
135 | return 0; | 140 | return 0; |
136 | 141 | ||
137 | page = grab_cache_page(inode->i_mapping, 0); | 142 | if (!page || page->index != 0) { |
138 | if (!page) | 143 | new_page = grab_cache_page(inode->i_mapping, 0); |
139 | return -ENOMEM; | 144 | if (!new_page) |
145 | return -ENOMEM; | ||
146 | } | ||
140 | 147 | ||
141 | err = __f2fs_convert_inline_data(inode, page); | 148 | err = __f2fs_convert_inline_data(inode, new_page); |
142 | f2fs_put_page(page, 1); | 149 | if (!page || page->index != 0) |
150 | f2fs_put_page(new_page, 1); | ||
143 | return err; | 151 | return err; |
144 | } | 152 | } |
145 | 153 | ||
146 | int f2fs_write_inline_data(struct inode *inode, | 154 | int f2fs_write_inline_data(struct inode *inode, |
147 | struct page *page, unsigned size) | 155 | struct page *page, unsigned size) |
148 | { | 156 | { |
149 | void *src_addr, *dst_addr; | 157 | void *src_addr, *dst_addr; |
150 | struct page *ipage; | 158 | struct page *ipage; |
@@ -199,7 +207,7 @@ void truncate_inline_data(struct inode *inode, u64 from) | |||
199 | f2fs_put_page(ipage, 1); | 207 | f2fs_put_page(ipage, 1); |
200 | } | 208 | } |
201 | 209 | ||
202 | int recover_inline_data(struct inode *inode, struct page *npage) | 210 | bool recover_inline_data(struct inode *inode, struct page *npage) |
203 | { | 211 | { |
204 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | 212 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); |
205 | struct f2fs_inode *ri = NULL; | 213 | struct f2fs_inode *ri = NULL; |
@@ -218,7 +226,7 @@ int recover_inline_data(struct inode *inode, struct page *npage) | |||
218 | ri = F2FS_INODE(npage); | 226 | ri = F2FS_INODE(npage); |
219 | 227 | ||
220 | if (f2fs_has_inline_data(inode) && | 228 | if (f2fs_has_inline_data(inode) && |
221 | ri && ri->i_inline & F2FS_INLINE_DATA) { | 229 | ri && (ri->i_inline & F2FS_INLINE_DATA)) { |
222 | process_inline: | 230 | process_inline: |
223 | ipage = get_node_page(sbi, inode->i_ino); | 231 | ipage = get_node_page(sbi, inode->i_ino); |
224 | f2fs_bug_on(IS_ERR(ipage)); | 232 | f2fs_bug_on(IS_ERR(ipage)); |
@@ -230,7 +238,7 @@ process_inline: | |||
230 | memcpy(dst_addr, src_addr, MAX_INLINE_DATA); | 238 | memcpy(dst_addr, src_addr, MAX_INLINE_DATA); |
231 | update_inode(inode, ipage); | 239 | update_inode(inode, ipage); |
232 | f2fs_put_page(ipage, 1); | 240 | f2fs_put_page(ipage, 1); |
233 | return -1; | 241 | return true; |
234 | } | 242 | } |
235 | 243 | ||
236 | if (f2fs_has_inline_data(inode)) { | 244 | if (f2fs_has_inline_data(inode)) { |
@@ -242,10 +250,10 @@ process_inline: | |||
242 | clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA); | 250 | clear_inode_flag(F2FS_I(inode), FI_INLINE_DATA); |
243 | update_inode(inode, ipage); | 251 | update_inode(inode, ipage); |
244 | f2fs_put_page(ipage, 1); | 252 | f2fs_put_page(ipage, 1); |
245 | } else if (ri && ri->i_inline & F2FS_INLINE_DATA) { | 253 | } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) { |
246 | truncate_blocks(inode, 0); | 254 | truncate_blocks(inode, 0, false); |
247 | set_inode_flag(F2FS_I(inode), FI_INLINE_DATA); | 255 | set_inode_flag(F2FS_I(inode), FI_INLINE_DATA); |
248 | goto process_inline; | 256 | goto process_inline; |
249 | } | 257 | } |
250 | return 0; | 258 | return false; |
251 | } | 259 | } |
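For context on the bool argument that truncate_blocks() now takes in the hunks above: f2fs_truncate() passes true so the helper acquires f2fs_lock_op() itself, while the inline-data recovery path passes false to skip that locking in its context. A sketch of the convention (illustrative only; example_truncate is a hypothetical wrapper, not part of the patch):

/* illustrative wrapper: decide whether truncate_blocks() should take the op lock */
static int example_truncate(struct inode *inode, bool caller_holds_op_lock)
{
	/* pass false when f2fs_lock_op() must not be taken inside the helper */
	return truncate_blocks(inode, i_size_read(inode), !caller_holds_op_lock);
}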
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 27b03776ffd2..ee103fd7283c 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c | |||
@@ -134,9 +134,7 @@ static int f2fs_create(struct inode *dir, struct dentry *dentry, umode_t mode, | |||
134 | return 0; | 134 | return 0; |
135 | out: | 135 | out: |
136 | clear_nlink(inode); | 136 | clear_nlink(inode); |
137 | unlock_new_inode(inode); | 137 | iget_failed(inode); |
138 | make_bad_inode(inode); | ||
139 | iput(inode); | ||
140 | alloc_nid_failed(sbi, ino); | 138 | alloc_nid_failed(sbi, ino); |
141 | return err; | 139 | return err; |
142 | } | 140 | } |
@@ -229,7 +227,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) | |||
229 | f2fs_delete_entry(de, page, inode); | 227 | f2fs_delete_entry(de, page, inode); |
230 | f2fs_unlock_op(sbi); | 228 | f2fs_unlock_op(sbi); |
231 | 229 | ||
232 | /* In order to evict this inode, we set it dirty */ | 230 | /* In order to evict this inode, we set it dirty */ |
233 | mark_inode_dirty(inode); | 231 | mark_inode_dirty(inode); |
234 | fail: | 232 | fail: |
235 | trace_f2fs_unlink_exit(inode, err); | 233 | trace_f2fs_unlink_exit(inode, err); |
@@ -267,9 +265,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry, | |||
267 | return err; | 265 | return err; |
268 | out: | 266 | out: |
269 | clear_nlink(inode); | 267 | clear_nlink(inode); |
270 | unlock_new_inode(inode); | 268 | iget_failed(inode); |
271 | make_bad_inode(inode); | ||
272 | iput(inode); | ||
273 | alloc_nid_failed(sbi, inode->i_ino); | 269 | alloc_nid_failed(sbi, inode->i_ino); |
274 | return err; | 270 | return err; |
275 | } | 271 | } |
@@ -308,9 +304,7 @@ static int f2fs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
308 | out_fail: | 304 | out_fail: |
309 | clear_inode_flag(F2FS_I(inode), FI_INC_LINK); | 305 | clear_inode_flag(F2FS_I(inode), FI_INC_LINK); |
310 | clear_nlink(inode); | 306 | clear_nlink(inode); |
311 | unlock_new_inode(inode); | 307 | iget_failed(inode); |
312 | make_bad_inode(inode); | ||
313 | iput(inode); | ||
314 | alloc_nid_failed(sbi, inode->i_ino); | 308 | alloc_nid_failed(sbi, inode->i_ino); |
315 | return err; | 309 | return err; |
316 | } | 310 | } |
@@ -354,9 +348,7 @@ static int f2fs_mknod(struct inode *dir, struct dentry *dentry, | |||
354 | return 0; | 348 | return 0; |
355 | out: | 349 | out: |
356 | clear_nlink(inode); | 350 | clear_nlink(inode); |
357 | unlock_new_inode(inode); | 351 | iget_failed(inode); |
358 | make_bad_inode(inode); | ||
359 | iput(inode); | ||
360 | alloc_nid_failed(sbi, inode->i_ino); | 352 | alloc_nid_failed(sbi, inode->i_ino); |
361 | return err; | 353 | return err; |
362 | } | 354 | } |
@@ -688,9 +680,7 @@ release_out: | |||
688 | out: | 680 | out: |
689 | f2fs_unlock_op(sbi); | 681 | f2fs_unlock_op(sbi); |
690 | clear_nlink(inode); | 682 | clear_nlink(inode); |
691 | unlock_new_inode(inode); | 683 | iget_failed(inode); |
692 | make_bad_inode(inode); | ||
693 | iput(inode); | ||
694 | alloc_nid_failed(sbi, inode->i_ino); | 684 | alloc_nid_failed(sbi, inode->i_ino); |
695 | return err; | 685 | return err; |
696 | } | 686 | } |
@@ -704,7 +694,6 @@ const struct inode_operations f2fs_dir_inode_operations = { | |||
704 | .mkdir = f2fs_mkdir, | 694 | .mkdir = f2fs_mkdir, |
705 | .rmdir = f2fs_rmdir, | 695 | .rmdir = f2fs_rmdir, |
706 | .mknod = f2fs_mknod, | 696 | .mknod = f2fs_mknod, |
707 | .rename = f2fs_rename, | ||
708 | .rename2 = f2fs_rename2, | 697 | .rename2 = f2fs_rename2, |
709 | .tmpfile = f2fs_tmpfile, | 698 | .tmpfile = f2fs_tmpfile, |
710 | .getattr = f2fs_getattr, | 699 | .getattr = f2fs_getattr, |
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index d3d90d284631..45378196e19a 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c | |||
@@ -237,7 +237,7 @@ retry: | |||
237 | nat_get_blkaddr(e) != NULL_ADDR && | 237 | nat_get_blkaddr(e) != NULL_ADDR && |
238 | new_blkaddr == NEW_ADDR); | 238 | new_blkaddr == NEW_ADDR); |
239 | 239 | ||
240 | /* increament version no as node is removed */ | 240 | /* increment version no as node is removed */ |
241 | if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { | 241 | if (nat_get_blkaddr(e) != NEW_ADDR && new_blkaddr == NULL_ADDR) { |
242 | unsigned char version = nat_get_version(e); | 242 | unsigned char version = nat_get_version(e); |
243 | nat_set_version(e, inc_node_version(version)); | 243 | nat_set_version(e, inc_node_version(version)); |
@@ -274,7 +274,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) | |||
274 | } | 274 | } |
275 | 275 | ||
276 | /* | 276 | /* |
277 | * This function returns always success | 277 | * This function always returns success |
278 | */ | 278 | */ |
279 | void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) | 279 | void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) |
280 | { | 280 | { |
@@ -650,7 +650,7 @@ static int truncate_partial_nodes(struct dnode_of_data *dn, | |||
650 | 650 | ||
651 | /* get indirect nodes in the path */ | 651 | /* get indirect nodes in the path */ |
652 | for (i = 0; i < idx + 1; i++) { | 652 | for (i = 0; i < idx + 1; i++) { |
653 | /* refernece count'll be increased */ | 653 | /* reference count'll be increased */ |
654 | pages[i] = get_node_page(sbi, nid[i]); | 654 | pages[i] = get_node_page(sbi, nid[i]); |
655 | if (IS_ERR(pages[i])) { | 655 | if (IS_ERR(pages[i])) { |
656 | err = PTR_ERR(pages[i]); | 656 | err = PTR_ERR(pages[i]); |
@@ -823,22 +823,26 @@ int truncate_xattr_node(struct inode *inode, struct page *page) | |||
823 | */ | 823 | */ |
824 | void remove_inode_page(struct inode *inode) | 824 | void remove_inode_page(struct inode *inode) |
825 | { | 825 | { |
826 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | ||
827 | struct page *page; | ||
828 | nid_t ino = inode->i_ino; | ||
829 | struct dnode_of_data dn; | 826 | struct dnode_of_data dn; |
830 | 827 | ||
831 | page = get_node_page(sbi, ino); | 828 | set_new_dnode(&dn, inode, NULL, NULL, inode->i_ino); |
832 | if (IS_ERR(page)) | 829 | if (get_dnode_of_data(&dn, 0, LOOKUP_NODE)) |
833 | return; | 830 | return; |
834 | 831 | ||
835 | if (truncate_xattr_node(inode, page)) { | 832 | if (truncate_xattr_node(inode, dn.inode_page)) { |
836 | f2fs_put_page(page, 1); | 833 | f2fs_put_dnode(&dn); |
837 | return; | 834 | return; |
838 | } | 835 | } |
839 | /* 0 is possible, after f2fs_new_inode() is failed */ | 836 | |
837 | /* remove potential inline_data blocks */ | ||
838 | if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || | ||
839 | S_ISLNK(inode->i_mode)) | ||
840 | truncate_data_blocks_range(&dn, 1); | ||
841 | |||
842 | /* 0 is possible, after f2fs_new_inode() has failed */ | ||
840 | f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1); | 843 | f2fs_bug_on(inode->i_blocks != 0 && inode->i_blocks != 1); |
841 | set_new_dnode(&dn, inode, page, page, ino); | 844 | |
845 | /* will put inode & node pages */ | ||
842 | truncate_node(&dn); | 846 | truncate_node(&dn); |
843 | } | 847 | } |
844 | 848 | ||
@@ -1129,8 +1133,11 @@ continue_unlock: | |||
1129 | set_fsync_mark(page, 0); | 1133 | set_fsync_mark(page, 0); |
1130 | set_dentry_mark(page, 0); | 1134 | set_dentry_mark(page, 0); |
1131 | } | 1135 | } |
1132 | NODE_MAPPING(sbi)->a_ops->writepage(page, wbc); | 1136 | |
1133 | wrote++; | 1137 | if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc)) |
1138 | unlock_page(page); | ||
1139 | else | ||
1140 | wrote++; | ||
1134 | 1141 | ||
1135 | if (--wbc->nr_to_write == 0) | 1142 | if (--wbc->nr_to_write == 0) |
1136 | break; | 1143 | break; |
@@ -1212,6 +1219,8 @@ static int f2fs_write_node_page(struct page *page, | |||
1212 | 1219 | ||
1213 | if (unlikely(sbi->por_doing)) | 1220 | if (unlikely(sbi->por_doing)) |
1214 | goto redirty_out; | 1221 | goto redirty_out; |
1222 | if (unlikely(f2fs_cp_error(sbi))) | ||
1223 | goto redirty_out; | ||
1215 | 1224 | ||
1216 | f2fs_wait_on_page_writeback(page, NODE); | 1225 | f2fs_wait_on_page_writeback(page, NODE); |
1217 | 1226 | ||
@@ -1540,15 +1549,6 @@ void alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid) | |||
1540 | kmem_cache_free(free_nid_slab, i); | 1549 | kmem_cache_free(free_nid_slab, i); |
1541 | } | 1550 | } |
1542 | 1551 | ||
1543 | void recover_node_page(struct f2fs_sb_info *sbi, struct page *page, | ||
1544 | struct f2fs_summary *sum, struct node_info *ni, | ||
1545 | block_t new_blkaddr) | ||
1546 | { | ||
1547 | rewrite_node_page(sbi, page, sum, ni->blk_addr, new_blkaddr); | ||
1548 | set_node_addr(sbi, ni, new_blkaddr, false); | ||
1549 | clear_node_page_dirty(page); | ||
1550 | } | ||
1551 | |||
1552 | void recover_inline_xattr(struct inode *inode, struct page *page) | 1552 | void recover_inline_xattr(struct inode *inode, struct page *page) |
1553 | { | 1553 | { |
1554 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | 1554 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); |
@@ -1557,40 +1557,33 @@ void recover_inline_xattr(struct inode *inode, struct page *page) | |||
1557 | struct page *ipage; | 1557 | struct page *ipage; |
1558 | struct f2fs_inode *ri; | 1558 | struct f2fs_inode *ri; |
1559 | 1559 | ||
1560 | if (!f2fs_has_inline_xattr(inode)) | ||
1561 | return; | ||
1562 | |||
1563 | if (!IS_INODE(page)) | ||
1564 | return; | ||
1565 | |||
1566 | ri = F2FS_INODE(page); | ||
1567 | if (!(ri->i_inline & F2FS_INLINE_XATTR)) | ||
1568 | return; | ||
1569 | |||
1570 | ipage = get_node_page(sbi, inode->i_ino); | 1560 | ipage = get_node_page(sbi, inode->i_ino); |
1571 | f2fs_bug_on(IS_ERR(ipage)); | 1561 | f2fs_bug_on(IS_ERR(ipage)); |
1572 | 1562 | ||
1563 | ri = F2FS_INODE(page); | ||
1564 | if (!(ri->i_inline & F2FS_INLINE_XATTR)) { | ||
1565 | clear_inode_flag(F2FS_I(inode), FI_INLINE_XATTR); | ||
1566 | goto update_inode; | ||
1567 | } | ||
1568 | |||
1573 | dst_addr = inline_xattr_addr(ipage); | 1569 | dst_addr = inline_xattr_addr(ipage); |
1574 | src_addr = inline_xattr_addr(page); | 1570 | src_addr = inline_xattr_addr(page); |
1575 | inline_size = inline_xattr_size(inode); | 1571 | inline_size = inline_xattr_size(inode); |
1576 | 1572 | ||
1577 | f2fs_wait_on_page_writeback(ipage, NODE); | 1573 | f2fs_wait_on_page_writeback(ipage, NODE); |
1578 | memcpy(dst_addr, src_addr, inline_size); | 1574 | memcpy(dst_addr, src_addr, inline_size); |
1579 | 1575 | update_inode: | |
1580 | update_inode(inode, ipage); | 1576 | update_inode(inode, ipage); |
1581 | f2fs_put_page(ipage, 1); | 1577 | f2fs_put_page(ipage, 1); |
1582 | } | 1578 | } |
1583 | 1579 | ||
1584 | bool recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr) | 1580 | void recover_xattr_data(struct inode *inode, struct page *page, block_t blkaddr) |
1585 | { | 1581 | { |
1586 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); | 1582 | struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); |
1587 | nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; | 1583 | nid_t prev_xnid = F2FS_I(inode)->i_xattr_nid; |
1588 | nid_t new_xnid = nid_of_node(page); | 1584 | nid_t new_xnid = nid_of_node(page); |
1589 | struct node_info ni; | 1585 | struct node_info ni; |
1590 | 1586 | ||
1591 | if (!f2fs_has_xattr_block(ofs_of_node(page))) | ||
1592 | return false; | ||
1593 | |||
1594 | /* 1: invalidate the previous xattr nid */ | 1587 | /* 1: invalidate the previous xattr nid */ |
1595 | if (!prev_xnid) | 1588 | if (!prev_xnid) |
1596 | goto recover_xnid; | 1589 | goto recover_xnid; |
@@ -1618,7 +1611,6 @@ recover_xnid: | |||
1618 | set_node_addr(sbi, &ni, blkaddr, false); | 1611 | set_node_addr(sbi, &ni, blkaddr, false); |
1619 | 1612 | ||
1620 | update_inode_page(inode); | 1613 | update_inode_page(inode); |
1621 | return true; | ||
1622 | } | 1614 | } |
1623 | 1615 | ||
1624 | int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) | 1616 | int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) |
@@ -1637,7 +1629,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) | |||
1637 | if (!ipage) | 1629 | if (!ipage) |
1638 | return -ENOMEM; | 1630 | return -ENOMEM; |
1639 | 1631 | ||
1640 | /* Should not use this inode from free nid list */ | 1632 | /* Should not use this inode from free nid list */ |
1641 | remove_free_nid(NM_I(sbi), ino); | 1633 | remove_free_nid(NM_I(sbi), ino); |
1642 | 1634 | ||
1643 | SetPageUptodate(ipage); | 1635 | SetPageUptodate(ipage); |
@@ -1651,6 +1643,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) | |||
1651 | dst->i_blocks = cpu_to_le64(1); | 1643 | dst->i_blocks = cpu_to_le64(1); |
1652 | dst->i_links = cpu_to_le32(1); | 1644 | dst->i_links = cpu_to_le32(1); |
1653 | dst->i_xattr_nid = 0; | 1645 | dst->i_xattr_nid = 0; |
1646 | dst->i_inline = src->i_inline & F2FS_INLINE_XATTR; | ||
1654 | 1647 | ||
1655 | new_ni = old_ni; | 1648 | new_ni = old_ni; |
1656 | new_ni.ino = ino; | 1649 | new_ni.ino = ino; |
@@ -1659,13 +1652,14 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) | |||
1659 | WARN_ON(1); | 1652 | WARN_ON(1); |
1660 | set_node_addr(sbi, &new_ni, NEW_ADDR, false); | 1653 | set_node_addr(sbi, &new_ni, NEW_ADDR, false); |
1661 | inc_valid_inode_count(sbi); | 1654 | inc_valid_inode_count(sbi); |
1655 | set_page_dirty(ipage); | ||
1662 | f2fs_put_page(ipage, 1); | 1656 | f2fs_put_page(ipage, 1); |
1663 | return 0; | 1657 | return 0; |
1664 | } | 1658 | } |
1665 | 1659 | ||
1666 | /* | 1660 | /* |
1667 | * ra_sum_pages() merge contiguous pages into one bio and submit. | 1661 | * ra_sum_pages() merge contiguous pages into one bio and submit. |
1668 | * these pre-readed pages are alloced in bd_inode's mapping tree. | 1662 | * these pre-read pages are allocated in bd_inode's mapping tree. |
1669 | */ | 1663 | */ |
1670 | static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages, | 1664 | static int ra_sum_pages(struct f2fs_sb_info *sbi, struct page **pages, |
1671 | int start, int nrpages) | 1665 | int start, int nrpages) |
@@ -1709,7 +1703,7 @@ int restore_node_summary(struct f2fs_sb_info *sbi, | |||
1709 | for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) { | 1703 | for (i = 0; !err && i < last_offset; i += nrpages, addr += nrpages) { |
1710 | nrpages = min(last_offset - i, bio_blocks); | 1704 | nrpages = min(last_offset - i, bio_blocks); |
1711 | 1705 | ||
1712 | /* read ahead node pages */ | 1706 | /* readahead node pages */ |
1713 | nrpages = ra_sum_pages(sbi, pages, addr, nrpages); | 1707 | nrpages = ra_sum_pages(sbi, pages, addr, nrpages); |
1714 | if (!nrpages) | 1708 | if (!nrpages) |
1715 | return -ENOMEM; | 1709 | return -ENOMEM; |
@@ -1967,7 +1961,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi) | |||
1967 | nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks; | 1961 | nm_i->max_nid = NAT_ENTRY_PER_BLOCK * nat_blocks; |
1968 | 1962 | ||
1969 | /* not used nids: 0, node, meta, (and root counted as valid node) */ | 1963 | /* not used nids: 0, node, meta, (and root counted as valid node) */ |
1970 | nm_i->available_nids = nm_i->max_nid - 3; | 1964 | nm_i->available_nids = nm_i->max_nid - F2FS_RESERVED_NODE_NUM; |
1971 | nm_i->fcnt = 0; | 1965 | nm_i->fcnt = 0; |
1972 | nm_i->nat_cnt = 0; | 1966 | nm_i->nat_cnt = 0; |
1973 | nm_i->ram_thresh = DEF_RAM_THRESHOLD; | 1967 | nm_i->ram_thresh = DEF_RAM_THRESHOLD; |
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index fe1c6d921ba2..756c41cd2582 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c | |||
@@ -62,8 +62,10 @@ static int recover_dentry(struct page *ipage, struct inode *inode) | |||
62 | } | 62 | } |
63 | retry: | 63 | retry: |
64 | de = f2fs_find_entry(dir, &name, &page); | 64 | de = f2fs_find_entry(dir, &name, &page); |
65 | if (de && inode->i_ino == le32_to_cpu(de->ino)) | 65 | if (de && inode->i_ino == le32_to_cpu(de->ino)) { |
66 | clear_inode_flag(F2FS_I(inode), FI_INC_LINK); | ||
66 | goto out_unmap_put; | 67 | goto out_unmap_put; |
68 | } | ||
67 | if (de) { | 69 | if (de) { |
68 | einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino)); | 70 | einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino)); |
69 | if (IS_ERR(einode)) { | 71 | if (IS_ERR(einode)) { |
@@ -300,14 +302,19 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, | |||
300 | struct node_info ni; | 302 | struct node_info ni; |
301 | int err = 0, recovered = 0; | 303 | int err = 0, recovered = 0; |
302 | 304 | ||
303 | recover_inline_xattr(inode, page); | 305 | /* step 1: recover xattr */ |
304 | 306 | if (IS_INODE(page)) { | |
305 | if (recover_inline_data(inode, page)) | 307 | recover_inline_xattr(inode, page); |
308 | } else if (f2fs_has_xattr_block(ofs_of_node(page))) { | ||
309 | recover_xattr_data(inode, page, blkaddr); | ||
306 | goto out; | 310 | goto out; |
311 | } | ||
307 | 312 | ||
308 | if (recover_xattr_data(inode, page, blkaddr)) | 313 | /* step 2: recover inline data */ |
314 | if (recover_inline_data(inode, page)) | ||
309 | goto out; | 315 | goto out; |
310 | 316 | ||
317 | /* step 3: recover data indices */ | ||
311 | start = start_bidx_of_node(ofs_of_node(page), fi); | 318 | start = start_bidx_of_node(ofs_of_node(page), fi); |
312 | end = start + ADDRS_PER_PAGE(page, fi); | 319 | end = start + ADDRS_PER_PAGE(page, fi); |
313 | 320 | ||
@@ -364,8 +371,6 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, | |||
364 | fill_node_footer(dn.node_page, dn.nid, ni.ino, | 371 | fill_node_footer(dn.node_page, dn.nid, ni.ino, |
365 | ofs_of_node(page), false); | 372 | ofs_of_node(page), false); |
366 | set_page_dirty(dn.node_page); | 373 | set_page_dirty(dn.node_page); |
367 | |||
368 | recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr); | ||
369 | err: | 374 | err: |
370 | f2fs_put_dnode(&dn); | 375 | f2fs_put_dnode(&dn); |
371 | f2fs_unlock_op(sbi); | 376 | f2fs_unlock_op(sbi); |
@@ -452,6 +457,9 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) | |||
452 | /* step #1: find fsynced inode numbers */ | 457 | /* step #1: find fsynced inode numbers */ |
453 | sbi->por_doing = true; | 458 | sbi->por_doing = true; |
454 | 459 | ||
460 | /* prevent checkpoint */ | ||
461 | mutex_lock(&sbi->cp_mutex); | ||
462 | |||
455 | blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); | 463 | blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); |
456 | 464 | ||
457 | err = find_fsync_dnodes(sbi, &inode_list); | 465 | err = find_fsync_dnodes(sbi, &inode_list); |
@@ -465,7 +473,8 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) | |||
465 | 473 | ||
466 | /* step #2: recover data */ | 474 | /* step #2: recover data */ |
467 | err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE); | 475 | err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE); |
468 | f2fs_bug_on(!list_empty(&inode_list)); | 476 | if (!err) |
477 | f2fs_bug_on(!list_empty(&inode_list)); | ||
469 | out: | 478 | out: |
470 | destroy_fsync_dnodes(&inode_list); | 479 | destroy_fsync_dnodes(&inode_list); |
471 | kmem_cache_destroy(fsync_entry_slab); | 480 | kmem_cache_destroy(fsync_entry_slab); |
@@ -482,8 +491,13 @@ out: | |||
482 | /* Flush all the NAT/SIT pages */ | 491 | /* Flush all the NAT/SIT pages */ |
483 | while (get_pages(sbi, F2FS_DIRTY_META)) | 492 | while (get_pages(sbi, F2FS_DIRTY_META)) |
484 | sync_meta_pages(sbi, META, LONG_MAX); | 493 | sync_meta_pages(sbi, META, LONG_MAX); |
494 | set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); | ||
495 | mutex_unlock(&sbi->cp_mutex); | ||
485 | } else if (need_writecp) { | 496 | } else if (need_writecp) { |
497 | mutex_unlock(&sbi->cp_mutex); | ||
486 | write_checkpoint(sbi, false); | 498 | write_checkpoint(sbi, false); |
499 | } else { | ||
500 | mutex_unlock(&sbi->cp_mutex); | ||
487 | } | 501 | } |
488 | return err; | 502 | return err; |
489 | } | 503 | } |
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 0dfeebae2a50..0aa337cd5bba 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c | |||
@@ -62,7 +62,7 @@ static inline unsigned long __reverse_ffs(unsigned long word) | |||
62 | } | 62 | } |
63 | 63 | ||
64 | /* | 64 | /* |
65 | * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c becasue | 65 | * __find_rev_next(_zero)_bit is copied from lib/find_next_bit.c because |
66 | * f2fs_set_bit makes MSB and LSB reversed in a byte. | 66 | * f2fs_set_bit makes MSB and LSB reversed in a byte. |
67 | * Example: | 67 | * Example: |
68 | * LSB <--> MSB | 68 | * LSB <--> MSB |
@@ -808,7 +808,7 @@ static void __refresh_next_blkoff(struct f2fs_sb_info *sbi, | |||
808 | } | 808 | } |
809 | 809 | ||
810 | /* | 810 | /* |
811 | * This function always allocates a used segment (from dirty seglist) by SSR | 811 | * This function always allocates a used segment(from dirty seglist) by SSR |
812 | * manner, so it should recover the existing segment information of valid blocks | 812 | * manner, so it should recover the existing segment information of valid blocks |
813 | */ | 813 | */ |
814 | static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse) | 814 | static void change_curseg(struct f2fs_sb_info *sbi, int type, bool reuse) |
@@ -1103,55 +1103,6 @@ void recover_data_page(struct f2fs_sb_info *sbi, | |||
1103 | mutex_unlock(&curseg->curseg_mutex); | 1103 | mutex_unlock(&curseg->curseg_mutex); |
1104 | } | 1104 | } |
1105 | 1105 | ||
1106 | void rewrite_node_page(struct f2fs_sb_info *sbi, | ||
1107 | struct page *page, struct f2fs_summary *sum, | ||
1108 | block_t old_blkaddr, block_t new_blkaddr) | ||
1109 | { | ||
1110 | struct sit_info *sit_i = SIT_I(sbi); | ||
1111 | int type = CURSEG_WARM_NODE; | ||
1112 | struct curseg_info *curseg; | ||
1113 | unsigned int segno, old_cursegno; | ||
1114 | block_t next_blkaddr = next_blkaddr_of_node(page); | ||
1115 | unsigned int next_segno = GET_SEGNO(sbi, next_blkaddr); | ||
1116 | struct f2fs_io_info fio = { | ||
1117 | .type = NODE, | ||
1118 | .rw = WRITE_SYNC, | ||
1119 | }; | ||
1120 | |||
1121 | curseg = CURSEG_I(sbi, type); | ||
1122 | |||
1123 | mutex_lock(&curseg->curseg_mutex); | ||
1124 | mutex_lock(&sit_i->sentry_lock); | ||
1125 | |||
1126 | segno = GET_SEGNO(sbi, new_blkaddr); | ||
1127 | old_cursegno = curseg->segno; | ||
1128 | |||
1129 | /* change the current segment */ | ||
1130 | if (segno != curseg->segno) { | ||
1131 | curseg->next_segno = segno; | ||
1132 | change_curseg(sbi, type, true); | ||
1133 | } | ||
1134 | curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, new_blkaddr); | ||
1135 | __add_sum_entry(sbi, type, sum); | ||
1136 | |||
1137 | /* change the current log to the next block addr in advance */ | ||
1138 | if (next_segno != segno) { | ||
1139 | curseg->next_segno = next_segno; | ||
1140 | change_curseg(sbi, type, true); | ||
1141 | } | ||
1142 | curseg->next_blkoff = GET_BLKOFF_FROM_SEG0(sbi, next_blkaddr); | ||
1143 | |||
1144 | /* rewrite node page */ | ||
1145 | set_page_writeback(page); | ||
1146 | f2fs_submit_page_mbio(sbi, page, new_blkaddr, &fio); | ||
1147 | f2fs_submit_merged_bio(sbi, NODE, WRITE); | ||
1148 | refresh_sit_entry(sbi, old_blkaddr, new_blkaddr); | ||
1149 | locate_dirty_segment(sbi, old_cursegno); | ||
1150 | |||
1151 | mutex_unlock(&sit_i->sentry_lock); | ||
1152 | mutex_unlock(&curseg->curseg_mutex); | ||
1153 | } | ||
1154 | |||
1155 | static inline bool is_merged_page(struct f2fs_sb_info *sbi, | 1106 | static inline bool is_merged_page(struct f2fs_sb_info *sbi, |
1156 | struct page *page, enum page_type type) | 1107 | struct page *page, enum page_type type) |
1157 | { | 1108 | { |
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 55973f7b0330..ff483257283b 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h | |||
@@ -549,7 +549,7 @@ static inline void verify_block_addr(struct f2fs_sb_info *sbi, block_t blk_addr) | |||
549 | } | 549 | } |
550 | 550 | ||
551 | /* | 551 | /* |
552 | * Summary block is always treated as invalid block | 552 | * Summary block is always treated as an invalid block |
553 | */ | 553 | */ |
554 | static inline void check_block_count(struct f2fs_sb_info *sbi, | 554 | static inline void check_block_count(struct f2fs_sb_info *sbi, |
555 | int segno, struct f2fs_sit_entry *raw_sit) | 555 | int segno, struct f2fs_sit_entry *raw_sit) |
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 657582fc7601..41bdf511003d 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c | |||
@@ -432,9 +432,15 @@ static void f2fs_put_super(struct super_block *sb) | |||
432 | stop_gc_thread(sbi); | 432 | stop_gc_thread(sbi); |
433 | 433 | ||
434 | /* We don't need to do checkpoint when it's clean */ | 434 | /* We don't need to do checkpoint when it's clean */ |
435 | if (sbi->s_dirty && get_pages(sbi, F2FS_DIRTY_NODES)) | 435 | if (sbi->s_dirty) |
436 | write_checkpoint(sbi, true); | 436 | write_checkpoint(sbi, true); |
437 | 437 | ||
438 | /* | ||
439 | * Normally the superblock is clean, so we need to release this here. ||
440 | * In addition, an EIO will skip the checkpoint, so we need this as well. ||
441 | */ | ||
442 | release_dirty_inode(sbi); | ||
443 | |||
438 | iput(sbi->node_inode); | 444 | iput(sbi->node_inode); |
439 | iput(sbi->meta_inode); | 445 | iput(sbi->meta_inode); |
440 | 446 | ||
@@ -457,9 +463,6 @@ int f2fs_sync_fs(struct super_block *sb, int sync) | |||
457 | 463 | ||
458 | trace_f2fs_sync_fs(sb, sync); | 464 | trace_f2fs_sync_fs(sb, sync); |
459 | 465 | ||
460 | if (!sbi->s_dirty && !get_pages(sbi, F2FS_DIRTY_NODES)) | ||
461 | return 0; | ||
462 | |||
463 | if (sync) { | 466 | if (sync) { |
464 | mutex_lock(&sbi->gc_mutex); | 467 | mutex_lock(&sbi->gc_mutex); |
465 | write_checkpoint(sbi, false); | 468 | write_checkpoint(sbi, false); |
@@ -505,8 +508,8 @@ static int f2fs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
505 | buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count; | 508 | buf->f_bfree = buf->f_blocks - valid_user_blocks(sbi) - ovp_count; |
506 | buf->f_bavail = user_block_count - valid_user_blocks(sbi); | 509 | buf->f_bavail = user_block_count - valid_user_blocks(sbi); |
507 | 510 | ||
508 | buf->f_files = sbi->total_node_count; | 511 | buf->f_files = sbi->total_node_count - F2FS_RESERVED_NODE_NUM; |
509 | buf->f_ffree = sbi->total_node_count - valid_inode_count(sbi); | 512 | buf->f_ffree = buf->f_files - valid_inode_count(sbi); |
510 | 513 | ||
511 | buf->f_namelen = F2FS_NAME_LEN; | 514 | buf->f_namelen = F2FS_NAME_LEN; |
512 | buf->f_fsid.val[0] = (u32)id; | 515 | buf->f_fsid.val[0] = (u32)id; |
@@ -663,7 +666,7 @@ restore_gc: | |||
663 | if (need_restart_gc) { | 666 | if (need_restart_gc) { |
664 | if (start_gc_thread(sbi)) | 667 | if (start_gc_thread(sbi)) |
665 | f2fs_msg(sbi->sb, KERN_WARNING, | 668 | f2fs_msg(sbi->sb, KERN_WARNING, |
666 | "background gc thread is stop"); | 669 | "background gc thread has stopped"); |
667 | } else if (need_stop_gc) { | 670 | } else if (need_stop_gc) { |
668 | stop_gc_thread(sbi); | 671 | stop_gc_thread(sbi); |
669 | } | 672 | } |
@@ -812,7 +815,7 @@ static int sanity_check_ckpt(struct f2fs_sb_info *sbi) | |||
812 | if (unlikely(fsmeta >= total)) | 815 | if (unlikely(fsmeta >= total)) |
813 | return 1; | 816 | return 1; |
814 | 817 | ||
815 | if (unlikely(is_set_ckpt_flags(ckpt, CP_ERROR_FLAG))) { | 818 | if (unlikely(f2fs_cp_error(sbi))) { |
816 | f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); | 819 | f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); |
817 | return 1; | 820 | return 1; |
818 | } | 821 | } |
@@ -899,8 +902,10 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) | |||
899 | struct buffer_head *raw_super_buf; | 902 | struct buffer_head *raw_super_buf; |
900 | struct inode *root; | 903 | struct inode *root; |
901 | long err = -EINVAL; | 904 | long err = -EINVAL; |
905 | bool retry = true; | ||
902 | int i; | 906 | int i; |
903 | 907 | ||
908 | try_onemore: | ||
904 | /* allocate memory for f2fs-specific super block info */ | 909 | /* allocate memory for f2fs-specific super block info */ |
905 | sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL); | 910 | sbi = kzalloc(sizeof(struct f2fs_sb_info), GFP_KERNEL); |
906 | if (!sbi) | 911 | if (!sbi) |
@@ -1080,9 +1085,11 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) | |||
1080 | /* recover fsynced data */ | 1085 | /* recover fsynced data */ |
1081 | if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { | 1086 | if (!test_opt(sbi, DISABLE_ROLL_FORWARD)) { |
1082 | err = recover_fsync_data(sbi); | 1087 | err = recover_fsync_data(sbi); |
1083 | if (err) | 1088 | if (err) { |
1084 | f2fs_msg(sb, KERN_ERR, | 1089 | f2fs_msg(sb, KERN_ERR, |
1085 | "Cannot recover all fsync data errno=%ld", err); | 1090 | "Cannot recover all fsync data errno=%ld", err); |
1091 | goto free_kobj; | ||
1092 | } | ||
1086 | } | 1093 | } |
1087 | 1094 | ||
1088 | /* | 1095 | /* |
@@ -1123,6 +1130,13 @@ free_sb_buf: | |||
1123 | brelse(raw_super_buf); | 1130 | brelse(raw_super_buf); |
1124 | free_sbi: | 1131 | free_sbi: |
1125 | kfree(sbi); | 1132 | kfree(sbi); |
1133 | |||
1134 | /* give only one more chance */ ||
1135 | if (retry) { | ||
1136 | retry = 0; | ||
1137 | shrink_dcache_sb(sb); | ||
1138 | goto try_onemore; | ||
1139 | } | ||
1126 | return err; | 1140 | return err; |
1127 | } | 1141 | } |
1128 | 1142 | ||
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 8bea941ee309..728a5dc3dc16 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c | |||
@@ -528,7 +528,7 @@ static int __f2fs_setxattr(struct inode *inode, int index, | |||
528 | int free; | 528 | int free; |
529 | /* | 529 | /* |
530 | * If value is NULL, it is remove operation. | 530 | * If value is NULL, it is remove operation. |
531 | * In case of update operation, we caculate free. | 531 | * In case of update operation, we calculate free. |
532 | */ | 532 | */ |
533 | free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr); | 533 | free = MIN_OFFSET(inode) - ((char *)last - (char *)base_addr); |
534 | if (found) | 534 | if (found) |
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 8f27c93f8d2e..ec9e082f9ecd 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c | |||
@@ -253,13 +253,11 @@ static int lockd_up_net(struct svc_serv *serv, struct net *net) | |||
253 | 253 | ||
254 | error = make_socks(serv, net); | 254 | error = make_socks(serv, net); |
255 | if (error < 0) | 255 | if (error < 0) |
256 | goto err_socks; | 256 | goto err_bind; |
257 | set_grace_period(net); | 257 | set_grace_period(net); |
258 | dprintk("lockd_up_net: per-net data created; net=%p\n", net); | 258 | dprintk("lockd_up_net: per-net data created; net=%p\n", net); |
259 | return 0; | 259 | return 0; |
260 | 260 | ||
261 | err_socks: | ||
262 | svc_rpcb_cleanup(serv, net); | ||
263 | err_bind: | 261 | err_bind: |
264 | ln->nlmsvc_users--; | 262 | ln->nlmsvc_users--; |
265 | return error; | 263 | return error; |
diff --git a/fs/namei.c b/fs/namei.c index a996bb48dfab..215e44254c53 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/device_cgroup.h> | 34 | #include <linux/device_cgroup.h> |
35 | #include <linux/fs_struct.h> | 35 | #include <linux/fs_struct.h> |
36 | #include <linux/posix_acl.h> | 36 | #include <linux/posix_acl.h> |
37 | #include <linux/hash.h> | ||
37 | #include <asm/uaccess.h> | 38 | #include <asm/uaccess.h> |
38 | 39 | ||
39 | #include "internal.h" | 40 | #include "internal.h" |
@@ -643,24 +644,22 @@ static int complete_walk(struct nameidata *nd) | |||
643 | 644 | ||
644 | static __always_inline void set_root(struct nameidata *nd) | 645 | static __always_inline void set_root(struct nameidata *nd) |
645 | { | 646 | { |
646 | if (!nd->root.mnt) | 647 | get_fs_root(current->fs, &nd->root); |
647 | get_fs_root(current->fs, &nd->root); | ||
648 | } | 648 | } |
649 | 649 | ||
650 | static int link_path_walk(const char *, struct nameidata *); | 650 | static int link_path_walk(const char *, struct nameidata *); |
651 | 651 | ||
652 | static __always_inline void set_root_rcu(struct nameidata *nd) | 652 | static __always_inline unsigned set_root_rcu(struct nameidata *nd) |
653 | { | 653 | { |
654 | if (!nd->root.mnt) { | 654 | struct fs_struct *fs = current->fs; |
655 | struct fs_struct *fs = current->fs; | 655 | unsigned seq, res; |
656 | unsigned seq; | ||
657 | 656 | ||
658 | do { | 657 | do { |
659 | seq = read_seqcount_begin(&fs->seq); | 658 | seq = read_seqcount_begin(&fs->seq); |
660 | nd->root = fs->root; | 659 | nd->root = fs->root; |
661 | nd->seq = __read_seqcount_begin(&nd->root.dentry->d_seq); | 660 | res = __read_seqcount_begin(&nd->root.dentry->d_seq); |
662 | } while (read_seqcount_retry(&fs->seq, seq)); | 661 | } while (read_seqcount_retry(&fs->seq, seq)); |
663 | } | 662 | return res; |
664 | } | 663 | } |
665 | 664 | ||
666 | static void path_put_conditional(struct path *path, struct nameidata *nd) | 665 | static void path_put_conditional(struct path *path, struct nameidata *nd) |
@@ -860,7 +859,8 @@ follow_link(struct path *link, struct nameidata *nd, void **p) | |||
860 | return PTR_ERR(s); | 859 | return PTR_ERR(s); |
861 | } | 860 | } |
862 | if (*s == '/') { | 861 | if (*s == '/') { |
863 | set_root(nd); | 862 | if (!nd->root.mnt) |
863 | set_root(nd); | ||
864 | path_put(&nd->path); | 864 | path_put(&nd->path); |
865 | nd->path = nd->root; | 865 | nd->path = nd->root; |
866 | path_get(&nd->root); | 866 | path_get(&nd->root); |
@@ -1137,13 +1137,15 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path, | |||
1137 | */ | 1137 | */ |
1138 | *inode = path->dentry->d_inode; | 1138 | *inode = path->dentry->d_inode; |
1139 | } | 1139 | } |
1140 | return read_seqretry(&mount_lock, nd->m_seq) && | 1140 | return !read_seqretry(&mount_lock, nd->m_seq) && |
1141 | !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); | 1141 | !(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT); |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | static int follow_dotdot_rcu(struct nameidata *nd) | 1144 | static int follow_dotdot_rcu(struct nameidata *nd) |
1145 | { | 1145 | { |
1146 | set_root_rcu(nd); | 1146 | struct inode *inode = nd->inode; |
1147 | if (!nd->root.mnt) | ||
1148 | set_root_rcu(nd); | ||
1147 | 1149 | ||
1148 | while (1) { | 1150 | while (1) { |
1149 | if (nd->path.dentry == nd->root.dentry && | 1151 | if (nd->path.dentry == nd->root.dentry && |
@@ -1155,6 +1157,7 @@ static int follow_dotdot_rcu(struct nameidata *nd) | |||
1155 | struct dentry *parent = old->d_parent; | 1157 | struct dentry *parent = old->d_parent; |
1156 | unsigned seq; | 1158 | unsigned seq; |
1157 | 1159 | ||
1160 | inode = parent->d_inode; | ||
1158 | seq = read_seqcount_begin(&parent->d_seq); | 1161 | seq = read_seqcount_begin(&parent->d_seq); |
1159 | if (read_seqcount_retry(&old->d_seq, nd->seq)) | 1162 | if (read_seqcount_retry(&old->d_seq, nd->seq)) |
1160 | goto failed; | 1163 | goto failed; |
@@ -1164,6 +1167,7 @@ static int follow_dotdot_rcu(struct nameidata *nd) | |||
1164 | } | 1167 | } |
1165 | if (!follow_up_rcu(&nd->path)) | 1168 | if (!follow_up_rcu(&nd->path)) |
1166 | break; | 1169 | break; |
1170 | inode = nd->path.dentry->d_inode; | ||
1167 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); | 1171 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); |
1168 | } | 1172 | } |
1169 | while (d_mountpoint(nd->path.dentry)) { | 1173 | while (d_mountpoint(nd->path.dentry)) { |
@@ -1173,11 +1177,12 @@ static int follow_dotdot_rcu(struct nameidata *nd) | |||
1173 | break; | 1177 | break; |
1174 | nd->path.mnt = &mounted->mnt; | 1178 | nd->path.mnt = &mounted->mnt; |
1175 | nd->path.dentry = mounted->mnt.mnt_root; | 1179 | nd->path.dentry = mounted->mnt.mnt_root; |
1180 | inode = nd->path.dentry->d_inode; | ||
1176 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); | 1181 | nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq); |
1177 | if (!read_seqretry(&mount_lock, nd->m_seq)) | 1182 | if (read_seqretry(&mount_lock, nd->m_seq)) |
1178 | goto failed; | 1183 | goto failed; |
1179 | } | 1184 | } |
1180 | nd->inode = nd->path.dentry->d_inode; | 1185 | nd->inode = inode; |
1181 | return 0; | 1186 | return 0; |
1182 | 1187 | ||
1183 | failed: | 1188 | failed: |
@@ -1256,7 +1261,8 @@ static void follow_mount(struct path *path) | |||
1256 | 1261 | ||
1257 | static void follow_dotdot(struct nameidata *nd) | 1262 | static void follow_dotdot(struct nameidata *nd) |
1258 | { | 1263 | { |
1259 | set_root(nd); | 1264 | if (!nd->root.mnt) |
1265 | set_root(nd); | ||
1260 | 1266 | ||
1261 | while(1) { | 1267 | while(1) { |
1262 | struct dentry *old = nd->path.dentry; | 1268 | struct dentry *old = nd->path.dentry; |
@@ -1634,8 +1640,7 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd) | |||
1634 | 1640 | ||
1635 | static inline unsigned int fold_hash(unsigned long hash) | 1641 | static inline unsigned int fold_hash(unsigned long hash) |
1636 | { | 1642 | { |
1637 | hash += hash >> (8*sizeof(int)); | 1643 | return hash_64(hash, 32); |
1638 | return hash; | ||
1639 | } | 1644 | } |
1640 | 1645 | ||
1641 | #else /* 32-bit case */ | 1646 | #else /* 32-bit case */ |
@@ -1669,13 +1674,14 @@ EXPORT_SYMBOL(full_name_hash); | |||
1669 | 1674 | ||
1670 | /* | 1675 | /* |
1671 | * Calculate the length and hash of the path component, and | 1676 | * Calculate the length and hash of the path component, and |
1672 | * return the length of the component; | 1677 | * fill in the qstr. Return the "len" as the result. |
1673 | */ | 1678 | */ |
1674 | static inline unsigned long hash_name(const char *name, unsigned int *hashp) | 1679 | static inline unsigned long hash_name(const char *name, struct qstr *res) |
1675 | { | 1680 | { |
1676 | unsigned long a, b, adata, bdata, mask, hash, len; | 1681 | unsigned long a, b, adata, bdata, mask, hash, len; |
1677 | const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; | 1682 | const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS; |
1678 | 1683 | ||
1684 | res->name = name; | ||
1679 | hash = a = 0; | 1685 | hash = a = 0; |
1680 | len = -sizeof(unsigned long); | 1686 | len = -sizeof(unsigned long); |
1681 | do { | 1687 | do { |
@@ -1691,9 +1697,10 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp) | |||
1691 | mask = create_zero_mask(adata | bdata); | 1697 | mask = create_zero_mask(adata | bdata); |
1692 | 1698 | ||
1693 | hash += a & zero_bytemask(mask); | 1699 | hash += a & zero_bytemask(mask); |
1694 | *hashp = fold_hash(hash); | 1700 | len += find_zero(mask); |
1701 | res->hash_len = hashlen_create(fold_hash(hash), len); | ||
1695 | 1702 | ||
1696 | return len + find_zero(mask); | 1703 | return len; |
1697 | } | 1704 | } |
1698 | 1705 | ||
1699 | #else | 1706 | #else |
@@ -1711,18 +1718,19 @@ EXPORT_SYMBOL(full_name_hash); | |||
1711 | * We know there's a real path component here of at least | 1718 | * We know there's a real path component here of at least |
1712 | * one character. | 1719 | * one character. |
1713 | */ | 1720 | */ |
1714 | static inline unsigned long hash_name(const char *name, unsigned int *hashp) | 1721 | static inline long hash_name(const char *name, struct qstr *res) |
1715 | { | 1722 | { |
1716 | unsigned long hash = init_name_hash(); | 1723 | unsigned long hash = init_name_hash(); |
1717 | unsigned long len = 0, c; | 1724 | unsigned long len = 0, c; |
1718 | 1725 | ||
1726 | res->name = name; | ||
1719 | c = (unsigned char)*name; | 1727 | c = (unsigned char)*name; |
1720 | do { | 1728 | do { |
1721 | len++; | 1729 | len++; |
1722 | hash = partial_name_hash(c, hash); | 1730 | hash = partial_name_hash(c, hash); |
1723 | c = (unsigned char)name[len]; | 1731 | c = (unsigned char)name[len]; |
1724 | } while (c && c != '/'); | 1732 | } while (c && c != '/'); |
1725 | *hashp = end_name_hash(hash); | 1733 | res->hash_len = hashlen_create(end_name_hash(hash), len); |
1726 | return len; | 1734 | return len; |
1727 | } | 1735 | } |
1728 | 1736 | ||
@@ -1756,9 +1764,7 @@ static int link_path_walk(const char *name, struct nameidata *nd) | |||
1756 | if (err) | 1764 | if (err) |
1757 | break; | 1765 | break; |
1758 | 1766 | ||
1759 | len = hash_name(name, &this.hash); | 1767 | len = hash_name(name, &this); |
1760 | this.name = name; | ||
1761 | this.len = len; | ||
1762 | 1768 | ||
1763 | type = LAST_NORM; | 1769 | type = LAST_NORM; |
1764 | if (name[0] == '.') switch (len) { | 1770 | if (name[0] == '.') switch (len) { |
@@ -1852,7 +1858,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, | |||
1852 | if (*name=='/') { | 1858 | if (*name=='/') { |
1853 | if (flags & LOOKUP_RCU) { | 1859 | if (flags & LOOKUP_RCU) { |
1854 | rcu_read_lock(); | 1860 | rcu_read_lock(); |
1855 | set_root_rcu(nd); | 1861 | nd->seq = set_root_rcu(nd); |
1856 | } else { | 1862 | } else { |
1857 | set_root(nd); | 1863 | set_root(nd); |
1858 | path_get(&nd->root); | 1864 | path_get(&nd->root); |
@@ -1903,7 +1909,14 @@ static int path_init(int dfd, const char *name, unsigned int flags, | |||
1903 | } | 1909 | } |
1904 | 1910 | ||
1905 | nd->inode = nd->path.dentry->d_inode; | 1911 | nd->inode = nd->path.dentry->d_inode; |
1906 | return 0; | 1912 | if (!(flags & LOOKUP_RCU)) |
1913 | return 0; | ||
1914 | if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq))) | ||
1915 | return 0; | ||
1916 | if (!(nd->flags & LOOKUP_ROOT)) | ||
1917 | nd->root.mnt = NULL; | ||
1918 | rcu_read_unlock(); | ||
1919 | return -ECHILD; | ||
1907 | } | 1920 | } |
1908 | 1921 | ||
1909 | static inline int lookup_last(struct nameidata *nd, struct path *path) | 1922 | static inline int lookup_last(struct nameidata *nd, struct path *path) |
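The follow_dotdot_rcu() and path_init() hunks above rely on the seqcount read protocol: sample a sequence with read_seqcount_begin(), do the lockless reads, then re-check with read_seqcount_retry(); if the count moved, a writer ran concurrently and the walk bails out (here to -ECHILD, falling back to ref-walk). Below is a minimal userspace sketch of that protocol, using a hand-rolled counter rather than the kernel's seqcount_t; it is illustrative only, not code from this commit.

#include <stdatomic.h>
#include <stdio.h>

/* Toy sequence counter: an odd value means a writer is mid-update. */
struct seqcount {
	atomic_uint seq;
};

static unsigned read_seqcount_begin(struct seqcount *s)
{
	unsigned v;

	do {
		v = atomic_load_explicit(&s->seq, memory_order_acquire);
	} while (v & 1);	/* wait until no writer is in progress */
	return v;
}

static int read_seqcount_retry(struct seqcount *s, unsigned start)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&s->seq, memory_order_relaxed) != start;
}

int main(void)
{
	struct seqcount dentry_seq = { 0 };
	int dentry_data = 42;	/* stands in for d_inode, d_parent, ... */
	int snapshot;
	unsigned seq;

	do {
		seq = read_seqcount_begin(&dentry_seq);
		snapshot = dentry_data;	/* lockless read; may race a writer */
	} while (read_seqcount_retry(&dentry_seq, seq));

	printf("stable snapshot: %d\n", snapshot);
	return 0;
}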
diff --git a/fs/namespace.c b/fs/namespace.c index a01c7730e9af..ef42d9bee212 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1217,6 +1217,11 @@ static void namespace_unlock(void) | |||
1217 | head.first->pprev = &head.first; | 1217 | head.first->pprev = &head.first; |
1218 | INIT_HLIST_HEAD(&unmounted); | 1218 | INIT_HLIST_HEAD(&unmounted); |
1219 | 1219 | ||
1220 | /* undo decrements we'd done in umount_tree() */ | ||
1221 | hlist_for_each_entry(mnt, &head, mnt_hash) | ||
1222 | if (mnt->mnt_ex_mountpoint.mnt) | ||
1223 | mntget(mnt->mnt_ex_mountpoint.mnt); | ||
1224 | |||
1220 | up_write(&namespace_sem); | 1225 | up_write(&namespace_sem); |
1221 | 1226 | ||
1222 | synchronize_rcu(); | 1227 | synchronize_rcu(); |
@@ -1253,6 +1258,9 @@ void umount_tree(struct mount *mnt, int how) | |||
1253 | hlist_add_head(&p->mnt_hash, &tmp_list); | 1258 | hlist_add_head(&p->mnt_hash, &tmp_list); |
1254 | } | 1259 | } |
1255 | 1260 | ||
1261 | hlist_for_each_entry(p, &tmp_list, mnt_hash) | ||
1262 | list_del_init(&p->mnt_child); | ||
1263 | |||
1256 | if (how) | 1264 | if (how) |
1257 | propagate_umount(&tmp_list); | 1265 | propagate_umount(&tmp_list); |
1258 | 1266 | ||
@@ -1263,9 +1271,9 @@ void umount_tree(struct mount *mnt, int how) | |||
1263 | p->mnt_ns = NULL; | 1271 | p->mnt_ns = NULL; |
1264 | if (how < 2) | 1272 | if (how < 2) |
1265 | p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; | 1273 | p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; |
1266 | list_del_init(&p->mnt_child); | ||
1267 | if (mnt_has_parent(p)) { | 1274 | if (mnt_has_parent(p)) { |
1268 | put_mountpoint(p->mnt_mp); | 1275 | put_mountpoint(p->mnt_mp); |
1276 | mnt_add_count(p->mnt_parent, -1); | ||
1269 | /* move the reference to mountpoint into ->mnt_ex_mountpoint */ | 1277 | /* move the reference to mountpoint into ->mnt_ex_mountpoint */ |
1270 | p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint; | 1278 | p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint; |
1271 | p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt; | 1279 | p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt; |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 1c5ff6d58385..6a4f3666e273 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -1412,24 +1412,18 @@ int nfs_fs_proc_net_init(struct net *net) | |||
1412 | p = proc_create("volumes", S_IFREG|S_IRUGO, | 1412 | p = proc_create("volumes", S_IFREG|S_IRUGO, |
1413 | nn->proc_nfsfs, &nfs_volume_list_fops); | 1413 | nn->proc_nfsfs, &nfs_volume_list_fops); |
1414 | if (!p) | 1414 | if (!p) |
1415 | goto error_2; | 1415 | goto error_1; |
1416 | return 0; | 1416 | return 0; |
1417 | 1417 | ||
1418 | error_2: | ||
1419 | remove_proc_entry("servers", nn->proc_nfsfs); | ||
1420 | error_1: | 1418 | error_1: |
1421 | remove_proc_entry("fs/nfsfs", NULL); | 1419 | remove_proc_subtree("nfsfs", net->proc_net); |
1422 | error_0: | 1420 | error_0: |
1423 | return -ENOMEM; | 1421 | return -ENOMEM; |
1424 | } | 1422 | } |
1425 | 1423 | ||
1426 | void nfs_fs_proc_net_exit(struct net *net) | 1424 | void nfs_fs_proc_net_exit(struct net *net) |
1427 | { | 1425 | { |
1428 | struct nfs_net *nn = net_generic(net, nfs_net_id); | 1426 | remove_proc_subtree("nfsfs", net->proc_net); |
1429 | |||
1430 | remove_proc_entry("volumes", nn->proc_nfsfs); | ||
1431 | remove_proc_entry("servers", nn->proc_nfsfs); | ||
1432 | remove_proc_entry("fs/nfsfs", NULL); | ||
1433 | } | 1427 | } |
1434 | 1428 | ||
1435 | /* | 1429 | /* |
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 1359c4a27393..90978075f730 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c | |||
@@ -1269,11 +1269,12 @@ filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page) | |||
1269 | static void filelayout_retry_commit(struct nfs_commit_info *cinfo, int idx) | 1269 | static void filelayout_retry_commit(struct nfs_commit_info *cinfo, int idx) |
1270 | { | 1270 | { |
1271 | struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; | 1271 | struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds; |
1272 | struct pnfs_commit_bucket *bucket = fl_cinfo->buckets; | 1272 | struct pnfs_commit_bucket *bucket; |
1273 | struct pnfs_layout_segment *freeme; | 1273 | struct pnfs_layout_segment *freeme; |
1274 | int i; | 1274 | int i; |
1275 | 1275 | ||
1276 | for (i = idx; i < fl_cinfo->nbuckets; i++, bucket++) { | 1276 | for (i = idx; i < fl_cinfo->nbuckets; i++) { |
1277 | bucket = &fl_cinfo->buckets[i]; | ||
1277 | if (list_empty(&bucket->committing)) | 1278 | if (list_empty(&bucket->committing)) |
1278 | continue; | 1279 | continue; |
1279 | nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo); | 1280 | nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo); |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 92193eddb41d..a8b855ab4e22 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -130,16 +130,15 @@ enum { | |||
130 | */ | 130 | */ |
131 | 131 | ||
132 | struct nfs4_lock_state { | 132 | struct nfs4_lock_state { |
133 | struct list_head ls_locks; /* Other lock stateids */ | 133 | struct list_head ls_locks; /* Other lock stateids */ |
134 | struct nfs4_state * ls_state; /* Pointer to open state */ | 134 | struct nfs4_state * ls_state; /* Pointer to open state */ |
135 | #define NFS_LOCK_INITIALIZED 0 | 135 | #define NFS_LOCK_INITIALIZED 0 |
136 | #define NFS_LOCK_LOST 1 | 136 | #define NFS_LOCK_LOST 1 |
137 | unsigned long ls_flags; | 137 | unsigned long ls_flags; |
138 | struct nfs_seqid_counter ls_seqid; | 138 | struct nfs_seqid_counter ls_seqid; |
139 | nfs4_stateid ls_stateid; | 139 | nfs4_stateid ls_stateid; |
140 | atomic_t ls_count; | 140 | atomic_t ls_count; |
141 | fl_owner_t ls_owner; | 141 | fl_owner_t ls_owner; |
142 | struct work_struct ls_release; | ||
143 | }; | 142 | }; |
144 | 143 | ||
145 | /* bits for nfs4_state->flags */ | 144 | /* bits for nfs4_state->flags */ |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index a043f618cd5a..22fe35104c0c 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -799,18 +799,6 @@ __nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner) | |||
799 | return NULL; | 799 | return NULL; |
800 | } | 800 | } |
801 | 801 | ||
802 | static void | ||
803 | free_lock_state_work(struct work_struct *work) | ||
804 | { | ||
805 | struct nfs4_lock_state *lsp = container_of(work, | ||
806 | struct nfs4_lock_state, ls_release); | ||
807 | struct nfs4_state *state = lsp->ls_state; | ||
808 | struct nfs_server *server = state->owner->so_server; | ||
809 | struct nfs_client *clp = server->nfs_client; | ||
810 | |||
811 | clp->cl_mvops->free_lock_state(server, lsp); | ||
812 | } | ||
813 | |||
814 | /* | 802 | /* |
815 | * Return a compatible lock_state. If no initialized lock_state structure | 803 | * Return a compatible lock_state. If no initialized lock_state structure |
816 | * exists, return an uninitialized one. | 804 | * exists, return an uninitialized one. |
@@ -832,7 +820,6 @@ static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, f | |||
832 | if (lsp->ls_seqid.owner_id < 0) | 820 | if (lsp->ls_seqid.owner_id < 0) |
833 | goto out_free; | 821 | goto out_free; |
834 | INIT_LIST_HEAD(&lsp->ls_locks); | 822 | INIT_LIST_HEAD(&lsp->ls_locks); |
835 | INIT_WORK(&lsp->ls_release, free_lock_state_work); | ||
836 | return lsp; | 823 | return lsp; |
837 | out_free: | 824 | out_free: |
838 | kfree(lsp); | 825 | kfree(lsp); |
@@ -896,12 +883,13 @@ void nfs4_put_lock_state(struct nfs4_lock_state *lsp) | |||
896 | if (list_empty(&state->lock_states)) | 883 | if (list_empty(&state->lock_states)) |
897 | clear_bit(LK_STATE_IN_USE, &state->flags); | 884 | clear_bit(LK_STATE_IN_USE, &state->flags); |
898 | spin_unlock(&state->state_lock); | 885 | spin_unlock(&state->state_lock); |
899 | if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) | 886 | server = state->owner->so_server; |
900 | queue_work(nfsiod_workqueue, &lsp->ls_release); | 887 | if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { |
901 | else { | 888 | struct nfs_client *clp = server->nfs_client; |
902 | server = state->owner->so_server; | 889 | |
890 | clp->cl_mvops->free_lock_state(server, lsp); | ||
891 | } else | ||
903 | nfs4_free_lock_state(server, lsp); | 892 | nfs4_free_lock_state(server, lsp); |
904 | } | ||
905 | } | 893 | } |
906 | 894 | ||
907 | static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) | 895 | static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src) |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index f9821ce6658a..e94457c33ad6 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -2657,6 +2657,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, | |||
2657 | struct xdr_stream *xdr = cd->xdr; | 2657 | struct xdr_stream *xdr = cd->xdr; |
2658 | int start_offset = xdr->buf->len; | 2658 | int start_offset = xdr->buf->len; |
2659 | int cookie_offset; | 2659 | int cookie_offset; |
2660 | u32 name_and_cookie; | ||
2660 | int entry_bytes; | 2661 | int entry_bytes; |
2661 | __be32 nfserr = nfserr_toosmall; | 2662 | __be32 nfserr = nfserr_toosmall; |
2662 | __be64 wire_offset; | 2663 | __be64 wire_offset; |
@@ -2718,7 +2719,14 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen, | |||
2718 | cd->rd_maxcount -= entry_bytes; | 2719 | cd->rd_maxcount -= entry_bytes; |
2719 | if (!cd->rd_dircount) | 2720 | if (!cd->rd_dircount) |
2720 | goto fail; | 2721 | goto fail; |
2721 | cd->rd_dircount--; | 2722 | /* |
2723 | * RFC 3530 14.2.24 describes rd_dircount as only a "hint", so | ||
2724 | * let's always let through the first entry, at least: | ||
2725 | */ | ||
2726 | name_and_cookie = 4 * XDR_QUADLEN(namlen) + 8; | ||
2727 | if (name_and_cookie > cd->rd_dircount && cd->cookie_offset) | ||
2728 | goto fail; | ||
2729 | cd->rd_dircount -= min(cd->rd_dircount, name_and_cookie); | ||
2722 | cd->cookie_offset = cookie_offset; | 2730 | cd->cookie_offset = cookie_offset; |
2723 | skip_entry: | 2731 | skip_entry: |
2724 | cd->common.err = nfs_ok; | 2732 | cd->common.err = nfs_ok; |
@@ -3321,6 +3329,10 @@ nfsd4_encode_readdir(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4 | |||
3321 | } | 3329 | } |
3322 | maxcount = min_t(int, maxcount-16, bytes_left); | 3330 | maxcount = min_t(int, maxcount-16, bytes_left); |
3323 | 3331 | ||
3332 | /* RFC 3530 14.2.24 allows us to ignore dircount when it's 0: */ | ||
3333 | if (!readdir->rd_dircount) | ||
3334 | readdir->rd_dircount = INT_MAX; | ||
3335 | |||
3324 | readdir->xdr = xdr; | 3336 | readdir->xdr = xdr; |
3325 | readdir->rd_maxcount = maxcount; | 3337 | readdir->rd_maxcount = maxcount; |
3326 | readdir->common.err = 0; | 3338 | readdir->common.err = 0; |
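The readdir hunks above charge each entry against rd_dircount as 4 * XDR_QUADLEN(namlen) + 8, i.e. the name length rounded up to a whole 4-byte XDR quad plus 8 bytes for the cookie, treating the RFC 3530 dircount purely as a hint. A small sketch of that arithmetic, assuming the usual XDR_QUADLEN definition of rounding a byte count up to 4-byte units:

#include <stdio.h>

/* XDR_QUADLEN: number of 4-byte XDR quads needed to hold len bytes. */
#define XDR_QUADLEN(len) (((len) + 3) >> 2)

int main(void)
{
	unsigned int namlen;

	/* Bytes charged against rd_dircount for a few name lengths. */
	for (namlen = 1; namlen <= 8; namlen++)
		printf("namlen=%u charges %u bytes against rd_dircount\n",
		       namlen, 4 * XDR_QUADLEN(namlen) + 8);
	return 0;
}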
diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c index 238a5930cb3c..9d7e2b9659cb 100644 --- a/fs/notify/fdinfo.c +++ b/fs/notify/fdinfo.c | |||
@@ -42,7 +42,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode) | |||
42 | { | 42 | { |
43 | struct { | 43 | struct { |
44 | struct file_handle handle; | 44 | struct file_handle handle; |
45 | u8 pad[64]; | 45 | u8 pad[MAX_HANDLE_SZ]; |
46 | } f; | 46 | } f; |
47 | int size, ret, i; | 47 | int size, ret, i; |
48 | 48 | ||
@@ -50,7 +50,7 @@ static int show_mark_fhandle(struct seq_file *m, struct inode *inode) | |||
50 | size = f.handle.handle_bytes >> 2; | 50 | size = f.handle.handle_bytes >> 2; |
51 | 51 | ||
52 | ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0); | 52 | ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0); |
53 | if ((ret == 255) || (ret == -ENOSPC)) { | 53 | if ((ret == FILEID_INVALID) || (ret < 0)) { |
54 | WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret); | 54 | WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret); |
55 | return 0; | 55 | return 0; |
56 | } | 56 | } |
diff --git a/fs/pnode.c b/fs/pnode.c index 302bf22c4a30..aae331a5d03b 100644 --- a/fs/pnode.c +++ b/fs/pnode.c | |||
@@ -381,6 +381,7 @@ static void __propagate_umount(struct mount *mnt) | |||
381 | * other children | 381 | * other children |
382 | */ | 382 | */ |
383 | if (child && list_empty(&child->mnt_mounts)) { | 383 | if (child && list_empty(&child->mnt_mounts)) { |
384 | list_del_init(&child->mnt_child); | ||
384 | hlist_del_init_rcu(&child->mnt_hash); | 385 | hlist_del_init_rcu(&child->mnt_hash); |
385 | hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash); | 386 | hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash); |
386 | } | 387 | } |
diff --git a/fs/sync.c b/fs/sync.c --- a/fs/sync.c +++ b/fs/sync.c | |||
@@ -65,7 +65,7 @@ int sync_filesystem(struct super_block *sb) | |||
65 | return ret; | 65 | return ret; |
66 | return __sync_filesystem(sb, 1); | 66 | return __sync_filesystem(sb, 1); |
67 | } | 67 | } |
68 | EXPORT_SYMBOL_GPL(sync_filesystem); | 68 | EXPORT_SYMBOL(sync_filesystem); |
69 | 69 | ||
70 | static void sync_inodes_one_sb(struct super_block *sb, void *arg) | 70 | static void sync_inodes_one_sb(struct super_block *sb, void *arg) |
71 | { | 71 | { |
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c index 6eaf5edf1ea1..e77db621ec89 100644 --- a/fs/udf/ialloc.c +++ b/fs/udf/ialloc.c | |||
@@ -45,7 +45,7 @@ void udf_free_inode(struct inode *inode) | |||
45 | udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1); | 45 | udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1); |
46 | } | 46 | } |
47 | 47 | ||
48 | struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err) | 48 | struct inode *udf_new_inode(struct inode *dir, umode_t mode) |
49 | { | 49 | { |
50 | struct super_block *sb = dir->i_sb; | 50 | struct super_block *sb = dir->i_sb; |
51 | struct udf_sb_info *sbi = UDF_SB(sb); | 51 | struct udf_sb_info *sbi = UDF_SB(sb); |
@@ -55,14 +55,12 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err) | |||
55 | struct udf_inode_info *iinfo; | 55 | struct udf_inode_info *iinfo; |
56 | struct udf_inode_info *dinfo = UDF_I(dir); | 56 | struct udf_inode_info *dinfo = UDF_I(dir); |
57 | struct logicalVolIntegrityDescImpUse *lvidiu; | 57 | struct logicalVolIntegrityDescImpUse *lvidiu; |
58 | int err; | ||
58 | 59 | ||
59 | inode = new_inode(sb); | 60 | inode = new_inode(sb); |
60 | 61 | ||
61 | if (!inode) { | 62 | if (!inode) |
62 | *err = -ENOMEM; | 63 | return ERR_PTR(-ENOMEM); |
63 | return NULL; | ||
64 | } | ||
65 | *err = -ENOSPC; | ||
66 | 64 | ||
67 | iinfo = UDF_I(inode); | 65 | iinfo = UDF_I(inode); |
68 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) { | 66 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_EXTENDED_FE)) { |
@@ -80,21 +78,22 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err) | |||
80 | } | 78 | } |
81 | if (!iinfo->i_ext.i_data) { | 79 | if (!iinfo->i_ext.i_data) { |
82 | iput(inode); | 80 | iput(inode); |
83 | *err = -ENOMEM; | 81 | return ERR_PTR(-ENOMEM); |
84 | return NULL; | ||
85 | } | 82 | } |
86 | 83 | ||
84 | err = -ENOSPC; | ||
87 | block = udf_new_block(dir->i_sb, NULL, | 85 | block = udf_new_block(dir->i_sb, NULL, |
88 | dinfo->i_location.partitionReferenceNum, | 86 | dinfo->i_location.partitionReferenceNum, |
89 | start, err); | 87 | start, &err); |
90 | if (*err) { | 88 | if (err) { |
91 | iput(inode); | 89 | iput(inode); |
92 | return NULL; | 90 | return ERR_PTR(err); |
93 | } | 91 | } |
94 | 92 | ||
95 | lvidiu = udf_sb_lvidiu(sb); | 93 | lvidiu = udf_sb_lvidiu(sb); |
96 | if (lvidiu) { | 94 | if (lvidiu) { |
97 | iinfo->i_unique = lvid_get_unique_id(sb); | 95 | iinfo->i_unique = lvid_get_unique_id(sb); |
96 | inode->i_generation = iinfo->i_unique; | ||
98 | mutex_lock(&sbi->s_alloc_mutex); | 97 | mutex_lock(&sbi->s_alloc_mutex); |
99 | if (S_ISDIR(mode)) | 98 | if (S_ISDIR(mode)) |
100 | le32_add_cpu(&lvidiu->numDirs, 1); | 99 | le32_add_cpu(&lvidiu->numDirs, 1); |
@@ -123,9 +122,12 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err) | |||
123 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; | 122 | iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG; |
124 | inode->i_mtime = inode->i_atime = inode->i_ctime = | 123 | inode->i_mtime = inode->i_atime = inode->i_ctime = |
125 | iinfo->i_crtime = current_fs_time(inode->i_sb); | 124 | iinfo->i_crtime = current_fs_time(inode->i_sb); |
126 | insert_inode_hash(inode); | 125 | if (unlikely(insert_inode_locked(inode) < 0)) { |
126 | make_bad_inode(inode); | ||
127 | iput(inode); | ||
128 | return ERR_PTR(-EIO); | ||
129 | } | ||
127 | mark_inode_dirty(inode); | 130 | mark_inode_dirty(inode); |
128 | 131 | ||
129 | *err = 0; | ||
130 | return inode; | 132 | return inode; |
131 | } | 133 | } |
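udf_new_inode() above moves from returning NULL plus an int *err out-parameter to the ERR_PTR convention: a negative errno is encoded in the returned pointer, and callers (as in the fs/udf/namei.c and fs/udf/super.c hunks below) test it with IS_ERR() and decode it with PTR_ERR(). A minimal userspace sketch of that encoding, assuming errno values small enough to live in the top page of the address space; it is illustrative only, not the kernel's definitions.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a negative errno in a pointer-sized value. */
static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for udf_new_inode(): returns an object or an encoded errno. */
static void *new_object(int fail)
{
	static int object = 1;

	if (fail)
		return ERR_PTR(-ENOSPC);
	return &object;
}

int main(void)
{
	void *obj = new_object(1);

	if (IS_ERR(obj))
		printf("allocation failed: %ld\n", PTR_ERR(obj));
	else
		printf("got object at %p\n", obj);
	return 0;
}

Collapsing the error channel into the return value is what lets the later hunks drop the separate err locals and replace the NULL checks with IS_ERR() tests.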
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 236cd48184c2..08598843288f 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
@@ -51,7 +51,6 @@ MODULE_LICENSE("GPL"); | |||
51 | 51 | ||
52 | static umode_t udf_convert_permissions(struct fileEntry *); | 52 | static umode_t udf_convert_permissions(struct fileEntry *); |
53 | static int udf_update_inode(struct inode *, int); | 53 | static int udf_update_inode(struct inode *, int); |
54 | static void udf_fill_inode(struct inode *, struct buffer_head *); | ||
55 | static int udf_sync_inode(struct inode *inode); | 54 | static int udf_sync_inode(struct inode *inode); |
56 | static int udf_alloc_i_data(struct inode *inode, size_t size); | 55 | static int udf_alloc_i_data(struct inode *inode, size_t size); |
57 | static sector_t inode_getblk(struct inode *, sector_t, int *, int *); | 56 | static sector_t inode_getblk(struct inode *, sector_t, int *, int *); |
@@ -1271,12 +1270,33 @@ update_time: | |||
1271 | return 0; | 1270 | return 0; |
1272 | } | 1271 | } |
1273 | 1272 | ||
1274 | static void __udf_read_inode(struct inode *inode) | 1273 | /* |
1274 | * Maximum length of linked list formed by ICB hierarchy. The chosen number is | ||
1275 | * arbitrary - just that we hopefully don't limit any real use of rewritten | ||
1276 | * inode on write-once media but avoid looping for too long on corrupted media. | ||
1277 | */ | ||
1278 | #define UDF_MAX_ICB_NESTING 1024 | ||
1279 | |||
1280 | static int udf_read_inode(struct inode *inode) | ||
1275 | { | 1281 | { |
1276 | struct buffer_head *bh = NULL; | 1282 | struct buffer_head *bh = NULL; |
1277 | struct fileEntry *fe; | 1283 | struct fileEntry *fe; |
1284 | struct extendedFileEntry *efe; | ||
1278 | uint16_t ident; | 1285 | uint16_t ident; |
1279 | struct udf_inode_info *iinfo = UDF_I(inode); | 1286 | struct udf_inode_info *iinfo = UDF_I(inode); |
1287 | struct udf_sb_info *sbi = UDF_SB(inode->i_sb); | ||
1288 | struct kernel_lb_addr *iloc = &iinfo->i_location; | ||
1289 | unsigned int link_count; | ||
1290 | unsigned int indirections = 0; | ||
1291 | int ret = -EIO; | ||
1292 | |||
1293 | reread: | ||
1294 | if (iloc->logicalBlockNum >= | ||
1295 | sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) { | ||
1296 | udf_debug("block=%d, partition=%d out of range\n", | ||
1297 | iloc->logicalBlockNum, iloc->partitionReferenceNum); | ||
1298 | return -EIO; | ||
1299 | } | ||
1280 | 1300 | ||
1281 | /* | 1301 | /* |
1282 | * Set defaults, but the inode is still incomplete! | 1302 | * Set defaults, but the inode is still incomplete! |
@@ -1290,78 +1310,54 @@ static void __udf_read_inode(struct inode *inode) | |||
1290 | * i_nlink = 1 | 1310 | * i_nlink = 1 |
1291 | * i_op = NULL; | 1311 | * i_op = NULL; |
1292 | */ | 1312 | */ |
1293 | bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident); | 1313 | bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident); |
1294 | if (!bh) { | 1314 | if (!bh) { |
1295 | udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino); | 1315 | udf_err(inode->i_sb, "(ino %ld) failed !bh\n", inode->i_ino); |
1296 | make_bad_inode(inode); | 1316 | return -EIO; |
1297 | return; | ||
1298 | } | 1317 | } |
1299 | 1318 | ||
1300 | if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && | 1319 | if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE && |
1301 | ident != TAG_IDENT_USE) { | 1320 | ident != TAG_IDENT_USE) { |
1302 | udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n", | 1321 | udf_err(inode->i_sb, "(ino %ld) failed ident=%d\n", |
1303 | inode->i_ino, ident); | 1322 | inode->i_ino, ident); |
1304 | brelse(bh); | 1323 | goto out; |
1305 | make_bad_inode(inode); | ||
1306 | return; | ||
1307 | } | 1324 | } |
1308 | 1325 | ||
1309 | fe = (struct fileEntry *)bh->b_data; | 1326 | fe = (struct fileEntry *)bh->b_data; |
1327 | efe = (struct extendedFileEntry *)bh->b_data; | ||
1310 | 1328 | ||
1311 | if (fe->icbTag.strategyType == cpu_to_le16(4096)) { | 1329 | if (fe->icbTag.strategyType == cpu_to_le16(4096)) { |
1312 | struct buffer_head *ibh; | 1330 | struct buffer_head *ibh; |
1313 | 1331 | ||
1314 | ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1, | 1332 | ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident); |
1315 | &ident); | ||
1316 | if (ident == TAG_IDENT_IE && ibh) { | 1333 | if (ident == TAG_IDENT_IE && ibh) { |
1317 | struct buffer_head *nbh = NULL; | ||
1318 | struct kernel_lb_addr loc; | 1334 | struct kernel_lb_addr loc; |
1319 | struct indirectEntry *ie; | 1335 | struct indirectEntry *ie; |
1320 | 1336 | ||
1321 | ie = (struct indirectEntry *)ibh->b_data; | 1337 | ie = (struct indirectEntry *)ibh->b_data; |
1322 | loc = lelb_to_cpu(ie->indirectICB.extLocation); | 1338 | loc = lelb_to_cpu(ie->indirectICB.extLocation); |
1323 | 1339 | ||
1324 | if (ie->indirectICB.extLength && | 1340 | if (ie->indirectICB.extLength) { |
1325 | (nbh = udf_read_ptagged(inode->i_sb, &loc, 0, | 1341 | brelse(ibh); |
1326 | &ident))) { | 1342 | memcpy(&iinfo->i_location, &loc, |
1327 | if (ident == TAG_IDENT_FE || | 1343 | sizeof(struct kernel_lb_addr)); |
1328 | ident == TAG_IDENT_EFE) { | 1344 | if (++indirections > UDF_MAX_ICB_NESTING) { |
1329 | memcpy(&iinfo->i_location, | 1345 | udf_err(inode->i_sb, |
1330 | &loc, | 1346 | "too many ICBs in ICB hierarchy" |
1331 | sizeof(struct kernel_lb_addr)); | 1347 | " (max %d supported)\n", |
1332 | brelse(bh); | 1348 | UDF_MAX_ICB_NESTING); |
1333 | brelse(ibh); | 1349 | goto out; |
1334 | brelse(nbh); | ||
1335 | __udf_read_inode(inode); | ||
1336 | return; | ||
1337 | } | 1350 | } |
1338 | brelse(nbh); | 1351 | brelse(bh); |
1352 | goto reread; | ||
1339 | } | 1353 | } |
1340 | } | 1354 | } |
1341 | brelse(ibh); | 1355 | brelse(ibh); |
1342 | } else if (fe->icbTag.strategyType != cpu_to_le16(4)) { | 1356 | } else if (fe->icbTag.strategyType != cpu_to_le16(4)) { |
1343 | udf_err(inode->i_sb, "unsupported strategy type: %d\n", | 1357 | udf_err(inode->i_sb, "unsupported strategy type: %d\n", |
1344 | le16_to_cpu(fe->icbTag.strategyType)); | 1358 | le16_to_cpu(fe->icbTag.strategyType)); |
1345 | brelse(bh); | 1359 | goto out; |
1346 | make_bad_inode(inode); | ||
1347 | return; | ||
1348 | } | 1360 | } |
1349 | udf_fill_inode(inode, bh); | ||
1350 | |||
1351 | brelse(bh); | ||
1352 | } | ||
1353 | |||
1354 | static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | ||
1355 | { | ||
1356 | struct fileEntry *fe; | ||
1357 | struct extendedFileEntry *efe; | ||
1358 | struct udf_sb_info *sbi = UDF_SB(inode->i_sb); | ||
1359 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
1360 | unsigned int link_count; | ||
1361 | |||
1362 | fe = (struct fileEntry *)bh->b_data; | ||
1363 | efe = (struct extendedFileEntry *)bh->b_data; | ||
1364 | |||
1365 | if (fe->icbTag.strategyType == cpu_to_le16(4)) | 1361 | if (fe->icbTag.strategyType == cpu_to_le16(4)) |
1366 | iinfo->i_strat4096 = 0; | 1362 | iinfo->i_strat4096 = 0; |
1367 | else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */ | 1363 | else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */ |
@@ -1378,11 +1374,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1378 | if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) { | 1374 | if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) { |
1379 | iinfo->i_efe = 1; | 1375 | iinfo->i_efe = 1; |
1380 | iinfo->i_use = 0; | 1376 | iinfo->i_use = 0; |
1381 | if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - | 1377 | ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize - |
1382 | sizeof(struct extendedFileEntry))) { | 1378 | sizeof(struct extendedFileEntry)); |
1383 | make_bad_inode(inode); | 1379 | if (ret) |
1384 | return; | 1380 | goto out; |
1385 | } | ||
1386 | memcpy(iinfo->i_ext.i_data, | 1381 | memcpy(iinfo->i_ext.i_data, |
1387 | bh->b_data + sizeof(struct extendedFileEntry), | 1382 | bh->b_data + sizeof(struct extendedFileEntry), |
1388 | inode->i_sb->s_blocksize - | 1383 | inode->i_sb->s_blocksize - |
@@ -1390,11 +1385,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1390 | } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { | 1385 | } else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) { |
1391 | iinfo->i_efe = 0; | 1386 | iinfo->i_efe = 0; |
1392 | iinfo->i_use = 0; | 1387 | iinfo->i_use = 0; |
1393 | if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - | 1388 | ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize - |
1394 | sizeof(struct fileEntry))) { | 1389 | sizeof(struct fileEntry)); |
1395 | make_bad_inode(inode); | 1390 | if (ret) |
1396 | return; | 1391 | goto out; |
1397 | } | ||
1398 | memcpy(iinfo->i_ext.i_data, | 1392 | memcpy(iinfo->i_ext.i_data, |
1399 | bh->b_data + sizeof(struct fileEntry), | 1393 | bh->b_data + sizeof(struct fileEntry), |
1400 | inode->i_sb->s_blocksize - sizeof(struct fileEntry)); | 1394 | inode->i_sb->s_blocksize - sizeof(struct fileEntry)); |
@@ -1404,18 +1398,18 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1404 | iinfo->i_lenAlloc = le32_to_cpu( | 1398 | iinfo->i_lenAlloc = le32_to_cpu( |
1405 | ((struct unallocSpaceEntry *)bh->b_data)-> | 1399 | ((struct unallocSpaceEntry *)bh->b_data)-> |
1406 | lengthAllocDescs); | 1400 | lengthAllocDescs); |
1407 | if (udf_alloc_i_data(inode, inode->i_sb->s_blocksize - | 1401 | ret = udf_alloc_i_data(inode, inode->i_sb->s_blocksize - |
1408 | sizeof(struct unallocSpaceEntry))) { | 1402 | sizeof(struct unallocSpaceEntry)); |
1409 | make_bad_inode(inode); | 1403 | if (ret) |
1410 | return; | 1404 | goto out; |
1411 | } | ||
1412 | memcpy(iinfo->i_ext.i_data, | 1405 | memcpy(iinfo->i_ext.i_data, |
1413 | bh->b_data + sizeof(struct unallocSpaceEntry), | 1406 | bh->b_data + sizeof(struct unallocSpaceEntry), |
1414 | inode->i_sb->s_blocksize - | 1407 | inode->i_sb->s_blocksize - |
1415 | sizeof(struct unallocSpaceEntry)); | 1408 | sizeof(struct unallocSpaceEntry)); |
1416 | return; | 1409 | return 0; |
1417 | } | 1410 | } |
1418 | 1411 | ||
1412 | ret = -EIO; | ||
1419 | read_lock(&sbi->s_cred_lock); | 1413 | read_lock(&sbi->s_cred_lock); |
1420 | i_uid_write(inode, le32_to_cpu(fe->uid)); | 1414 | i_uid_write(inode, le32_to_cpu(fe->uid)); |
1421 | if (!uid_valid(inode->i_uid) || | 1415 | if (!uid_valid(inode->i_uid) || |
@@ -1441,8 +1435,10 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1441 | read_unlock(&sbi->s_cred_lock); | 1435 | read_unlock(&sbi->s_cred_lock); |
1442 | 1436 | ||
1443 | link_count = le16_to_cpu(fe->fileLinkCount); | 1437 | link_count = le16_to_cpu(fe->fileLinkCount); |
1444 | if (!link_count) | 1438 | if (!link_count) { |
1445 | link_count = 1; | 1439 | ret = -ESTALE; |
1440 | goto out; | ||
1441 | } | ||
1446 | set_nlink(inode, link_count); | 1442 | set_nlink(inode, link_count); |
1447 | 1443 | ||
1448 | inode->i_size = le64_to_cpu(fe->informationLength); | 1444 | inode->i_size = le64_to_cpu(fe->informationLength); |
@@ -1488,6 +1484,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1488 | iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs); | 1484 | iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs); |
1489 | iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint); | 1485 | iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint); |
1490 | } | 1486 | } |
1487 | inode->i_generation = iinfo->i_unique; | ||
1491 | 1488 | ||
1492 | switch (fe->icbTag.fileType) { | 1489 | switch (fe->icbTag.fileType) { |
1493 | case ICBTAG_FILE_TYPE_DIRECTORY: | 1490 | case ICBTAG_FILE_TYPE_DIRECTORY: |
@@ -1537,8 +1534,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1537 | default: | 1534 | default: |
1538 | udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n", | 1535 | udf_err(inode->i_sb, "(ino %ld) failed unknown file type=%d\n", |
1539 | inode->i_ino, fe->icbTag.fileType); | 1536 | inode->i_ino, fe->icbTag.fileType); |
1540 | make_bad_inode(inode); | 1537 | goto out; |
1541 | return; | ||
1542 | } | 1538 | } |
1543 | if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { | 1539 | if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) { |
1544 | struct deviceSpec *dsea = | 1540 | struct deviceSpec *dsea = |
@@ -1549,8 +1545,12 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh) | |||
1549 | le32_to_cpu(dsea->minorDeviceIdent))); | 1545 | le32_to_cpu(dsea->minorDeviceIdent))); |
1550 | /* Developer ID ??? */ | 1546 | /* Developer ID ??? */ |
1551 | } else | 1547 | } else |
1552 | make_bad_inode(inode); | 1548 | goto out; |
1553 | } | 1549 | } |
1550 | ret = 0; | ||
1551 | out: | ||
1552 | brelse(bh); | ||
1553 | return ret; | ||
1554 | } | 1554 | } |
1555 | 1555 | ||
1556 | static int udf_alloc_i_data(struct inode *inode, size_t size) | 1556 | static int udf_alloc_i_data(struct inode *inode, size_t size) |
@@ -1664,7 +1664,7 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1664 | FE_PERM_U_DELETE | FE_PERM_U_CHATTR)); | 1664 | FE_PERM_U_DELETE | FE_PERM_U_CHATTR)); |
1665 | fe->permissions = cpu_to_le32(udfperms); | 1665 | fe->permissions = cpu_to_le32(udfperms); |
1666 | 1666 | ||
1667 | if (S_ISDIR(inode->i_mode)) | 1667 | if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0) |
1668 | fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1); | 1668 | fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1); |
1669 | else | 1669 | else |
1670 | fe->fileLinkCount = cpu_to_le16(inode->i_nlink); | 1670 | fe->fileLinkCount = cpu_to_le16(inode->i_nlink); |
@@ -1830,32 +1830,23 @@ struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino) | |||
1830 | { | 1830 | { |
1831 | unsigned long block = udf_get_lb_pblock(sb, ino, 0); | 1831 | unsigned long block = udf_get_lb_pblock(sb, ino, 0); |
1832 | struct inode *inode = iget_locked(sb, block); | 1832 | struct inode *inode = iget_locked(sb, block); |
1833 | int err; | ||
1833 | 1834 | ||
1834 | if (!inode) | 1835 | if (!inode) |
1835 | return NULL; | 1836 | return ERR_PTR(-ENOMEM); |
1836 | |||
1837 | if (inode->i_state & I_NEW) { | ||
1838 | memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr)); | ||
1839 | __udf_read_inode(inode); | ||
1840 | unlock_new_inode(inode); | ||
1841 | } | ||
1842 | 1837 | ||
1843 | if (is_bad_inode(inode)) | 1838 | if (!(inode->i_state & I_NEW)) |
1844 | goto out_iput; | 1839 | return inode; |
1845 | 1840 | ||
1846 | if (ino->logicalBlockNum >= UDF_SB(sb)-> | 1841 | memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr)); |
1847 | s_partmaps[ino->partitionReferenceNum].s_partition_len) { | 1842 | err = udf_read_inode(inode); |
1848 | udf_debug("block=%d, partition=%d out of range\n", | 1843 | if (err < 0) { |
1849 | ino->logicalBlockNum, ino->partitionReferenceNum); | 1844 | iget_failed(inode); |
1850 | make_bad_inode(inode); | 1845 | return ERR_PTR(err); |
1851 | goto out_iput; | ||
1852 | } | 1846 | } |
1847 | unlock_new_inode(inode); | ||
1853 | 1848 | ||
1854 | return inode; | 1849 | return inode; |
1855 | |||
1856 | out_iput: | ||
1857 | iput(inode); | ||
1858 | return NULL; | ||
1859 | } | 1850 | } |
1860 | 1851 | ||
1861 | int udf_add_aext(struct inode *inode, struct extent_position *epos, | 1852 | int udf_add_aext(struct inode *inode, struct extent_position *epos, |
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 83a06001742b..c12e260fd6c4 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
@@ -270,9 +270,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, | |||
270 | NULL, 0), | 270 | NULL, 0), |
271 | }; | 271 | }; |
272 | inode = udf_iget(dir->i_sb, lb); | 272 | inode = udf_iget(dir->i_sb, lb); |
273 | if (!inode) { | 273 | if (IS_ERR(inode)) |
274 | return ERR_PTR(-EACCES); | 274 | return inode; |
275 | } | ||
276 | } else | 275 | } else |
277 | #endif /* UDF_RECOVERY */ | 276 | #endif /* UDF_RECOVERY */ |
278 | 277 | ||
@@ -285,9 +284,8 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, | |||
285 | 284 | ||
286 | loc = lelb_to_cpu(cfi.icb.extLocation); | 285 | loc = lelb_to_cpu(cfi.icb.extLocation); |
287 | inode = udf_iget(dir->i_sb, &loc); | 286 | inode = udf_iget(dir->i_sb, &loc); |
288 | if (!inode) { | 287 | if (IS_ERR(inode)) |
289 | return ERR_PTR(-EACCES); | 288 | return ERR_CAST(inode); |
290 | } | ||
291 | } | 289 | } |
292 | 290 | ||
293 | return d_splice_alias(inode, dentry); | 291 | return d_splice_alias(inode, dentry); |
@@ -550,32 +548,18 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi, | |||
550 | return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL); | 548 | return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL); |
551 | } | 549 | } |
552 | 550 | ||
553 | static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode, | 551 | static int udf_add_nondir(struct dentry *dentry, struct inode *inode) |
554 | bool excl) | ||
555 | { | 552 | { |
553 | struct udf_inode_info *iinfo = UDF_I(inode); | ||
554 | struct inode *dir = dentry->d_parent->d_inode; | ||
556 | struct udf_fileident_bh fibh; | 555 | struct udf_fileident_bh fibh; |
557 | struct inode *inode; | ||
558 | struct fileIdentDesc cfi, *fi; | 556 | struct fileIdentDesc cfi, *fi; |
559 | int err; | 557 | int err; |
560 | struct udf_inode_info *iinfo; | ||
561 | |||
562 | inode = udf_new_inode(dir, mode, &err); | ||
563 | if (!inode) { | ||
564 | return err; | ||
565 | } | ||
566 | |||
567 | iinfo = UDF_I(inode); | ||
568 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) | ||
569 | inode->i_data.a_ops = &udf_adinicb_aops; | ||
570 | else | ||
571 | inode->i_data.a_ops = &udf_aops; | ||
572 | inode->i_op = &udf_file_inode_operations; | ||
573 | inode->i_fop = &udf_file_operations; | ||
574 | mark_inode_dirty(inode); | ||
575 | 558 | ||
576 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); | 559 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); |
577 | if (!fi) { | 560 | if (unlikely(!fi)) { |
578 | inode_dec_link_count(inode); | 561 | inode_dec_link_count(inode); |
562 | unlock_new_inode(inode); | ||
579 | iput(inode); | 563 | iput(inode); |
580 | return err; | 564 | return err; |
581 | } | 565 | } |
@@ -589,23 +573,21 @@ static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode, | |||
589 | if (fibh.sbh != fibh.ebh) | 573 | if (fibh.sbh != fibh.ebh) |
590 | brelse(fibh.ebh); | 574 | brelse(fibh.ebh); |
591 | brelse(fibh.sbh); | 575 | brelse(fibh.sbh); |
576 | unlock_new_inode(inode); | ||
592 | d_instantiate(dentry, inode); | 577 | d_instantiate(dentry, inode); |
593 | 578 | ||
594 | return 0; | 579 | return 0; |
595 | } | 580 | } |
596 | 581 | ||
597 | static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) | 582 | static int udf_create(struct inode *dir, struct dentry *dentry, umode_t mode, |
583 | bool excl) | ||
598 | { | 584 | { |
599 | struct inode *inode; | 585 | struct inode *inode = udf_new_inode(dir, mode); |
600 | struct udf_inode_info *iinfo; | ||
601 | int err; | ||
602 | 586 | ||
603 | inode = udf_new_inode(dir, mode, &err); | 587 | if (IS_ERR(inode)) |
604 | if (!inode) | 588 | return PTR_ERR(inode); |
605 | return err; | ||
606 | 589 | ||
607 | iinfo = UDF_I(inode); | 590 | if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) |
608 | if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) | ||
609 | inode->i_data.a_ops = &udf_adinicb_aops; | 591 | inode->i_data.a_ops = &udf_adinicb_aops; |
610 | else | 592 | else |
611 | inode->i_data.a_ops = &udf_aops; | 593 | inode->i_data.a_ops = &udf_aops; |
@@ -613,7 +595,25 @@ static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
613 | inode->i_fop = &udf_file_operations; | 595 | inode->i_fop = &udf_file_operations; |
614 | mark_inode_dirty(inode); | 596 | mark_inode_dirty(inode); |
615 | 597 | ||
598 | return udf_add_nondir(dentry, inode); | ||
599 | } | ||
600 | |||
601 | static int udf_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) | ||
602 | { | ||
603 | struct inode *inode = udf_new_inode(dir, mode); | ||
604 | |||
605 | if (IS_ERR(inode)) | ||
606 | return PTR_ERR(inode); | ||
607 | |||
608 | if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) | ||
609 | inode->i_data.a_ops = &udf_adinicb_aops; | ||
610 | else | ||
611 | inode->i_data.a_ops = &udf_aops; | ||
612 | inode->i_op = &udf_file_inode_operations; | ||
613 | inode->i_fop = &udf_file_operations; | ||
614 | mark_inode_dirty(inode); | ||
616 | d_tmpfile(dentry, inode); | 615 | d_tmpfile(dentry, inode); |
616 | unlock_new_inode(inode); | ||
617 | return 0; | 617 | return 0; |
618 | } | 618 | } |
619 | 619 | ||
@@ -621,44 +621,16 @@ static int udf_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, | |||
621 | dev_t rdev) | 621 | dev_t rdev) |
622 | { | 622 | { |
623 | struct inode *inode; | 623 | struct inode *inode; |
624 | struct udf_fileident_bh fibh; | ||
625 | struct fileIdentDesc cfi, *fi; | ||
626 | int err; | ||
627 | struct udf_inode_info *iinfo; | ||
628 | 624 | ||
629 | if (!old_valid_dev(rdev)) | 625 | if (!old_valid_dev(rdev)) |
630 | return -EINVAL; | 626 | return -EINVAL; |
631 | 627 | ||
632 | err = -EIO; | 628 | inode = udf_new_inode(dir, mode); |
633 | inode = udf_new_inode(dir, mode, &err); | 629 | if (IS_ERR(inode)) |
634 | if (!inode) | 630 | return PTR_ERR(inode); |
635 | goto out; | ||
636 | 631 | ||
637 | iinfo = UDF_I(inode); | ||
638 | init_special_inode(inode, mode, rdev); | 632 | init_special_inode(inode, mode, rdev); |
639 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); | 633 | return udf_add_nondir(dentry, inode); |
640 | if (!fi) { | ||
641 | inode_dec_link_count(inode); | ||
642 | iput(inode); | ||
643 | return err; | ||
644 | } | ||
645 | cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); | ||
646 | cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); | ||
647 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = | ||
648 | cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); | ||
649 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); | ||
650 | if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) | ||
651 | mark_inode_dirty(dir); | ||
652 | mark_inode_dirty(inode); | ||
653 | |||
654 | if (fibh.sbh != fibh.ebh) | ||
655 | brelse(fibh.ebh); | ||
656 | brelse(fibh.sbh); | ||
657 | d_instantiate(dentry, inode); | ||
658 | err = 0; | ||
659 | |||
660 | out: | ||
661 | return err; | ||
662 | } | 634 | } |
663 | 635 | ||
664 | static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | 636 | static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) |
@@ -670,10 +642,9 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
670 | struct udf_inode_info *dinfo = UDF_I(dir); | 642 | struct udf_inode_info *dinfo = UDF_I(dir); |
671 | struct udf_inode_info *iinfo; | 643 | struct udf_inode_info *iinfo; |
672 | 644 | ||
673 | err = -EIO; | 645 | inode = udf_new_inode(dir, S_IFDIR | mode); |
674 | inode = udf_new_inode(dir, S_IFDIR | mode, &err); | 646 | if (IS_ERR(inode)) |
675 | if (!inode) | 647 | return PTR_ERR(inode); |
676 | goto out; | ||
677 | 648 | ||
678 | iinfo = UDF_I(inode); | 649 | iinfo = UDF_I(inode); |
679 | inode->i_op = &udf_dir_inode_operations; | 650 | inode->i_op = &udf_dir_inode_operations; |
@@ -681,6 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
681 | fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err); | 652 | fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err); |
682 | if (!fi) { | 653 | if (!fi) { |
683 | inode_dec_link_count(inode); | 654 | inode_dec_link_count(inode); |
655 | unlock_new_inode(inode); | ||
684 | iput(inode); | 656 | iput(inode); |
685 | goto out; | 657 | goto out; |
686 | } | 658 | } |
@@ -699,6 +671,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
699 | if (!fi) { | 671 | if (!fi) { |
700 | clear_nlink(inode); | 672 | clear_nlink(inode); |
701 | mark_inode_dirty(inode); | 673 | mark_inode_dirty(inode); |
674 | unlock_new_inode(inode); | ||
702 | iput(inode); | 675 | iput(inode); |
703 | goto out; | 676 | goto out; |
704 | } | 677 | } |
@@ -710,6 +683,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
710 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); | 683 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); |
711 | inc_nlink(dir); | 684 | inc_nlink(dir); |
712 | mark_inode_dirty(dir); | 685 | mark_inode_dirty(dir); |
686 | unlock_new_inode(inode); | ||
713 | d_instantiate(dentry, inode); | 687 | d_instantiate(dentry, inode); |
714 | if (fibh.sbh != fibh.ebh) | 688 | if (fibh.sbh != fibh.ebh) |
715 | brelse(fibh.ebh); | 689 | brelse(fibh.ebh); |
@@ -876,14 +850,11 @@ out: | |||
876 | static int udf_symlink(struct inode *dir, struct dentry *dentry, | 850 | static int udf_symlink(struct inode *dir, struct dentry *dentry, |
877 | const char *symname) | 851 | const char *symname) |
878 | { | 852 | { |
879 | struct inode *inode; | 853 | struct inode *inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO); |
880 | struct pathComponent *pc; | 854 | struct pathComponent *pc; |
881 | const char *compstart; | 855 | const char *compstart; |
882 | struct udf_fileident_bh fibh; | ||
883 | struct extent_position epos = {}; | 856 | struct extent_position epos = {}; |
884 | int eoffset, elen = 0; | 857 | int eoffset, elen = 0; |
885 | struct fileIdentDesc *fi; | ||
886 | struct fileIdentDesc cfi; | ||
887 | uint8_t *ea; | 858 | uint8_t *ea; |
888 | int err; | 859 | int err; |
889 | int block; | 860 | int block; |
@@ -892,9 +863,8 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, | |||
892 | struct udf_inode_info *iinfo; | 863 | struct udf_inode_info *iinfo; |
893 | struct super_block *sb = dir->i_sb; | 864 | struct super_block *sb = dir->i_sb; |
894 | 865 | ||
895 | inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err); | 866 | if (IS_ERR(inode)) |
896 | if (!inode) | 867 | return PTR_ERR(inode); |
897 | goto out; | ||
898 | 868 | ||
899 | iinfo = UDF_I(inode); | 869 | iinfo = UDF_I(inode); |
900 | down_write(&iinfo->i_data_sem); | 870 | down_write(&iinfo->i_data_sem); |
@@ -1012,32 +982,15 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry, | |||
1012 | mark_inode_dirty(inode); | 982 | mark_inode_dirty(inode); |
1013 | up_write(&iinfo->i_data_sem); | 983 | up_write(&iinfo->i_data_sem); |
1014 | 984 | ||
1015 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); | 985 | err = udf_add_nondir(dentry, inode); |
1016 | if (!fi) | ||
1017 | goto out_fail; | ||
1018 | cfi.icb.extLength = cpu_to_le32(sb->s_blocksize); | ||
1019 | cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); | ||
1020 | if (UDF_SB(inode->i_sb)->s_lvid_bh) { | ||
1021 | *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = | ||
1022 | cpu_to_le32(lvid_get_unique_id(sb)); | ||
1023 | } | ||
1024 | udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); | ||
1025 | if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) | ||
1026 | mark_inode_dirty(dir); | ||
1027 | if (fibh.sbh != fibh.ebh) | ||
1028 | brelse(fibh.ebh); | ||
1029 | brelse(fibh.sbh); | ||
1030 | d_instantiate(dentry, inode); | ||
1031 | err = 0; | ||
1032 | |||
1033 | out: | 986 | out: |
1034 | kfree(name); | 987 | kfree(name); |
1035 | return err; | 988 | return err; |
1036 | 989 | ||
1037 | out_no_entry: | 990 | out_no_entry: |
1038 | up_write(&iinfo->i_data_sem); | 991 | up_write(&iinfo->i_data_sem); |
1039 | out_fail: | ||
1040 | inode_dec_link_count(inode); | 992 | inode_dec_link_count(inode); |
993 | unlock_new_inode(inode); | ||
1041 | iput(inode); | 994 | iput(inode); |
1042 | goto out; | 995 | goto out; |
1043 | } | 996 | } |
@@ -1222,7 +1175,7 @@ static struct dentry *udf_get_parent(struct dentry *child) | |||
1222 | struct udf_fileident_bh fibh; | 1175 | struct udf_fileident_bh fibh; |
1223 | 1176 | ||
1224 | if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi)) | 1177 | if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi)) |
1225 | goto out_unlock; | 1178 | return ERR_PTR(-EACCES); |
1226 | 1179 | ||
1227 | if (fibh.sbh != fibh.ebh) | 1180 | if (fibh.sbh != fibh.ebh) |
1228 | brelse(fibh.ebh); | 1181 | brelse(fibh.ebh); |
@@ -1230,12 +1183,10 @@ static struct dentry *udf_get_parent(struct dentry *child) | |||
1230 | 1183 | ||
1231 | tloc = lelb_to_cpu(cfi.icb.extLocation); | 1184 | tloc = lelb_to_cpu(cfi.icb.extLocation); |
1232 | inode = udf_iget(child->d_inode->i_sb, &tloc); | 1185 | inode = udf_iget(child->d_inode->i_sb, &tloc); |
1233 | if (!inode) | 1186 | if (IS_ERR(inode)) |
1234 | goto out_unlock; | 1187 | return ERR_CAST(inode); |
1235 | 1188 | ||
1236 | return d_obtain_alias(inode); | 1189 | return d_obtain_alias(inode); |
1237 | out_unlock: | ||
1238 | return ERR_PTR(-EACCES); | ||
1239 | } | 1190 | } |
1240 | 1191 | ||
1241 | 1192 | ||
@@ -1252,8 +1203,8 @@ static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block, | |||
1252 | loc.partitionReferenceNum = partref; | 1203 | loc.partitionReferenceNum = partref; |
1253 | inode = udf_iget(sb, &loc); | 1204 | inode = udf_iget(sb, &loc); |
1254 | 1205 | ||
1255 | if (inode == NULL) | 1206 | if (IS_ERR(inode)) |
1256 | return ERR_PTR(-ENOMEM); | 1207 | return ERR_CAST(inode); |
1257 | 1208 | ||
1258 | if (generation && inode->i_generation != generation) { | 1209 | if (generation && inode->i_generation != generation) { |
1259 | iput(inode); | 1210 | iput(inode); |
diff --git a/fs/udf/super.c b/fs/udf/super.c index 813da94d447b..5401fc33f5cc 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c | |||
@@ -961,12 +961,14 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb, | |||
961 | 961 | ||
962 | metadata_fe = udf_iget(sb, &addr); | 962 | metadata_fe = udf_iget(sb, &addr); |
963 | 963 | ||
964 | if (metadata_fe == NULL) | 964 | if (IS_ERR(metadata_fe)) { |
965 | udf_warn(sb, "metadata inode efe not found\n"); | 965 | udf_warn(sb, "metadata inode efe not found\n"); |
966 | else if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) { | 966 | return metadata_fe; |
967 | } | ||
968 | if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) { | ||
967 | udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n"); | 969 | udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n"); |
968 | iput(metadata_fe); | 970 | iput(metadata_fe); |
969 | metadata_fe = NULL; | 971 | return ERR_PTR(-EIO); |
970 | } | 972 | } |
971 | 973 | ||
972 | return metadata_fe; | 974 | return metadata_fe; |
@@ -978,6 +980,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition) | |||
978 | struct udf_part_map *map; | 980 | struct udf_part_map *map; |
979 | struct udf_meta_data *mdata; | 981 | struct udf_meta_data *mdata; |
980 | struct kernel_lb_addr addr; | 982 | struct kernel_lb_addr addr; |
983 | struct inode *fe; | ||
981 | 984 | ||
982 | map = &sbi->s_partmaps[partition]; | 985 | map = &sbi->s_partmaps[partition]; |
983 | mdata = &map->s_type_specific.s_metadata; | 986 | mdata = &map->s_type_specific.s_metadata; |
@@ -986,22 +989,24 @@ static int udf_load_metadata_files(struct super_block *sb, int partition) | |||
986 | udf_debug("Metadata file location: block = %d part = %d\n", | 989 | udf_debug("Metadata file location: block = %d part = %d\n", |
987 | mdata->s_meta_file_loc, map->s_partition_num); | 990 | mdata->s_meta_file_loc, map->s_partition_num); |
988 | 991 | ||
989 | mdata->s_metadata_fe = udf_find_metadata_inode_efe(sb, | 992 | fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc, |
990 | mdata->s_meta_file_loc, map->s_partition_num); | 993 | map->s_partition_num); |
991 | 994 | if (IS_ERR(fe)) { | |
992 | if (mdata->s_metadata_fe == NULL) { | ||
993 | /* mirror file entry */ | 995 | /* mirror file entry */ |
994 | udf_debug("Mirror metadata file location: block = %d part = %d\n", | 996 | udf_debug("Mirror metadata file location: block = %d part = %d\n", |
995 | mdata->s_mirror_file_loc, map->s_partition_num); | 997 | mdata->s_mirror_file_loc, map->s_partition_num); |
996 | 998 | ||
997 | mdata->s_mirror_fe = udf_find_metadata_inode_efe(sb, | 999 | fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc, |
998 | mdata->s_mirror_file_loc, map->s_partition_num); | 1000 | map->s_partition_num); |
999 | 1001 | ||
1000 | if (mdata->s_mirror_fe == NULL) { | 1002 | if (IS_ERR(fe)) { |
1001 | udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n"); | 1003 | udf_err(sb, "Both metadata and mirror metadata inode efe can not found\n"); |
1002 | return -EIO; | 1004 | return PTR_ERR(fe); |
1003 | } | 1005 | } |
1004 | } | 1006 | mdata->s_mirror_fe = fe; |
1007 | } else | ||
1008 | mdata->s_metadata_fe = fe; | ||
1009 | |||
1005 | 1010 | ||
1006 | /* | 1011 | /* |
1007 | * bitmap file entry | 1012 | * bitmap file entry |
@@ -1015,15 +1020,16 @@ static int udf_load_metadata_files(struct super_block *sb, int partition) | |||
1015 | udf_debug("Bitmap file location: block = %d part = %d\n", | 1020 | udf_debug("Bitmap file location: block = %d part = %d\n", |
1016 | addr.logicalBlockNum, addr.partitionReferenceNum); | 1021 | addr.logicalBlockNum, addr.partitionReferenceNum); |
1017 | 1022 | ||
1018 | mdata->s_bitmap_fe = udf_iget(sb, &addr); | 1023 | fe = udf_iget(sb, &addr); |
1019 | if (mdata->s_bitmap_fe == NULL) { | 1024 | if (IS_ERR(fe)) { |
1020 | if (sb->s_flags & MS_RDONLY) | 1025 | if (sb->s_flags & MS_RDONLY) |
1021 | udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n"); | 1026 | udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n"); |
1022 | else { | 1027 | else { |
1023 | udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n"); | 1028 | udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n"); |
1024 | return -EIO; | 1029 | return PTR_ERR(fe); |
1025 | } | 1030 | } |
1026 | } | 1031 | } else |
1032 | mdata->s_bitmap_fe = fe; | ||
1027 | } | 1033 | } |
1028 | 1034 | ||
1029 | udf_debug("udf_load_metadata_files Ok\n"); | 1035 | udf_debug("udf_load_metadata_files Ok\n"); |
@@ -1111,13 +1117,15 @@ static int udf_fill_partdesc_info(struct super_block *sb, | |||
1111 | phd->unallocSpaceTable.extPosition), | 1117 | phd->unallocSpaceTable.extPosition), |
1112 | .partitionReferenceNum = p_index, | 1118 | .partitionReferenceNum = p_index, |
1113 | }; | 1119 | }; |
1120 | struct inode *inode; | ||
1114 | 1121 | ||
1115 | map->s_uspace.s_table = udf_iget(sb, &loc); | 1122 | inode = udf_iget(sb, &loc); |
1116 | if (!map->s_uspace.s_table) { | 1123 | if (IS_ERR(inode)) { |
1117 | udf_debug("cannot load unallocSpaceTable (part %d)\n", | 1124 | udf_debug("cannot load unallocSpaceTable (part %d)\n", |
1118 | p_index); | 1125 | p_index); |
1119 | return -EIO; | 1126 | return PTR_ERR(inode); |
1120 | } | 1127 | } |
1128 | map->s_uspace.s_table = inode; | ||
1121 | map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE; | 1129 | map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE; |
1122 | udf_debug("unallocSpaceTable (part %d) @ %ld\n", | 1130 | udf_debug("unallocSpaceTable (part %d) @ %ld\n", |
1123 | p_index, map->s_uspace.s_table->i_ino); | 1131 | p_index, map->s_uspace.s_table->i_ino); |
@@ -1144,14 +1152,15 @@ static int udf_fill_partdesc_info(struct super_block *sb, | |||
1144 | phd->freedSpaceTable.extPosition), | 1152 | phd->freedSpaceTable.extPosition), |
1145 | .partitionReferenceNum = p_index, | 1153 | .partitionReferenceNum = p_index, |
1146 | }; | 1154 | }; |
1155 | struct inode *inode; | ||
1147 | 1156 | ||
1148 | map->s_fspace.s_table = udf_iget(sb, &loc); | 1157 | inode = udf_iget(sb, &loc); |
1149 | if (!map->s_fspace.s_table) { | 1158 | if (IS_ERR(inode)) { |
1150 | udf_debug("cannot load freedSpaceTable (part %d)\n", | 1159 | udf_debug("cannot load freedSpaceTable (part %d)\n", |
1151 | p_index); | 1160 | p_index); |
1152 | return -EIO; | 1161 | return PTR_ERR(inode); |
1153 | } | 1162 | } |
1154 | 1163 | map->s_fspace.s_table = inode; | |
1155 | map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE; | 1164 | map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE; |
1156 | udf_debug("freedSpaceTable (part %d) @ %ld\n", | 1165 | udf_debug("freedSpaceTable (part %d) @ %ld\n", |
1157 | p_index, map->s_fspace.s_table->i_ino); | 1166 | p_index, map->s_fspace.s_table->i_ino); |
@@ -1178,6 +1187,7 @@ static void udf_find_vat_block(struct super_block *sb, int p_index, | |||
1178 | struct udf_part_map *map = &sbi->s_partmaps[p_index]; | 1187 | struct udf_part_map *map = &sbi->s_partmaps[p_index]; |
1179 | sector_t vat_block; | 1188 | sector_t vat_block; |
1180 | struct kernel_lb_addr ino; | 1189 | struct kernel_lb_addr ino; |
1190 | struct inode *inode; | ||
1181 | 1191 | ||
1182 | /* | 1192 | /* |
1183 | * VAT file entry is in the last recorded block. Some broken disks have | 1193 | * VAT file entry is in the last recorded block. Some broken disks have |
@@ -1186,10 +1196,13 @@ static void udf_find_vat_block(struct super_block *sb, int p_index, | |||
1186 | ino.partitionReferenceNum = type1_index; | 1196 | ino.partitionReferenceNum = type1_index; |
1187 | for (vat_block = start_block; | 1197 | for (vat_block = start_block; |
1188 | vat_block >= map->s_partition_root && | 1198 | vat_block >= map->s_partition_root && |
1189 | vat_block >= start_block - 3 && | 1199 | vat_block >= start_block - 3; vat_block--) { |
1190 | !sbi->s_vat_inode; vat_block--) { | ||
1191 | ino.logicalBlockNum = vat_block - map->s_partition_root; | 1200 | ino.logicalBlockNum = vat_block - map->s_partition_root; |
1192 | sbi->s_vat_inode = udf_iget(sb, &ino); | 1201 | inode = udf_iget(sb, &ino); |
1202 | if (!IS_ERR(inode)) { | ||
1203 | sbi->s_vat_inode = inode; | ||
1204 | break; | ||
1205 | } | ||
1193 | } | 1206 | } |
1194 | } | 1207 | } |
1195 | 1208 | ||
@@ -2205,10 +2218,10 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) | |||
2205 | /* assign inodes by physical block number */ | 2218 | /* assign inodes by physical block number */ |
2206 | /* perhaps it's not extensible enough, but for now ... */ | 2219 | /* perhaps it's not extensible enough, but for now ... */ |
2207 | inode = udf_iget(sb, &rootdir); | 2220 | inode = udf_iget(sb, &rootdir); |
2208 | if (!inode) { | 2221 | if (IS_ERR(inode)) { |
2209 | udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n", | 2222 | udf_err(sb, "Error in udf_iget, block=%d, partition=%d\n", |
2210 | rootdir.logicalBlockNum, rootdir.partitionReferenceNum); | 2223 | rootdir.logicalBlockNum, rootdir.partitionReferenceNum); |
2211 | ret = -EIO; | 2224 | ret = PTR_ERR(inode); |
2212 | goto error_out; | 2225 | goto error_out; |
2213 | } | 2226 | } |
2214 | 2227 | ||
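Editor's note: the fs/udf/super.c hunks above all follow from one interface change — udf_iget() now reports failure with an encoded error pointer instead of NULL, so callers test IS_ERR() and propagate PTR_ERR() rather than a blanket -EIO. A minimal userspace sketch of that idiom; the real helpers live in <linux/err.h>, and everything below, including demo_iget(), is illustrative only:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        /* the top 4095 addresses are reserved for encoded errnos */
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

struct inode { long ino; };

/* Hypothetical lookup: returns an inode or an encoded errno, never NULL. */
static struct inode *demo_iget(long ino)
{
        static struct inode good = { .ino = 42 };

        if (ino != 42)
                return ERR_PTR(-EIO);
        return &good;
}

int main(void)
{
        struct inode *inode = demo_iget(7);

        if (IS_ERR(inode))
                printf("lookup failed: %ld\n", PTR_ERR(inode));  /* -EIO */

        inode = demo_iget(42);
        if (!IS_ERR(inode))
                printf("got inode %ld\n", inode->ino);
        return 0;
}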
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index be7dabbbcb49..742557be9936 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h | |||
@@ -143,7 +143,6 @@ extern int udf_expand_file_adinicb(struct inode *); | |||
143 | extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *); | 143 | extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *); |
144 | extern struct buffer_head *udf_bread(struct inode *, int, int, int *); | 144 | extern struct buffer_head *udf_bread(struct inode *, int, int, int *); |
145 | extern int udf_setsize(struct inode *, loff_t); | 145 | extern int udf_setsize(struct inode *, loff_t); |
146 | extern void udf_read_inode(struct inode *); | ||
147 | extern void udf_evict_inode(struct inode *); | 146 | extern void udf_evict_inode(struct inode *); |
148 | extern int udf_write_inode(struct inode *, struct writeback_control *wbc); | 147 | extern int udf_write_inode(struct inode *, struct writeback_control *wbc); |
149 | extern long udf_block_map(struct inode *, sector_t); | 148 | extern long udf_block_map(struct inode *, sector_t); |
@@ -209,7 +208,7 @@ extern int udf_CS0toUTF8(struct ustr *, const struct ustr *); | |||
209 | 208 | ||
210 | /* ialloc.c */ | 209 | /* ialloc.c */ |
211 | extern void udf_free_inode(struct inode *); | 210 | extern void udf_free_inode(struct inode *); |
212 | extern struct inode *udf_new_inode(struct inode *, umode_t, int *); | 211 | extern struct inode *udf_new_inode(struct inode *, umode_t); |
213 | 212 | ||
214 | /* truncate.c */ | 213 | /* truncate.c */ |
215 | extern void udf_truncate_tail_extent(struct inode *); | 214 | extern void udf_truncate_tail_extent(struct inode *); |
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 7c580c97990e..be7d42c7d938 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
@@ -902,9 +902,6 @@ void ufs_evict_inode(struct inode * inode) | |||
902 | invalidate_inode_buffers(inode); | 902 | invalidate_inode_buffers(inode); |
903 | clear_inode(inode); | 903 | clear_inode(inode); |
904 | 904 | ||
905 | if (want_delete) { | 905 | if (want_delete) |
906 | lock_ufs(inode->i_sb); | 906 | ufs_free_inode(inode); |
907 | ufs_free_inode (inode); | ||
908 | unlock_ufs(inode->i_sb); | ||
909 | } | ||
910 | } | 907 | } |
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 90d74b8f8eba..2df62a73f20c 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
@@ -126,12 +126,12 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, | |||
126 | if (l > sb->s_blocksize) | 126 | if (l > sb->s_blocksize) |
127 | goto out_notlocked; | 127 | goto out_notlocked; |
128 | 128 | ||
129 | lock_ufs(dir->i_sb); | ||
130 | inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); | 129 | inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); |
131 | err = PTR_ERR(inode); | 130 | err = PTR_ERR(inode); |
132 | if (IS_ERR(inode)) | 131 | if (IS_ERR(inode)) |
133 | goto out; | 132 | goto out_notlocked; |
134 | 133 | ||
134 | lock_ufs(dir->i_sb); | ||
135 | if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) { | 135 | if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) { |
136 | /* slow symlink */ | 136 | /* slow symlink */ |
137 | inode->i_op = &ufs_symlink_inode_operations; | 137 | inode->i_op = &ufs_symlink_inode_operations; |
@@ -181,13 +181,9 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) | |||
181 | struct inode * inode; | 181 | struct inode * inode; |
182 | int err; | 182 | int err; |
183 | 183 | ||
184 | lock_ufs(dir->i_sb); | ||
185 | inode_inc_link_count(dir); | ||
186 | |||
187 | inode = ufs_new_inode(dir, S_IFDIR|mode); | 184 | inode = ufs_new_inode(dir, S_IFDIR|mode); |
188 | err = PTR_ERR(inode); | ||
189 | if (IS_ERR(inode)) | 185 | if (IS_ERR(inode)) |
190 | goto out_dir; | 186 | return PTR_ERR(inode); |
191 | 187 | ||
192 | inode->i_op = &ufs_dir_inode_operations; | 188 | inode->i_op = &ufs_dir_inode_operations; |
193 | inode->i_fop = &ufs_dir_operations; | 189 | inode->i_fop = &ufs_dir_operations; |
@@ -195,6 +191,9 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) | |||
195 | 191 | ||
196 | inode_inc_link_count(inode); | 192 | inode_inc_link_count(inode); |
197 | 193 | ||
194 | lock_ufs(dir->i_sb); | ||
195 | inode_inc_link_count(dir); | ||
196 | |||
198 | err = ufs_make_empty(inode, dir); | 197 | err = ufs_make_empty(inode, dir); |
199 | if (err) | 198 | if (err) |
200 | goto out_fail; | 199 | goto out_fail; |
@@ -212,7 +211,6 @@ out_fail: | |||
212 | inode_dec_link_count(inode); | 211 | inode_dec_link_count(inode); |
213 | inode_dec_link_count(inode); | 212 | inode_dec_link_count(inode); |
214 | iput (inode); | 213 | iput (inode); |
215 | out_dir: | ||
216 | inode_dec_link_count(dir); | 214 | inode_dec_link_count(dir); |
217 | unlock_ufs(dir->i_sb); | 215 | unlock_ufs(dir->i_sb); |
218 | goto out; | 216 | goto out; |
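Editor's note: both ufs_symlink() and ufs_mkdir() above are reworked to call ufs_new_inode() before taking lock_ufs(), so an allocation failure simply returns without any unlock or link-count rollback. A standalone sketch of that allocate-then-lock shape; the demo_* names are illustrative and a pthread mutex stands in for lock_ufs():

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t sb_lock = PTHREAD_MUTEX_INITIALIZER;

struct demo_inode { int nlink; };

static struct demo_inode *demo_new_inode(void)
{
        return calloc(1, sizeof(struct demo_inode));    /* may fail */
}

static int demo_mkdir(struct demo_inode *dir)
{
        struct demo_inode *inode = demo_new_inode();

        if (!inode)
                return -1;              /* nothing locked, nothing to undo */

        inode->nlink = 2;               /* "." plus the parent entry */

        pthread_mutex_lock(&sb_lock);   /* lock only around the shared state */
        dir->nlink++;
        /* ... create the entries linking the new directory into dir ... */
        pthread_mutex_unlock(&sb_lock);
        return 0;
}

int main(void)
{
        struct demo_inode root = { .nlink = 2 };

        return demo_mkdir(&root);
}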
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index de2d26d32844..86df952d3e24 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
@@ -5424,7 +5424,7 @@ xfs_bmap_shift_extents( | |||
5424 | struct xfs_bmap_free *flist, | 5424 | struct xfs_bmap_free *flist, |
5425 | int num_exts) | 5425 | int num_exts) |
5426 | { | 5426 | { |
5427 | struct xfs_btree_cur *cur; | 5427 | struct xfs_btree_cur *cur = NULL; |
5428 | struct xfs_bmbt_rec_host *gotp; | 5428 | struct xfs_bmbt_rec_host *gotp; |
5429 | struct xfs_bmbt_irec got; | 5429 | struct xfs_bmbt_irec got; |
5430 | struct xfs_bmbt_irec left; | 5430 | struct xfs_bmbt_irec left; |
@@ -5435,7 +5435,7 @@ xfs_bmap_shift_extents( | |||
5435 | int error = 0; | 5435 | int error = 0; |
5436 | int i; | 5436 | int i; |
5437 | int whichfork = XFS_DATA_FORK; | 5437 | int whichfork = XFS_DATA_FORK; |
5438 | int logflags; | 5438 | int logflags = 0; |
5439 | xfs_filblks_t blockcount = 0; | 5439 | xfs_filblks_t blockcount = 0; |
5440 | int total_extents; | 5440 | int total_extents; |
5441 | 5441 | ||
@@ -5478,16 +5478,11 @@ xfs_bmap_shift_extents( | |||
5478 | } | 5478 | } |
5479 | } | 5479 | } |
5480 | 5480 | ||
5481 | /* We are going to change core inode */ | ||
5482 | logflags = XFS_ILOG_CORE; | ||
5483 | if (ifp->if_flags & XFS_IFBROOT) { | 5481 | if (ifp->if_flags & XFS_IFBROOT) { |
5484 | cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); | 5482 | cur = xfs_bmbt_init_cursor(mp, tp, ip, whichfork); |
5485 | cur->bc_private.b.firstblock = *firstblock; | 5483 | cur->bc_private.b.firstblock = *firstblock; |
5486 | cur->bc_private.b.flist = flist; | 5484 | cur->bc_private.b.flist = flist; |
5487 | cur->bc_private.b.flags = 0; | 5485 | cur->bc_private.b.flags = 0; |
5488 | } else { | ||
5489 | cur = NULL; | ||
5490 | logflags |= XFS_ILOG_DEXT; | ||
5491 | } | 5486 | } |
5492 | 5487 | ||
5493 | /* | 5488 | /* |
@@ -5545,11 +5540,14 @@ xfs_bmap_shift_extents( | |||
5545 | blockcount = left.br_blockcount + | 5540 | blockcount = left.br_blockcount + |
5546 | got.br_blockcount; | 5541 | got.br_blockcount; |
5547 | xfs_iext_remove(ip, *current_ext, 1, 0); | 5542 | xfs_iext_remove(ip, *current_ext, 1, 0); |
5543 | logflags |= XFS_ILOG_CORE; | ||
5548 | if (cur) { | 5544 | if (cur) { |
5549 | error = xfs_btree_delete(cur, &i); | 5545 | error = xfs_btree_delete(cur, &i); |
5550 | if (error) | 5546 | if (error) |
5551 | goto del_cursor; | 5547 | goto del_cursor; |
5552 | XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor); | 5548 | XFS_WANT_CORRUPTED_GOTO(i == 1, del_cursor); |
5549 | } else { | ||
5550 | logflags |= XFS_ILOG_DEXT; | ||
5553 | } | 5551 | } |
5554 | XFS_IFORK_NEXT_SET(ip, whichfork, | 5552 | XFS_IFORK_NEXT_SET(ip, whichfork, |
5555 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); | 5553 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); |
@@ -5575,6 +5573,7 @@ xfs_bmap_shift_extents( | |||
5575 | got.br_startoff = startoff; | 5573 | got.br_startoff = startoff; |
5576 | } | 5574 | } |
5577 | 5575 | ||
5576 | logflags |= XFS_ILOG_CORE; | ||
5578 | if (cur) { | 5577 | if (cur) { |
5579 | error = xfs_bmbt_update(cur, got.br_startoff, | 5578 | error = xfs_bmbt_update(cur, got.br_startoff, |
5580 | got.br_startblock, | 5579 | got.br_startblock, |
@@ -5582,6 +5581,8 @@ xfs_bmap_shift_extents( | |||
5582 | got.br_state); | 5581 | got.br_state); |
5583 | if (error) | 5582 | if (error) |
5584 | goto del_cursor; | 5583 | goto del_cursor; |
5584 | } else { | ||
5585 | logflags |= XFS_ILOG_DEXT; | ||
5585 | } | 5586 | } |
5586 | 5587 | ||
5587 | (*current_ext)++; | 5588 | (*current_ext)++; |
@@ -5597,6 +5598,7 @@ del_cursor: | |||
5597 | xfs_btree_del_cursor(cur, | 5598 | xfs_btree_del_cursor(cur, |
5598 | error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); | 5599 | error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); |
5599 | 5600 | ||
5600 | xfs_trans_log_inode(tp, ip, logflags); | 5601 | if (logflags) |
5602 | xfs_trans_log_inode(tp, ip, logflags); | ||
5601 | return error; | 5603 | return error; |
5602 | } | 5604 | } |
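Editor's note: the xfs_bmap_shift_extents() changes above stop pre-setting the log flags and instead accumulate them only at the points where something is actually modified, finishing with a guarded xfs_trans_log_inode() so an early error path no longer logs an untouched inode (cur and logflags both now start out zeroed). A tiny standalone model of that accumulate-then-commit pattern; the flag values below are made up, not the XFS_ILOG_* ones:

#include <stdio.h>

#define LOG_CORE 0x1
#define LOG_DEXT 0x2

int main(void)
{
        int logflags = 0;
        int have_btree_cursor = 0;      /* cur != NULL in the patch */
        int removed_extent = 1;

        if (removed_extent) {
                logflags |= LOG_CORE;
                if (!have_btree_cursor)
                        logflags |= LOG_DEXT;   /* in-core extent list changed */
        }

        if (logflags)   /* skip the log call entirely when nothing changed */
                printf("log inode with flags 0x%x\n", logflags);
        return 0;
}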
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 11e9b4caa54f..b984647c24db 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -1753,11 +1753,72 @@ xfs_vm_readpages( | |||
1753 | return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); | 1753 | return mpage_readpages(mapping, pages, nr_pages, xfs_get_blocks); |
1754 | } | 1754 | } |
1755 | 1755 | ||
1756 | /* | ||
1757 | * This is basically a copy of __set_page_dirty_buffers() with one | ||
1758 | * small tweak: buffers beyond EOF do not get marked dirty. If we mark them | ||
1759 | * dirty, we'll never be able to clean them because we don't write buffers | ||
1760 | * beyond EOF, and that means we can't invalidate pages that span EOF | ||
1761 | * that have been marked dirty. Further, the dirty state can leak into | ||
1762 | * the file interior if the file is extended, resulting in all sorts of | ||
1763 | * bad things happening as the state does not match the underlying data. | ||
1764 | * | ||
1765 | * XXX: this really indicates that bufferheads in XFS need to die. Warts like | ||
1766 | * this only exist because of bufferheads and how the generic code manages them. | ||
1767 | */ | ||
1768 | STATIC int | ||
1769 | xfs_vm_set_page_dirty( | ||
1770 | struct page *page) | ||
1771 | { | ||
1772 | struct address_space *mapping = page->mapping; | ||
1773 | struct inode *inode = mapping->host; | ||
1774 | loff_t end_offset; | ||
1775 | loff_t offset; | ||
1776 | int newly_dirty; | ||
1777 | |||
1778 | if (unlikely(!mapping)) | ||
1779 | return !TestSetPageDirty(page); | ||
1780 | |||
1781 | end_offset = i_size_read(inode); | ||
1782 | offset = page_offset(page); | ||
1783 | |||
1784 | spin_lock(&mapping->private_lock); | ||
1785 | if (page_has_buffers(page)) { | ||
1786 | struct buffer_head *head = page_buffers(page); | ||
1787 | struct buffer_head *bh = head; | ||
1788 | |||
1789 | do { | ||
1790 | if (offset < end_offset) | ||
1791 | set_buffer_dirty(bh); | ||
1792 | bh = bh->b_this_page; | ||
1793 | offset += 1 << inode->i_blkbits; | ||
1794 | } while (bh != head); | ||
1795 | } | ||
1796 | newly_dirty = !TestSetPageDirty(page); | ||
1797 | spin_unlock(&mapping->private_lock); | ||
1798 | |||
1799 | if (newly_dirty) { | ||
1800 | /* sigh - __set_page_dirty() is static, so copy it here, too */ | ||
1801 | unsigned long flags; | ||
1802 | |||
1803 | spin_lock_irqsave(&mapping->tree_lock, flags); | ||
1804 | if (page->mapping) { /* Race with truncate? */ | ||
1805 | WARN_ON_ONCE(!PageUptodate(page)); | ||
1806 | account_page_dirtied(page, mapping); | ||
1807 | radix_tree_tag_set(&mapping->page_tree, | ||
1808 | page_index(page), PAGECACHE_TAG_DIRTY); | ||
1809 | } | ||
1810 | spin_unlock_irqrestore(&mapping->tree_lock, flags); | ||
1811 | __mark_inode_dirty(mapping->host, I_DIRTY_PAGES); | ||
1812 | } | ||
1813 | return newly_dirty; | ||
1814 | } | ||
1815 | |||
1756 | const struct address_space_operations xfs_address_space_operations = { | 1816 | const struct address_space_operations xfs_address_space_operations = { |
1757 | .readpage = xfs_vm_readpage, | 1817 | .readpage = xfs_vm_readpage, |
1758 | .readpages = xfs_vm_readpages, | 1818 | .readpages = xfs_vm_readpages, |
1759 | .writepage = xfs_vm_writepage, | 1819 | .writepage = xfs_vm_writepage, |
1760 | .writepages = xfs_vm_writepages, | 1820 | .writepages = xfs_vm_writepages, |
1821 | .set_page_dirty = xfs_vm_set_page_dirty, | ||
1761 | .releasepage = xfs_vm_releasepage, | 1822 | .releasepage = xfs_vm_releasepage, |
1762 | .invalidatepage = xfs_vm_invalidatepage, | 1823 | .invalidatepage = xfs_vm_invalidatepage, |
1763 | .write_begin = xfs_vm_write_begin, | 1824 | .write_begin = xfs_vm_write_begin, |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index 2f1e30d39a35..1707980f9a4b 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
@@ -1470,6 +1470,26 @@ xfs_collapse_file_space( | |||
1470 | start_fsb = XFS_B_TO_FSB(mp, offset + len); | 1470 | start_fsb = XFS_B_TO_FSB(mp, offset + len); |
1471 | shift_fsb = XFS_B_TO_FSB(mp, len); | 1471 | shift_fsb = XFS_B_TO_FSB(mp, len); |
1472 | 1472 | ||
1473 | /* | ||
1474 | * Writeback the entire file and force remove any post-eof blocks. The | ||
1475 | * writeback prevents changes to the extent list via concurrent | ||
1476 | * writeback and the eofblocks trim prevents the extent shift algorithm | ||
1477 | * from running into a post-eof delalloc extent. | ||
1478 | * | ||
1479 | * XXX: This is a temporary fix until the extent shift loop below is | ||
1480 | * converted to use offsets and lookups within the ILOCK rather than | ||
1481 | * carrying around the index into the extent list for the next | ||
1482 | * iteration. | ||
1483 | */ | ||
1484 | error = filemap_write_and_wait(VFS_I(ip)->i_mapping); | ||
1485 | if (error) | ||
1486 | return error; | ||
1487 | if (xfs_can_free_eofblocks(ip, true)) { | ||
1488 | error = xfs_free_eofblocks(mp, ip, false); | ||
1489 | if (error) | ||
1490 | return error; | ||
1491 | } | ||
1492 | |||
1473 | error = xfs_free_file_space(ip, offset, len); | 1493 | error = xfs_free_file_space(ip, offset, len); |
1474 | if (error) | 1494 | if (error) |
1475 | return error; | 1495 | return error; |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 076b1708d134..de5368c803f9 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -291,12 +291,22 @@ xfs_file_read_iter( | |||
291 | if (inode->i_mapping->nrpages) { | 291 | if (inode->i_mapping->nrpages) { |
292 | ret = filemap_write_and_wait_range( | 292 | ret = filemap_write_and_wait_range( |
293 | VFS_I(ip)->i_mapping, | 293 | VFS_I(ip)->i_mapping, |
294 | pos, -1); | 294 | pos, pos + size - 1); |
295 | if (ret) { | 295 | if (ret) { |
296 | xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); | 296 | xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL); |
297 | return ret; | 297 | return ret; |
298 | } | 298 | } |
299 | truncate_pagecache_range(VFS_I(ip), pos, -1); | 299 | |
300 | /* | ||
301 | * Invalidate whole pages. This can return an error if | ||
302 | * we fail to invalidate a page, but this should never | ||
303 | * happen on XFS. Warn if it does fail. | ||
304 | */ | ||
305 | ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, | ||
306 | pos >> PAGE_CACHE_SHIFT, | ||
307 | (pos + size - 1) >> PAGE_CACHE_SHIFT); | ||
308 | WARN_ON_ONCE(ret); | ||
309 | ret = 0; | ||
300 | } | 310 | } |
301 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); | 311 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); |
302 | } | 312 | } |
@@ -632,10 +642,19 @@ xfs_file_dio_aio_write( | |||
632 | 642 | ||
633 | if (mapping->nrpages) { | 643 | if (mapping->nrpages) { |
634 | ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, | 644 | ret = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, |
635 | pos, -1); | 645 | pos, pos + count - 1); |
636 | if (ret) | 646 | if (ret) |
637 | goto out; | 647 | goto out; |
638 | truncate_pagecache_range(VFS_I(ip), pos, -1); | 648 | /* |
649 | * Invalidate whole pages. This can return an error if | ||
650 | * we fail to invalidate a page, but this should never | ||
651 | * happen on XFS. Warn if it does fail. | ||
652 | */ | ||
653 | ret = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping, | ||
654 | pos >> PAGE_CACHE_SHIFT, | ||
655 | (pos + count - 1) >> PAGE_CACHE_SHIFT); | ||
656 | WARN_ON_ONCE(ret); | ||
657 | ret = 0; | ||
639 | } | 658 | } |
640 | 659 | ||
641 | /* | 660 | /* |
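Editor's note: the xfs_file.c hunks above narrow the flush to the byte range actually touched by the direct I/O and then invalidate whole pages over that range; invalidate_inode_pages2_range() takes inclusive page indices, hence the pos >> PAGE_CACHE_SHIFT and (pos + count - 1) >> PAGE_CACHE_SHIFT pair. A standalone arithmetic check, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned long long pos = 5000, count = 10000;
        unsigned long long first = pos >> PAGE_SHIFT;                 /* 1 */
        unsigned long long last  = (pos + count - 1) >> PAGE_SHIFT;   /* 3 */

        printf("invalidate pages %llu..%llu\n", first, last);
        return 0;
}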
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index bcfd808b1098..c1c9de19edbe 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -246,7 +246,6 @@ struct acpi_device_pnp { | |||
246 | acpi_device_name device_name; /* Driver-determined */ | 246 | acpi_device_name device_name; /* Driver-determined */ |
247 | acpi_device_class device_class; /* " */ | 247 | acpi_device_class device_class; /* " */ |
248 | union acpi_object *str_obj; /* unicode string for _STR method */ | 248 | union acpi_object *str_obj; /* unicode string for _STR method */ |
249 | unsigned long sun; /* _SUN */ | ||
250 | }; | 249 | }; |
251 | 250 | ||
252 | #define acpi_device_bid(d) ((d)->pnp.bus_id) | 251 | #define acpi_device_bid(d) ((d)->pnp.bus_id) |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index e4ae2ad48d07..75a227cc7ce2 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -55,6 +55,7 @@ struct qstr { | |||
55 | #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } | 55 | #define QSTR_INIT(n,l) { { { .len = l } }, .name = n } |
56 | #define hashlen_hash(hashlen) ((u32) (hashlen)) | 56 | #define hashlen_hash(hashlen) ((u32) (hashlen)) |
57 | #define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) | 57 | #define hashlen_len(hashlen) ((u32)((hashlen) >> 32)) |
58 | #define hashlen_create(hash,len) (((u64)(len)<<32)|(u32)(hash)) | ||
58 | 59 | ||
59 | struct dentry_stat_t { | 60 | struct dentry_stat_t { |
60 | long nr_dentry; | 61 | long nr_dentry; |
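Editor's note: hashlen_create() added to dcache.h above is the constructor for the packed hash/length word that hashlen_hash() and hashlen_len() already take apart — length in the high 32 bits, hash in the low 32. A standalone roundtrip check of that packing:

#include <assert.h>
#include <stdint.h>

#define hashlen_hash(hashlen)    ((uint32_t)(hashlen))
#define hashlen_len(hashlen)     ((uint32_t)((hashlen) >> 32))
#define hashlen_create(hash,len) (((uint64_t)(len) << 32) | (uint32_t)(hash))

int main(void)
{
        uint64_t hashlen = hashlen_create(0xdeadbeefu, 11);

        assert(hashlen_hash(hashlen) == 0xdeadbeefu);
        assert(hashlen_len(hashlen) == 11);
        return 0;
}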
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 6ff0b0b42d47..08ed2b0a96e6 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h | |||
@@ -24,6 +24,9 @@ | |||
24 | #define NULL_ADDR ((block_t)0) /* used as block_t addresses */ | 24 | #define NULL_ADDR ((block_t)0) /* used as block_t addresses */ |
25 | #define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ | 25 | #define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ |
26 | 26 | ||
27 | /* 0, 1(node nid), 2(meta nid) are reserved node id */ | ||
28 | #define F2FS_RESERVED_NODE_NUM 3 | ||
29 | |||
27 | #define F2FS_ROOT_INO(sbi) (sbi->root_ino_num) | 30 | #define F2FS_ROOT_INO(sbi) (sbi->root_ino_num) |
28 | #define F2FS_NODE_INO(sbi) (sbi->node_ino_num) | 31 | #define F2FS_NODE_INO(sbi) (sbi->node_ino_num) |
29 | #define F2FS_META_INO(sbi) (sbi->meta_ino_num) | 32 | #define F2FS_META_INO(sbi) (sbi->meta_ino_num) |
@@ -87,6 +90,8 @@ struct f2fs_super_block { | |||
87 | #define CP_ORPHAN_PRESENT_FLAG 0x00000002 | 90 | #define CP_ORPHAN_PRESENT_FLAG 0x00000002 |
88 | #define CP_UMOUNT_FLAG 0x00000001 | 91 | #define CP_UMOUNT_FLAG 0x00000001 |
89 | 92 | ||
93 | #define F2FS_CP_PACKS 2 /* # of checkpoint packs */ | ||
94 | |||
90 | struct f2fs_checkpoint { | 95 | struct f2fs_checkpoint { |
91 | __le64 checkpoint_ver; /* checkpoint block version number */ | 96 | __le64 checkpoint_ver; /* checkpoint block version number */ |
92 | __le64 user_block_count; /* # of user blocks */ | 97 | __le64 user_block_count; /* # of user blocks */ |
@@ -123,6 +128,9 @@ struct f2fs_checkpoint { | |||
123 | */ | 128 | */ |
124 | #define F2FS_ORPHANS_PER_BLOCK 1020 | 129 | #define F2FS_ORPHANS_PER_BLOCK 1020 |
125 | 130 | ||
131 | #define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \ | ||
132 | F2FS_ORPHANS_PER_BLOCK) | ||
133 | |||
126 | struct f2fs_orphan_block { | 134 | struct f2fs_orphan_block { |
127 | __le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */ | 135 | __le32 ino[F2FS_ORPHANS_PER_BLOCK]; /* inode numbers */ |
128 | __le32 reserved; /* reserved */ | 136 | __le32 reserved; /* reserved */ |
@@ -144,6 +152,7 @@ struct f2fs_extent { | |||
144 | #define F2FS_NAME_LEN 255 | 152 | #define F2FS_NAME_LEN 255 |
145 | #define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */ | 153 | #define F2FS_INLINE_XATTR_ADDRS 50 /* 200 bytes for inline xattrs */ |
146 | #define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ | 154 | #define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */ |
155 | #define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */ | ||
147 | #define ADDRS_PER_INODE(fi) addrs_per_inode(fi) | 156 | #define ADDRS_PER_INODE(fi) addrs_per_inode(fi) |
148 | #define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ | 157 | #define ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */ |
149 | #define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ | 158 | #define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */ |
@@ -163,8 +172,9 @@ struct f2fs_extent { | |||
163 | #define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ | 172 | #define MAX_INLINE_DATA (sizeof(__le32) * (DEF_ADDRS_PER_INODE - \ |
164 | F2FS_INLINE_XATTR_ADDRS - 1)) | 173 | F2FS_INLINE_XATTR_ADDRS - 1)) |
165 | 174 | ||
166 | #define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) \ | 175 | #define INLINE_DATA_OFFSET (PAGE_CACHE_SIZE - sizeof(struct node_footer) -\ |
167 | - sizeof(__le32) * (DEF_ADDRS_PER_INODE + 5 - 1)) | 176 | sizeof(__le32) * (DEF_ADDRS_PER_INODE + \ |
177 | DEF_NIDS_PER_INODE - 1)) | ||
168 | 178 | ||
169 | struct f2fs_inode { | 179 | struct f2fs_inode { |
170 | __le16 i_mode; /* file mode */ | 180 | __le16 i_mode; /* file mode */ |
@@ -194,7 +204,7 @@ struct f2fs_inode { | |||
194 | 204 | ||
195 | __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ | 205 | __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ |
196 | 206 | ||
197 | __le32 i_nid[5]; /* direct(2), indirect(2), | 207 | __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2), |
198 | double_indirect(1) node id */ | 208 | double_indirect(1) node id */ |
199 | } __packed; | 209 | } __packed; |
200 | 210 | ||
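Editor's note: GET_ORPHAN_BLOCKS() introduced in the f2fs header above is a plain ceiling division — how many 1020-entry orphan blocks are needed to hold n orphan inode numbers. A standalone check; the copy below adds parentheses around n, which the macro as added omits:

#include <stdio.h>

#define F2FS_ORPHANS_PER_BLOCK 1020
#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
                              F2FS_ORPHANS_PER_BLOCK)

int main(void)
{
        printf("%d\n", GET_ORPHAN_BLOCKS(0));     /* 0 */
        printf("%d\n", GET_ORPHAN_BLOCKS(1020));  /* 1 */
        printf("%d\n", GET_ORPHAN_BLOCKS(1021));  /* 2 */
        return 0;
}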
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index c7e17de732f3..12f146fa6604 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h | |||
@@ -38,60 +38,32 @@ enum gpiod_flags { | |||
38 | struct gpio_desc *__must_check __gpiod_get(struct device *dev, | 38 | struct gpio_desc *__must_check __gpiod_get(struct device *dev, |
39 | const char *con_id, | 39 | const char *con_id, |
40 | enum gpiod_flags flags); | 40 | enum gpiod_flags flags); |
41 | #define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags) | ||
42 | #define gpiod_get(varargs...) __gpiod_get(varargs, 0) | ||
43 | struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, | 41 | struct gpio_desc *__must_check __gpiod_get_index(struct device *dev, |
44 | const char *con_id, | 42 | const char *con_id, |
45 | unsigned int idx, | 43 | unsigned int idx, |
46 | enum gpiod_flags flags); | 44 | enum gpiod_flags flags); |
47 | #define __gpiod_get_index(dev, con_id, index, flags, ...) \ | ||
48 | __gpiod_get_index(dev, con_id, index, flags) | ||
49 | #define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0) | ||
50 | struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev, | 45 | struct gpio_desc *__must_check __gpiod_get_optional(struct device *dev, |
51 | const char *con_id, | 46 | const char *con_id, |
52 | enum gpiod_flags flags); | 47 | enum gpiod_flags flags); |
53 | #define __gpiod_get_optional(dev, con_id, flags, ...) \ | ||
54 | __gpiod_get_optional(dev, con_id, flags) | ||
55 | #define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0) | ||
56 | struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev, | 48 | struct gpio_desc *__must_check __gpiod_get_index_optional(struct device *dev, |
57 | const char *con_id, | 49 | const char *con_id, |
58 | unsigned int index, | 50 | unsigned int index, |
59 | enum gpiod_flags flags); | 51 | enum gpiod_flags flags); |
60 | #define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \ | ||
61 | __gpiod_get_index_optional(dev, con_id, index, flags) | ||
62 | #define gpiod_get_index_optional(varargs...) \ | ||
63 | __gpiod_get_index_optional(varargs, 0) | ||
64 | |||
65 | void gpiod_put(struct gpio_desc *desc); | 52 | void gpiod_put(struct gpio_desc *desc); |
66 | 53 | ||
67 | struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev, | 54 | struct gpio_desc *__must_check __devm_gpiod_get(struct device *dev, |
68 | const char *con_id, | 55 | const char *con_id, |
69 | enum gpiod_flags flags); | 56 | enum gpiod_flags flags); |
70 | #define __devm_gpiod_get(dev, con_id, flags, ...) \ | ||
71 | __devm_gpiod_get(dev, con_id, flags) | ||
72 | #define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0) | ||
73 | struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev, | 57 | struct gpio_desc *__must_check __devm_gpiod_get_index(struct device *dev, |
74 | const char *con_id, | 58 | const char *con_id, |
75 | unsigned int idx, | 59 | unsigned int idx, |
76 | enum gpiod_flags flags); | 60 | enum gpiod_flags flags); |
77 | #define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \ | ||
78 | __devm_gpiod_get_index(dev, con_id, index, flags) | ||
79 | #define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0) | ||
80 | struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev, | 61 | struct gpio_desc *__must_check __devm_gpiod_get_optional(struct device *dev, |
81 | const char *con_id, | 62 | const char *con_id, |
82 | enum gpiod_flags flags); | 63 | enum gpiod_flags flags); |
83 | #define __devm_gpiod_get_optional(dev, con_id, flags, ...) \ | ||
84 | __devm_gpiod_get_optional(dev, con_id, flags) | ||
85 | #define devm_gpiod_get_optional(varargs...) \ | ||
86 | __devm_gpiod_get_optional(varargs, 0) | ||
87 | struct gpio_desc *__must_check | 64 | struct gpio_desc *__must_check |
88 | __devm_gpiod_get_index_optional(struct device *dev, const char *con_id, | 65 | __devm_gpiod_get_index_optional(struct device *dev, const char *con_id, |
89 | unsigned int index, enum gpiod_flags flags); | 66 | unsigned int index, enum gpiod_flags flags); |
90 | #define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \ | ||
91 | __devm_gpiod_get_index_optional(dev, con_id, index, flags) | ||
92 | #define devm_gpiod_get_index_optional(varargs...) \ | ||
93 | __devm_gpiod_get_index_optional(varargs, 0) | ||
94 | |||
95 | void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); | 67 | void devm_gpiod_put(struct device *dev, struct gpio_desc *desc); |
96 | 68 | ||
97 | int gpiod_get_direction(const struct gpio_desc *desc); | 69 | int gpiod_get_direction(const struct gpio_desc *desc); |
@@ -124,27 +96,31 @@ int desc_to_gpio(const struct gpio_desc *desc); | |||
124 | 96 | ||
125 | #else /* CONFIG_GPIOLIB */ | 97 | #else /* CONFIG_GPIOLIB */ |
126 | 98 | ||
127 | static inline struct gpio_desc *__must_check gpiod_get(struct device *dev, | 99 | static inline struct gpio_desc *__must_check __gpiod_get(struct device *dev, |
128 | const char *con_id) | 100 | const char *con_id, |
101 | enum gpiod_flags flags) | ||
129 | { | 102 | { |
130 | return ERR_PTR(-ENOSYS); | 103 | return ERR_PTR(-ENOSYS); |
131 | } | 104 | } |
132 | static inline struct gpio_desc *__must_check gpiod_get_index(struct device *dev, | 105 | static inline struct gpio_desc *__must_check |
133 | const char *con_id, | 106 | __gpiod_get_index(struct device *dev, |
134 | unsigned int idx) | 107 | const char *con_id, |
108 | unsigned int idx, | ||
109 | enum gpiod_flags flags) | ||
135 | { | 110 | { |
136 | return ERR_PTR(-ENOSYS); | 111 | return ERR_PTR(-ENOSYS); |
137 | } | 112 | } |
138 | 113 | ||
139 | static inline struct gpio_desc *__must_check | 114 | static inline struct gpio_desc *__must_check |
140 | gpiod_get_optional(struct device *dev, const char *con_id) | 115 | __gpiod_get_optional(struct device *dev, const char *con_id, |
116 | enum gpiod_flags flags) | ||
141 | { | 117 | { |
142 | return ERR_PTR(-ENOSYS); | 118 | return ERR_PTR(-ENOSYS); |
143 | } | 119 | } |
144 | 120 | ||
145 | static inline struct gpio_desc *__must_check | 121 | static inline struct gpio_desc *__must_check |
146 | gpiod_get_index_optional(struct device *dev, const char *con_id, | 122 | __gpiod_get_index_optional(struct device *dev, const char *con_id, |
147 | unsigned int index) | 123 | unsigned int index, enum gpiod_flags flags) |
148 | { | 124 | { |
149 | return ERR_PTR(-ENOSYS); | 125 | return ERR_PTR(-ENOSYS); |
150 | } | 126 | } |
@@ -157,28 +133,33 @@ static inline void gpiod_put(struct gpio_desc *desc) | |||
157 | WARN_ON(1); | 133 | WARN_ON(1); |
158 | } | 134 | } |
159 | 135 | ||
160 | static inline struct gpio_desc *__must_check devm_gpiod_get(struct device *dev, | 136 | static inline struct gpio_desc *__must_check |
161 | const char *con_id) | 137 | __devm_gpiod_get(struct device *dev, |
138 | const char *con_id, | ||
139 | enum gpiod_flags flags) | ||
162 | { | 140 | { |
163 | return ERR_PTR(-ENOSYS); | 141 | return ERR_PTR(-ENOSYS); |
164 | } | 142 | } |
165 | static inline | 143 | static inline |
166 | struct gpio_desc *__must_check devm_gpiod_get_index(struct device *dev, | 144 | struct gpio_desc *__must_check |
167 | const char *con_id, | 145 | __devm_gpiod_get_index(struct device *dev, |
168 | unsigned int idx) | 146 | const char *con_id, |
147 | unsigned int idx, | ||
148 | enum gpiod_flags flags) | ||
169 | { | 149 | { |
170 | return ERR_PTR(-ENOSYS); | 150 | return ERR_PTR(-ENOSYS); |
171 | } | 151 | } |
172 | 152 | ||
173 | static inline struct gpio_desc *__must_check | 153 | static inline struct gpio_desc *__must_check |
174 | devm_gpiod_get_optional(struct device *dev, const char *con_id) | 154 | __devm_gpiod_get_optional(struct device *dev, const char *con_id, |
155 | enum gpiod_flags flags) | ||
175 | { | 156 | { |
176 | return ERR_PTR(-ENOSYS); | 157 | return ERR_PTR(-ENOSYS); |
177 | } | 158 | } |
178 | 159 | ||
179 | static inline struct gpio_desc *__must_check | 160 | static inline struct gpio_desc *__must_check |
180 | devm_gpiod_get_index_optional(struct device *dev, const char *con_id, | 161 | __devm_gpiod_get_index_optional(struct device *dev, const char *con_id, |
181 | unsigned int index) | 162 | unsigned int index, enum gpiod_flags flags) |
182 | { | 163 | { |
183 | return ERR_PTR(-ENOSYS); | 164 | return ERR_PTR(-ENOSYS); |
184 | } | 165 | } |
@@ -303,9 +284,43 @@ static inline int desc_to_gpio(const struct gpio_desc *desc) | |||
303 | return -EINVAL; | 284 | return -EINVAL; |
304 | } | 285 | } |
305 | 286 | ||
306 | |||
307 | #endif /* CONFIG_GPIOLIB */ | 287 | #endif /* CONFIG_GPIOLIB */ |
308 | 288 | ||
289 | /* | ||
290 | * Vararg-hacks! This is done to transition the kernel to always pass | ||
291 | * the options flags argument to the below functions. During a transition | ||
292 | * phase these vararg macros make both old-and-newstyle code compile, | ||
293 | * but when all calls to the elder API are removed, these should go away | ||
294 | * and the __gpiod_get() etc functions above be renamed just gpiod_get() | ||
295 | * etc. | ||
296 | */ | ||
297 | #define __gpiod_get(dev, con_id, flags, ...) __gpiod_get(dev, con_id, flags) | ||
298 | #define gpiod_get(varargs...) __gpiod_get(varargs, 0) | ||
299 | #define __gpiod_get_index(dev, con_id, index, flags, ...) \ | ||
300 | __gpiod_get_index(dev, con_id, index, flags) | ||
301 | #define gpiod_get_index(varargs...) __gpiod_get_index(varargs, 0) | ||
302 | #define __gpiod_get_optional(dev, con_id, flags, ...) \ | ||
303 | __gpiod_get_optional(dev, con_id, flags) | ||
304 | #define gpiod_get_optional(varargs...) __gpiod_get_optional(varargs, 0) | ||
305 | #define __gpiod_get_index_optional(dev, con_id, index, flags, ...) \ | ||
306 | __gpiod_get_index_optional(dev, con_id, index, flags) | ||
307 | #define gpiod_get_index_optional(varargs...) \ | ||
308 | __gpiod_get_index_optional(varargs, 0) | ||
309 | #define __devm_gpiod_get(dev, con_id, flags, ...) \ | ||
310 | __devm_gpiod_get(dev, con_id, flags) | ||
311 | #define devm_gpiod_get(varargs...) __devm_gpiod_get(varargs, 0) | ||
312 | #define __devm_gpiod_get_index(dev, con_id, index, flags, ...) \ | ||
313 | __devm_gpiod_get_index(dev, con_id, index, flags) | ||
314 | #define devm_gpiod_get_index(varargs...) __devm_gpiod_get_index(varargs, 0) | ||
315 | #define __devm_gpiod_get_optional(dev, con_id, flags, ...) \ | ||
316 | __devm_gpiod_get_optional(dev, con_id, flags) | ||
317 | #define devm_gpiod_get_optional(varargs...) \ | ||
318 | __devm_gpiod_get_optional(varargs, 0) | ||
319 | #define __devm_gpiod_get_index_optional(dev, con_id, index, flags, ...) \ | ||
320 | __devm_gpiod_get_index_optional(dev, con_id, index, flags) | ||
321 | #define devm_gpiod_get_index_optional(varargs...) \ | ||
322 | __devm_gpiod_get_index_optional(varargs, 0) | ||
323 | |||
309 | #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) | 324 | #if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_GPIO_SYSFS) |
310 | 325 | ||
311 | int gpiod_export(struct gpio_desc *desc, bool direction_may_change); | 326 | int gpiod_export(struct gpio_desc *desc, bool direction_may_change); |
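Editor's note: the gpiod changes above move the compatibility macros after the #ifdef CONFIG_GPIOLIB block and rename the !GPIOLIB inline stubs to the __-prefixed forms, so the same varargs wrappers now cover both configurations. The trick itself, as the relocated comment explains, is a variadic macro that pads old flag-less call sites with a default 0 and drops any surplus argument before calling the real function. A minimal standalone model of the same preprocessor trick; demo_get() is illustrative, not the gpiod API, and it relies on the same GNU cpp behaviour the kernel headers use:

#include <stdio.h>

static int __demo_get(const char *dev, const char *con_id, int flags)
{
        printf("get %s/%s flags=%d\n", dev, con_id, flags);
        return 0;
}

/* Drop any extra trailing argument, then pad old-style calls with a 0. */
#define __demo_get(dev, con_id, flags, ...) __demo_get(dev, con_id, flags)
#define demo_get(varargs...) __demo_get(varargs, 0)

int main(void)
{
        demo_get("gpiochip0", "led");       /* old style: flags defaults to 0 */
        demo_get("gpiochip0", "led", 3);    /* new style: flags passed through */
        return 0;
}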
diff --git a/include/linux/hash.h b/include/linux/hash.h index bd1754c7ecef..d0494c399392 100644 --- a/include/linux/hash.h +++ b/include/linux/hash.h | |||
@@ -37,6 +37,9 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits) | |||
37 | { | 37 | { |
38 | u64 hash = val; | 38 | u64 hash = val; |
39 | 39 | ||
40 | #if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 | ||
41 | hash = hash * GOLDEN_RATIO_PRIME_64; | ||
42 | #else | ||
40 | /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ | 43 | /* Sigh, gcc can't optimise this alone like it does for 32 bits. */ |
41 | u64 n = hash; | 44 | u64 n = hash; |
42 | n <<= 18; | 45 | n <<= 18; |
@@ -51,6 +54,7 @@ static __always_inline u64 hash_64(u64 val, unsigned int bits) | |||
51 | hash += n; | 54 | hash += n; |
52 | n <<= 2; | 55 | n <<= 2; |
53 | hash += n; | 56 | hash += n; |
57 | #endif | ||
54 | 58 | ||
55 | /* High bits are more random, so use them. */ | 59 | /* High bits are more random, so use them. */ |
56 | return hash >> (64 - bits); | 60 | return hash >> (64 - bits); |
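Editor's note: the hash.h hunk above lets hash_64() use a single multiply by GOLDEN_RATIO_PRIME_64 on 64-bit builds that set CONFIG_ARCH_HAS_FAST_MULTIPLIER, keeping the shift-and-add sequence as the fallback; the two agree because the shift/add chain is just a decomposition of that constant. A standalone check that both forms produce the same value, including the final high-bit selection the function already does:

#include <assert.h>
#include <stdint.h>

#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL

static uint64_t hash_64_demo(uint64_t val, unsigned int bits, int fast_mul)
{
        uint64_t hash = val;

        if (fast_mul) {
                hash = hash * GOLDEN_RATIO_PRIME_64;
        } else {
                uint64_t n = hash;

                n <<= 18; hash -= n;
                n <<= 33; hash -= n;
                n <<= 3;  hash += n;
                n <<= 3;  hash -= n;
                n <<= 4;  hash += n;
                n <<= 2;  hash += n;
        }
        /* high bits are more random, keep the top bits of them */
        return hash >> (64 - bits);
}

int main(void)
{
        assert(hash_64_demo(0x123456789abcdef0ULL, 16, 0) ==
               hash_64_demo(0x123456789abcdef0ULL, 16, 1));
        return 0;
}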
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 1f44466c1e9d..c367cbdf73ab 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h | |||
@@ -258,23 +258,11 @@ extern unsigned long preset_lpj; | |||
258 | #define SEC_JIFFIE_SC (32 - SHIFT_HZ) | 258 | #define SEC_JIFFIE_SC (32 - SHIFT_HZ) |
259 | #endif | 259 | #endif |
260 | #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) | 260 | #define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29) |
261 | #define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19) | ||
262 | #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ | 261 | #define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\ |
263 | TICK_NSEC -1) / (u64)TICK_NSEC)) | 262 | TICK_NSEC -1) / (u64)TICK_NSEC)) |
264 | 263 | ||
265 | #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ | 264 | #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ |
266 | TICK_NSEC -1) / (u64)TICK_NSEC)) | 265 | TICK_NSEC -1) / (u64)TICK_NSEC)) |
267 | #define USEC_CONVERSION \ | ||
268 | ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\ | ||
269 | TICK_NSEC -1) / (u64)TICK_NSEC)) | ||
270 | /* | ||
271 | * USEC_ROUND is used in the timeval to jiffie conversion. See there | ||
272 | * for more details. It is the scaled resolution rounding value. Note | ||
273 | * that it is a 64-bit value. Since, when it is applied, we are already | ||
274 | * in jiffies (albit scaled), it is nothing but the bits we will shift | ||
275 | * off. | ||
276 | */ | ||
277 | #define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1) | ||
278 | /* | 266 | /* |
279 | * The maximum jiffie value is (MAX_INT >> 1). Here we translate that | 267 | * The maximum jiffie value is (MAX_INT >> 1). Here we translate that |
280 | * into seconds. The 64-bit case will overflow if we are not careful, | 268 | * into seconds. The 64-bit case will overflow if we are not careful, |
diff --git a/include/linux/leds.h b/include/linux/leds.h index 6a599dce7f9d..e43686472197 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/list.h> | 15 | #include <linux/list.h> |
16 | #include <linux/spinlock.h> | 16 | #include <linux/spinlock.h> |
17 | #include <linux/rwsem.h> | 17 | #include <linux/rwsem.h> |
18 | #include <linux/timer.h> | ||
18 | #include <linux/workqueue.h> | 19 | #include <linux/workqueue.h> |
19 | 20 | ||
20 | struct device; | 21 | struct device; |
@@ -68,7 +69,7 @@ struct led_classdev { | |||
68 | const char *default_trigger; /* Trigger to use */ | 69 | const char *default_trigger; /* Trigger to use */ |
69 | 70 | ||
70 | unsigned long blink_delay_on, blink_delay_off; | 71 | unsigned long blink_delay_on, blink_delay_off; |
71 | struct delayed_work blink_work; | 72 | struct timer_list blink_timer; |
72 | int blink_brightness; | 73 | int blink_brightness; |
73 | 74 | ||
74 | struct work_struct set_brightness_work; | 75 | struct work_struct set_brightness_work; |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 071f6b234604..511c6e0d21a9 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -1196,6 +1196,9 @@ int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, | |||
1196 | enum mlx4_net_trans_rule_id id); | 1196 | enum mlx4_net_trans_rule_id id); |
1197 | int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); | 1197 | int mlx4_hw_rule_sz(struct mlx4_dev *dev, enum mlx4_net_trans_rule_id id); |
1198 | 1198 | ||
1199 | int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr, | ||
1200 | int port, int qpn, u16 prio, u64 *reg_id); | ||
1201 | |||
1199 | void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, | 1202 | void mlx4_sync_pkey_table(struct mlx4_dev *dev, int slave, int port, |
1200 | int i, int val); | 1203 | int i, int val); |
1201 | 1204 | ||
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index 3083c53e0270..c300db3ae285 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -949,7 +949,7 @@ static inline int jedec_feature(struct nand_chip *chip) | |||
949 | : 0; | 949 | : 0; |
950 | } | 950 | } |
951 | 951 | ||
952 | /** | 952 | /* |
953 | * struct nand_sdr_timings - SDR NAND chip timings | 953 | * struct nand_sdr_timings - SDR NAND chip timings |
954 | * | 954 | * |
955 | * This struct defines the timing requirements of a SDR NAND chip. | 955 | * This struct defines the timing requirements of a SDR NAND chip. |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 38377392d082..c8e388e5fccc 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -3176,7 +3176,7 @@ static inline int __dev_uc_sync(struct net_device *dev, | |||
3176 | } | 3176 | } |
3177 | 3177 | ||
3178 | /** | 3178 | /** |
3179 | * __dev_uc_unsync - Remove synchonized addresses from device | 3179 | * __dev_uc_unsync - Remove synchronized addresses from device |
3180 | * @dev: device to sync | 3180 | * @dev: device to sync |
3181 | * @unsync: function to call if address should be removed | 3181 | * @unsync: function to call if address should be removed |
3182 | * | 3182 | * |
@@ -3220,7 +3220,7 @@ static inline int __dev_mc_sync(struct net_device *dev, | |||
3220 | } | 3220 | } |
3221 | 3221 | ||
3222 | /** | 3222 | /** |
3223 | * __dev_mc_unsync - Remove synchonized addresses from device | 3223 | * __dev_mc_unsync - Remove synchronized addresses from device |
3224 | * @dev: device to sync | 3224 | * @dev: device to sync |
3225 | * @unsync: function to call if address should be removed | 3225 | * @unsync: function to call if address should be removed |
3226 | * | 3226 | * |
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 2077489f9887..2517ece98820 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/in6.h> | 9 | #include <linux/in6.h> |
10 | #include <linux/wait.h> | 10 | #include <linux/wait.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/static_key.h> | ||
12 | #include <uapi/linux/netfilter.h> | 13 | #include <uapi/linux/netfilter.h> |
13 | #ifdef CONFIG_NETFILTER | 14 | #ifdef CONFIG_NETFILTER |
14 | static inline int NF_DROP_GETERR(int verdict) | 15 | static inline int NF_DROP_GETERR(int verdict) |
@@ -99,9 +100,9 @@ void nf_unregister_sockopt(struct nf_sockopt_ops *reg); | |||
99 | 100 | ||
100 | extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; | 101 | extern struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; |
101 | 102 | ||
102 | #if defined(CONFIG_JUMP_LABEL) | 103 | #ifdef HAVE_JUMP_LABEL |
103 | #include <linux/static_key.h> | ||
104 | extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; | 104 | extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; |
105 | |||
105 | static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) | 106 | static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) |
106 | { | 107 | { |
107 | if (__builtin_constant_p(pf) && | 108 | if (__builtin_constant_p(pf) && |
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 7c1d252b20c0..ebc4c76ffb73 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
@@ -60,7 +60,7 @@ struct generic_pm_domain { | |||
60 | struct mutex lock; | 60 | struct mutex lock; |
61 | struct dev_power_governor *gov; | 61 | struct dev_power_governor *gov; |
62 | struct work_struct power_off_work; | 62 | struct work_struct power_off_work; |
63 | char *name; | 63 | const char *name; |
64 | unsigned int in_progress; /* Number of devices being suspended now */ | 64 | unsigned int in_progress; /* Number of devices being suspended now */ |
65 | atomic_t sd_count; /* Number of subdomains with power "on" */ | 65 | atomic_t sd_count; /* Number of subdomains with power "on" */ |
66 | enum gpd_status status; /* Current state of the domain */ | 66 | enum gpd_status status; /* Current state of the domain */ |
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index bbe03a1924c0..4efa1ed8a2b0 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
@@ -218,6 +218,8 @@ enum regulator_type { | |||
218 | * @linear_min_sel: Minimal selector for starting linear mapping | 218 | * @linear_min_sel: Minimal selector for starting linear mapping |
219 | * @fixed_uV: Fixed voltage of rails. | 219 | * @fixed_uV: Fixed voltage of rails. |
220 | * @ramp_delay: Time to settle down after voltage change (unit: uV/us) | 220 | * @ramp_delay: Time to settle down after voltage change (unit: uV/us) |
221 | * @linear_ranges: A constant table of possible voltage ranges. | ||
222 | * @n_linear_ranges: Number of entries in the @linear_ranges table. | ||
221 | * @volt_table: Voltage mapping table (if table based mapping) | 223 | * @volt_table: Voltage mapping table (if table based mapping) |
222 | * | 224 | * |
223 | * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ | 225 | * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ |
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 730e638c5589..0b08d05d470b 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
@@ -85,6 +85,7 @@ struct regulator_state { | |||
85 | * bootloader then it will be enabled when the constraints are | 85 | * bootloader then it will be enabled when the constraints are |
86 | * applied. | 86 | * applied. |
87 | * @apply_uV: Apply the voltage constraint when initialising. | 87 | * @apply_uV: Apply the voltage constraint when initialising. |
88 | * @ramp_disable: Disable ramp delay when initialising or when setting voltage. | ||
88 | * | 89 | * |
89 | * @input_uV: Input voltage for regulator when supplied by another regulator. | 90 | * @input_uV: Input voltage for regulator when supplied by another regulator. |
90 | * | 91 | * |
diff --git a/include/linux/tick.h b/include/linux/tick.h index 059052306831..9a82c7dc3fdd 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
@@ -183,13 +183,8 @@ static inline bool tick_nohz_full_cpu(int cpu) | |||
183 | 183 | ||
184 | extern void tick_nohz_init(void); | 184 | extern void tick_nohz_init(void); |
185 | extern void __tick_nohz_full_check(void); | 185 | extern void __tick_nohz_full_check(void); |
186 | extern void tick_nohz_full_kick(void); | ||
186 | extern void tick_nohz_full_kick_cpu(int cpu); | 187 | extern void tick_nohz_full_kick_cpu(int cpu); |
187 | |||
188 | static inline void tick_nohz_full_kick(void) | ||
189 | { | ||
190 | tick_nohz_full_kick_cpu(smp_processor_id()); | ||
191 | } | ||
192 | |||
193 | extern void tick_nohz_full_kick_all(void); | 188 | extern void tick_nohz_full_kick_all(void); |
194 | extern void __tick_nohz_task_switch(struct task_struct *tsk); | 189 | extern void __tick_nohz_task_switch(struct task_struct *tsk); |
195 | #else | 190 | #else |
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index b5d5af3aa469..6f884e6c731e 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h | |||
@@ -464,6 +464,8 @@ struct hci_conn_params { | |||
464 | HCI_AUTO_CONN_ALWAYS, | 464 | HCI_AUTO_CONN_ALWAYS, |
465 | HCI_AUTO_CONN_LINK_LOSS, | 465 | HCI_AUTO_CONN_LINK_LOSS, |
466 | } auto_connect; | 466 | } auto_connect; |
467 | |||
468 | struct hci_conn *conn; | ||
467 | }; | 469 | }; |
468 | 470 | ||
469 | extern struct list_head hci_dev_list; | 471 | extern struct list_head hci_dev_list; |
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h index e2070960bac0..8170f8d7052b 100644 --- a/include/net/netns/ieee802154_6lowpan.h +++ b/include/net/netns/ieee802154_6lowpan.h | |||
@@ -16,7 +16,6 @@ struct netns_sysctl_lowpan { | |||
16 | struct netns_ieee802154_lowpan { | 16 | struct netns_ieee802154_lowpan { |
17 | struct netns_sysctl_lowpan sysctl; | 17 | struct netns_sysctl_lowpan sysctl; |
18 | struct netns_frags frags; | 18 | struct netns_frags frags; |
19 | int max_dsize; | ||
20 | }; | 19 | }; |
21 | 20 | ||
22 | #endif | 21 | #endif |
diff --git a/include/net/regulatory.h b/include/net/regulatory.h index 259992444e80..dad7ab20a8cb 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h | |||
@@ -167,7 +167,7 @@ struct ieee80211_reg_rule { | |||
167 | struct ieee80211_regdomain { | 167 | struct ieee80211_regdomain { |
168 | struct rcu_head rcu_head; | 168 | struct rcu_head rcu_head; |
169 | u32 n_reg_rules; | 169 | u32 n_reg_rules; |
170 | char alpha2[2]; | 170 | char alpha2[3]; |
171 | enum nl80211_dfs_regions dfs_region; | 171 | enum nl80211_dfs_regions dfs_region; |
172 | struct ieee80211_reg_rule reg_rules[]; | 172 | struct ieee80211_reg_rule reg_rules[]; |
173 | }; | 173 | }; |
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index f6e7397e799d..9fbd856e6713 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -320,6 +320,19 @@ static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc) | |||
320 | return asoc ? asoc->assoc_id : 0; | 320 | return asoc ? asoc->assoc_id : 0; |
321 | } | 321 | } |
322 | 322 | ||
323 | static inline enum sctp_sstat_state | ||
324 | sctp_assoc_to_state(const struct sctp_association *asoc) | ||
325 | { | ||
326 | /* SCTP's uapi always had SCTP_EMPTY(=0) as a dummy state, but we | ||
327 | * got rid of it in kernel space. Therefore SCTP_CLOSED et al | ||
328 | * start at =1 in user space, but actually as =0 in kernel space. | ||
329 | * Now that we can not break user space and SCTP_EMPTY is exposed | ||
330 | * there, we need to fix it up with an ugly offset not to break | ||
331 | * applications. :( | ||
332 | */ | ||
333 | return asoc->state + 1; | ||
334 | } | ||
335 | |||
323 | /* Look up the association by its id. */ | 336 | /* Look up the association by its id. */ |
324 | struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id); | 337 | struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id); |
325 | 338 | ||
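Editor's note: the sctp_assoc_to_state() helper added above exists only because the exported sctp_sstat_state numbering starts one higher than the kernel-internal association states (uapi kept SCTP_EMPTY = 0). A tiny illustration of that fixed offset; the names below are shortened stand-ins, not the real enum definitions:

#include <stdio.h>

/* kernel-internal numbering: starts at CLOSED = 0 */
enum k_state { K_CLOSED, K_COOKIE_WAIT, K_COOKIE_ECHOED };

/* exported numbering: a dummy EMPTY slot keeps everything shifted by one */
enum u_state { U_EMPTY, U_CLOSED, U_COOKIE_WAIT, U_COOKIE_ECHOED };

static enum u_state to_uapi(enum k_state s)
{
        return (enum u_state)(s + 1);   /* skip the EMPTY placeholder */
}

int main(void)
{
        printf("kernel %d -> uapi %d\n", K_COOKIE_WAIT, to_uapi(K_COOKIE_WAIT));
        return 0;
}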
diff --git a/include/net/sock.h b/include/net/sock.h index 7f2ab72f321a..b9a5bd0ed9f3 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -2165,9 +2165,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb) | |||
2165 | */ | 2165 | */ |
2166 | if (sock_flag(sk, SOCK_RCVTSTAMP) || | 2166 | if (sock_flag(sk, SOCK_RCVTSTAMP) || |
2167 | (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || | 2167 | (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) || |
2168 | (kt.tv64 && | 2168 | (kt.tv64 && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) || |
2169 | (sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE || | ||
2170 | skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP)) || | ||
2171 | (hwtstamps->hwtstamp.tv64 && | 2169 | (hwtstamps->hwtstamp.tv64 && |
2172 | (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) | 2170 | (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE))) |
2173 | __sock_recv_timestamp(msg, sk, skb); | 2171 | __sock_recv_timestamp(msg, sk, skb); |
diff --git a/include/net/wimax.h b/include/net/wimax.h index e52ef5357e08..c52b68577cb0 100644 --- a/include/net/wimax.h +++ b/include/net/wimax.h | |||
@@ -290,7 +290,7 @@ struct wimax_dev; | |||
290 | * This operation has to be synchronous, and return only when the | 290 | * This operation has to be synchronous, and return only when the |
291 | * reset is complete. In case of having had to resort to bus/cold | 291 | * reset is complete. In case of having had to resort to bus/cold |
292 | * reset implying a device disconnection, the call is allowed to | 292 | * reset implying a device disconnection, the call is allowed to |
293 | * return inmediately. | 293 | * return immediately. |
294 | * NOTE: wimax_dev->mutex is NOT locked when this op is being | 294 | * NOTE: wimax_dev->mutex is NOT locked when this op is being |
295 | * called; however, wimax_dev->mutex_reset IS locked to ensure | 295 | * called; however, wimax_dev->mutex_reset IS locked to ensure |
296 | * serialization of calls to wimax_reset(). | 296 | * serialization of calls to wimax_reset(). |
diff --git a/include/sound/soc.h b/include/sound/soc.h index be6ecae247b0..c83a334dd00f 100644 --- a/include/sound/soc.h +++ b/include/sound/soc.h | |||
@@ -277,7 +277,7 @@ | |||
277 | .access = SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE | \ | 277 | .access = SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE | \ |
278 | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \ | 278 | SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK, \ |
279 | .tlv.c = (snd_soc_bytes_tlv_callback), \ | 279 | .tlv.c = (snd_soc_bytes_tlv_callback), \ |
280 | .info = snd_soc_info_bytes_ext, \ | 280 | .info = snd_soc_bytes_info_ext, \ |
281 | .private_value = (unsigned long)&(struct soc_bytes_ext) \ | 281 | .private_value = (unsigned long)&(struct soc_bytes_ext) \ |
282 | {.max = xcount, .get = xhandler_get, .put = xhandler_put, } } | 282 | {.max = xcount, .get = xhandler_get, .put = xhandler_put, } } |
283 | #define SOC_SINGLE_XR_SX(xname, xregbase, xregcount, xnbits, \ | 283 | #define SOC_SINGLE_XR_SX(xname, xregbase, xregcount, xnbits, \ |
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index 1c09820df585..3608bebd3d9c 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h | |||
@@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(softirq, | |||
107 | * @vec_nr: softirq vector number | 107 | * @vec_nr: softirq vector number |
108 | * | 108 | * |
109 | * When used in combination with the softirq_exit tracepoint | 109 | * When used in combination with the softirq_exit tracepoint |
110 | * we can determine the softirq handler runtine. | 110 | * we can determine the softirq handler routine. |
111 | */ | 111 | */ |
112 | DEFINE_EVENT(softirq, softirq_entry, | 112 | DEFINE_EVENT(softirq, softirq_entry, |
113 | 113 | ||
@@ -121,7 +121,7 @@ DEFINE_EVENT(softirq, softirq_entry, | |||
121 | * @vec_nr: softirq vector number | 121 | * @vec_nr: softirq vector number |
122 | * | 122 | * |
123 | * When used in combination with the softirq_entry tracepoint | 123 | * When used in combination with the softirq_entry tracepoint |
124 | * we can determine the softirq handler runtine. | 124 | * we can determine the softirq handler routine. |
125 | */ | 125 | */ |
126 | DEFINE_EVENT(softirq, softirq_exit, | 126 | DEFINE_EVENT(softirq, softirq_exit, |
127 | 127 | ||
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild index 24e9033f8b3f..be88166349a1 100644 --- a/include/uapi/linux/Kbuild +++ b/include/uapi/linux/Kbuild | |||
@@ -240,6 +240,7 @@ header-y += matroxfb.h | |||
240 | header-y += mdio.h | 240 | header-y += mdio.h |
241 | header-y += media.h | 241 | header-y += media.h |
242 | header-y += mei.h | 242 | header-y += mei.h |
243 | header-y += memfd.h | ||
243 | header-y += mempolicy.h | 244 | header-y += mempolicy.h |
244 | header-y += meye.h | 245 | header-y += meye.h |
245 | header-y += mic_common.h | 246 | header-y += mic_common.h |
@@ -395,6 +396,7 @@ header-y += un.h | |||
395 | header-y += unistd.h | 396 | header-y += unistd.h |
396 | header-y += unix_diag.h | 397 | header-y += unix_diag.h |
397 | header-y += usbdevice_fs.h | 398 | header-y += usbdevice_fs.h |
399 | header-y += usbip.h | ||
398 | header-y += utime.h | 400 | header-y += utime.h |
399 | header-y += utsname.h | 401 | header-y += utsname.h |
400 | header-y += uuid.h | 402 | header-y += uuid.h |
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index 19df18c9b8be..1874ebe9ac1e 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h | |||
@@ -165,6 +165,7 @@ struct input_keymap_entry { | |||
165 | #define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */ | 165 | #define INPUT_PROP_BUTTONPAD 0x02 /* has button(s) under pad */ |
166 | #define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */ | 166 | #define INPUT_PROP_SEMI_MT 0x03 /* touch rectangle only */ |
167 | #define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */ | 167 | #define INPUT_PROP_TOPBUTTONPAD 0x04 /* softbuttons at top of pad */ |
168 | #define INPUT_PROP_POINTING_STICK 0x05 /* is a pointing stick */ | ||
168 | 169 | ||
169 | #define INPUT_PROP_MAX 0x1f | 170 | #define INPUT_PROP_MAX 0x1f |
170 | #define INPUT_PROP_CNT (INPUT_PROP_MAX + 1) | 171 | #define INPUT_PROP_CNT (INPUT_PROP_MAX + 1) |
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h index 131a6ccdba25..14334d0161d5 100644 --- a/include/xen/interface/features.h +++ b/include/xen/interface/features.h | |||
@@ -53,6 +53,9 @@ | |||
53 | /* operation as Dom0 is supported */ | 53 | /* operation as Dom0 is supported */ |
54 | #define XENFEAT_dom0 11 | 54 | #define XENFEAT_dom0 11 |
55 | 55 | ||
56 | /* Xen also maps grant references at pfn = mfn */ | ||
57 | #define XENFEAT_grant_map_identity 12 | ||
58 | |||
56 | #define XENFEAT_NR_SUBMAPS 1 | 59 | #define XENFEAT_NR_SUBMAPS 1 |
57 | 60 | ||
58 | #endif /* __XEN_PUBLIC_FEATURES_H__ */ | 61 | #endif /* __XEN_PUBLIC_FEATURES_H__ */ |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 7dc8788cfd52..940aced4ed00 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -1035,6 +1035,11 @@ static void cgroup_get(struct cgroup *cgrp) | |||
1035 | css_get(&cgrp->self); | 1035 | css_get(&cgrp->self); |
1036 | } | 1036 | } |
1037 | 1037 | ||
1038 | static bool cgroup_tryget(struct cgroup *cgrp) | ||
1039 | { | ||
1040 | return css_tryget(&cgrp->self); | ||
1041 | } | ||
1042 | |||
1038 | static void cgroup_put(struct cgroup *cgrp) | 1043 | static void cgroup_put(struct cgroup *cgrp) |
1039 | { | 1044 | { |
1040 | css_put(&cgrp->self); | 1045 | css_put(&cgrp->self); |
@@ -1147,7 +1152,8 @@ static struct cgroup *cgroup_kn_lock_live(struct kernfs_node *kn) | |||
1147 | * protection against removal. Ensure @cgrp stays accessible and | 1152 | * protection against removal. Ensure @cgrp stays accessible and |
1148 | * break the active_ref protection. | 1153 | * break the active_ref protection. |
1149 | */ | 1154 | */ |
1150 | cgroup_get(cgrp); | 1155 | if (!cgroup_tryget(cgrp)) |
1156 | return NULL; | ||
1151 | kernfs_break_active_protection(kn); | 1157 | kernfs_break_active_protection(kn); |
1152 | 1158 | ||
1153 | mutex_lock(&cgroup_mutex); | 1159 | mutex_lock(&cgroup_mutex); |
@@ -3271,8 +3277,17 @@ int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts) | |||
3271 | { | 3277 | { |
3272 | struct cftype *cft; | 3278 | struct cftype *cft; |
3273 | 3279 | ||
3274 | for (cft = cfts; cft && cft->name[0] != '\0'; cft++) | 3280 | /* |
3275 | cft->flags |= __CFTYPE_NOT_ON_DFL; | 3281 | * If legacy_flies_on_dfl, we want to show the legacy files on the |
3282 | * dfl hierarchy but iff the target subsystem hasn't been updated | ||
3283 | * for the dfl hierarchy yet. | ||
3284 | */ | ||
3285 | if (!cgroup_legacy_files_on_dfl || | ||
3286 | ss->dfl_cftypes != ss->legacy_cftypes) { | ||
3287 | for (cft = cfts; cft && cft->name[0] != '\0'; cft++) | ||
3288 | cft->flags |= __CFTYPE_NOT_ON_DFL; | ||
3289 | } | ||
3290 | |||
3276 | return cgroup_add_cftypes(ss, cfts); | 3291 | return cgroup_add_cftypes(ss, cfts); |
3277 | } | 3292 | } |
3278 | 3293 | ||
@@ -4387,6 +4402,15 @@ static void css_release_work_fn(struct work_struct *work) | |||
4387 | /* cgroup release path */ | 4402 | /* cgroup release path */ |
4388 | cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id); | 4403 | cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id); |
4389 | cgrp->id = -1; | 4404 | cgrp->id = -1; |
4405 | |||
4406 | /* | ||
4407 | * There are two control paths which try to determine | ||
4408 | * cgroup from dentry without going through kernfs - | ||
4409 | * cgroupstats_build() and css_tryget_online_from_dir(). | ||
4410 | * Those are supported by RCU protecting clearing of | ||
4411 | * cgrp->kn->priv backpointer. | ||
4412 | */ | ||
4413 | RCU_INIT_POINTER(*(void __rcu __force **)&cgrp->kn->priv, NULL); | ||
4390 | } | 4414 | } |
4391 | 4415 | ||
4392 | mutex_unlock(&cgroup_mutex); | 4416 | mutex_unlock(&cgroup_mutex); |
@@ -4543,6 +4567,11 @@ static int cgroup_mkdir(struct kernfs_node *parent_kn, const char *name, | |||
4543 | struct cftype *base_files; | 4567 | struct cftype *base_files; |
4544 | int ssid, ret; | 4568 | int ssid, ret; |
4545 | 4569 | ||
4570 | /* Do not accept '\n' to prevent making /proc/<pid>/cgroup unparsable. | ||
4571 | */ | ||
4572 | if (strchr(name, '\n')) | ||
4573 | return -EINVAL; | ||
4574 | |||
4546 | parent = cgroup_kn_lock_live(parent_kn); | 4575 | parent = cgroup_kn_lock_live(parent_kn); |
4547 | if (!parent) | 4576 | if (!parent) |
4548 | return -ENODEV; | 4577 | return -ENODEV; |
@@ -4820,16 +4849,6 @@ static int cgroup_rmdir(struct kernfs_node *kn) | |||
4820 | 4849 | ||
4821 | cgroup_kn_unlock(kn); | 4850 | cgroup_kn_unlock(kn); |
4822 | 4851 | ||
4823 | /* | ||
4824 | * There are two control paths which try to determine cgroup from | ||
4825 | * dentry without going through kernfs - cgroupstats_build() and | ||
4826 | * css_tryget_online_from_dir(). Those are supported by RCU | ||
4827 | * protecting clearing of cgrp->kn->priv backpointer, which should | ||
4828 | * happen after all files under it have been removed. | ||
4829 | */ | ||
4830 | if (!ret) | ||
4831 | RCU_INIT_POINTER(*(void __rcu __force **)&kn->priv, NULL); | ||
4832 | |||
4833 | cgroup_put(cgrp); | 4852 | cgroup_put(cgrp); |
4834 | return ret; | 4853 | return ret; |
4835 | } | 4854 | } |
@@ -5416,7 +5435,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, | |||
5416 | /* | 5435 | /* |
5417 | * This path doesn't originate from kernfs and @kn could already | 5436 | * This path doesn't originate from kernfs and @kn could already |
5418 | * have been or be removed at any point. @kn->priv is RCU | 5437 | * have been or be removed at any point. @kn->priv is RCU |
5419 | * protected for this access. See cgroup_rmdir() for details. | 5438 | * protected for this access. See css_release_work_fn() for details. |
5420 | */ | 5439 | */ |
5421 | cgrp = rcu_dereference(kn->priv); | 5440 | cgrp = rcu_dereference(kn->priv); |
5422 | if (cgrp) | 5441 | if (cgrp) |
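For context, a tiny userspace-style sketch of the name check introduced in cgroup_mkdir() above: reject any name containing a newline so that line-oriented consumers such as /proc/<pid>/cgroup stay parsable. validate_name() is a made-up helper, not a kernel API.

    #include <errno.h>
    #include <string.h>

    /* Made-up helper: refuse names that would corrupt line-oriented output. */
    static int validate_name(const char *name)
    {
            if (strchr(name, '\n'))
                    return -EINVAL;
            return 0;
    }

    int main(void)
    {
            return (validate_name("ok-name") == 0 &&
                    validate_name("bad\nname") == -EINVAL) ? 0 : 1;
    }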
diff --git a/kernel/compat.c b/kernel/compat.c index 633394f442f8..ebb3c369d03d 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
@@ -226,7 +226,7 @@ static long compat_nanosleep_restart(struct restart_block *restart) | |||
226 | ret = hrtimer_nanosleep_restart(restart); | 226 | ret = hrtimer_nanosleep_restart(restart); |
227 | set_fs(oldfs); | 227 | set_fs(oldfs); |
228 | 228 | ||
229 | if (ret) { | 229 | if (ret == -ERESTART_RESTARTBLOCK) { |
230 | rmtp = restart->nanosleep.compat_rmtp; | 230 | rmtp = restart->nanosleep.compat_rmtp; |
231 | 231 | ||
232 | if (rmtp && compat_put_timespec(&rmt, rmtp)) | 232 | if (rmtp && compat_put_timespec(&rmt, rmtp)) |
@@ -256,7 +256,26 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, | |||
256 | HRTIMER_MODE_REL, CLOCK_MONOTONIC); | 256 | HRTIMER_MODE_REL, CLOCK_MONOTONIC); |
257 | set_fs(oldfs); | 257 | set_fs(oldfs); |
258 | 258 | ||
259 | if (ret) { | 259 | /* |
260 | * hrtimer_nanosleep() can only return 0 or | ||
261 | * -ERESTART_RESTARTBLOCK here because: | ||
262 | * | ||
263 | * - we call it with HRTIMER_MODE_REL and therefor exclude the | ||
264 | * -ERESTARTNOHAND return path. | ||
265 | * | ||
266 | * - we supply the rmtp argument from the task stack (due to | ||
267 | * the necessary compat conversion. So the update cannot | ||
268 | * fail, which excludes the -EFAULT return path as well. If | ||
269 | * it fails nevertheless we have a bigger problem and wont | ||
270 | * reach this place anymore. | ||
271 | * | ||
272 | * - if the return value is 0, we do not have to update rmtp | ||
273 | * because there is no remaining time. | ||
274 | * | ||
275 | * We check for -ERESTART_RESTARTBLOCK nevertheless if the | ||
276 | * core implementation decides to return random nonsense. | ||
277 | */ | ||
278 | if (ret == -ERESTART_RESTARTBLOCK) { | ||
260 | struct restart_block *restart | 279 | struct restart_block *restart |
261 | = &current_thread_info()->restart_block; | 280 | |
262 | 281 | ||
@@ -266,7 +285,6 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct compat_timespec __user *, rqtp, | |||
266 | if (rmtp && compat_put_timespec(&rmt, rmtp)) | 285 | if (rmtp && compat_put_timespec(&rmt, rmtp)) |
267 | return -EFAULT; | 286 | return -EFAULT; |
268 | } | 287 | } |
269 | |||
270 | return ret; | 288 | return ret; |
271 | } | 289 | } |
272 | 290 | ||
diff --git a/kernel/futex.c b/kernel/futex.c index d3a9d946d0b7..815d7af2ffe8 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -2592,6 +2592,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | |||
2592 | * shared futexes. We need to compare the keys: | 2592 | * shared futexes. We need to compare the keys: |
2593 | */ | 2593 | */ |
2594 | if (match_futex(&q.key, &key2)) { | 2594 | if (match_futex(&q.key, &key2)) { |
2595 | queue_unlock(hb); | ||
2595 | ret = -EINVAL; | 2596 | ret = -EINVAL; |
2596 | goto out_put_keys; | 2597 | goto out_put_keys; |
2597 | } | 2598 | } |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index a2b28a2fd7b1..6223fab9a9d2 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -517,6 +517,7 @@ out: | |||
517 | chip->irq_eoi(&desc->irq_data); | 517 | chip->irq_eoi(&desc->irq_data); |
518 | raw_spin_unlock(&desc->lock); | 518 | raw_spin_unlock(&desc->lock); |
519 | } | 519 | } |
520 | EXPORT_SYMBOL_GPL(handle_fasteoi_irq); | ||
520 | 521 | ||
521 | /** | 522 | /** |
522 | * handle_edge_irq - edge type IRQ handler | 523 | * handle_edge_irq - edge type IRQ handler |
diff --git a/kernel/kcmp.c b/kernel/kcmp.c index e30ac0fe61c3..0aa69ea1d8fd 100644 --- a/kernel/kcmp.c +++ b/kernel/kcmp.c | |||
@@ -44,11 +44,12 @@ static long kptr_obfuscate(long v, int type) | |||
44 | */ | 44 | */ |
45 | static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type) | 45 | static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type) |
46 | { | 46 | { |
47 | long ret; | 47 | long t1, t2; |
48 | 48 | ||
49 | ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type); | 49 | t1 = kptr_obfuscate((long)v1, type); |
50 | t2 = kptr_obfuscate((long)v2, type); | ||
50 | 51 | ||
51 | return (ret < 0) | ((ret > 0) << 1); | 52 | return (t1 < t2) | ((t1 > t2) << 1); |
52 | } | 53 | } |
53 | 54 | ||
54 | /* The caller must have pinned the task */ | 55 | /* The caller must have pinned the task */ |
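A standalone sketch of the three-way-compare encoding that the kcmp_ptr() hunk above switches to: comparing the two values directly avoids the signed overflow that subtracting widely separated obfuscated pointers could cause. ordering() and the sample values are illustrative only.

    #include <stdio.h>

    /* Encode an ordering as 0 (equal), 1 (t1 < t2), 2 (t1 > t2) without
     * computing t1 - t2, which can overflow for distant values. */
    static int ordering(long t1, long t2)
    {
            return (t1 < t2) | ((t1 > t2) << 1);
    }

    int main(void)
    {
            printf("%d %d %d\n", ordering(3, 3), ordering(-1, 9), ordering(9, -1));
            return 0;   /* prints: 0 1 2 */
    }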
diff --git a/kernel/power/power.h b/kernel/power/power.h index 5d49dcac2537..2df883a9d3cb 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h | |||
@@ -179,6 +179,7 @@ extern void swsusp_show_speed(struct timeval *, struct timeval *, | |||
179 | 179 | ||
180 | #ifdef CONFIG_SUSPEND | 180 | #ifdef CONFIG_SUSPEND |
181 | /* kernel/power/suspend.c */ | 181 | /* kernel/power/suspend.c */ |
182 | extern const char *pm_labels[]; | ||
182 | extern const char *pm_states[]; | 183 | extern const char *pm_states[]; |
183 | 184 | ||
184 | extern int suspend_devices_and_enter(suspend_state_t state); | 185 | extern int suspend_devices_and_enter(suspend_state_t state); |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 6dadb25cb0d8..18c62195660f 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
@@ -31,7 +31,7 @@ | |||
31 | 31 | ||
32 | #include "power.h" | 32 | #include "power.h" |
33 | 33 | ||
34 | static const char *pm_labels[] = { "mem", "standby", "freeze", }; | 34 | const char *pm_labels[] = { "mem", "standby", "freeze", NULL }; |
35 | const char *pm_states[PM_SUSPEND_MAX]; | 35 | const char *pm_states[PM_SUSPEND_MAX]; |
36 | 36 | ||
37 | static const struct platform_suspend_ops *suspend_ops; | 37 | static const struct platform_suspend_ops *suspend_ops; |
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c index 2f524928b6aa..bd91bc177c93 100644 --- a/kernel/power/suspend_test.c +++ b/kernel/power/suspend_test.c | |||
@@ -129,20 +129,20 @@ static int __init has_wakealarm(struct device *dev, const void *data) | |||
129 | * at startup time. They're normally disabled, for faster boot and because | 129 | * at startup time. They're normally disabled, for faster boot and because |
130 | * we can't know which states really work on this particular system. | 130 | * we can't know which states really work on this particular system. |
131 | */ | 131 | */ |
132 | static suspend_state_t test_state __initdata = PM_SUSPEND_ON; | 132 | static const char *test_state_label __initdata; |
133 | 133 | ||
134 | static char warn_bad_state[] __initdata = | 134 | static char warn_bad_state[] __initdata = |
135 | KERN_WARNING "PM: can't test '%s' suspend state\n"; | 135 | KERN_WARNING "PM: can't test '%s' suspend state\n"; |
136 | 136 | ||
137 | static int __init setup_test_suspend(char *value) | 137 | static int __init setup_test_suspend(char *value) |
138 | { | 138 | { |
139 | suspend_state_t i; | 139 | int i; |
140 | 140 | ||
141 | /* "=mem" ==> "mem" */ | 141 | /* "=mem" ==> "mem" */ |
142 | value++; | 142 | value++; |
143 | for (i = PM_SUSPEND_MIN; i < PM_SUSPEND_MAX; i++) | 143 | for (i = 0; pm_labels[i]; i++) |
144 | if (!strcmp(pm_states[i], value)) { | 144 | if (!strcmp(pm_labels[i], value)) { |
145 | test_state = i; | 145 | test_state_label = pm_labels[i]; |
146 | return 0; | 146 | return 0; |
147 | } | 147 | } |
148 | 148 | ||
@@ -158,13 +158,21 @@ static int __init test_suspend(void) | |||
158 | 158 | ||
159 | struct rtc_device *rtc = NULL; | 159 | struct rtc_device *rtc = NULL; |
160 | struct device *dev; | 160 | struct device *dev; |
161 | suspend_state_t test_state; | ||
161 | 162 | ||
162 | /* PM is initialized by now; is that state testable? */ | 163 | /* PM is initialized by now; is that state testable? */ |
163 | if (test_state == PM_SUSPEND_ON) | 164 | if (!test_state_label) |
164 | goto done; | 165 | return 0; |
165 | if (!pm_states[test_state]) { | 166 | |
166 | printk(warn_bad_state, pm_states[test_state]); | 167 | for (test_state = PM_SUSPEND_MIN; test_state < PM_SUSPEND_MAX; test_state++) { |
167 | goto done; | 168 | const char *state_label = pm_states[test_state]; |
169 | |||
170 | if (state_label && !strcmp(test_state_label, state_label)) | ||
171 | break; | ||
172 | } | ||
173 | if (test_state == PM_SUSPEND_MAX) { | ||
174 | printk(warn_bad_state, test_state_label); | ||
175 | return 0; | ||
168 | } | 176 | } |
169 | 177 | ||
170 | /* RTCs have initialized by now too ... can we use one? */ | 178 | /* RTCs have initialized by now too ... can we use one? */ |
@@ -173,13 +181,12 @@ static int __init test_suspend(void) | |||
173 | rtc = rtc_class_open(dev_name(dev)); | 181 | rtc = rtc_class_open(dev_name(dev)); |
174 | if (!rtc) { | 182 | if (!rtc) { |
175 | printk(warn_no_rtc); | 183 | printk(warn_no_rtc); |
176 | goto done; | 184 | return 0; |
177 | } | 185 | } |
178 | 186 | ||
179 | /* go for it */ | 187 | /* go for it */ |
180 | test_wakealarm(rtc, test_state); | 188 | test_wakealarm(rtc, test_state); |
181 | rtc_class_close(rtc); | 189 | rtc_class_close(rtc); |
182 | done: | ||
183 | return 0; | 190 | return 0; |
184 | } | 191 | } |
185 | late_initcall(test_suspend); | 192 | late_initcall(test_suspend); |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index e04c455a0e38..1ce770687ea8 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -1665,15 +1665,15 @@ asmlinkage int vprintk_emit(int facility, int level, | |||
1665 | raw_spin_lock(&logbuf_lock); | 1665 | raw_spin_lock(&logbuf_lock); |
1666 | logbuf_cpu = this_cpu; | 1666 | logbuf_cpu = this_cpu; |
1667 | 1667 | ||
1668 | if (recursion_bug) { | 1668 | if (unlikely(recursion_bug)) { |
1669 | static const char recursion_msg[] = | 1669 | static const char recursion_msg[] = |
1670 | "BUG: recent printk recursion!"; | 1670 | "BUG: recent printk recursion!"; |
1671 | 1671 | ||
1672 | recursion_bug = 0; | 1672 | recursion_bug = 0; |
1673 | text_len = strlen(recursion_msg); | ||
1674 | /* emit KERN_CRIT message */ | 1673 | /* emit KERN_CRIT message */ |
1675 | printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0, | 1674 | printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0, |
1676 | NULL, 0, recursion_msg, text_len); | 1675 | NULL, 0, recursion_msg, |
1676 | strlen(recursion_msg)); | ||
1677 | } | 1677 | } |
1678 | 1678 | ||
1679 | /* | 1679 | /* |
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index 71e64c718f75..6a86eb7bac45 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h | |||
@@ -358,7 +358,7 @@ struct rcu_data { | |||
358 | struct rcu_head **nocb_gp_tail; | 358 | struct rcu_head **nocb_gp_tail; |
359 | long nocb_gp_count; | 359 | long nocb_gp_count; |
360 | long nocb_gp_count_lazy; | 360 | long nocb_gp_count_lazy; |
361 | bool nocb_leader_wake; /* Is the nocb leader thread awake? */ | 361 | bool nocb_leader_sleep; /* Is the nocb leader thread asleep? */ |
362 | struct rcu_data *nocb_next_follower; | 362 | struct rcu_data *nocb_next_follower; |
363 | /* Next follower in wakeup chain. */ | 363 | /* Next follower in wakeup chain. */ |
364 | 364 | ||
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 00dc411e9676..a7997e272564 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h | |||
@@ -2074,9 +2074,9 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force) | |||
2074 | 2074 | ||
2075 | if (!ACCESS_ONCE(rdp_leader->nocb_kthread)) | 2075 | if (!ACCESS_ONCE(rdp_leader->nocb_kthread)) |
2076 | return; | 2076 | return; |
2077 | if (!ACCESS_ONCE(rdp_leader->nocb_leader_wake) || force) { | 2077 | if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) { |
2078 | /* Prior xchg orders against prior callback enqueue. */ | 2078 | /* Prior xchg orders against prior callback enqueue. */ |
2079 | ACCESS_ONCE(rdp_leader->nocb_leader_wake) = true; | 2079 | ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false; |
2080 | wake_up(&rdp_leader->nocb_wq); | 2080 | wake_up(&rdp_leader->nocb_wq); |
2081 | } | 2081 | } |
2082 | } | 2082 | } |
@@ -2253,7 +2253,7 @@ wait_again: | |||
2253 | if (!rcu_nocb_poll) { | 2253 | if (!rcu_nocb_poll) { |
2254 | trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); | 2254 | trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); |
2255 | wait_event_interruptible(my_rdp->nocb_wq, | 2255 | wait_event_interruptible(my_rdp->nocb_wq, |
2256 | ACCESS_ONCE(my_rdp->nocb_leader_wake)); | 2256 | !ACCESS_ONCE(my_rdp->nocb_leader_sleep)); |
2257 | /* Memory barrier handled by smp_mb() calls below and repoll. */ | 2257 | /* Memory barrier handled by smp_mb() calls below and repoll. */ |
2258 | } else if (firsttime) { | 2258 | } else if (firsttime) { |
2259 | firsttime = false; /* Don't drown trace log with "Poll"! */ | 2259 | firsttime = false; /* Don't drown trace log with "Poll"! */ |
@@ -2292,12 +2292,12 @@ wait_again: | |||
2292 | schedule_timeout_interruptible(1); | 2292 | schedule_timeout_interruptible(1); |
2293 | 2293 | ||
2294 | /* Rescan in case we were a victim of memory ordering. */ | 2294 | /* Rescan in case we were a victim of memory ordering. */ |
2295 | my_rdp->nocb_leader_wake = false; | 2295 | my_rdp->nocb_leader_sleep = true; |
2296 | smp_mb(); /* Ensure _wake false before scan. */ | 2296 | smp_mb(); /* Ensure _sleep true before scan. */ |
2297 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) | 2297 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) |
2298 | if (ACCESS_ONCE(rdp->nocb_head)) { | 2298 | if (ACCESS_ONCE(rdp->nocb_head)) { |
2299 | /* Found CB, so short-circuit next wait. */ | 2299 | /* Found CB, so short-circuit next wait. */ |
2300 | my_rdp->nocb_leader_wake = true; | 2300 | my_rdp->nocb_leader_sleep = false; |
2301 | break; | 2301 | break; |
2302 | } | 2302 | } |
2303 | goto wait_again; | 2303 | goto wait_again; |
@@ -2307,17 +2307,17 @@ wait_again: | |||
2307 | rcu_nocb_wait_gp(my_rdp); | 2307 | rcu_nocb_wait_gp(my_rdp); |
2308 | 2308 | ||
2309 | /* | 2309 | /* |
2310 | * We left ->nocb_leader_wake set to reduce cache thrashing. | 2310 | * We left ->nocb_leader_sleep unset to reduce cache thrashing. |
2311 | * We clear it now, but recheck for new callbacks while | 2311 | * We set it now, but recheck for new callbacks while |
2312 | * traversing our follower list. | 2312 | * traversing our follower list. |
2313 | */ | 2313 | */ |
2314 | my_rdp->nocb_leader_wake = false; | 2314 | my_rdp->nocb_leader_sleep = true; |
2315 | smp_mb(); /* Ensure _wake false before scan of ->nocb_head. */ | 2315 | smp_mb(); /* Ensure _sleep true before scan of ->nocb_head. */ |
2316 | 2316 | ||
2317 | /* Each pass through the following loop wakes a follower, if needed. */ | 2317 | /* Each pass through the following loop wakes a follower, if needed. */ |
2318 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { | 2318 | for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) { |
2319 | if (ACCESS_ONCE(rdp->nocb_head)) | 2319 | if (ACCESS_ONCE(rdp->nocb_head)) |
2320 | my_rdp->nocb_leader_wake = true; /* No need to wait. */ | 2320 | my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/ |
2321 | if (!rdp->nocb_gp_head) | 2321 | if (!rdp->nocb_gp_head) |
2322 | continue; /* No CBs, so no need to wake follower. */ | 2322 | continue; /* No CBs, so no need to wake follower. */ |
2323 | 2323 | ||
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index 4aec4a457431..a7077d3ae52f 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c | |||
@@ -464,18 +464,26 @@ static enum alarmtimer_type clock2alarm(clockid_t clockid) | |||
464 | static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, | 464 | static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm, |
465 | ktime_t now) | 465 | ktime_t now) |
466 | { | 466 | { |
467 | unsigned long flags; | ||
467 | struct k_itimer *ptr = container_of(alarm, struct k_itimer, | 468 | struct k_itimer *ptr = container_of(alarm, struct k_itimer, |
468 | it.alarm.alarmtimer); | 469 | it.alarm.alarmtimer); |
469 | if (posix_timer_event(ptr, 0) != 0) | 470 | enum alarmtimer_restart result = ALARMTIMER_NORESTART; |
470 | ptr->it_overrun++; | 471 | |
472 | spin_lock_irqsave(&ptr->it_lock, flags); | ||
473 | if ((ptr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) { | ||
474 | if (posix_timer_event(ptr, 0) != 0) | ||
475 | ptr->it_overrun++; | ||
476 | } | ||
471 | 477 | ||
472 | /* Re-add periodic timers */ | 478 | /* Re-add periodic timers */ |
473 | if (ptr->it.alarm.interval.tv64) { | 479 | if (ptr->it.alarm.interval.tv64) { |
474 | ptr->it_overrun += alarm_forward(alarm, now, | 480 | ptr->it_overrun += alarm_forward(alarm, now, |
475 | ptr->it.alarm.interval); | 481 | ptr->it.alarm.interval); |
476 | return ALARMTIMER_RESTART; | 482 | result = ALARMTIMER_RESTART; |
477 | } | 483 | } |
478 | return ALARMTIMER_NORESTART; | 484 | spin_unlock_irqrestore(&ptr->it_lock, flags); |
485 | |||
486 | return result; | ||
479 | } | 487 | } |
480 | 488 | ||
481 | /** | 489 | /** |
@@ -541,18 +549,22 @@ static int alarm_timer_create(struct k_itimer *new_timer) | |||
541 | * @new_timer: k_itimer pointer | 549 | * @new_timer: k_itimer pointer |
542 | * @cur_setting: itimerspec data to fill | 550 | * @cur_setting: itimerspec data to fill |
543 | * | 551 | * |
544 | * Copies the itimerspec data out from the k_itimer | 552 | * Copies out the current itimerspec data |
545 | */ | 553 | */ |
546 | static void alarm_timer_get(struct k_itimer *timr, | 554 | static void alarm_timer_get(struct k_itimer *timr, |
547 | struct itimerspec *cur_setting) | 555 | struct itimerspec *cur_setting) |
548 | { | 556 | { |
549 | memset(cur_setting, 0, sizeof(struct itimerspec)); | 557 | ktime_t relative_expiry_time = |
558 | alarm_expires_remaining(&(timr->it.alarm.alarmtimer)); | ||
559 | |||
560 | if (ktime_to_ns(relative_expiry_time) > 0) { | ||
561 | cur_setting->it_value = ktime_to_timespec(relative_expiry_time); | ||
562 | } else { | ||
563 | cur_setting->it_value.tv_sec = 0; | ||
564 | cur_setting->it_value.tv_nsec = 0; | ||
565 | } | ||
550 | 566 | ||
551 | cur_setting->it_interval = | 567 | cur_setting->it_interval = ktime_to_timespec(timr->it.alarm.interval); |
552 | ktime_to_timespec(timr->it.alarm.interval); | ||
553 | cur_setting->it_value = | ||
554 | ktime_to_timespec(timr->it.alarm.alarmtimer.node.expires); | ||
555 | return; | ||
556 | } | 568 | } |
557 | 569 | ||
558 | /** | 570 | /** |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 99aa6ee3908f..f654a8a298fa 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -225,6 +225,20 @@ static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { | |||
225 | }; | 225 | }; |
226 | 226 | ||
227 | /* | 227 | /* |
228 | * Kick this CPU if it's full dynticks in order to force it to | ||
229 | * re-evaluate its dependency on the tick and restart it if necessary. | ||
230 | * This kick, unlike tick_nohz_full_kick_cpu() and tick_nohz_full_kick_all(), | ||
231 | * is NMI safe. | ||
232 | */ | ||
233 | void tick_nohz_full_kick(void) | ||
234 | { | ||
235 | if (!tick_nohz_full_cpu(smp_processor_id())) | ||
236 | return; | ||
237 | |||
238 | irq_work_queue(&__get_cpu_var(nohz_full_kick_work)); | ||
239 | } | ||
240 | |||
241 | /* | ||
228 | * Kick the CPU if it's full dynticks in order to force it to | 242 | * Kick the CPU if it's full dynticks in order to force it to |
229 | * re-evaluate its dependency on the tick and restart it if necessary. | 243 | * re-evaluate its dependency on the tick and restart it if necessary. |
230 | */ | 244 | */ |
diff --git a/kernel/time/time.c b/kernel/time/time.c index f0294ba14634..a9ae20fb0b11 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c | |||
@@ -559,17 +559,20 @@ EXPORT_SYMBOL(usecs_to_jiffies); | |||
559 | * that a remainder subtract here would not do the right thing as the | 559 | * that a remainder subtract here would not do the right thing as the |
560 | * resolution values don't fall on second boundries. I.e. the line: | 560 | * resolution values don't fall on second boundries. I.e. the line: |
561 | * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. | 561 | * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding. |
562 | * Note that due to the small error in the multiplier here, this | ||
563 | * rounding is incorrect for sufficiently large values of tv_nsec, but | ||
564 | * well formed timespecs should have tv_nsec < NSEC_PER_SEC, so we're | ||
565 | * OK. | ||
562 | * | 566 | * |
563 | * Rather, we just shift the bits off the right. | 567 | * Rather, we just shift the bits off the right. |
564 | * | 568 | * |
565 | * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec | 569 | * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec |
566 | * value to a scaled second value. | 570 | * value to a scaled second value. |
567 | */ | 571 | */ |
568 | unsigned long | 572 | static unsigned long |
569 | timespec_to_jiffies(const struct timespec *value) | 573 | __timespec_to_jiffies(unsigned long sec, long nsec) |
570 | { | 574 | { |
571 | unsigned long sec = value->tv_sec; | 575 | nsec = nsec + TICK_NSEC - 1; |
572 | long nsec = value->tv_nsec + TICK_NSEC - 1; | ||
573 | 576 | ||
574 | if (sec >= MAX_SEC_IN_JIFFIES){ | 577 | if (sec >= MAX_SEC_IN_JIFFIES){ |
575 | sec = MAX_SEC_IN_JIFFIES; | 578 | sec = MAX_SEC_IN_JIFFIES; |
@@ -580,6 +583,13 @@ timespec_to_jiffies(const struct timespec *value) | |||
580 | (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; | 583 | (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; |
581 | 584 | ||
582 | } | 585 | } |
586 | |||
587 | unsigned long | ||
588 | timespec_to_jiffies(const struct timespec *value) | ||
589 | { | ||
590 | return __timespec_to_jiffies(value->tv_sec, value->tv_nsec); | ||
591 | } | ||
592 | |||
583 | EXPORT_SYMBOL(timespec_to_jiffies); | 593 | EXPORT_SYMBOL(timespec_to_jiffies); |
584 | 594 | ||
585 | void | 595 | void |
@@ -596,31 +606,27 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value) | |||
596 | } | 606 | } |
597 | EXPORT_SYMBOL(jiffies_to_timespec); | 607 | EXPORT_SYMBOL(jiffies_to_timespec); |
598 | 608 | ||
599 | /* Same for "timeval" | 609 | /* |
600 | * | 610 | * We could use a similar algorithm to timespec_to_jiffies (with a |
601 | * Well, almost. The problem here is that the real system resolution is | 611 | * different multiplier for usec instead of nsec). But this has a |
602 | * in nanoseconds and the value being converted is in micro seconds. | 612 | * problem with rounding: we can't exactly add TICK_NSEC - 1 to the |
603 | * Also for some machines (those that use HZ = 1024, in-particular), | 613 | * usec value, since it's not necessarily integral. |
604 | * there is a LARGE error in the tick size in microseconds. | 614 | * |
605 | 615 | * We could instead round in the intermediate scaled representation | |
606 | * The solution we use is to do the rounding AFTER we convert the | 616 | * (i.e. in units of 1/2^(large scale) jiffies) but that's also |
607 | * microsecond part. Thus the USEC_ROUND, the bits to be shifted off. | 617 | * perilous: the scaling introduces a small positive error, which |
608 | * Instruction wise, this should cost only an additional add with carry | 618 | * combined with a division-rounding-upward (i.e. adding 2^(scale) - 1 |
609 | * instruction above the way it was done above. | 619 | * units to the intermediate before shifting) leads to accidental |
620 | * overflow and overestimates. | ||
621 | * | ||
622 | * At the cost of one additional multiplication by a constant, just | ||
623 | * use the timespec implementation. | ||
610 | */ | 624 | */ |
611 | unsigned long | 625 | unsigned long |
612 | timeval_to_jiffies(const struct timeval *value) | 626 | timeval_to_jiffies(const struct timeval *value) |
613 | { | 627 | { |
614 | unsigned long sec = value->tv_sec; | 628 | return __timespec_to_jiffies(value->tv_sec, |
615 | long usec = value->tv_usec; | 629 | value->tv_usec * NSEC_PER_USEC); |
616 | |||
617 | if (sec >= MAX_SEC_IN_JIFFIES){ | ||
618 | sec = MAX_SEC_IN_JIFFIES; | ||
619 | usec = 0; | ||
620 | } | ||
621 | return (((u64)sec * SEC_CONVERSION) + | ||
622 | (((u64)usec * USEC_CONVERSION + USEC_ROUND) >> | ||
623 | (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC; | ||
624 | } | 630 | } |
625 | EXPORT_SYMBOL(timeval_to_jiffies); | 631 | EXPORT_SYMBOL(timeval_to_jiffies); |
626 | 632 | ||
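A rough illustration of why timeval_to_jiffies() can now share the timespec path: scale microseconds to nanoseconds first, then apply a single round-up rule in nanoseconds. This sketch uses plain division and an assumed 100 Hz tick instead of the kernel's scaled fixed-point constants; to_jiffies() is a stand-in, not the real __timespec_to_jiffies().

    #include <stdio.h>

    #define NSEC_PER_USEC 1000L
    #define TICK_NSEC 10000000L     /* assumed 100 Hz tick: 10 ms per jiffy */

    /* Stand-in for __timespec_to_jiffies(): one rounding rule, in nanoseconds. */
    static unsigned long to_jiffies(unsigned long sec, long nsec)
    {
            nsec += TICK_NSEC - 1;                  /* round up to the next tick */
            return sec * (1000000000L / TICK_NSEC) + nsec / TICK_NSEC;
    }

    int main(void)
    {
            long usec = 10500;                      /* 10.5 ms */

            /* timeval path: scale usec to nsec, then share the same rounding. */
            printf("%lu\n", to_jiffies(0, usec * NSEC_PER_USEC));   /* prints 2 */
            return 0;
    }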
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index fb4a9c2cf8d9..ec1791fae965 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -442,11 +442,12 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) | |||
442 | tk->ntp_error = 0; | 442 | tk->ntp_error = 0; |
443 | ntp_clear(); | 443 | ntp_clear(); |
444 | } | 444 | } |
445 | update_vsyscall(tk); | ||
446 | update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); | ||
447 | 445 | ||
448 | tk_update_ktime_data(tk); | 446 | tk_update_ktime_data(tk); |
449 | 447 | ||
448 | update_vsyscall(tk); | ||
449 | update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); | ||
450 | |||
450 | if (action & TK_MIRROR) | 451 | if (action & TK_MIRROR) |
451 | memcpy(&shadow_timekeeper, &tk_core.timekeeper, | 452 | memcpy(&shadow_timekeeper, &tk_core.timekeeper, |
452 | sizeof(tk_core.timekeeper)); | 453 | sizeof(tk_core.timekeeper)); |
diff --git a/lib/Kconfig b/lib/Kconfig index a5ce0c7f6c30..54cf309a92a5 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -51,6 +51,9 @@ config PERCPU_RWSEM | |||
51 | config ARCH_USE_CMPXCHG_LOCKREF | 51 | config ARCH_USE_CMPXCHG_LOCKREF |
52 | bool | 52 | bool |
53 | 53 | ||
54 | config ARCH_HAS_FAST_MULTIPLIER | ||
55 | bool | ||
56 | |||
54 | config CRC_CCITT | 57 | config CRC_CCITT |
55 | tristate "CRC-CCITT functions" | 58 | tristate "CRC-CCITT functions" |
56 | help | 59 | help |
diff --git a/lib/assoc_array.c b/lib/assoc_array.c index c0b1007011e1..2404d03e251a 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c | |||
@@ -1723,11 +1723,13 @@ ascend_old_tree: | |||
1723 | shortcut = assoc_array_ptr_to_shortcut(ptr); | 1723 | shortcut = assoc_array_ptr_to_shortcut(ptr); |
1724 | slot = shortcut->parent_slot; | 1724 | slot = shortcut->parent_slot; |
1725 | cursor = shortcut->back_pointer; | 1725 | cursor = shortcut->back_pointer; |
1726 | if (!cursor) | ||
1727 | goto gc_complete; | ||
1726 | } else { | 1728 | } else { |
1727 | slot = node->parent_slot; | 1729 | slot = node->parent_slot; |
1728 | cursor = ptr; | 1730 | cursor = ptr; |
1729 | } | 1731 | } |
1730 | BUG_ON(!ptr); | 1732 | BUG_ON(!cursor); |
1731 | node = assoc_array_ptr_to_node(cursor); | 1733 | node = assoc_array_ptr_to_node(cursor); |
1732 | slot++; | 1734 | slot++; |
1733 | goto continue_node; | 1735 | goto continue_node; |
@@ -1735,7 +1737,7 @@ ascend_old_tree: | |||
1735 | gc_complete: | 1737 | gc_complete: |
1736 | edit->set[0].to = new_root; | 1738 | edit->set[0].to = new_root; |
1737 | assoc_array_apply_edit(edit); | 1739 | assoc_array_apply_edit(edit); |
1738 | edit->array->nr_leaves_on_tree = nr_leaves_on_tree; | 1740 | array->nr_leaves_on_tree = nr_leaves_on_tree; |
1739 | return 0; | 1741 | return 0; |
1740 | 1742 | ||
1741 | enomem: | 1743 | enomem: |
diff --git a/lib/hweight.c b/lib/hweight.c index b7d81ba143d1..9a5c1f221558 100644 --- a/lib/hweight.c +++ b/lib/hweight.c | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | unsigned int __sw_hweight32(unsigned int w) | 12 | unsigned int __sw_hweight32(unsigned int w) |
13 | { | 13 | { |
14 | #ifdef ARCH_HAS_FAST_MULTIPLIER | 14 | #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER |
15 | w -= (w >> 1) & 0x55555555; | 15 | w -= (w >> 1) & 0x55555555; |
16 | w = (w & 0x33333333) + ((w >> 2) & 0x33333333); | 16 | w = (w & 0x33333333) + ((w >> 2) & 0x33333333); |
17 | w = (w + (w >> 4)) & 0x0f0f0f0f; | 17 | w = (w + (w >> 4)) & 0x0f0f0f0f; |
@@ -49,7 +49,7 @@ unsigned long __sw_hweight64(__u64 w) | |||
49 | return __sw_hweight32((unsigned int)(w >> 32)) + | 49 | return __sw_hweight32((unsigned int)(w >> 32)) + |
50 | __sw_hweight32((unsigned int)w); | 50 | __sw_hweight32((unsigned int)w); |
51 | #elif BITS_PER_LONG == 64 | 51 | #elif BITS_PER_LONG == 64 |
52 | #ifdef ARCH_HAS_FAST_MULTIPLIER | 52 | #ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER |
53 | w -= (w >> 1) & 0x5555555555555555ul; | 53 | w -= (w >> 1) & 0x5555555555555555ul; |
54 | w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul); | 54 | w = (w & 0x3333333333333333ul) + ((w >> 2) & 0x3333333333333333ul); |
55 | w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful; | 55 | w = (w + (w >> 4)) & 0x0f0f0f0f0f0f0f0ful; |
diff --git a/lib/string.c b/lib/string.c index 992bf30af759..f3c6ff596414 100644 --- a/lib/string.c +++ b/lib/string.c | |||
@@ -807,9 +807,9 @@ void *memchr_inv(const void *start, int c, size_t bytes) | |||
807 | return check_bytes8(start, value, bytes); | 807 | return check_bytes8(start, value, bytes); |
808 | 808 | ||
809 | value64 = value; | 809 | value64 = value; |
810 | #if defined(ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 | 810 | #if defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) && BITS_PER_LONG == 64 |
811 | value64 *= 0x0101010101010101; | 811 | value64 *= 0x0101010101010101; |
812 | #elif defined(ARCH_HAS_FAST_MULTIPLIER) | 812 | #elif defined(CONFIG_ARCH_HAS_FAST_MULTIPLIER) |
813 | value64 *= 0x01010101; | 813 | value64 *= 0x01010101; |
814 | value64 |= value64 << 32; | 814 | value64 |= value64 << 32; |
815 | #else | 815 | #else |
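The multiplier trick now gated by CONFIG_ARCH_HAS_FAST_MULTIPLIER (with the CONFIG_ prefix, since it is a Kconfig symbol after this series) replicates a byte across a word with one multiply. A quick userspace illustration with an arbitrary byte value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t value = 0xab;

            /* One multiply spreads the byte into every byte lane of the word. */
            uint64_t word64 = value * 0x0101010101010101ULL;
            uint32_t word32 = (uint32_t)value * 0x01010101U;

            printf("%016llx %08x\n", (unsigned long long)word64, (unsigned)word32);
            /* prints: abababababababab abababab */
            return 0;
    }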
diff --git a/mm/memblock.c b/mm/memblock.c index 70fad0c0dafb..6ecb0d937fb5 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -816,6 +816,10 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, | |||
816 | if (nid != NUMA_NO_NODE && nid != m_nid) | 816 | if (nid != NUMA_NO_NODE && nid != m_nid) |
817 | continue; | 817 | continue; |
818 | 818 | ||
819 | /* skip hotpluggable memory regions if needed */ | ||
820 | if (movable_node_is_enabled() && memblock_is_hotpluggable(m)) | ||
821 | continue; | ||
822 | |||
819 | if (!type_b) { | 823 | if (!type_b) { |
820 | if (out_start) | 824 | if (out_start) |
821 | *out_start = m_start; | 825 | *out_start = m_start; |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ec4dcf1b9562..085dc6d2f876 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -2534,6 +2534,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, | |||
2534 | unsigned long long size; | 2534 | unsigned long long size; |
2535 | int ret = 0; | 2535 | int ret = 0; |
2536 | 2536 | ||
2537 | if (mem_cgroup_is_root(memcg)) | ||
2538 | goto done; | ||
2537 | retry: | 2539 | retry: |
2538 | if (consume_stock(memcg, nr_pages)) | 2540 | if (consume_stock(memcg, nr_pages)) |
2539 | goto done; | 2541 | goto done; |
@@ -2611,9 +2613,7 @@ nomem: | |||
2611 | if (!(gfp_mask & __GFP_NOFAIL)) | 2613 | if (!(gfp_mask & __GFP_NOFAIL)) |
2612 | return -ENOMEM; | 2614 | return -ENOMEM; |
2613 | bypass: | 2615 | bypass: |
2614 | memcg = root_mem_cgroup; | 2616 | return -EINTR; |
2615 | ret = -EINTR; | ||
2616 | goto retry; | ||
2617 | 2617 | ||
2618 | done_restock: | 2618 | done_restock: |
2619 | if (batch > nr_pages) | 2619 | if (batch > nr_pages) |
@@ -2626,6 +2626,9 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) | |||
2626 | { | 2626 | { |
2627 | unsigned long bytes = nr_pages * PAGE_SIZE; | 2627 | unsigned long bytes = nr_pages * PAGE_SIZE; |
2628 | 2628 | ||
2629 | if (mem_cgroup_is_root(memcg)) | ||
2630 | return; | ||
2631 | |||
2629 | res_counter_uncharge(&memcg->res, bytes); | 2632 | res_counter_uncharge(&memcg->res, bytes); |
2630 | if (do_swap_account) | 2633 | if (do_swap_account) |
2631 | res_counter_uncharge(&memcg->memsw, bytes); | 2634 | res_counter_uncharge(&memcg->memsw, bytes); |
@@ -2640,6 +2643,9 @@ static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg, | |||
2640 | { | 2643 | { |
2641 | unsigned long bytes = nr_pages * PAGE_SIZE; | 2644 | unsigned long bytes = nr_pages * PAGE_SIZE; |
2642 | 2645 | ||
2646 | if (mem_cgroup_is_root(memcg)) | ||
2647 | return; | ||
2648 | |||
2643 | res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes); | 2649 | res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes); |
2644 | if (do_swap_account) | 2650 | if (do_swap_account) |
2645 | res_counter_uncharge_until(&memcg->memsw, | 2651 | res_counter_uncharge_until(&memcg->memsw, |
@@ -4093,6 +4099,46 @@ out: | |||
4093 | return retval; | 4099 | return retval; |
4094 | } | 4100 | } |
4095 | 4101 | ||
4102 | static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg, | ||
4103 | enum mem_cgroup_stat_index idx) | ||
4104 | { | ||
4105 | struct mem_cgroup *iter; | ||
4106 | long val = 0; | ||
4107 | |||
4108 | /* Per-cpu values can be negative, use a signed accumulator */ | ||
4109 | for_each_mem_cgroup_tree(iter, memcg) | ||
4110 | val += mem_cgroup_read_stat(iter, idx); | ||
4111 | |||
4112 | if (val < 0) /* race ? */ | ||
4113 | val = 0; | ||
4114 | return val; | ||
4115 | } | ||
4116 | |||
4117 | static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) | ||
4118 | { | ||
4119 | u64 val; | ||
4120 | |||
4121 | if (!mem_cgroup_is_root(memcg)) { | ||
4122 | if (!swap) | ||
4123 | return res_counter_read_u64(&memcg->res, RES_USAGE); | ||
4124 | else | ||
4125 | return res_counter_read_u64(&memcg->memsw, RES_USAGE); | ||
4126 | } | ||
4127 | |||
4128 | /* | ||
4129 | * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS | ||
4130 | * as well as in MEM_CGROUP_STAT_RSS_HUGE. | ||
4131 | */ | ||
4132 | val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE); | ||
4133 | val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS); | ||
4134 | |||
4135 | if (swap) | ||
4136 | val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP); | ||
4137 | |||
4138 | return val << PAGE_SHIFT; | ||
4139 | } | ||
4140 | |||
4141 | |||
4096 | static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, | 4142 | static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, |
4097 | struct cftype *cft) | 4143 | struct cftype *cft) |
4098 | { | 4144 | { |
@@ -4102,8 +4148,12 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, | |||
4102 | 4148 | ||
4103 | switch (type) { | 4149 | switch (type) { |
4104 | case _MEM: | 4150 | case _MEM: |
4151 | if (name == RES_USAGE) | ||
4152 | return mem_cgroup_usage(memcg, false); | ||
4105 | return res_counter_read_u64(&memcg->res, name); | 4153 | return res_counter_read_u64(&memcg->res, name); |
4106 | case _MEMSWAP: | 4154 | case _MEMSWAP: |
4155 | if (name == RES_USAGE) | ||
4156 | return mem_cgroup_usage(memcg, true); | ||
4107 | return res_counter_read_u64(&memcg->memsw, name); | 4157 | return res_counter_read_u64(&memcg->memsw, name); |
4108 | case _KMEM: | 4158 | case _KMEM: |
4109 | return res_counter_read_u64(&memcg->kmem, name); | 4159 | return res_counter_read_u64(&memcg->kmem, name); |
@@ -4572,10 +4622,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) | |||
4572 | if (!t) | 4622 | if (!t) |
4573 | goto unlock; | 4623 | goto unlock; |
4574 | 4624 | ||
4575 | if (!swap) | 4625 | usage = mem_cgroup_usage(memcg, swap); |
4576 | usage = res_counter_read_u64(&memcg->res, RES_USAGE); | ||
4577 | else | ||
4578 | usage = res_counter_read_u64(&memcg->memsw, RES_USAGE); | ||
4579 | 4626 | ||
4580 | /* | 4627 | /* |
4581 | * current_threshold points to threshold just below or equal to usage. | 4628 | * current_threshold points to threshold just below or equal to usage. |
@@ -4673,10 +4720,10 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, | |||
4673 | 4720 | ||
4674 | if (type == _MEM) { | 4721 | if (type == _MEM) { |
4675 | thresholds = &memcg->thresholds; | 4722 | thresholds = &memcg->thresholds; |
4676 | usage = res_counter_read_u64(&memcg->res, RES_USAGE); | 4723 | usage = mem_cgroup_usage(memcg, false); |
4677 | } else if (type == _MEMSWAP) { | 4724 | } else if (type == _MEMSWAP) { |
4678 | thresholds = &memcg->memsw_thresholds; | 4725 | thresholds = &memcg->memsw_thresholds; |
4679 | usage = res_counter_read_u64(&memcg->memsw, RES_USAGE); | 4726 | usage = mem_cgroup_usage(memcg, true); |
4680 | } else | 4727 | } else |
4681 | BUG(); | 4728 | BUG(); |
4682 | 4729 | ||
@@ -4762,10 +4809,10 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, | |||
4762 | 4809 | ||
4763 | if (type == _MEM) { | 4810 | if (type == _MEM) { |
4764 | thresholds = &memcg->thresholds; | 4811 | thresholds = &memcg->thresholds; |
4765 | usage = res_counter_read_u64(&memcg->res, RES_USAGE); | 4812 | usage = mem_cgroup_usage(memcg, false); |
4766 | } else if (type == _MEMSWAP) { | 4813 | } else if (type == _MEMSWAP) { |
4767 | thresholds = &memcg->memsw_thresholds; | 4814 | thresholds = &memcg->memsw_thresholds; |
4768 | usage = res_counter_read_u64(&memcg->memsw, RES_USAGE); | 4815 | usage = mem_cgroup_usage(memcg, true); |
4769 | } else | 4816 | } else |
4770 | BUG(); | 4817 | BUG(); |
4771 | 4818 | ||
@@ -5525,9 +5572,9 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) | |||
5525 | * core guarantees its existence. | 5572 | * core guarantees its existence. |
5526 | */ | 5573 | */ |
5527 | } else { | 5574 | } else { |
5528 | res_counter_init(&memcg->res, &root_mem_cgroup->res); | 5575 | res_counter_init(&memcg->res, NULL); |
5529 | res_counter_init(&memcg->memsw, &root_mem_cgroup->memsw); | 5576 | res_counter_init(&memcg->memsw, NULL); |
5530 | res_counter_init(&memcg->kmem, &root_mem_cgroup->kmem); | 5577 | res_counter_init(&memcg->kmem, NULL); |
5531 | /* | 5578 | /* |
5532 | * Deeper hierachy with use_hierarchy == false doesn't make | 5579 | * Deeper hierachy with use_hierarchy == false doesn't make |
5533 | * much sense so let cgroup subsystem know about this | 5580 | * much sense so let cgroup subsystem know about this |
@@ -5969,8 +6016,9 @@ static void __mem_cgroup_clear_mc(void) | |||
5969 | /* we must fixup refcnts and charges */ | 6016 | /* we must fixup refcnts and charges */ |
5970 | if (mc.moved_swap) { | 6017 | if (mc.moved_swap) { |
5971 | /* uncharge swap account from the old cgroup */ | 6018 | /* uncharge swap account from the old cgroup */ |
5972 | res_counter_uncharge(&mc.from->memsw, | 6019 | if (!mem_cgroup_is_root(mc.from)) |
5973 | PAGE_SIZE * mc.moved_swap); | 6020 | res_counter_uncharge(&mc.from->memsw, |
6021 | PAGE_SIZE * mc.moved_swap); | ||
5974 | 6022 | ||
5975 | for (i = 0; i < mc.moved_swap; i++) | 6023 | for (i = 0; i < mc.moved_swap; i++) |
5976 | css_put(&mc.from->css); | 6024 | css_put(&mc.from->css); |
@@ -5979,8 +6027,9 @@ static void __mem_cgroup_clear_mc(void) | |||
5979 | * we charged both to->res and to->memsw, so we should | 6027 | * we charged both to->res and to->memsw, so we should |
5980 | * uncharge to->res. | 6028 | * uncharge to->res. |
5981 | */ | 6029 | */ |
5982 | res_counter_uncharge(&mc.to->res, | 6030 | if (!mem_cgroup_is_root(mc.to)) |
5983 | PAGE_SIZE * mc.moved_swap); | 6031 | res_counter_uncharge(&mc.to->res, |
6032 | PAGE_SIZE * mc.moved_swap); | ||
5984 | /* we've already done css_get(mc.to) */ | 6033 | /* we've already done css_get(mc.to) */ |
5985 | mc.moved_swap = 0; | 6034 | mc.moved_swap = 0; |
5986 | } | 6035 | } |
@@ -6345,7 +6394,8 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry) | |||
6345 | rcu_read_lock(); | 6394 | rcu_read_lock(); |
6346 | memcg = mem_cgroup_lookup(id); | 6395 | memcg = mem_cgroup_lookup(id); |
6347 | if (memcg) { | 6396 | if (memcg) { |
6348 | res_counter_uncharge(&memcg->memsw, PAGE_SIZE); | 6397 | if (!mem_cgroup_is_root(memcg)) |
6398 | res_counter_uncharge(&memcg->memsw, PAGE_SIZE); | ||
6349 | mem_cgroup_swap_statistics(memcg, false); | 6399 | mem_cgroup_swap_statistics(memcg, false); |
6350 | css_put(&memcg->css); | 6400 | css_put(&memcg->css); |
6351 | } | 6401 | } |
@@ -6509,12 +6559,15 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, | |||
6509 | { | 6559 | { |
6510 | unsigned long flags; | 6560 | unsigned long flags; |
6511 | 6561 | ||
6512 | if (nr_mem) | 6562 | if (!mem_cgroup_is_root(memcg)) { |
6513 | res_counter_uncharge(&memcg->res, nr_mem * PAGE_SIZE); | 6563 | if (nr_mem) |
6514 | if (nr_memsw) | 6564 | res_counter_uncharge(&memcg->res, |
6515 | res_counter_uncharge(&memcg->memsw, nr_memsw * PAGE_SIZE); | 6565 | nr_mem * PAGE_SIZE); |
6516 | 6566 | if (nr_memsw) | |
6517 | memcg_oom_recover(memcg); | 6567 | res_counter_uncharge(&memcg->memsw, |
6568 | nr_memsw * PAGE_SIZE); | ||
6569 | memcg_oom_recover(memcg); | ||
6570 | } | ||
6518 | 6571 | ||
6519 | local_irq_save(flags); | 6572 | local_irq_save(flags); |
6520 | __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); | 6573 | __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -369,20 +369,20 @@ static int browse_rb(struct rb_root *root) | |||
369 | struct vm_area_struct *vma; | 369 | struct vm_area_struct *vma; |
370 | vma = rb_entry(nd, struct vm_area_struct, vm_rb); | 370 | vma = rb_entry(nd, struct vm_area_struct, vm_rb); |
371 | if (vma->vm_start < prev) { | 371 | if (vma->vm_start < prev) { |
372 | pr_info("vm_start %lx prev %lx\n", vma->vm_start, prev); | 372 | pr_emerg("vm_start %lx prev %lx\n", vma->vm_start, prev); |
373 | bug = 1; | 373 | bug = 1; |
374 | } | 374 | } |
375 | if (vma->vm_start < pend) { | 375 | if (vma->vm_start < pend) { |
376 | pr_info("vm_start %lx pend %lx\n", vma->vm_start, pend); | 376 | pr_emerg("vm_start %lx pend %lx\n", vma->vm_start, pend); |
377 | bug = 1; | 377 | bug = 1; |
378 | } | 378 | } |
379 | if (vma->vm_start > vma->vm_end) { | 379 | if (vma->vm_start > vma->vm_end) { |
380 | pr_info("vm_end %lx < vm_start %lx\n", | 380 | pr_emerg("vm_end %lx < vm_start %lx\n", |
381 | vma->vm_end, vma->vm_start); | 381 | vma->vm_end, vma->vm_start); |
382 | bug = 1; | 382 | bug = 1; |
383 | } | 383 | } |
384 | if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { | 384 | if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) { |
385 | pr_info("free gap %lx, correct %lx\n", | 385 | pr_emerg("free gap %lx, correct %lx\n", |
386 | vma->rb_subtree_gap, | 386 | vma->rb_subtree_gap, |
387 | vma_compute_subtree_gap(vma)); | 387 | vma_compute_subtree_gap(vma)); |
388 | bug = 1; | 388 | bug = 1; |
@@ -396,7 +396,7 @@ static int browse_rb(struct rb_root *root) | |||
396 | for (nd = pn; nd; nd = rb_prev(nd)) | 396 | for (nd = pn; nd; nd = rb_prev(nd)) |
397 | j++; | 397 | j++; |
398 | if (i != j) { | 398 | if (i != j) { |
399 | pr_info("backwards %d, forwards %d\n", j, i); | 399 | pr_emerg("backwards %d, forwards %d\n", j, i); |
400 | bug = 1; | 400 | bug = 1; |
401 | } | 401 | } |
402 | return bug ? -1 : i; | 402 | return bug ? -1 : i; |
@@ -431,17 +431,17 @@ static void validate_mm(struct mm_struct *mm) | |||
431 | i++; | 431 | i++; |
432 | } | 432 | } |
433 | if (i != mm->map_count) { | 433 | if (i != mm->map_count) { |
434 | pr_info("map_count %d vm_next %d\n", mm->map_count, i); | 434 | pr_emerg("map_count %d vm_next %d\n", mm->map_count, i); |
435 | bug = 1; | 435 | bug = 1; |
436 | } | 436 | } |
437 | if (highest_address != mm->highest_vm_end) { | 437 | if (highest_address != mm->highest_vm_end) { |
438 | pr_info("mm->highest_vm_end %lx, found %lx\n", | 438 | pr_emerg("mm->highest_vm_end %lx, found %lx\n", |
439 | mm->highest_vm_end, highest_address); | 439 | mm->highest_vm_end, highest_address); |
440 | bug = 1; | 440 | bug = 1; |
441 | } | 441 | } |
442 | i = browse_rb(&mm->mm_rb); | 442 | i = browse_rb(&mm->mm_rb); |
443 | if (i != mm->map_count) { | 443 | if (i != mm->map_count) { |
444 | pr_info("map_count %d rb %d\n", mm->map_count, i); | 444 | pr_emerg("map_count %d rb %d\n", mm->map_count, i); |
445 | bug = 1; | 445 | bug = 1; |
446 | } | 446 | } |
447 | BUG_ON(bug); | 447 | BUG_ON(bug); |
diff --git a/mm/nobootmem.c b/mm/nobootmem.c index 7ed58602e71b..7c7ab32ee503 100644 --- a/mm/nobootmem.c +++ b/mm/nobootmem.c | |||
@@ -119,6 +119,8 @@ static unsigned long __init free_low_memory_core_early(void) | |||
119 | phys_addr_t start, end; | 119 | phys_addr_t start, end; |
120 | u64 i; | 120 | u64 i; |
121 | 121 | ||
122 | memblock_clear_hotplug(0, -1); | ||
123 | |||
122 | for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) | 124 | for_each_free_mem_range(i, NUMA_NO_NODE, &start, &end, NULL) |
123 | count += __free_memory_core(start, end); | 125 | count += __free_memory_core(start, end); |
124 | 126 | ||
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c index 3707c71ae4cd..51108165f829 100644 --- a/mm/percpu-vm.c +++ b/mm/percpu-vm.c | |||
@@ -108,7 +108,7 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk, | |||
108 | int page_start, int page_end) | 108 | int page_start, int page_end) |
109 | { | 109 | { |
110 | const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; | 110 | const gfp_t gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_COLD; |
111 | unsigned int cpu; | 111 | unsigned int cpu, tcpu; |
112 | int i; | 112 | int i; |
113 | 113 | ||
114 | for_each_possible_cpu(cpu) { | 114 | for_each_possible_cpu(cpu) { |
@@ -116,14 +116,23 @@ static int pcpu_alloc_pages(struct pcpu_chunk *chunk, | |||
116 | struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; | 116 | struct page **pagep = &pages[pcpu_page_idx(cpu, i)]; |
117 | 117 | ||
118 | *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); | 118 | *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); |
119 | if (!*pagep) { | 119 | if (!*pagep) |
120 | pcpu_free_pages(chunk, pages, populated, | 120 | goto err; |
121 | page_start, page_end); | ||
122 | return -ENOMEM; | ||
123 | } | ||
124 | } | 121 | } |
125 | } | 122 | } |
126 | return 0; | 123 | return 0; |
124 | |||
125 | err: | ||
126 | while (--i >= page_start) | ||
127 | __free_page(pages[pcpu_page_idx(cpu, i)]); | ||
128 | |||
129 | for_each_possible_cpu(tcpu) { | ||
130 | if (tcpu == cpu) | ||
131 | break; | ||
132 | for (i = page_start; i < page_end; i++) | ||
133 | __free_page(pages[pcpu_page_idx(tcpu, i)]); | ||
134 | } | ||
135 | return -ENOMEM; | ||
127 | } | 136 | } |
128 | 137 | ||
129 | /** | 138 | /** |
@@ -263,6 +272,7 @@ err: | |||
263 | __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), | 272 | __pcpu_unmap_pages(pcpu_chunk_addr(chunk, tcpu, page_start), |
264 | page_end - page_start); | 273 | page_end - page_start); |
265 | } | 274 | } |
275 | pcpu_post_unmap_tlb_flush(chunk, page_start, page_end); | ||
266 | return err; | 276 | return err; |
267 | } | 277 | } |
268 | 278 | ||
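The new error path in pcpu_alloc_pages() unwinds a two-level allocation: free the pages already obtained for the failing CPU, then everything allocated for the CPUs finished before it. A generic userspace sketch of that rollback shape, with invented names and a flat array standing in for the per-CPU page vectors:

    #include <stdlib.h>

    /* Illustrative only: allocate npages blocks per "cpu"; on failure, undo
     * the partial cpu first, then all cpus completed before it. */
    static int alloc_all(void **slots, int ncpu, int npages)
    {
            int cpu, page, tcpu;

            for (cpu = 0; cpu < ncpu; cpu++) {
                    for (page = 0; page < npages; page++) {
                            slots[cpu * npages + page] = malloc(4096);
                            if (!slots[cpu * npages + page])
                                    goto err;
                    }
            }
            return 0;

    err:
            while (--page >= 0)                     /* partially filled cpu */
                    free(slots[cpu * npages + page]);
            for (tcpu = 0; tcpu < cpu; tcpu++)      /* cpus completed earlier */
                    for (page = 0; page < npages; page++)
                            free(slots[tcpu * npages + page]);
            return -1;
    }

    int main(void)
    {
            void *slots[4 * 8] = { 0 };
            int i, ret = alloc_all(slots, 4, 8);

            for (i = 0; i < 4 * 8 && !ret; i++)
                    free(slots[i]);
            return ret ? 1 : 0;
    }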
diff --git a/mm/percpu.c b/mm/percpu.c index 2139e30a4b44..da997f9800bd 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -1932,6 +1932,8 @@ void __init setup_per_cpu_areas(void) | |||
1932 | 1932 | ||
1933 | if (pcpu_setup_first_chunk(ai, fc) < 0) | 1933 | if (pcpu_setup_first_chunk(ai, fc) < 0) |
1934 | panic("Failed to initialize percpu areas."); | 1934 | panic("Failed to initialize percpu areas."); |
1935 | |||
1936 | pcpu_free_alloc_info(ai); | ||
1935 | } | 1937 | } |
1936 | 1938 | ||
1937 | #endif /* CONFIG_SMP */ | 1939 | #endif /* CONFIG_SMP */ |
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index b50dabb3f86a..faff6247ac8f 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
@@ -589,6 +589,14 @@ EXPORT_SYMBOL(hci_get_route); | |||
589 | void hci_le_conn_failed(struct hci_conn *conn, u8 status) | 589 | void hci_le_conn_failed(struct hci_conn *conn, u8 status) |
590 | { | 590 | { |
591 | struct hci_dev *hdev = conn->hdev; | 591 | struct hci_dev *hdev = conn->hdev; |
592 | struct hci_conn_params *params; | ||
593 | |||
594 | params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, | ||
595 | conn->dst_type); | ||
596 | if (params && params->conn) { | ||
597 | hci_conn_drop(params->conn); | ||
598 | params->conn = NULL; | ||
599 | } | ||
592 | 600 | ||
593 | conn->state = BT_CLOSED; | 601 | conn->state = BT_CLOSED; |
594 | 602 | ||
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index c32d361c0cf7..1d9c29a00568 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c | |||
@@ -2536,8 +2536,13 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev) | |||
2536 | { | 2536 | { |
2537 | struct hci_conn_params *p; | 2537 | struct hci_conn_params *p; |
2538 | 2538 | ||
2539 | list_for_each_entry(p, &hdev->le_conn_params, list) | 2539 | list_for_each_entry(p, &hdev->le_conn_params, list) { |
2540 | if (p->conn) { | ||
2541 | hci_conn_drop(p->conn); | ||
2542 | p->conn = NULL; | ||
2543 | } | ||
2540 | list_del_init(&p->action); | 2544 | list_del_init(&p->action); |
2545 | } | ||
2541 | 2546 | ||
2542 | BT_DBG("All LE pending actions cleared"); | 2547 | BT_DBG("All LE pending actions cleared"); |
2543 | } | 2548 | } |
@@ -2578,8 +2583,8 @@ static int hci_dev_do_close(struct hci_dev *hdev) | |||
2578 | 2583 | ||
2579 | hci_dev_lock(hdev); | 2584 | hci_dev_lock(hdev); |
2580 | hci_inquiry_cache_flush(hdev); | 2585 | hci_inquiry_cache_flush(hdev); |
2581 | hci_conn_hash_flush(hdev); | ||
2582 | hci_pend_le_actions_clear(hdev); | 2586 | hci_pend_le_actions_clear(hdev); |
2587 | hci_conn_hash_flush(hdev); | ||
2583 | hci_dev_unlock(hdev); | 2588 | hci_dev_unlock(hdev); |
2584 | 2589 | ||
2585 | hci_notify(hdev, HCI_DEV_DOWN); | 2590 | hci_notify(hdev, HCI_DEV_DOWN); |
@@ -3727,6 +3732,9 @@ void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) | |||
3727 | if (!params) | 3732 | if (!params) |
3728 | return; | 3733 | return; |
3729 | 3734 | ||
3735 | if (params->conn) | ||
3736 | hci_conn_drop(params->conn); | ||
3737 | |||
3730 | list_del(&params->action); | 3738 |
3731 | list_del(&params->list); | 3739 |
3732 | kfree(params); | 3740 | kfree(params); |
@@ -3757,6 +3765,8 @@ void hci_conn_params_clear_all(struct hci_dev *hdev) | |||
3757 | struct hci_conn_params *params, *tmp; | 3765 | struct hci_conn_params *params, *tmp; |
3758 | 3766 | ||
3759 | list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { | 3767 | list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { |
3768 | if (params->conn) | ||
3769 | hci_conn_drop(params->conn); | ||
3760 | list_del(¶ms->action); | 3770 | list_del(¶ms->action); |
3761 | list_del(¶ms->list); | 3771 | list_del(¶ms->list); |
3762 | kfree(params); | 3772 | kfree(params); |
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index be35598984d9..a6000823f0ff 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c | |||
@@ -4221,8 +4221,13 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) | |||
4221 | hci_proto_connect_cfm(conn, ev->status); | 4221 | hci_proto_connect_cfm(conn, ev->status); |
4222 | 4222 | ||
4223 | params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); | 4223 | params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); |
4224 | if (params) | 4224 | if (params) { |
4225 | list_del_init(¶ms->action); | 4225 | list_del_init(¶ms->action); |
4226 | if (params->conn) { | ||
4227 | hci_conn_drop(params->conn); | ||
4228 | params->conn = NULL; | ||
4229 | } | ||
4230 | } | ||
4226 | 4231 | ||
4227 | unlock: | 4232 | unlock: |
4228 | hci_update_background_scan(hdev); | 4233 | hci_update_background_scan(hdev); |
@@ -4304,8 +4309,16 @@ static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr, | |||
4304 | 4309 | ||
4305 | conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, | 4310 | conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW, |
4306 | HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER); | 4311 | HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER); |
4307 | if (!IS_ERR(conn)) | 4312 | if (!IS_ERR(conn)) { |
4313 | /* Store the pointer since we don't really have any | ||
4314 | * other owner of the object besides the params that | ||
4315 | * triggered it. This way we can abort the connection if | ||
4316 | * the parameters get removed and keep the reference | ||
4317 | * count consistent once the connection is established. | ||
4318 | */ | ||
4319 | params->conn = conn; | ||
4308 | return; | 4320 | return; |
4321 | } | ||
4309 | 4322 | ||
4310 | switch (PTR_ERR(conn)) { | 4323 | switch (PTR_ERR(conn)) { |
4311 | case -EBUSY: | 4324 | case -EBUSY: |
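
Note on the Bluetooth hunks above: the hci_conn_params entry that triggered an LE auto-connection now remembers the resulting hci_conn (see the comment added in check_pending_le_conn), and that reference is dropped on every path where the params are removed, cleared, or the connection completes or fails; hci_pend_le_actions_clear() is also called before hci_conn_hash_flush() so the params' references go away before the hash is torn down. Below is a minimal userspace sketch of the same "owner holds a reference" pattern; all names are hypothetical, the refcounting is simplified, and this is not the kernel API.

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model of "params own a reference to the connection they triggered". */
    struct conn {
        int refcnt;
    };

    struct conn_params {
        struct conn *conn;    /* reference held while the connection is pending */
    };

    static struct conn *conn_get(struct conn *c)
    {
        c->refcnt++;
        return c;
    }

    static void conn_drop(struct conn *c)
    {
        assert(c->refcnt > 0);
        if (--c->refcnt == 0)
            free(c);
    }

    /* Mirrors check_pending_le_conn(): remember who triggered the connection. */
    static void params_attach(struct conn_params *p, struct conn *c)
    {
        p->conn = conn_get(c);
    }

    /* Mirrors hci_conn_params_del()/clear_all(): drop the reference first. */
    static void params_del(struct conn_params *p)
    {
        if (p->conn) {
            conn_drop(p->conn);
            p->conn = NULL;
        }
        free(p);
    }

    int main(void)
    {
        struct conn *c = calloc(1, sizeof(*c));
        struct conn_params *p = calloc(1, sizeof(*p));

        c->refcnt = 1;        /* creator's reference */
        params_attach(p, c);  /* params now co-own the connection */
        params_del(p);        /* params removed: their reference is dropped */
        conn_drop(c);         /* creator's reference: frees the object */
        printf("refcounts balanced\n");
        return 0;
    }
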
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 96238ba95f2b..de6662b14e1f 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c | |||
@@ -13,8 +13,6 @@ | |||
13 | #include "auth_x.h" | 13 | #include "auth_x.h" |
14 | #include "auth_x_protocol.h" | 14 | #include "auth_x_protocol.h" |
15 | 15 | ||
16 | #define TEMP_TICKET_BUF_LEN 256 | ||
17 | |||
18 | static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed); | 16 | static void ceph_x_validate_tickets(struct ceph_auth_client *ac, int *pneed); |
19 | 17 | ||
20 | static int ceph_x_is_authenticated(struct ceph_auth_client *ac) | 18 | static int ceph_x_is_authenticated(struct ceph_auth_client *ac) |
@@ -64,7 +62,7 @@ static int ceph_x_encrypt(struct ceph_crypto_key *secret, | |||
64 | } | 62 | } |
65 | 63 | ||
66 | static int ceph_x_decrypt(struct ceph_crypto_key *secret, | 64 | static int ceph_x_decrypt(struct ceph_crypto_key *secret, |
67 | void **p, void *end, void *obuf, size_t olen) | 65 | void **p, void *end, void **obuf, size_t olen) |
68 | { | 66 | { |
69 | struct ceph_x_encrypt_header head; | 67 | struct ceph_x_encrypt_header head; |
70 | size_t head_len = sizeof(head); | 68 | size_t head_len = sizeof(head); |
@@ -75,8 +73,14 @@ static int ceph_x_decrypt(struct ceph_crypto_key *secret, | |||
75 | return -EINVAL; | 73 | return -EINVAL; |
76 | 74 | ||
77 | dout("ceph_x_decrypt len %d\n", len); | 75 | dout("ceph_x_decrypt len %d\n", len); |
78 | ret = ceph_decrypt2(secret, &head, &head_len, obuf, &olen, | 76 | if (*obuf == NULL) { |
79 | *p, len); | 77 | *obuf = kmalloc(len, GFP_NOFS); |
78 | if (!*obuf) | ||
79 | return -ENOMEM; | ||
80 | olen = len; | ||
81 | } | ||
82 | |||
83 | ret = ceph_decrypt2(secret, &head, &head_len, *obuf, &olen, *p, len); | ||
80 | if (ret) | 84 | if (ret) |
81 | return ret; | 85 | return ret; |
82 | if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) | 86 | if (head.struct_v != 1 || le64_to_cpu(head.magic) != CEPHX_ENC_MAGIC) |
@@ -129,139 +133,120 @@ static void remove_ticket_handler(struct ceph_auth_client *ac, | |||
129 | kfree(th); | 133 | kfree(th); |
130 | } | 134 | } |
131 | 135 | ||
132 | static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, | 136 | static int process_one_ticket(struct ceph_auth_client *ac, |
133 | struct ceph_crypto_key *secret, | 137 | struct ceph_crypto_key *secret, |
134 | void *buf, void *end) | 138 | void **p, void *end) |
135 | { | 139 | { |
136 | struct ceph_x_info *xi = ac->private; | 140 | struct ceph_x_info *xi = ac->private; |
137 | int num; | 141 | int type; |
138 | void *p = buf; | 142 | u8 tkt_struct_v, blob_struct_v; |
143 | struct ceph_x_ticket_handler *th; | ||
144 | void *dbuf = NULL; | ||
145 | void *dp, *dend; | ||
146 | int dlen; | ||
147 | char is_enc; | ||
148 | struct timespec validity; | ||
149 | struct ceph_crypto_key old_key; | ||
150 | void *ticket_buf = NULL; | ||
151 | void *tp, *tpend; | ||
152 | struct ceph_timespec new_validity; | ||
153 | struct ceph_crypto_key new_session_key; | ||
154 | struct ceph_buffer *new_ticket_blob; | ||
155 | unsigned long new_expires, new_renew_after; | ||
156 | u64 new_secret_id; | ||
139 | int ret; | 157 | int ret; |
140 | char *dbuf; | ||
141 | char *ticket_buf; | ||
142 | u8 reply_struct_v; | ||
143 | 158 | ||
144 | dbuf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); | 159 | ceph_decode_need(p, end, sizeof(u32) + 1, bad); |
145 | if (!dbuf) | ||
146 | return -ENOMEM; | ||
147 | 160 | ||
148 | ret = -ENOMEM; | 161 | type = ceph_decode_32(p); |
149 | ticket_buf = kmalloc(TEMP_TICKET_BUF_LEN, GFP_NOFS); | 162 | dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); |
150 | if (!ticket_buf) | ||
151 | goto out_dbuf; | ||
152 | 163 | ||
153 | ceph_decode_need(&p, end, 1 + sizeof(u32), bad); | 164 | tkt_struct_v = ceph_decode_8(p); |
154 | reply_struct_v = ceph_decode_8(&p); | 165 | if (tkt_struct_v != 1) |
155 | if (reply_struct_v != 1) | ||
156 | goto bad; | 166 | goto bad; |
157 | num = ceph_decode_32(&p); | ||
158 | dout("%d tickets\n", num); | ||
159 | while (num--) { | ||
160 | int type; | ||
161 | u8 tkt_struct_v, blob_struct_v; | ||
162 | struct ceph_x_ticket_handler *th; | ||
163 | void *dp, *dend; | ||
164 | int dlen; | ||
165 | char is_enc; | ||
166 | struct timespec validity; | ||
167 | struct ceph_crypto_key old_key; | ||
168 | void *tp, *tpend; | ||
169 | struct ceph_timespec new_validity; | ||
170 | struct ceph_crypto_key new_session_key; | ||
171 | struct ceph_buffer *new_ticket_blob; | ||
172 | unsigned long new_expires, new_renew_after; | ||
173 | u64 new_secret_id; | ||
174 | |||
175 | ceph_decode_need(&p, end, sizeof(u32) + 1, bad); | ||
176 | |||
177 | type = ceph_decode_32(&p); | ||
178 | dout(" ticket type %d %s\n", type, ceph_entity_type_name(type)); | ||
179 | |||
180 | tkt_struct_v = ceph_decode_8(&p); | ||
181 | if (tkt_struct_v != 1) | ||
182 | goto bad; | ||
183 | |||
184 | th = get_ticket_handler(ac, type); | ||
185 | if (IS_ERR(th)) { | ||
186 | ret = PTR_ERR(th); | ||
187 | goto out; | ||
188 | } | ||
189 | 167 | ||
190 | /* blob for me */ | 168 | th = get_ticket_handler(ac, type); |
191 | dlen = ceph_x_decrypt(secret, &p, end, dbuf, | 169 | if (IS_ERR(th)) { |
192 | TEMP_TICKET_BUF_LEN); | 170 | ret = PTR_ERR(th); |
193 | if (dlen <= 0) { | 171 | goto out; |
194 | ret = dlen; | 172 | } |
195 | goto out; | ||
196 | } | ||
197 | dout(" decrypted %d bytes\n", dlen); | ||
198 | dend = dbuf + dlen; | ||
199 | dp = dbuf; | ||
200 | 173 | ||
201 | tkt_struct_v = ceph_decode_8(&dp); | 174 | /* blob for me */ |
202 | if (tkt_struct_v != 1) | 175 | dlen = ceph_x_decrypt(secret, p, end, &dbuf, 0); |
203 | goto bad; | 176 | if (dlen <= 0) { |
177 | ret = dlen; | ||
178 | goto out; | ||
179 | } | ||
180 | dout(" decrypted %d bytes\n", dlen); | ||
181 | dp = dbuf; | ||
182 | dend = dp + dlen; | ||
204 | 183 | ||
205 | memcpy(&old_key, &th->session_key, sizeof(old_key)); | 184 | tkt_struct_v = ceph_decode_8(&dp); |
206 | ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); | 185 | if (tkt_struct_v != 1) |
207 | if (ret) | 186 | goto bad; |
208 | goto out; | ||
209 | 187 | ||
210 | ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); | 188 | memcpy(&old_key, &th->session_key, sizeof(old_key)); |
211 | ceph_decode_timespec(&validity, &new_validity); | 189 | ret = ceph_crypto_key_decode(&new_session_key, &dp, dend); |
212 | new_expires = get_seconds() + validity.tv_sec; | 190 | if (ret) |
213 | new_renew_after = new_expires - (validity.tv_sec / 4); | 191 | goto out; |
214 | dout(" expires=%lu renew_after=%lu\n", new_expires, | ||
215 | new_renew_after); | ||
216 | 192 | ||
217 | /* ticket blob for service */ | 193 | ceph_decode_copy(&dp, &new_validity, sizeof(new_validity)); |
218 | ceph_decode_8_safe(&p, end, is_enc, bad); | 194 | ceph_decode_timespec(&validity, &new_validity); |
219 | tp = ticket_buf; | 195 | new_expires = get_seconds() + validity.tv_sec; |
220 | if (is_enc) { | 196 | new_renew_after = new_expires - (validity.tv_sec / 4); |
221 | /* encrypted */ | 197 | dout(" expires=%lu renew_after=%lu\n", new_expires, |
222 | dout(" encrypted ticket\n"); | 198 | new_renew_after); |
223 | dlen = ceph_x_decrypt(&old_key, &p, end, ticket_buf, | 199 | |
224 | TEMP_TICKET_BUF_LEN); | 200 | /* ticket blob for service */ |
225 | if (dlen < 0) { | 201 | ceph_decode_8_safe(p, end, is_enc, bad); |
226 | ret = dlen; | 202 | if (is_enc) { |
227 | goto out; | 203 | /* encrypted */ |
228 | } | 204 | dout(" encrypted ticket\n"); |
229 | dlen = ceph_decode_32(&tp); | 205 | dlen = ceph_x_decrypt(&old_key, p, end, &ticket_buf, 0); |
230 | } else { | 206 | if (dlen < 0) { |
231 | /* unencrypted */ | 207 | ret = dlen; |
232 | ceph_decode_32_safe(&p, end, dlen, bad); | 208 | goto out; |
233 | ceph_decode_need(&p, end, dlen, bad); | ||
234 | ceph_decode_copy(&p, ticket_buf, dlen); | ||
235 | } | 209 | } |
236 | tpend = tp + dlen; | 210 | tp = ticket_buf; |
237 | dout(" ticket blob is %d bytes\n", dlen); | 211 | dlen = ceph_decode_32(&tp); |
238 | ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); | 212 | } else { |
239 | blob_struct_v = ceph_decode_8(&tp); | 213 | /* unencrypted */ |
240 | new_secret_id = ceph_decode_64(&tp); | 214 | ceph_decode_32_safe(p, end, dlen, bad); |
241 | ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); | 215 | ticket_buf = kmalloc(dlen, GFP_NOFS); |
242 | if (ret) | 216 | if (!ticket_buf) { |
217 | ret = -ENOMEM; | ||
243 | goto out; | 218 | goto out; |
244 | 219 | } | |
245 | /* all is well, update our ticket */ | 220 | tp = ticket_buf; |
246 | ceph_crypto_key_destroy(&th->session_key); | 221 | ceph_decode_need(p, end, dlen, bad); |
247 | if (th->ticket_blob) | 222 | ceph_decode_copy(p, ticket_buf, dlen); |
248 | ceph_buffer_put(th->ticket_blob); | ||
249 | th->session_key = new_session_key; | ||
250 | th->ticket_blob = new_ticket_blob; | ||
251 | th->validity = new_validity; | ||
252 | th->secret_id = new_secret_id; | ||
253 | th->expires = new_expires; | ||
254 | th->renew_after = new_renew_after; | ||
255 | dout(" got ticket service %d (%s) secret_id %lld len %d\n", | ||
256 | type, ceph_entity_type_name(type), th->secret_id, | ||
257 | (int)th->ticket_blob->vec.iov_len); | ||
258 | xi->have_keys |= th->service; | ||
259 | } | 223 | } |
224 | tpend = tp + dlen; | ||
225 | dout(" ticket blob is %d bytes\n", dlen); | ||
226 | ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); | ||
227 | blob_struct_v = ceph_decode_8(&tp); | ||
228 | new_secret_id = ceph_decode_64(&tp); | ||
229 | ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); | ||
230 | if (ret) | ||
231 | goto out; | ||
232 | |||
233 | /* all is well, update our ticket */ | ||
234 | ceph_crypto_key_destroy(&th->session_key); | ||
235 | if (th->ticket_blob) | ||
236 | ceph_buffer_put(th->ticket_blob); | ||
237 | th->session_key = new_session_key; | ||
238 | th->ticket_blob = new_ticket_blob; | ||
239 | th->validity = new_validity; | ||
240 | th->secret_id = new_secret_id; | ||
241 | th->expires = new_expires; | ||
242 | th->renew_after = new_renew_after; | ||
243 | dout(" got ticket service %d (%s) secret_id %lld len %d\n", | ||
244 | type, ceph_entity_type_name(type), th->secret_id, | ||
245 | (int)th->ticket_blob->vec.iov_len); | ||
246 | xi->have_keys |= th->service; | ||
260 | 247 | ||
261 | ret = 0; | ||
262 | out: | 248 | out: |
263 | kfree(ticket_buf); | 249 | kfree(ticket_buf); |
264 | out_dbuf: | ||
265 | kfree(dbuf); | 250 | kfree(dbuf); |
266 | return ret; | 251 | return ret; |
267 | 252 | ||
@@ -270,6 +255,34 @@ bad: | |||
270 | goto out; | 255 | goto out; |
271 | } | 256 | } |
272 | 257 | ||
258 | static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, | ||
259 | struct ceph_crypto_key *secret, | ||
260 | void *buf, void *end) | ||
261 | { | ||
262 | void *p = buf; | ||
263 | u8 reply_struct_v; | ||
264 | u32 num; | ||
265 | int ret; | ||
266 | |||
267 | ceph_decode_8_safe(&p, end, reply_struct_v, bad); | ||
268 | if (reply_struct_v != 1) | ||
269 | return -EINVAL; | ||
270 | |||
271 | ceph_decode_32_safe(&p, end, num, bad); | ||
272 | dout("%d tickets\n", num); | ||
273 | |||
274 | while (num--) { | ||
275 | ret = process_one_ticket(ac, secret, &p, end); | ||
276 | if (ret) | ||
277 | return ret; | ||
278 | } | ||
279 | |||
280 | return 0; | ||
281 | |||
282 | bad: | ||
283 | return -EINVAL; | ||
284 | } | ||
285 | |||
273 | static int ceph_x_build_authorizer(struct ceph_auth_client *ac, | 286 | static int ceph_x_build_authorizer(struct ceph_auth_client *ac, |
274 | struct ceph_x_ticket_handler *th, | 287 | struct ceph_x_ticket_handler *th, |
275 | struct ceph_x_authorizer *au) | 288 | struct ceph_x_authorizer *au) |
@@ -583,13 +596,14 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, | |||
583 | struct ceph_x_ticket_handler *th; | 596 | struct ceph_x_ticket_handler *th; |
584 | int ret = 0; | 597 | int ret = 0; |
585 | struct ceph_x_authorize_reply reply; | 598 | struct ceph_x_authorize_reply reply; |
599 | void *preply = &reply; | ||
586 | void *p = au->reply_buf; | 600 | void *p = au->reply_buf; |
587 | void *end = p + sizeof(au->reply_buf); | 601 | void *end = p + sizeof(au->reply_buf); |
588 | 602 | ||
589 | th = get_ticket_handler(ac, au->service); | 603 | th = get_ticket_handler(ac, au->service); |
590 | if (IS_ERR(th)) | 604 | if (IS_ERR(th)) |
591 | return PTR_ERR(th); | 605 | return PTR_ERR(th); |
592 | ret = ceph_x_decrypt(&th->session_key, &p, end, &reply, sizeof(reply)); | 606 | ret = ceph_x_decrypt(&th->session_key, &p, end, &preply, sizeof(reply)); |
593 | if (ret < 0) | 607 | if (ret < 0) |
594 | return ret; | 608 | return ret; |
595 | if (ret != sizeof(reply)) | 609 | if (ret != sizeof(reply)) |
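
Note on the auth_x hunks above: the fixed 256-byte TEMP_TICKET_BUF_LEN scratch buffers are gone. ceph_x_decrypt() now takes a void ** and, when the caller passes NULL, allocates an output buffer sized to the actual ciphertext; the per-ticket body is split into process_one_ticket() so each ticket gets its own correctly sized allocation. A small sketch of that "allocate on demand through a double pointer" calling convention follows; the names are hypothetical and a plain memcpy stands in for the real decryption.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /*
     * If *obuf is NULL, allocate a buffer big enough for the payload and hand
     * ownership to the caller; otherwise use the caller's buffer of size olen.
     * Returns the payload length, or -1 on failure.
     */
    static int toy_decrypt(const void *src, size_t len, void **obuf, size_t olen)
    {
        if (*obuf == NULL) {
            *obuf = malloc(len);
            if (!*obuf)
                return -1;
            olen = len;
        }
        if (olen < len)
            return -1;
        memcpy(*obuf, src, len);    /* stand-in for the real decryption */
        return (int)len;
    }

    int main(void)
    {
        const char ticket[] = "ticket-blob-of-arbitrary-size";
        void *dbuf = NULL;          /* NULL: let the callee size the buffer */
        int dlen = toy_decrypt(ticket, sizeof(ticket), &dbuf, 0);

        if (dlen > 0)
            printf("decrypted %d bytes: %s\n", dlen, (char *)dbuf);
        free(dbuf);
        return 0;
    }
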
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 067d3af2eaf6..61fcfc304f68 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c | |||
@@ -1181,7 +1181,15 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, | |||
1181 | if (!m) { | 1181 | if (!m) { |
1182 | pr_info("alloc_msg unknown type %d\n", type); | 1182 | pr_info("alloc_msg unknown type %d\n", type); |
1183 | *skip = 1; | 1183 | *skip = 1; |
1184 | } else if (front_len > m->front_alloc_len) { | ||
1185 | pr_warning("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n", | ||
1186 | front_len, m->front_alloc_len, | ||
1187 | (unsigned int)con->peer_name.type, | ||
1188 | le64_to_cpu(con->peer_name.num)); | ||
1189 | ceph_msg_put(m); | ||
1190 | m = ceph_msg_new(type, front_len, GFP_NOFS, false); | ||
1184 | } | 1191 | } |
1192 | |||
1185 | return m; | 1193 | return m; |
1186 | } | 1194 | } |
1187 | 1195 | ||
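
Note on the mon_client hunk above: when the monitor's reply advertises a front section larger than the preallocated message, the undersized message is dropped and a fresh one of the advertised size is allocated instead of writing into too small a buffer. A short sketch of the "reallocate rather than overrun" check, with hypothetical names and malloc in place of the message allocator:

    #include <stdio.h>
    #include <stdlib.h>

    struct msg {
        size_t front_alloc_len;
        unsigned char *front;
    };

    static struct msg *msg_new(size_t front_len)
    {
        struct msg *m = calloc(1, sizeof(*m));

        if (!m)
            return NULL;
        m->front = malloc(front_len);
        if (!m->front) {
            free(m);
            return NULL;
        }
        m->front_alloc_len = front_len;
        return m;
    }

    static void msg_put(struct msg *m)
    {
        if (m) {
            free(m->front);
            free(m);
        }
    }

    /* Replace the preallocated message if the advertised payload will not fit. */
    static struct msg *msg_for_front_len(struct msg *m, size_t front_len)
    {
        if (m && front_len > m->front_alloc_len) {
            fprintf(stderr, "front %zu > prealloc %zu, reallocating\n",
                    front_len, m->front_alloc_len);
            msg_put(m);
            m = msg_new(front_len);
        }
        return m;
    }

    int main(void)
    {
        struct msg *m = msg_new(512);      /* preallocated reply buffer */

        m = msg_for_front_len(m, 4096);    /* peer advertises a bigger front */
        if (m)
            printf("front capacity now %zu\n", m->front_alloc_len);
        msg_put(m);
        return 0;
    }
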
diff --git a/net/core/datagram.c b/net/core/datagram.c index 488dd1a825c0..fdbc9a81d4c2 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -775,7 +775,7 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb) | |||
775 | EXPORT_SYMBOL(__skb_checksum_complete); | 775 | EXPORT_SYMBOL(__skb_checksum_complete); |
776 | 776 | ||
777 | /** | 777 | /** |
778 | * skb_copy_and_csum_datagram_iovec - Copy and checkum skb to user iovec. | 778 | * skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec. |
779 | * @skb: skbuff | 779 | * @skb: skbuff |
780 | * @hlen: hardware length | 780 | * @hlen: hardware length |
781 | * @iov: io vector | 781 | * @iov: io vector |
diff --git a/net/core/dev.c b/net/core/dev.c index b65a5051361f..ab9a16530c36 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -2587,13 +2587,19 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) | |||
2587 | return harmonize_features(skb, features); | 2587 | return harmonize_features(skb, features); |
2588 | } | 2588 | } |
2589 | 2589 | ||
2590 | features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX | | 2590 | features = netdev_intersect_features(features, |
2591 | NETIF_F_HW_VLAN_STAG_TX); | 2591 | skb->dev->vlan_features | |
2592 | NETIF_F_HW_VLAN_CTAG_TX | | ||
2593 | NETIF_F_HW_VLAN_STAG_TX); | ||
2592 | 2594 | ||
2593 | if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) | 2595 | if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) |
2594 | features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | | 2596 | features = netdev_intersect_features(features, |
2595 | NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX | | 2597 | NETIF_F_SG | |
2596 | NETIF_F_HW_VLAN_STAG_TX; | 2598 | NETIF_F_HIGHDMA | |
2599 | NETIF_F_FRAGLIST | | ||
2600 | NETIF_F_GEN_CSUM | | ||
2601 | NETIF_F_HW_VLAN_CTAG_TX | | ||
2602 | NETIF_F_HW_VLAN_STAG_TX); | ||
2597 | 2603 | ||
2598 | return harmonize_features(skb, features); | 2604 | return harmonize_features(skb, features); |
2599 | } | 2605 | } |
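
Note on the netif_skb_features() hunk above: a plain "features &= mask" can wrongly clear checksum offload when one side expresses it as the generic checksum bit and the other as the protocol-specific bits; netdev_intersect_features() expands the generic bit before intersecting. The sketch below is a simplified model of that semantics, with made-up flag values; the real helper lives in include/linux/netdevice.h.

    #include <stdint.h>
    #include <stdio.h>

    #define F_IP_CSUM   (1u << 0)
    #define F_IPV6_CSUM (1u << 1)
    #define F_GEN_CSUM  (1u << 2)
    #define F_ALL_CSUM  (F_IP_CSUM | F_IPV6_CSUM | F_GEN_CSUM)
    #define F_SG        (1u << 3)

    /* A side that can checksum "anything" also covers the specific protocols. */
    static uint32_t intersect_features(uint32_t f1, uint32_t f2)
    {
        if (f1 & F_GEN_CSUM)
            f1 |= F_ALL_CSUM;
        if (f2 & F_GEN_CSUM)
            f2 |= F_ALL_CSUM;
        return f1 & f2;
    }

    int main(void)
    {
        uint32_t skb_features  = F_SG | F_GEN_CSUM;  /* generic checksumming */
        uint32_t vlan_features = F_SG | F_IP_CSUM;   /* only IPv4 checksumming */

        /* plain AND loses checksum offload entirely */
        printf("plain AND: %#x\n", skb_features & vlan_features);
        /* intersection keeps the common IPv4 checksum capability */
        printf("intersect: %#x\n", intersect_features(skb_features, vlan_features));
        return 0;
    }
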
@@ -4889,7 +4895,8 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev, | |||
4889 | if (adj->master) | 4895 | if (adj->master) |
4890 | sysfs_remove_link(&(dev->dev.kobj), "master"); | 4896 | sysfs_remove_link(&(dev->dev.kobj), "master"); |
4891 | 4897 | ||
4892 | if (netdev_adjacent_is_neigh_list(dev, dev_list)) | 4898 | if (netdev_adjacent_is_neigh_list(dev, dev_list) && |
4899 | net_eq(dev_net(dev),dev_net(adj_dev))) | ||
4893 | netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); | 4900 | netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list); |
4894 | 4901 | ||
4895 | list_del_rcu(&adj->list); | 4902 | list_del_rcu(&adj->list); |
@@ -5159,11 +5166,65 @@ void netdev_upper_dev_unlink(struct net_device *dev, | |||
5159 | } | 5166 | } |
5160 | EXPORT_SYMBOL(netdev_upper_dev_unlink); | 5167 | EXPORT_SYMBOL(netdev_upper_dev_unlink); |
5161 | 5168 | ||
5169 | void netdev_adjacent_add_links(struct net_device *dev) | ||
5170 | { | ||
5171 | struct netdev_adjacent *iter; | ||
5172 | |||
5173 | struct net *net = dev_net(dev); | ||
5174 | |||
5175 | list_for_each_entry(iter, &dev->adj_list.upper, list) { | ||
5176 | if (!net_eq(net,dev_net(iter->dev))) | ||
5177 | continue; | ||
5178 | netdev_adjacent_sysfs_add(iter->dev, dev, | ||
5179 | &iter->dev->adj_list.lower); | ||
5180 | netdev_adjacent_sysfs_add(dev, iter->dev, | ||
5181 | &dev->adj_list.upper); | ||
5182 | } | ||
5183 | |||
5184 | list_for_each_entry(iter, &dev->adj_list.lower, list) { | ||
5185 | if (!net_eq(net,dev_net(iter->dev))) | ||
5186 | continue; | ||
5187 | netdev_adjacent_sysfs_add(iter->dev, dev, | ||
5188 | &iter->dev->adj_list.upper); | ||
5189 | netdev_adjacent_sysfs_add(dev, iter->dev, | ||
5190 | &dev->adj_list.lower); | ||
5191 | } | ||
5192 | } | ||
5193 | |||
5194 | void netdev_adjacent_del_links(struct net_device *dev) | ||
5195 | { | ||
5196 | struct netdev_adjacent *iter; | ||
5197 | |||
5198 | struct net *net = dev_net(dev); | ||
5199 | |||
5200 | list_for_each_entry(iter, &dev->adj_list.upper, list) { | ||
5201 | if (!net_eq(net,dev_net(iter->dev))) | ||
5202 | continue; | ||
5203 | netdev_adjacent_sysfs_del(iter->dev, dev->name, | ||
5204 | &iter->dev->adj_list.lower); | ||
5205 | netdev_adjacent_sysfs_del(dev, iter->dev->name, | ||
5206 | &dev->adj_list.upper); | ||
5207 | } | ||
5208 | |||
5209 | list_for_each_entry(iter, &dev->adj_list.lower, list) { | ||
5210 | if (!net_eq(net,dev_net(iter->dev))) | ||
5211 | continue; | ||
5212 | netdev_adjacent_sysfs_del(iter->dev, dev->name, | ||
5213 | &iter->dev->adj_list.upper); | ||
5214 | netdev_adjacent_sysfs_del(dev, iter->dev->name, | ||
5215 | &dev->adj_list.lower); | ||
5216 | } | ||
5217 | } | ||
5218 | |||
5162 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) | 5219 | void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) |
5163 | { | 5220 | { |
5164 | struct netdev_adjacent *iter; | 5221 | struct netdev_adjacent *iter; |
5165 | 5222 | ||
5223 | struct net *net = dev_net(dev); | ||
5224 | |||
5166 | list_for_each_entry(iter, &dev->adj_list.upper, list) { | 5225 | list_for_each_entry(iter, &dev->adj_list.upper, list) { |
5226 | if (!net_eq(net,dev_net(iter->dev))) | ||
5227 | continue; | ||
5167 | netdev_adjacent_sysfs_del(iter->dev, oldname, | 5228 | netdev_adjacent_sysfs_del(iter->dev, oldname, |
5168 | &iter->dev->adj_list.lower); | 5229 | &iter->dev->adj_list.lower); |
5169 | netdev_adjacent_sysfs_add(iter->dev, dev, | 5230 | netdev_adjacent_sysfs_add(iter->dev, dev, |
@@ -5171,6 +5232,8 @@ void netdev_adjacent_rename_links(struct net_device *dev, char *oldname) | |||
5171 | } | 5232 | } |
5172 | 5233 | ||
5173 | list_for_each_entry(iter, &dev->adj_list.lower, list) { | 5234 | list_for_each_entry(iter, &dev->adj_list.lower, list) { |
5235 | if (!net_eq(net,dev_net(iter->dev))) | ||
5236 | continue; | ||
5174 | netdev_adjacent_sysfs_del(iter->dev, oldname, | 5237 | netdev_adjacent_sysfs_del(iter->dev, oldname, |
5175 | &iter->dev->adj_list.upper); | 5238 | &iter->dev->adj_list.upper); |
5176 | netdev_adjacent_sysfs_add(iter->dev, dev, | 5239 | netdev_adjacent_sysfs_add(iter->dev, dev, |
@@ -6773,6 +6836,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
6773 | 6836 | ||
6774 | /* Send a netdev-removed uevent to the old namespace */ | 6837 | /* Send a netdev-removed uevent to the old namespace */ |
6775 | kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); | 6838 | kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE); |
6839 | netdev_adjacent_del_links(dev); | ||
6776 | 6840 | ||
6777 | /* Actually switch the network namespace */ | 6841 | /* Actually switch the network namespace */ |
6778 | dev_net_set(dev, net); | 6842 | dev_net_set(dev, net); |
@@ -6787,6 +6851,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char | |||
6787 | 6851 | ||
6788 | /* Send a netdev-add uevent to the new namespace */ | 6852 | /* Send a netdev-add uevent to the new namespace */ |
6789 | kobject_uevent(&dev->dev.kobj, KOBJ_ADD); | 6853 | kobject_uevent(&dev->dev.kobj, KOBJ_ADD); |
6854 | netdev_adjacent_add_links(dev); | ||
6790 | 6855 | ||
6791 | /* Fixup kobjects */ | 6856 | /* Fixup kobjects */ |
6792 | err = device_rename(&dev->dev, dev->name); | 6857 | err = device_rename(&dev->dev, dev->name); |
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index 6b5b6e7013ca..9d33dfffca19 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c | |||
@@ -197,7 +197,7 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats | |||
197 | * as destination. A new timer with the interval specified in the | 197 | * as destination. A new timer with the interval specified in the |
198 | * configuration TLV is created. Upon each interval, the latest statistics | 198 | * configuration TLV is created. Upon each interval, the latest statistics |
199 | * will be read from &bstats and the estimated rate will be stored in | 199 | * will be read from &bstats and the estimated rate will be stored in |
200 | * &rate_est with the statistics lock grabed during this period. | 200 | * &rate_est with the statistics lock grabbed during this period. |
201 | * | 201 | * |
202 | * Returns 0 on success or a negative error code. | 202 | * Returns 0 on success or a negative error code. |
203 | * | 203 | * |
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 9d3d9e78397b..2ddbce4cce14 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c | |||
@@ -206,7 +206,7 @@ EXPORT_SYMBOL(gnet_stats_copy_queue); | |||
206 | * @st: application specific statistics data | 206 | * @st: application specific statistics data |
207 | * @len: length of data | 207 | * @len: length of data |
208 | * | 208 | * |
209 | * Appends the application sepecific statistics to the top level TLV created by | 209 | * Appends the application specific statistics to the top level TLV created by |
210 | * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping | 210 | * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping |
211 | * handle is in backward compatibility mode. | 211 | * handle is in backward compatibility mode. |
212 | * | 212 | * |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 163b673f9e62..da1378a3e2c7 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -2647,7 +2647,7 @@ EXPORT_SYMBOL(skb_prepare_seq_read); | |||
2647 | * skb_seq_read() will return the remaining part of the block. | 2647 | * skb_seq_read() will return the remaining part of the block. |
2648 | * | 2648 | * |
2649 | * Note 1: The size of each block of data returned can be arbitrary, | 2649 | * Note 1: The size of each block of data returned can be arbitrary, |
2650 | * this limitation is the cost for zerocopy seqeuental | 2650 | * this limitation is the cost for zerocopy sequential |
2651 | * reads of potentially non linear data. | 2651 | * reads of potentially non linear data. |
2652 | * | 2652 | * |
2653 | * Note 2: Fragment lists within fragments are not implemented | 2653 | * Note 2: Fragment lists within fragments are not implemented |
@@ -2781,7 +2781,7 @@ EXPORT_SYMBOL(skb_find_text); | |||
2781 | /** | 2781 | /** |
2782 | * skb_append_datato_frags - append the user data to a skb | 2782 | * skb_append_datato_frags - append the user data to a skb |
2783 | * @sk: sock structure | 2783 | * @sk: sock structure |
2784 | * @skb: skb structure to be appened with user data. | 2784 | * @skb: skb structure to be appended with user data. |
2785 | * @getfrag: call back function to be used for getting the user data | 2785 | * @getfrag: call back function to be used for getting the user data |
2786 | * @from: pointer to user message iov | 2786 | * @from: pointer to user message iov |
2787 | * @length: length of the iov message | 2787 | * @length: length of the iov message |
diff --git a/net/core/sock.c b/net/core/sock.c index 2714811afbd8..d372b4bd3f99 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -166,7 +166,7 @@ EXPORT_SYMBOL(sk_ns_capable); | |||
166 | /** | 166 | /** |
167 | * sk_capable - Socket global capability test | 167 | * sk_capable - Socket global capability test |
168 | * @sk: Socket to use a capability on or through | 168 | * @sk: Socket to use a capability on or through |
169 | * @cap: The global capbility to use | 169 | * @cap: The global capability to use |
170 | * | 170 | * |
171 | * Test to see if the opener of the socket had when the socket was | 171 | * Test to see if the opener of the socket had when the socket was |
172 | * created and the current process has the capability @cap in all user | 172 | * created and the current process has the capability @cap in all user |
@@ -183,7 +183,7 @@ EXPORT_SYMBOL(sk_capable); | |||
183 | * @sk: Socket to use a capability on or through | 183 | * @sk: Socket to use a capability on or through |
184 | * @cap: The capability to use | 184 | * @cap: The capability to use |
185 | * | 185 | * |
186 | * Test to see if the opener of the socket had when the socke was created | 186 | * Test to see if the opener of the socket had when the socket was created |
187 | * and the current process has the capability @cap over the network namespace | 187 | * and the current process has the capability @cap over the network namespace |
188 | * the socket is a member of. | 188 | * the socket is a member of. |
189 | */ | 189 | */ |
@@ -1822,6 +1822,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len, | |||
1822 | order); | 1822 | order); |
1823 | if (page) | 1823 | if (page) |
1824 | goto fill_page; | 1824 | goto fill_page; |
1825 | /* Do not retry other high order allocations */ | ||
1826 | order = 1; | ||
1827 | max_page_order = 0; | ||
1825 | } | 1828 | } |
1826 | order--; | 1829 | order--; |
1827 | } | 1830 | } |
@@ -1869,10 +1872,8 @@ EXPORT_SYMBOL(sock_alloc_send_skb); | |||
1869 | * no guarantee that allocations succeed. Therefore, @sz MUST be | 1872 | * no guarantee that allocations succeed. Therefore, @sz MUST be |
1870 | * less or equal than PAGE_SIZE. | 1873 | * less or equal than PAGE_SIZE. |
1871 | */ | 1874 | */ |
1872 | bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio) | 1875 | bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp) |
1873 | { | 1876 | { |
1874 | int order; | ||
1875 | |||
1876 | if (pfrag->page) { | 1877 | if (pfrag->page) { |
1877 | if (atomic_read(&pfrag->page->_count) == 1) { | 1878 | if (atomic_read(&pfrag->page->_count) == 1) { |
1878 | pfrag->offset = 0; | 1879 | pfrag->offset = 0; |
@@ -1883,20 +1884,21 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio) | |||
1883 | put_page(pfrag->page); | 1884 | put_page(pfrag->page); |
1884 | } | 1885 | } |
1885 | 1886 | ||
1886 | order = SKB_FRAG_PAGE_ORDER; | 1887 | pfrag->offset = 0; |
1887 | do { | 1888 | if (SKB_FRAG_PAGE_ORDER) { |
1888 | gfp_t gfp = prio; | 1889 | pfrag->page = alloc_pages(gfp | __GFP_COMP | |
1889 | 1890 | __GFP_NOWARN | __GFP_NORETRY, | |
1890 | if (order) | 1891 | SKB_FRAG_PAGE_ORDER); |
1891 | gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY; | ||
1892 | pfrag->page = alloc_pages(gfp, order); | ||
1893 | if (likely(pfrag->page)) { | 1892 | if (likely(pfrag->page)) { |
1894 | pfrag->offset = 0; | 1893 | pfrag->size = PAGE_SIZE << SKB_FRAG_PAGE_ORDER; |
1895 | pfrag->size = PAGE_SIZE << order; | ||
1896 | return true; | 1894 | return true; |
1897 | } | 1895 | } |
1898 | } while (--order >= 0); | 1896 | } |
1899 | 1897 | pfrag->page = alloc_page(gfp); | |
1898 | if (likely(pfrag->page)) { | ||
1899 | pfrag->size = PAGE_SIZE; | ||
1900 | return true; | ||
1901 | } | ||
1900 | return false; | 1902 | return false; |
1901 | } | 1903 | } |
1902 | EXPORT_SYMBOL(skb_page_frag_refill); | 1904 | EXPORT_SYMBOL(skb_page_frag_refill); |
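
Note on the sock.c hunks above: instead of walking down through every page order after a failed high-order allocation, skb_page_frag_refill() now tries SKB_FRAG_PAGE_ORDER exactly once (with __GFP_NORETRY and __GFP_NOWARN) and falls straight back to a single page, and sock_alloc_send_pskb() likewise stops retrying other high orders after the first failure. A userspace sketch of the two-step fallback, with hypothetical sizes and malloc standing in for the page allocator:

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE  4096UL
    #define FRAG_ORDER 3        /* try a 32 KiB chunk first */

    struct page_frag {
        void  *page;
        size_t size;
        size_t offset;
    };

    /* Try one big allocation, then fall back directly to a single page. */
    static int frag_refill(struct page_frag *pf)
    {
        pf->offset = 0;

        pf->page = malloc(PAGE_SIZE << FRAG_ORDER);
        if (pf->page) {
            pf->size = PAGE_SIZE << FRAG_ORDER;
            return 1;
        }
        pf->page = malloc(PAGE_SIZE);
        if (pf->page) {
            pf->size = PAGE_SIZE;
            return 1;
        }
        return 0;
    }

    int main(void)
    {
        struct page_frag pf = { 0 };

        if (frag_refill(&pf))
            printf("got %zu bytes\n", pf.size);
        free(pf.page);
        return 0;
    }
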
diff --git a/net/ieee802154/6lowpan_rtnl.c b/net/ieee802154/6lowpan_rtnl.c index 016b77ee88f0..6591d27e53a4 100644 --- a/net/ieee802154/6lowpan_rtnl.c +++ b/net/ieee802154/6lowpan_rtnl.c | |||
@@ -246,7 +246,7 @@ lowpan_alloc_frag(struct sk_buff *skb, int size, | |||
246 | return ERR_PTR(-rc); | 246 | return ERR_PTR(-rc); |
247 | } | 247 | } |
248 | } else { | 248 | } else { |
249 | frag = ERR_PTR(ENOMEM); | 249 | frag = ERR_PTR(-ENOMEM); |
250 | } | 250 | } |
251 | 251 | ||
252 | return frag; | 252 | return frag; |
@@ -437,7 +437,7 @@ static void lowpan_setup(struct net_device *dev) | |||
437 | /* Frame Control + Sequence Number + Address fields + Security Header */ | 437 | /* Frame Control + Sequence Number + Address fields + Security Header */ |
438 | dev->hard_header_len = 2 + 1 + 20 + 14; | 438 | dev->hard_header_len = 2 + 1 + 20 + 14; |
439 | dev->needed_tailroom = 2; /* FCS */ | 439 | dev->needed_tailroom = 2; /* FCS */ |
440 | dev->mtu = 1281; | 440 | dev->mtu = IPV6_MIN_MTU; |
441 | dev->tx_queue_len = 0; | 441 | dev->tx_queue_len = 0; |
442 | dev->flags = IFF_BROADCAST | IFF_MULTICAST; | 442 | dev->flags = IFF_BROADCAST | IFF_MULTICAST; |
443 | dev->watchdog_timeo = 0; | 443 | dev->watchdog_timeo = 0; |
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c index ffec6ce51005..32755cb7e64e 100644 --- a/net/ieee802154/reassembly.c +++ b/net/ieee802154/reassembly.c | |||
@@ -355,8 +355,6 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type) | |||
355 | struct net *net = dev_net(skb->dev); | 355 | struct net *net = dev_net(skb->dev); |
356 | struct lowpan_frag_info *frag_info = lowpan_cb(skb); | 356 | struct lowpan_frag_info *frag_info = lowpan_cb(skb); |
357 | struct ieee802154_addr source, dest; | 357 | struct ieee802154_addr source, dest; |
358 | struct netns_ieee802154_lowpan *ieee802154_lowpan = | ||
359 | net_ieee802154_lowpan(net); | ||
360 | int err; | 358 | int err; |
361 | 359 | ||
362 | source = mac_cb(skb)->source; | 360 | source = mac_cb(skb)->source; |
@@ -366,8 +364,10 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type) | |||
366 | if (err < 0) | 364 | if (err < 0) |
367 | goto err; | 365 | goto err; |
368 | 366 | ||
369 | if (frag_info->d_size > ieee802154_lowpan->max_dsize) | 367 | if (frag_info->d_size > IPV6_MIN_MTU) { |
368 | net_warn_ratelimited("lowpan_frag_rcv: datagram size exceeds MTU\n"); | ||
370 | goto err; | 369 | goto err; |
370 | } | ||
371 | 371 | ||
372 | fq = fq_find(net, frag_info, &source, &dest); | 372 | fq = fq_find(net, frag_info, &source, &dest); |
373 | if (fq != NULL) { | 373 | if (fq != NULL) { |
@@ -415,13 +415,6 @@ static struct ctl_table lowpan_frags_ns_ctl_table[] = { | |||
415 | .mode = 0644, | 415 | .mode = 0644, |
416 | .proc_handler = proc_dointvec_jiffies, | 416 | .proc_handler = proc_dointvec_jiffies, |
417 | }, | 417 | }, |
418 | { | ||
419 | .procname = "6lowpanfrag_max_datagram_size", | ||
420 | .data = &init_net.ieee802154_lowpan.max_dsize, | ||
421 | .maxlen = sizeof(int), | ||
422 | .mode = 0644, | ||
423 | .proc_handler = proc_dointvec | ||
424 | }, | ||
425 | { } | 418 | { } |
426 | }; | 419 | }; |
427 | 420 | ||
@@ -458,7 +451,6 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net) | |||
458 | table[1].data = &ieee802154_lowpan->frags.low_thresh; | 451 | table[1].data = &ieee802154_lowpan->frags.low_thresh; |
459 | table[1].extra2 = &ieee802154_lowpan->frags.high_thresh; | 452 | table[1].extra2 = &ieee802154_lowpan->frags.high_thresh; |
460 | table[2].data = &ieee802154_lowpan->frags.timeout; | 453 | table[2].data = &ieee802154_lowpan->frags.timeout; |
461 | table[3].data = &ieee802154_lowpan->max_dsize; | ||
462 | 454 | ||
463 | /* Don't export sysctls to unprivileged users */ | 455 | /* Don't export sysctls to unprivileged users */ |
464 | if (net->user_ns != &init_user_ns) | 456 | if (net->user_ns != &init_user_ns) |
@@ -533,7 +525,6 @@ static int __net_init lowpan_frags_init_net(struct net *net) | |||
533 | ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH; | 525 | ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH; |
534 | ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH; | 526 | ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH; |
535 | ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT; | 527 | ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT; |
536 | ieee802154_lowpan->max_dsize = 0xFFFF; | ||
537 | 528 | ||
538 | inet_frags_init_net(&ieee802154_lowpan->frags); | 529 | inet_frags_init_net(&ieee802154_lowpan->frags); |
539 | 530 | ||
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index fb173126f03d..7cbcaf4f0194 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig | |||
@@ -82,6 +82,52 @@ config NF_TABLES_ARP | |||
82 | help | 82 | help |
83 | This option enables the ARP support for nf_tables. | 83 | This option enables the ARP support for nf_tables. |
84 | 84 | ||
85 | config NF_NAT_IPV4 | ||
86 | tristate "IPv4 NAT" | ||
87 | depends on NF_CONNTRACK_IPV4 | ||
88 | default m if NETFILTER_ADVANCED=n | ||
89 | select NF_NAT | ||
90 | help | ||
91 | The IPv4 NAT option allows masquerading, port forwarding and other | ||
92 | forms of full Network Address Port Translation. This can be | ||
93 | controlled by iptables or nft. | ||
94 | |||
95 | if NF_NAT_IPV4 | ||
96 | |||
97 | config NF_NAT_SNMP_BASIC | ||
98 | tristate "Basic SNMP-ALG support" | ||
99 | depends on NF_CONNTRACK_SNMP | ||
100 | depends on NETFILTER_ADVANCED | ||
101 | default NF_NAT && NF_CONNTRACK_SNMP | ||
102 | ---help--- | ||
103 | |||
104 | This module implements an Application Layer Gateway (ALG) for | ||
105 | SNMP payloads. In conjunction with NAT, it allows a network | ||
106 | management system to access multiple private networks with | ||
107 | conflicting addresses. It works by modifying IP addresses | ||
108 | inside SNMP payloads to match IP-layer NAT mapping. | ||
109 | |||
110 | This is the "basic" form of SNMP-ALG, as described in RFC 2962 | ||
111 | |||
112 | To compile it as a module, choose M here. If unsure, say N. | ||
113 | |||
114 | config NF_NAT_PROTO_GRE | ||
115 | tristate | ||
116 | depends on NF_CT_PROTO_GRE | ||
117 | |||
118 | config NF_NAT_PPTP | ||
119 | tristate | ||
120 | depends on NF_CONNTRACK | ||
121 | default NF_CONNTRACK_PPTP | ||
122 | select NF_NAT_PROTO_GRE | ||
123 | |||
124 | config NF_NAT_H323 | ||
125 | tristate | ||
126 | depends on NF_CONNTRACK | ||
127 | default NF_CONNTRACK_H323 | ||
128 | |||
129 | endif # NF_NAT_IPV4 | ||
130 | |||
85 | config IP_NF_IPTABLES | 131 | config IP_NF_IPTABLES |
86 | tristate "IP tables support (required for filtering/masq/NAT)" | 132 | tristate "IP tables support (required for filtering/masq/NAT)" |
87 | default m if NETFILTER_ADVANCED=n | 133 | default m if NETFILTER_ADVANCED=n |
@@ -170,19 +216,21 @@ config IP_NF_TARGET_SYNPROXY | |||
170 | To compile it as a module, choose M here. If unsure, say N. | 216 | To compile it as a module, choose M here. If unsure, say N. |
171 | 217 | ||
172 | # NAT + specific targets: nf_conntrack | 218 | # NAT + specific targets: nf_conntrack |
173 | config NF_NAT_IPV4 | 219 | config IP_NF_NAT |
174 | tristate "IPv4 NAT" | 220 | tristate "iptables NAT support" |
175 | depends on NF_CONNTRACK_IPV4 | 221 | depends on NF_CONNTRACK_IPV4 |
176 | default m if NETFILTER_ADVANCED=n | 222 | default m if NETFILTER_ADVANCED=n |
177 | select NF_NAT | 223 | select NF_NAT |
224 | select NF_NAT_IPV4 | ||
225 | select NETFILTER_XT_NAT | ||
178 | help | 226 | help |
179 | The IPv4 NAT option allows masquerading, port forwarding and other | 227 | This enables the `nat' table in iptables. This allows masquerading, |
180 | forms of full Network Address Port Translation. It is controlled by | 228 | port forwarding and other forms of full Network Address Port |
181 | the `nat' table in iptables: see the man page for iptables(8). | 229 | Translation. |
182 | 230 | ||
183 | To compile it as a module, choose M here. If unsure, say N. | 231 | To compile it as a module, choose M here. If unsure, say N. |
184 | 232 | ||
185 | if NF_NAT_IPV4 | 233 | if IP_NF_NAT |
186 | 234 | ||
187 | config IP_NF_TARGET_MASQUERADE | 235 | config IP_NF_TARGET_MASQUERADE |
188 | tristate "MASQUERADE target support" | 236 | tristate "MASQUERADE target support" |
@@ -214,47 +262,7 @@ config IP_NF_TARGET_REDIRECT | |||
214 | (e.g. when running oldconfig). It selects | 262 | (e.g. when running oldconfig). It selects |
215 | CONFIG_NETFILTER_XT_TARGET_REDIRECT. | 263 | CONFIG_NETFILTER_XT_TARGET_REDIRECT. |
216 | 264 | ||
217 | endif | 265 | endif # IP_NF_NAT |
218 | |||
219 | config NF_NAT_SNMP_BASIC | ||
220 | tristate "Basic SNMP-ALG support" | ||
221 | depends on NF_CONNTRACK_SNMP && NF_NAT_IPV4 | ||
222 | depends on NETFILTER_ADVANCED | ||
223 | default NF_NAT && NF_CONNTRACK_SNMP | ||
224 | ---help--- | ||
225 | |||
226 | This module implements an Application Layer Gateway (ALG) for | ||
227 | SNMP payloads. In conjunction with NAT, it allows a network | ||
228 | management system to access multiple private networks with | ||
229 | conflicting addresses. It works by modifying IP addresses | ||
230 | inside SNMP payloads to match IP-layer NAT mapping. | ||
231 | |||
232 | This is the "basic" form of SNMP-ALG, as described in RFC 2962 | ||
233 | |||
234 | To compile it as a module, choose M here. If unsure, say N. | ||
235 | |||
236 | # If they want FTP, set to $CONFIG_IP_NF_NAT (m or y), | ||
237 | # or $CONFIG_IP_NF_FTP (m or y), whichever is weaker. | ||
238 | # From kconfig-language.txt: | ||
239 | # | ||
240 | # <expr> '&&' <expr> (6) | ||
241 | # | ||
242 | # (6) Returns the result of min(/expr/, /expr/). | ||
243 | |||
244 | config NF_NAT_PROTO_GRE | ||
245 | tristate | ||
246 | depends on NF_NAT_IPV4 && NF_CT_PROTO_GRE | ||
247 | |||
248 | config NF_NAT_PPTP | ||
249 | tristate | ||
250 | depends on NF_CONNTRACK && NF_NAT_IPV4 | ||
251 | default NF_NAT_IPV4 && NF_CONNTRACK_PPTP | ||
252 | select NF_NAT_PROTO_GRE | ||
253 | |||
254 | config NF_NAT_H323 | ||
255 | tristate | ||
256 | depends on NF_CONNTRACK && NF_NAT_IPV4 | ||
257 | default NF_NAT_IPV4 && NF_CONNTRACK_H323 | ||
258 | 266 | ||
259 | # mangle + specific targets | 267 | # mangle + specific targets |
260 | config IP_NF_MANGLE | 268 | config IP_NF_MANGLE |
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile index 33001621465b..edf4af32e9f2 100644 --- a/net/ipv4/netfilter/Makefile +++ b/net/ipv4/netfilter/Makefile | |||
@@ -43,7 +43,7 @@ obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o | |||
43 | # the three instances of ip_tables | 43 | # the three instances of ip_tables |
44 | obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o | 44 | obj-$(CONFIG_IP_NF_FILTER) += iptable_filter.o |
45 | obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o | 45 | obj-$(CONFIG_IP_NF_MANGLE) += iptable_mangle.o |
46 | obj-$(CONFIG_NF_NAT_IPV4) += iptable_nat.o | 46 | obj-$(CONFIG_IP_NF_NAT) += iptable_nat.o |
47 | obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o | 47 | obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o |
48 | obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o | 48 | obj-$(CONFIG_IP_NF_SECURITY) += iptable_security.o |
49 | 49 | ||
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 0b239fc1816e..fc1fac2a0528 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1690,14 +1690,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp) | |||
1690 | addrconf_mod_dad_work(ifp, 0); | 1690 | addrconf_mod_dad_work(ifp, 0); |
1691 | } | 1691 | } |
1692 | 1692 | ||
1693 | /* Join to solicited addr multicast group. */ | 1693 | /* Join to solicited addr multicast group. |
1694 | 1694 | * caller must hold RTNL */ | |
1695 | void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) | 1695 | void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) |
1696 | { | 1696 | { |
1697 | struct in6_addr maddr; | 1697 | struct in6_addr maddr; |
1698 | 1698 | ||
1699 | ASSERT_RTNL(); | ||
1700 | |||
1701 | if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) | 1699 | if (dev->flags&(IFF_LOOPBACK|IFF_NOARP)) |
1702 | return; | 1700 | return; |
1703 | 1701 | ||
@@ -1705,12 +1703,11 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr) | |||
1705 | ipv6_dev_mc_inc(dev, &maddr); | 1703 | ipv6_dev_mc_inc(dev, &maddr); |
1706 | } | 1704 | } |
1707 | 1705 | ||
1706 | /* caller must hold RTNL */ | ||
1708 | void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) | 1707 | void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) |
1709 | { | 1708 | { |
1710 | struct in6_addr maddr; | 1709 | struct in6_addr maddr; |
1711 | 1710 | ||
1712 | ASSERT_RTNL(); | ||
1713 | |||
1714 | if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP)) | 1711 | if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP)) |
1715 | return; | 1712 | return; |
1716 | 1713 | ||
@@ -1718,12 +1715,11 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr) | |||
1718 | __ipv6_dev_mc_dec(idev, &maddr); | 1715 | __ipv6_dev_mc_dec(idev, &maddr); |
1719 | } | 1716 | } |
1720 | 1717 | ||
1718 | /* caller must hold RTNL */ | ||
1721 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | 1719 | static void addrconf_join_anycast(struct inet6_ifaddr *ifp) |
1722 | { | 1720 | { |
1723 | struct in6_addr addr; | 1721 | struct in6_addr addr; |
1724 | 1722 | ||
1725 | ASSERT_RTNL(); | ||
1726 | |||
1727 | if (ifp->prefix_len >= 127) /* RFC 6164 */ | 1723 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
1728 | return; | 1724 | return; |
1729 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1725 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
@@ -1732,12 +1728,11 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp) | |||
1732 | ipv6_dev_ac_inc(ifp->idev->dev, &addr); | 1728 | ipv6_dev_ac_inc(ifp->idev->dev, &addr); |
1733 | } | 1729 | } |
1734 | 1730 | ||
1731 | /* caller must hold RTNL */ | ||
1735 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) | 1732 | static void addrconf_leave_anycast(struct inet6_ifaddr *ifp) |
1736 | { | 1733 | { |
1737 | struct in6_addr addr; | 1734 | struct in6_addr addr; |
1738 | 1735 | ||
1739 | ASSERT_RTNL(); | ||
1740 | |||
1741 | if (ifp->prefix_len >= 127) /* RFC 6164 */ | 1736 | if (ifp->prefix_len >= 127) /* RFC 6164 */ |
1742 | return; | 1737 | return; |
1743 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); | 1738 | ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len); |
@@ -4773,15 +4768,11 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp) | |||
4773 | addrconf_leave_solict(ifp->idev, &ifp->addr); | 4768 | addrconf_leave_solict(ifp->idev, &ifp->addr); |
4774 | if (!ipv6_addr_any(&ifp->peer_addr)) { | 4769 | if (!ipv6_addr_any(&ifp->peer_addr)) { |
4775 | struct rt6_info *rt; | 4770 | struct rt6_info *rt; |
4776 | struct net_device *dev = ifp->idev->dev; | 4771 | |
4777 | 4772 | rt = addrconf_get_prefix_route(&ifp->peer_addr, 128, | |
4778 | rt = rt6_lookup(dev_net(dev), &ifp->peer_addr, NULL, | 4773 | ifp->idev->dev, 0, 0); |
4779 | dev->ifindex, 1); | 4774 | if (rt && ip6_del_rt(rt)) |
4780 | if (rt) { | 4775 | dst_free(&rt->dst); |
4781 | dst_hold(&rt->dst); | ||
4782 | if (ip6_del_rt(rt)) | ||
4783 | dst_free(&rt->dst); | ||
4784 | } | ||
4785 | } | 4776 | } |
4786 | dst_hold(&ifp->rt->dst); | 4777 | dst_hold(&ifp->rt->dst); |
4787 | 4778 | ||
diff --git a/net/ipv6/anycast.c b/net/ipv6/anycast.c index 210183244689..ff2de7d9d8e6 100644 --- a/net/ipv6/anycast.c +++ b/net/ipv6/anycast.c | |||
@@ -77,6 +77,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
77 | pac->acl_next = NULL; | 77 | pac->acl_next = NULL; |
78 | pac->acl_addr = *addr; | 78 | pac->acl_addr = *addr; |
79 | 79 | ||
80 | rtnl_lock(); | ||
80 | rcu_read_lock(); | 81 | rcu_read_lock(); |
81 | if (ifindex == 0) { | 82 | if (ifindex == 0) { |
82 | struct rt6_info *rt; | 83 | struct rt6_info *rt; |
@@ -137,6 +138,7 @@ int ipv6_sock_ac_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
137 | 138 | ||
138 | error: | 139 | error: |
139 | rcu_read_unlock(); | 140 | rcu_read_unlock(); |
141 | rtnl_unlock(); | ||
140 | if (pac) | 142 | if (pac) |
141 | sock_kfree_s(sk, pac, sizeof(*pac)); | 143 | sock_kfree_s(sk, pac, sizeof(*pac)); |
142 | return err; | 144 | return err; |
@@ -171,11 +173,13 @@ int ipv6_sock_ac_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
171 | 173 | ||
172 | spin_unlock_bh(&ipv6_sk_ac_lock); | 174 | spin_unlock_bh(&ipv6_sk_ac_lock); |
173 | 175 | ||
176 | rtnl_lock(); | ||
174 | rcu_read_lock(); | 177 | rcu_read_lock(); |
175 | dev = dev_get_by_index_rcu(net, pac->acl_ifindex); | 178 | dev = dev_get_by_index_rcu(net, pac->acl_ifindex); |
176 | if (dev) | 179 | if (dev) |
177 | ipv6_dev_ac_dec(dev, &pac->acl_addr); | 180 | ipv6_dev_ac_dec(dev, &pac->acl_addr); |
178 | rcu_read_unlock(); | 181 | rcu_read_unlock(); |
182 | rtnl_unlock(); | ||
179 | 183 | ||
180 | sock_kfree_s(sk, pac, sizeof(*pac)); | 184 | sock_kfree_s(sk, pac, sizeof(*pac)); |
181 | return 0; | 185 | return 0; |
@@ -198,6 +202,7 @@ void ipv6_sock_ac_close(struct sock *sk) | |||
198 | spin_unlock_bh(&ipv6_sk_ac_lock); | 202 | spin_unlock_bh(&ipv6_sk_ac_lock); |
199 | 203 | ||
200 | prev_index = 0; | 204 | prev_index = 0; |
205 | rtnl_lock(); | ||
201 | rcu_read_lock(); | 206 | rcu_read_lock(); |
202 | while (pac) { | 207 | while (pac) { |
203 | struct ipv6_ac_socklist *next = pac->acl_next; | 208 | struct ipv6_ac_socklist *next = pac->acl_next; |
@@ -212,6 +217,7 @@ void ipv6_sock_ac_close(struct sock *sk) | |||
212 | pac = next; | 217 | pac = next; |
213 | } | 218 | } |
214 | rcu_read_unlock(); | 219 | rcu_read_unlock(); |
220 | rtnl_unlock(); | ||
215 | } | 221 | } |
216 | 222 | ||
217 | static void aca_put(struct ifacaddr6 *ac) | 223 | static void aca_put(struct ifacaddr6 *ac) |
@@ -233,6 +239,8 @@ int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr) | |||
233 | struct rt6_info *rt; | 239 | struct rt6_info *rt; |
234 | int err; | 240 | int err; |
235 | 241 | ||
242 | ASSERT_RTNL(); | ||
243 | |||
236 | idev = in6_dev_get(dev); | 244 | idev = in6_dev_get(dev); |
237 | 245 | ||
238 | if (idev == NULL) | 246 | if (idev == NULL) |
@@ -302,6 +310,8 @@ int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr) | |||
302 | { | 310 | { |
303 | struct ifacaddr6 *aca, *prev_aca; | 311 | struct ifacaddr6 *aca, *prev_aca; |
304 | 312 | ||
313 | ASSERT_RTNL(); | ||
314 | |||
305 | write_lock_bh(&idev->lock); | 315 | write_lock_bh(&idev->lock); |
306 | prev_aca = NULL; | 316 | prev_aca = NULL; |
307 | for (aca = idev->ac_list; aca; aca = aca->aca_next) { | 317 | for (aca = idev->ac_list; aca; aca = aca->aca_next) { |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 617f0958e164..a23b655a7627 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -172,6 +172,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
172 | mc_lst->next = NULL; | 172 | mc_lst->next = NULL; |
173 | mc_lst->addr = *addr; | 173 | mc_lst->addr = *addr; |
174 | 174 | ||
175 | rtnl_lock(); | ||
175 | rcu_read_lock(); | 176 | rcu_read_lock(); |
176 | if (ifindex == 0) { | 177 | if (ifindex == 0) { |
177 | struct rt6_info *rt; | 178 | struct rt6_info *rt; |
@@ -185,6 +186,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
185 | 186 | ||
186 | if (dev == NULL) { | 187 | if (dev == NULL) { |
187 | rcu_read_unlock(); | 188 | rcu_read_unlock(); |
189 | rtnl_unlock(); | ||
188 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); | 190 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); |
189 | return -ENODEV; | 191 | return -ENODEV; |
190 | } | 192 | } |
@@ -202,6 +204,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
202 | 204 | ||
203 | if (err) { | 205 | if (err) { |
204 | rcu_read_unlock(); | 206 | rcu_read_unlock(); |
207 | rtnl_unlock(); | ||
205 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); | 208 | sock_kfree_s(sk, mc_lst, sizeof(*mc_lst)); |
206 | return err; | 209 | return err; |
207 | } | 210 | } |
@@ -212,6 +215,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
212 | spin_unlock(&ipv6_sk_mc_lock); | 215 | spin_unlock(&ipv6_sk_mc_lock); |
213 | 216 | ||
214 | rcu_read_unlock(); | 217 | rcu_read_unlock(); |
218 | rtnl_unlock(); | ||
215 | 219 | ||
216 | return 0; | 220 | return 0; |
217 | } | 221 | } |
@@ -229,6 +233,7 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
229 | if (!ipv6_addr_is_multicast(addr)) | 233 | if (!ipv6_addr_is_multicast(addr)) |
230 | return -EINVAL; | 234 | return -EINVAL; |
231 | 235 | ||
236 | rtnl_lock(); | ||
232 | spin_lock(&ipv6_sk_mc_lock); | 237 | spin_lock(&ipv6_sk_mc_lock); |
233 | for (lnk = &np->ipv6_mc_list; | 238 | for (lnk = &np->ipv6_mc_list; |
234 | (mc_lst = rcu_dereference_protected(*lnk, | 239 | (mc_lst = rcu_dereference_protected(*lnk, |
@@ -252,12 +257,15 @@ int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr) | |||
252 | } else | 257 | } else |
253 | (void) ip6_mc_leave_src(sk, mc_lst, NULL); | 258 | (void) ip6_mc_leave_src(sk, mc_lst, NULL); |
254 | rcu_read_unlock(); | 259 | rcu_read_unlock(); |
260 | rtnl_unlock(); | ||
261 | |||
255 | atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); | 262 | atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc); |
256 | kfree_rcu(mc_lst, rcu); | 263 | kfree_rcu(mc_lst, rcu); |
257 | return 0; | 264 | return 0; |
258 | } | 265 | } |
259 | } | 266 | } |
260 | spin_unlock(&ipv6_sk_mc_lock); | 267 | spin_unlock(&ipv6_sk_mc_lock); |
268 | rtnl_unlock(); | ||
261 | 269 | ||
262 | return -EADDRNOTAVAIL; | 270 | return -EADDRNOTAVAIL; |
263 | } | 271 | } |
@@ -302,6 +310,7 @@ void ipv6_sock_mc_close(struct sock *sk) | |||
302 | if (!rcu_access_pointer(np->ipv6_mc_list)) | 310 | if (!rcu_access_pointer(np->ipv6_mc_list)) |
303 | return; | 311 | return; |
304 | 312 | ||
313 | rtnl_lock(); | ||
305 | spin_lock(&ipv6_sk_mc_lock); | 314 | spin_lock(&ipv6_sk_mc_lock); |
306 | while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list, | 315 | while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list, |
307 | lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) { | 316 | lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) { |
@@ -328,6 +337,7 @@ void ipv6_sock_mc_close(struct sock *sk) | |||
328 | spin_lock(&ipv6_sk_mc_lock); | 337 | spin_lock(&ipv6_sk_mc_lock); |
329 | } | 338 | } |
330 | spin_unlock(&ipv6_sk_mc_lock); | 339 | spin_unlock(&ipv6_sk_mc_lock); |
340 | rtnl_unlock(); | ||
331 | } | 341 | } |
332 | 342 | ||
333 | int ip6_mc_source(int add, int omode, struct sock *sk, | 343 | int ip6_mc_source(int add, int omode, struct sock *sk, |
@@ -845,6 +855,8 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr) | |||
845 | struct ifmcaddr6 *mc; | 855 | struct ifmcaddr6 *mc; |
846 | struct inet6_dev *idev; | 856 | struct inet6_dev *idev; |
847 | 857 | ||
858 | ASSERT_RTNL(); | ||
859 | |||
848 | /* we need to take a reference on idev */ | 860 | /* we need to take a reference on idev */ |
849 | idev = in6_dev_get(dev); | 861 | idev = in6_dev_get(dev); |
850 | 862 | ||
@@ -916,6 +928,8 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr) | |||
916 | { | 928 | { |
917 | struct ifmcaddr6 *ma, **map; | 929 | struct ifmcaddr6 *ma, **map; |
918 | 930 | ||
931 | ASSERT_RTNL(); | ||
932 | |||
919 | write_lock_bh(&idev->lock); | 933 | write_lock_bh(&idev->lock); |
920 | for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) { | 934 | for (map = &idev->mc_list; (ma=*map) != NULL; map = &ma->next) { |
921 | if (ipv6_addr_equal(&ma->mca_addr, addr)) { | 935 | if (ipv6_addr_equal(&ma->mca_addr, addr)) { |
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index ac93df16f5af..2812816aabdc 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig | |||
@@ -57,9 +57,19 @@ config NFT_REJECT_IPV6 | |||
57 | 57 | ||
58 | config NF_LOG_IPV6 | 58 | config NF_LOG_IPV6 |
59 | tristate "IPv6 packet logging" | 59 | tristate "IPv6 packet logging" |
60 | depends on NETFILTER_ADVANCED | 60 | default m if NETFILTER_ADVANCED=n |
61 | select NF_LOG_COMMON | 61 | select NF_LOG_COMMON |
62 | 62 | ||
63 | config NF_NAT_IPV6 | ||
64 | tristate "IPv6 NAT" | ||
65 | depends on NF_CONNTRACK_IPV6 | ||
66 | depends on NETFILTER_ADVANCED | ||
67 | select NF_NAT | ||
68 | help | ||
69 | The IPv6 NAT option allows masquerading, port forwarding and other | ||
70 | forms of full Network Address Port Translation. This can be | ||
71 | controlled by iptables or nft. | ||
72 | |||
63 | config IP6_NF_IPTABLES | 73 | config IP6_NF_IPTABLES |
64 | tristate "IP6 tables support (required for filtering)" | 74 | tristate "IP6 tables support (required for filtering)" |
65 | depends on INET && IPV6 | 75 | depends on INET && IPV6 |
@@ -232,19 +242,21 @@ config IP6_NF_SECURITY | |||
232 | 242 | ||
233 | If unsure, say N. | 243 | If unsure, say N. |
234 | 244 | ||
235 | config NF_NAT_IPV6 | 245 | config IP6_NF_NAT |
236 | tristate "IPv6 NAT" | 246 | tristate "ip6tables NAT support" |
237 | depends on NF_CONNTRACK_IPV6 | 247 | depends on NF_CONNTRACK_IPV6 |
238 | depends on NETFILTER_ADVANCED | 248 | depends on NETFILTER_ADVANCED |
239 | select NF_NAT | 249 | select NF_NAT |
250 | select NF_NAT_IPV6 | ||
251 | select NETFILTER_XT_NAT | ||
240 | help | 252 | help |
241 | The IPv6 NAT option allows masquerading, port forwarding and other | 253 | This enables the `nat' table in ip6tables. This allows masquerading, |
242 | forms of full Network Address Port Translation. It is controlled by | 254 | port forwarding and other forms of full Network Address Port |
243 | the `nat' table in ip6tables, see the man page for ip6tables(8). | 255 | Translation. |
244 | 256 | ||
245 | To compile it as a module, choose M here. If unsure, say N. | 257 | To compile it as a module, choose M here. If unsure, say N. |
246 | 258 | ||
247 | if NF_NAT_IPV6 | 259 | if IP6_NF_NAT |
248 | 260 | ||
249 | config IP6_NF_TARGET_MASQUERADE | 261 | config IP6_NF_TARGET_MASQUERADE |
250 | tristate "MASQUERADE target support" | 262 | tristate "MASQUERADE target support" |
@@ -265,7 +277,7 @@ config IP6_NF_TARGET_NPT | |||
265 | 277 | ||
266 | To compile it as a module, choose M here. If unsure, say N. | 278 | To compile it as a module, choose M here. If unsure, say N. |
267 | 279 | ||
268 | endif # NF_NAT_IPV6 | 280 | endif # IP6_NF_NAT |
269 | 281 | ||
270 | endif # IP6_NF_IPTABLES | 282 | endif # IP6_NF_IPTABLES |
271 | 283 | ||
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index c0b263104ed2..c3d3286db4bb 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile | |||
@@ -8,7 +8,7 @@ obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o | |||
8 | obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o | 8 | obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o |
9 | obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o | 9 | obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o |
10 | obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o | 10 | obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o |
11 | obj-$(CONFIG_NF_NAT_IPV6) += ip6table_nat.o | 11 | obj-$(CONFIG_IP6_NF_NAT) += ip6table_nat.o |
12 | 12 | ||
13 | # objects for l3 independent conntrack | 13 | # objects for l3 independent conntrack |
14 | nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o | 14 | nf_conntrack_ipv6-y := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o |
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 13752d96275e..b704a9356208 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -755,7 +755,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, | |||
755 | /* If PMTU discovery was enabled, use the MTU that was discovered */ | 755 | /* If PMTU discovery was enabled, use the MTU that was discovered */ |
756 | dst = sk_dst_get(tunnel->sock); | 756 | dst = sk_dst_get(tunnel->sock); |
757 | if (dst != NULL) { | 757 | if (dst != NULL) { |
758 | u32 pmtu = dst_mtu(__sk_dst_get(tunnel->sock)); | 758 | u32 pmtu = dst_mtu(dst); |
759 | |||
759 | if (pmtu != 0) | 760 | if (pmtu != 0) |
760 | session->mtu = session->mru = pmtu - | 761 | session->mtu = session->mru = pmtu - |
761 | PPPOL2TP_HEADER_OVERHEAD; | 762 | PPPOL2TP_HEADER_OVERHEAD; |
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 0375009ddc0d..399ad82c997f 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c | |||
@@ -541,6 +541,8 @@ static void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, | |||
541 | continue; | 541 | continue; |
542 | if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf) | 542 | if (rcu_access_pointer(sdata->vif.chanctx_conf) != conf) |
543 | continue; | 543 | continue; |
544 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | ||
545 | continue; | ||
544 | 546 | ||
545 | if (!compat) | 547 | if (!compat) |
546 | compat = &sdata->vif.bss_conf.chandef; | 548 | compat = &sdata->vif.bss_conf.chandef; |
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c index 3db96648b45a..86173c0de40e 100644 --- a/net/mac80211/debugfs_sta.c +++ b/net/mac80211/debugfs_sta.c | |||
@@ -167,7 +167,7 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, | |||
167 | p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", | 167 | p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", |
168 | sta->ampdu_mlme.dialog_token_allocator + 1); | 168 | sta->ampdu_mlme.dialog_token_allocator + 1); |
169 | p += scnprintf(p, sizeof(buf) + buf - p, | 169 | p += scnprintf(p, sizeof(buf) + buf - p, |
170 | "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); | 170 | "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tpending\n"); |
171 | 171 | ||
172 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { | 172 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { |
173 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]); | 173 | tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[i]); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 01eede7406a5..f75e5f132c5a 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1175,8 +1175,8 @@ static void ieee80211_iface_work(struct work_struct *work) | |||
1175 | if (sta) { | 1175 | if (sta) { |
1176 | u16 last_seq; | 1176 | u16 last_seq; |
1177 | 1177 | ||
1178 | last_seq = le16_to_cpu( | 1178 | last_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu( |
1179 | sta->last_seq_ctrl[rx_agg->tid]); | 1179 | sta->last_seq_ctrl[rx_agg->tid])); |
1180 | 1180 | ||
1181 | __ieee80211_start_rx_ba_session(sta, | 1181 | __ieee80211_start_rx_ba_session(sta, |
1182 | 0, 0, | 1182 | 0, 0, |
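The iface.c hunk above wraps the stored sequence-control word in IEEE80211_SEQ_TO_SN() before handing it to __ieee80211_start_rx_ba_session(), so only the 12-bit sequence number is kept and the fragment bits are dropped. A minimal standalone sketch of that conversion; SCTL_SEQ_MASK and seq_to_sn are local stand-ins for the kernel macros, not the kernel's own names:

#include <stdint.h>
#include <stdio.h>

/* 802.11 sequence control: bits 0-3 fragment number, bits 4-15 sequence number */
#define SCTL_SEQ_MASK 0xFFF0u

static uint16_t seq_to_sn(uint16_t seq_ctrl)
{
        return (seq_ctrl & SCTL_SEQ_MASK) >> 4;
}

int main(void)
{
        uint16_t seq_ctrl = 0x1234;     /* raw field as read from a frame header */

        printf("sn=0x%03x\n", seq_to_sn(seq_ctrl));     /* 0x123: fragment bits dropped */
        return 0;
}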
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 63b874101b27..c47194d27149 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -959,7 +959,8 @@ mesh_plink_get_event(struct ieee80211_sub_if_data *sdata, | |||
959 | if (!matches_local) | 959 | if (!matches_local) |
960 | event = CNF_RJCT; | 960 | event = CNF_RJCT; |
961 | if (!mesh_plink_free_count(sdata) || | 961 | if (!mesh_plink_free_count(sdata) || |
962 | (sta->llid != llid || sta->plid != plid)) | 962 | sta->llid != llid || |
963 | (sta->plid && sta->plid != plid)) | ||
963 | event = CNF_IGNR; | 964 | event = CNF_IGNR; |
964 | else | 965 | else |
965 | event = CNF_ACPT; | 966 | event = CNF_ACPT; |
@@ -1080,6 +1081,10 @@ mesh_process_plink_frame(struct ieee80211_sub_if_data *sdata, | |||
1080 | goto unlock_rcu; | 1081 | goto unlock_rcu; |
1081 | } | 1082 | } |
1082 | 1083 | ||
1084 | /* 802.11-2012 13.3.7.2 - update plid on CNF if not set */ | ||
1085 | if (!sta->plid && event == CNF_ACPT) | ||
1086 | sta->plid = plid; | ||
1087 | |||
1083 | changed |= mesh_plink_fsm(sdata, sta, event); | 1088 | changed |= mesh_plink_fsm(sdata, sta, event); |
1084 | 1089 | ||
1085 | unlock_rcu: | 1090 | unlock_rcu: |
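The mesh_plink.c hunks above loosen the confirm-frame check so that a CNF whose peer link ID has not yet been recorded is accepted, and the plid is then latched per 802.11-2012 13.3.7.2. A hedged standalone sketch of the resulting decision, with simplified types and the free-count check omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum plink_event { CNF_ACPT, CNF_RJCT, CNF_IGNR };

struct peer {
        uint16_t llid;  /* our local link id, always known */
        uint16_t plid;  /* peer link id, 0 until learned */
};

static enum plink_event classify_cnf(struct peer *sta, bool matches_local,
                                     uint16_t llid, uint16_t plid)
{
        if (!matches_local)
                return CNF_RJCT;
        /* ignore on llid mismatch, or when an already-known plid mismatches */
        if (sta->llid != llid || (sta->plid && sta->plid != plid))
                return CNF_IGNR;
        /* 802.11-2012 13.3.7.2: learn the peer link id from an accepted CNF */
        if (!sta->plid)
                sta->plid = plid;
        return CNF_ACPT;
}

int main(void)
{
        struct peer sta = { .llid = 7, .plid = 0 };

        printf("%d\n", classify_cnf(&sta, true, 7, 42));        /* 0 == CNF_ACPT */
        printf("plid latched to %d\n", sta.plid);               /* 42 */
        return 0;
}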
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 31a8afaf7332..b82a12a9f0f1 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -4376,8 +4376,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, | |||
4376 | rcu_read_unlock(); | 4376 | rcu_read_unlock(); |
4377 | 4377 | ||
4378 | if (bss->wmm_used && bss->uapsd_supported && | 4378 | if (bss->wmm_used && bss->uapsd_supported && |
4379 | (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD) && | 4379 | (sdata->local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)) { |
4380 | sdata->wmm_acm != 0xff) { | ||
4381 | assoc_data->uapsd = true; | 4380 | assoc_data->uapsd = true; |
4382 | ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED; | 4381 | ifmgd->flags |= IEEE80211_STA_UAPSD_ENABLED; |
4383 | } else { | 4382 | } else { |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index c6ee2139fbc5..441875f03750 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -1094,8 +1094,11 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) | |||
1094 | unsigned long flags; | 1094 | unsigned long flags; |
1095 | struct ps_data *ps; | 1095 | struct ps_data *ps; |
1096 | 1096 | ||
1097 | if (sdata->vif.type == NL80211_IFTYPE_AP || | 1097 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) |
1098 | sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | 1098 | sdata = container_of(sdata->bss, struct ieee80211_sub_if_data, |
1099 | u.ap); | ||
1100 | |||
1101 | if (sdata->vif.type == NL80211_IFTYPE_AP) | ||
1099 | ps = &sdata->bss->ps; | 1102 | ps = &sdata->bss->ps; |
1100 | else if (ieee80211_vif_is_mesh(&sdata->vif)) | 1103 | else if (ieee80211_vif_is_mesh(&sdata->vif)) |
1101 | ps = &sdata->u.mesh.ps; | 1104 | ps = &sdata->u.mesh.ps; |
diff --git a/net/mac802154/wpan.c b/net/mac802154/wpan.c index 3c3069fd6971..547838822d5e 100644 --- a/net/mac802154/wpan.c +++ b/net/mac802154/wpan.c | |||
@@ -462,7 +462,10 @@ mac802154_subif_frame(struct mac802154_sub_if_data *sdata, struct sk_buff *skb, | |||
462 | skb->pkt_type = PACKET_OTHERHOST; | 462 | skb->pkt_type = PACKET_OTHERHOST; |
463 | break; | 463 | break; |
464 | default: | 464 | default: |
465 | break; | 465 | spin_unlock_bh(&sdata->mib_lock); |
466 | pr_debug("invalid dest mode\n"); | ||
467 | kfree_skb(skb); | ||
468 | return NET_RX_DROP; | ||
466 | } | 469 | } |
467 | 470 | ||
468 | spin_unlock_bh(&sdata->mib_lock); | 471 | spin_unlock_bh(&sdata->mib_lock); |
@@ -573,6 +576,7 @@ void mac802154_wpans_rx(struct mac802154_priv *priv, struct sk_buff *skb) | |||
573 | ret = mac802154_parse_frame_start(skb, &hdr); | 576 | ret = mac802154_parse_frame_start(skb, &hdr); |
574 | if (ret) { | 577 | if (ret) { |
575 | pr_debug("got invalid frame\n"); | 578 | pr_debug("got invalid frame\n"); |
579 | kfree_skb(skb); | ||
576 | return; | 580 | return; |
577 | } | 581 | } |
578 | 582 | ||
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index ad751fe2e82b..b5c1d3aadb41 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig | |||
@@ -499,7 +499,7 @@ config NFT_LIMIT | |||
499 | config NFT_NAT | 499 | config NFT_NAT |
500 | depends on NF_TABLES | 500 | depends on NF_TABLES |
501 | depends on NF_CONNTRACK | 501 | depends on NF_CONNTRACK |
502 | depends on NF_NAT | 502 | select NF_NAT |
503 | tristate "Netfilter nf_tables nat module" | 503 | tristate "Netfilter nf_tables nat module" |
504 | help | 504 | help |
505 | This option adds the "nat" expression that you can use to perform | 505 | This option adds the "nat" expression that you can use to perform |
@@ -747,7 +747,9 @@ config NETFILTER_XT_TARGET_LED | |||
747 | 747 | ||
748 | config NETFILTER_XT_TARGET_LOG | 748 | config NETFILTER_XT_TARGET_LOG |
749 | tristate "LOG target support" | 749 | tristate "LOG target support" |
750 | depends on NF_LOG_IPV4 && NF_LOG_IPV6 | 750 | select NF_LOG_COMMON |
751 | select NF_LOG_IPV4 | ||
752 | select NF_LOG_IPV6 if IPV6 | ||
751 | default m if NETFILTER_ADVANCED=n | 753 | default m if NETFILTER_ADVANCED=n |
752 | help | 754 | help |
753 | This option adds a `LOG' target, which allows you to create rules in | 755 | This option adds a `LOG' target, which allows you to create rules in |
@@ -764,6 +766,14 @@ config NETFILTER_XT_TARGET_MARK | |||
764 | (e.g. when running oldconfig). It selects | 766 | (e.g. when running oldconfig). It selects |
765 | CONFIG_NETFILTER_XT_MARK (combined mark/MARK module). | 767 | CONFIG_NETFILTER_XT_MARK (combined mark/MARK module). |
766 | 768 | ||
769 | config NETFILTER_XT_NAT | ||
770 | tristate '"SNAT and DNAT" targets support' | ||
771 | depends on NF_NAT | ||
772 | ---help--- | ||
773 | This option enables the SNAT and DNAT targets. | ||
774 | |||
775 | To compile it as a module, choose M here. If unsure, say N. | ||
776 | |||
767 | config NETFILTER_XT_TARGET_NETMAP | 777 | config NETFILTER_XT_TARGET_NETMAP |
768 | tristate '"NETMAP" target support' | 778 | tristate '"NETMAP" target support' |
769 | depends on NF_NAT | 779 | depends on NF_NAT |
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 8308624a406a..fad5fdba34e5 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile | |||
@@ -95,7 +95,7 @@ obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o | |||
95 | obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o | 95 | obj-$(CONFIG_NETFILTER_XT_MARK) += xt_mark.o |
96 | obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o | 96 | obj-$(CONFIG_NETFILTER_XT_CONNMARK) += xt_connmark.o |
97 | obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o | 97 | obj-$(CONFIG_NETFILTER_XT_SET) += xt_set.o |
98 | obj-$(CONFIG_NF_NAT) += xt_nat.o | 98 | obj-$(CONFIG_NETFILTER_XT_NAT) += xt_nat.o |
99 | 99 | ||
100 | # targets | 100 | # targets |
101 | obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o | 101 | obj-$(CONFIG_NETFILTER_XT_TARGET_AUDIT) += xt_AUDIT.o |
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index a93c97f106d4..024a2e25c8a4 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
@@ -54,7 +54,7 @@ EXPORT_SYMBOL_GPL(nf_unregister_afinfo); | |||
54 | struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly; | 54 | struct list_head nf_hooks[NFPROTO_NUMPROTO][NF_MAX_HOOKS] __read_mostly; |
55 | EXPORT_SYMBOL(nf_hooks); | 55 | EXPORT_SYMBOL(nf_hooks); |
56 | 56 | ||
57 | #if defined(CONFIG_JUMP_LABEL) | 57 | #ifdef HAVE_JUMP_LABEL |
58 | struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; | 58 | struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS]; |
59 | EXPORT_SYMBOL(nf_hooks_needed); | 59 | EXPORT_SYMBOL(nf_hooks_needed); |
60 | #endif | 60 | #endif |
@@ -72,7 +72,7 @@ int nf_register_hook(struct nf_hook_ops *reg) | |||
72 | } | 72 | } |
73 | list_add_rcu(®->list, elem->list.prev); | 73 | list_add_rcu(®->list, elem->list.prev); |
74 | mutex_unlock(&nf_hook_mutex); | 74 | mutex_unlock(&nf_hook_mutex); |
75 | #if defined(CONFIG_JUMP_LABEL) | 75 | #ifdef HAVE_JUMP_LABEL |
76 | static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]); | 76 | static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]); |
77 | #endif | 77 | #endif |
78 | return 0; | 78 | return 0; |
@@ -84,7 +84,7 @@ void nf_unregister_hook(struct nf_hook_ops *reg) | |||
84 | mutex_lock(&nf_hook_mutex); | 84 | mutex_lock(&nf_hook_mutex); |
85 | list_del_rcu(®->list); | 85 | list_del_rcu(®->list); |
86 | mutex_unlock(&nf_hook_mutex); | 86 | mutex_unlock(&nf_hook_mutex); |
87 | #if defined(CONFIG_JUMP_LABEL) | 87 | #ifdef HAVE_JUMP_LABEL |
88 | static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); | 88 | static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); |
89 | #endif | 89 | #endif |
90 | synchronize_net(); | 90 | synchronize_net(); |
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index e6836755c45d..5c34e8d42e01 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -1906,7 +1906,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = { | |||
1906 | { | 1906 | { |
1907 | .hook = ip_vs_local_reply6, | 1907 | .hook = ip_vs_local_reply6, |
1908 | .owner = THIS_MODULE, | 1908 | .owner = THIS_MODULE, |
1909 | .pf = NFPROTO_IPV4, | 1909 | .pf = NFPROTO_IPV6, |
1910 | .hooknum = NF_INET_LOCAL_OUT, | 1910 | .hooknum = NF_INET_LOCAL_OUT, |
1911 | .priority = NF_IP6_PRI_NAT_DST + 1, | 1911 | .priority = NF_IP6_PRI_NAT_DST + 1, |
1912 | }, | 1912 | }, |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 6f70bdd3a90a..56896a412bce 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <net/route.h> /* for ip_route_output */ | 38 | #include <net/route.h> /* for ip_route_output */ |
39 | #include <net/ipv6.h> | 39 | #include <net/ipv6.h> |
40 | #include <net/ip6_route.h> | 40 | #include <net/ip6_route.h> |
41 | #include <net/ip_tunnels.h> | ||
41 | #include <net/addrconf.h> | 42 | #include <net/addrconf.h> |
42 | #include <linux/icmpv6.h> | 43 | #include <linux/icmpv6.h> |
43 | #include <linux/netfilter.h> | 44 | #include <linux/netfilter.h> |
@@ -862,11 +863,15 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
862 | old_iph = ip_hdr(skb); | 863 | old_iph = ip_hdr(skb); |
863 | } | 864 | } |
864 | 865 | ||
865 | skb->transport_header = skb->network_header; | ||
866 | |||
867 | /* fix old IP header checksum */ | 866 | /* fix old IP header checksum */ |
868 | ip_send_check(old_iph); | 867 | ip_send_check(old_iph); |
869 | 868 | ||
869 | skb = iptunnel_handle_offloads(skb, false, SKB_GSO_IPIP); | ||
870 | if (IS_ERR(skb)) | ||
871 | goto tx_error; | ||
872 | |||
873 | skb->transport_header = skb->network_header; | ||
874 | |||
870 | skb_push(skb, sizeof(struct iphdr)); | 875 | skb_push(skb, sizeof(struct iphdr)); |
871 | skb_reset_network_header(skb); | 876 | skb_reset_network_header(skb); |
872 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 877 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
@@ -900,7 +905,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
900 | return NF_STOLEN; | 905 | return NF_STOLEN; |
901 | 906 | ||
902 | tx_error: | 907 | tx_error: |
903 | kfree_skb(skb); | 908 | if (!IS_ERR(skb)) |
909 | kfree_skb(skb); | ||
904 | rcu_read_unlock(); | 910 | rcu_read_unlock(); |
905 | LeaveFunction(10); | 911 | LeaveFunction(10); |
906 | return NF_STOLEN; | 912 | return NF_STOLEN; |
@@ -953,6 +959,11 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
953 | old_iph = ipv6_hdr(skb); | 959 | old_iph = ipv6_hdr(skb); |
954 | } | 960 | } |
955 | 961 | ||
962 | /* GSO: we need to provide proper SKB_GSO_ value for IPv6 */ | ||
963 | skb = iptunnel_handle_offloads(skb, false, 0); /* SKB_GSO_SIT/IPV6 */ | ||
964 | if (IS_ERR(skb)) | ||
965 | goto tx_error; | ||
966 | |||
956 | skb->transport_header = skb->network_header; | 967 | skb->transport_header = skb->network_header; |
957 | 968 | ||
958 | skb_push(skb, sizeof(struct ipv6hdr)); | 969 | skb_push(skb, sizeof(struct ipv6hdr)); |
@@ -988,7 +999,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, | |||
988 | return NF_STOLEN; | 999 | return NF_STOLEN; |
989 | 1000 | ||
990 | tx_error: | 1001 | tx_error: |
991 | kfree_skb(skb); | 1002 | if (!IS_ERR(skb)) |
1003 | kfree_skb(skb); | ||
992 | rcu_read_unlock(); | 1004 | rcu_read_unlock(); |
993 | LeaveFunction(10); | 1005 | LeaveFunction(10); |
994 | return NF_STOLEN; | 1006 | return NF_STOLEN; |
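In both tunnel xmit paths above, iptunnel_handle_offloads() may free the skb and report the failure as an error code encoded in the returned pointer, which is why the shared tx_error label now guards kfree_skb() with !IS_ERR(skb). A small self-contained sketch of that ERR_PTR/IS_ERR convention, using local re-implementations rather than the kernel's <linux/err.h>:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* local stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
#define MAX_ERRNO 4095
static void *err_ptr(long err) { return (void *)err; }
static int is_err(const void *p) { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }
static long ptr_err(const void *p) { return (long)p; }

/* failure path consumes the buffer and reports why through the pointer */
static void *transform(void *buf, int fail)
{
        if (fail) {
                free(buf);
                return err_ptr(-ENOMEM);
        }
        return buf;
}

int main(void)
{
        void *buf = malloc(64);

        buf = transform(buf, 1);
        if (is_err(buf)) {
                printf("error %ld, nothing left to free\n", -ptr_err(buf));
                buf = NULL;             /* never free an ERR_PTR value */
        }
        free(buf);                      /* safe: NULL or a real buffer */
        return 0;
}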
diff --git a/net/netfilter/xt_cgroup.c b/net/netfilter/xt_cgroup.c index f4e833005320..7198d660b4de 100644 --- a/net/netfilter/xt_cgroup.c +++ b/net/netfilter/xt_cgroup.c | |||
@@ -31,7 +31,7 @@ static int cgroup_mt_check(const struct xt_mtchk_param *par) | |||
31 | if (info->invert & ~1) | 31 | if (info->invert & ~1) |
32 | return -EINVAL; | 32 | return -EINVAL; |
33 | 33 | ||
34 | return info->id ? 0 : -EINVAL; | 34 | return 0; |
35 | } | 35 | } |
36 | 36 | ||
37 | static bool | 37 | static bool |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 7228ec3faf19..91d66b7e64ac 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -265,8 +265,11 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb) | |||
265 | upcall.key = &key; | 265 | upcall.key = &key; |
266 | upcall.userdata = NULL; | 266 | upcall.userdata = NULL; |
267 | upcall.portid = ovs_vport_find_upcall_portid(p, skb); | 267 | upcall.portid = ovs_vport_find_upcall_portid(p, skb); |
268 | ovs_dp_upcall(dp, skb, &upcall); | 268 | error = ovs_dp_upcall(dp, skb, &upcall); |
269 | consume_skb(skb); | 269 | if (unlikely(error)) |
270 | kfree_skb(skb); | ||
271 | else | ||
272 | consume_skb(skb); | ||
270 | stats_counter = &stats->n_missed; | 273 | stats_counter = &stats->n_missed; |
271 | goto out; | 274 | goto out; |
272 | } | 275 | } |
@@ -404,7 +407,7 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, | |||
404 | { | 407 | { |
405 | struct ovs_header *upcall; | 408 | struct ovs_header *upcall; |
406 | struct sk_buff *nskb = NULL; | 409 | struct sk_buff *nskb = NULL; |
407 | struct sk_buff *user_skb; /* to be queued to userspace */ | 410 | struct sk_buff *user_skb = NULL; /* to be queued to userspace */ |
408 | struct nlattr *nla; | 411 | struct nlattr *nla; |
409 | struct genl_info info = { | 412 | struct genl_info info = { |
410 | .dst_sk = ovs_dp_get_net(dp)->genl_sock, | 413 | .dst_sk = ovs_dp_get_net(dp)->genl_sock, |
@@ -494,9 +497,11 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb, | |||
494 | ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len; | 497 | ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len; |
495 | 498 | ||
496 | err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); | 499 | err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid); |
500 | user_skb = NULL; | ||
497 | out: | 501 | out: |
498 | if (err) | 502 | if (err) |
499 | skb_tx_error(skb); | 503 | skb_tx_error(skb); |
504 | kfree_skb(user_skb); | ||
500 | kfree_skb(nskb); | 505 | kfree_skb(nskb); |
501 | return err; | 506 | return err; |
502 | } | 507 | } |
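The queue_userspace_packet() change above NULLs user_skb right after genlmsg_unicast(), since the buffer is consumed by that call; the common out: label can then unconditionally kfree_skb() whatever was never handed off. A hedged sketch of this ownership-transfer pattern with plain malloc/free standing in for skbs:

#include <stdio.h>
#include <stdlib.h>

/* stand-in consumer: takes the buffer in all cases, as described above */
static int send_buf(char *buf, int fail)
{
        free(buf);
        return fail ? -1 : 0;
}

static int build_and_send(int fail_alloc, int fail_send)
{
        char *user_buf = NULL;
        int err;

        user_buf = fail_alloc ? NULL : malloc(32);
        if (!user_buf) {
                err = -1;
                goto out;
        }

        err = send_buf(user_buf, fail_send);
        user_buf = NULL;        /* handed off; the out: path must not free it */
out:
        free(user_buf);         /* frees only buffers that never left this function */
        return err;
}

int main(void)
{
        printf("%d %d %d\n", build_and_send(0, 0), build_and_send(0, 1),
               build_and_send(1, 0));
        return 0;
}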
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c index 14c98e48f261..02a86a27fd84 100644 --- a/net/rfkill/rfkill-gpio.c +++ b/net/rfkill/rfkill-gpio.c | |||
@@ -158,6 +158,7 @@ static const struct acpi_device_id rfkill_acpi_match[] = { | |||
158 | { "BCM2E1A", RFKILL_TYPE_BLUETOOTH }, | 158 | { "BCM2E1A", RFKILL_TYPE_BLUETOOTH }, |
159 | { "BCM2E39", RFKILL_TYPE_BLUETOOTH }, | 159 | { "BCM2E39", RFKILL_TYPE_BLUETOOTH }, |
160 | { "BCM2E3D", RFKILL_TYPE_BLUETOOTH }, | 160 | { "BCM2E3D", RFKILL_TYPE_BLUETOOTH }, |
161 | { "BCM2E64", RFKILL_TYPE_BLUETOOTH }, | ||
161 | { "BCM4752", RFKILL_TYPE_GPS }, | 162 | { "BCM4752", RFKILL_TYPE_GPS }, |
162 | { "LNV4752", RFKILL_TYPE_GPS }, | 163 | { "LNV4752", RFKILL_TYPE_GPS }, |
163 | { }, | 164 | { }, |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index eb71d49e7653..634a2abb5f3a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -4243,7 +4243,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len, | |||
4243 | transport = asoc->peer.primary_path; | 4243 | transport = asoc->peer.primary_path; |
4244 | 4244 | ||
4245 | status.sstat_assoc_id = sctp_assoc2id(asoc); | 4245 | status.sstat_assoc_id = sctp_assoc2id(asoc); |
4246 | status.sstat_state = asoc->state; | 4246 | status.sstat_state = sctp_assoc_to_state(asoc); |
4247 | status.sstat_rwnd = asoc->peer.rwnd; | 4247 | status.sstat_rwnd = asoc->peer.rwnd; |
4248 | status.sstat_unackdata = asoc->unack_data; | 4248 | status.sstat_unackdata = asoc->unack_data; |
4249 | 4249 | ||
diff --git a/net/socket.c b/net/socket.c index 95ee7d8682e7..2e2586e2dee1 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -734,8 +734,7 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk, | |||
734 | } | 734 | } |
735 | 735 | ||
736 | memset(&tss, 0, sizeof(tss)); | 736 | memset(&tss, 0, sizeof(tss)); |
737 | if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE || | 737 | if ((sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) && |
738 | skb_shinfo(skb)->tx_flags & SKBTX_ANY_SW_TSTAMP) && | ||
739 | ktime_to_timespec_cond(skb->tstamp, tss.ts + 0)) | 738 | ktime_to_timespec_cond(skb->tstamp, tss.ts + 0)) |
740 | empty = 0; | 739 | empty = 0; |
741 | if (shhwtstamps && | 740 | if (shhwtstamps && |
@@ -2602,7 +2601,7 @@ SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args) | |||
2602 | * | 2601 | * |
2603 | * This function is called by a protocol handler that wants to | 2602 | * This function is called by a protocol handler that wants to |
2604 | * advertise its address family, and have it linked into the | 2603 | * advertise its address family, and have it linked into the |
2605 | * socket interface. The value ops->family coresponds to the | 2604 | * socket interface. The value ops->family corresponds to the |
2606 | * socket system call protocol family. | 2605 | * socket system call protocol family. |
2607 | */ | 2606 | */ |
2608 | int sock_register(const struct net_proto_family *ops) | 2607 | int sock_register(const struct net_proto_family *ops) |
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index b385bcbbf2f5..4d08b398411f 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -2133,7 +2133,10 @@ sub process { | |||
2133 | # Check for improperly formed commit descriptions | 2133 | # Check for improperly formed commit descriptions |
2134 | if ($in_commit_log && | 2134 | if ($in_commit_log && |
2135 | $line =~ /\bcommit\s+[0-9a-f]{5,}/i && | 2135 | $line =~ /\bcommit\s+[0-9a-f]{5,}/i && |
2136 | $line !~ /\b[Cc]ommit [0-9a-f]{12,40} \("/) { | 2136 | !($line =~ /\b[Cc]ommit [0-9a-f]{12,40} \("/ || |
2137 | ($line =~ /\b[Cc]ommit [0-9a-f]{12,40}\s*$/ && | ||
2138 | defined $rawlines[$linenr] && | ||
2139 | $rawlines[$linenr] =~ /^\s*\("/))) { | ||
2137 | $line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i; | 2140 | $line =~ /\b(c)ommit\s+([0-9a-f]{5,})/i; |
2138 | my $init_char = $1; | 2141 | my $init_char = $1; |
2139 | my $orig_commit = lc($2); | 2142 | my $orig_commit = lc($2); |
diff --git a/security/keys/key.c b/security/keys/key.c index b90a68c4e2c4..6d0cad16f002 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
@@ -27,8 +27,8 @@ DEFINE_SPINLOCK(key_serial_lock); | |||
27 | struct rb_root key_user_tree; /* tree of quota records indexed by UID */ | 27 | struct rb_root key_user_tree; /* tree of quota records indexed by UID */ |
28 | DEFINE_SPINLOCK(key_user_lock); | 28 | DEFINE_SPINLOCK(key_user_lock); |
29 | 29 | ||
30 | unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */ | 30 | unsigned int key_quota_root_maxkeys = 1000000; /* root's key count quota */ |
31 | unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */ | 31 | unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */ |
32 | unsigned int key_quota_maxkeys = 200; /* general key count quota */ | 32 | unsigned int key_quota_maxkeys = 200; /* general key count quota */ |
33 | unsigned int key_quota_maxbytes = 20000; /* general key space quota */ | 33 | unsigned int key_quota_maxbytes = 20000; /* general key space quota */ |
34 | 34 | ||
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c index f96bf4c7c232..95fc2eaf11dc 100644 --- a/sound/firewire/amdtp.c +++ b/sound/firewire/amdtp.c | |||
@@ -507,7 +507,16 @@ static void amdtp_pull_midi(struct amdtp_stream *s, | |||
507 | static void update_pcm_pointers(struct amdtp_stream *s, | 507 | static void update_pcm_pointers(struct amdtp_stream *s, |
508 | struct snd_pcm_substream *pcm, | 508 | struct snd_pcm_substream *pcm, |
509 | unsigned int frames) | 509 | unsigned int frames) |
510 | { unsigned int ptr; | 510 | { |
511 | unsigned int ptr; | ||
512 | |||
513 | /* | ||
514 | * In IEC 61883-6, one data block represents one event. In ALSA, one | ||
515 | * event equals to one PCM frame. But Dice has a quirk to transfer | ||
516 | * two PCM frames in one data block. | ||
517 | */ | ||
518 | if (s->double_pcm_frames) | ||
519 | frames *= 2; | ||
511 | 520 | ||
512 | ptr = s->pcm_buffer_pointer + frames; | 521 | ptr = s->pcm_buffer_pointer + frames; |
513 | if (ptr >= pcm->runtime->buffer_size) | 522 | if (ptr >= pcm->runtime->buffer_size) |
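The update_pcm_pointers() hunk above doubles the frame count for dual-wire streams before advancing the PCM ring-buffer pointer. A small sketch of that pointer update, assuming the usual subtract-on-wrap behaviour for the part of the function cut off in this view:

#include <stdio.h>

/* advance a PCM ring-buffer pointer by 'frames', doubling for dual-wire streams */
static unsigned int advance(unsigned int ptr, unsigned int frames,
                            unsigned int buffer_size, int double_pcm_frames)
{
        if (double_pcm_frames)
                frames *= 2;            /* one data block carries two PCM frames */

        ptr += frames;
        if (ptr >= buffer_size)
                ptr -= buffer_size;     /* assumed wrap, matching the truncated code */
        return ptr;
}

int main(void)
{
        /* 8-frame buffer, pointer at 6, 3 data blocks arrive in dual-wire mode */
        printf("%u\n", advance(6, 3, 8, 1));    /* 12 wraps to 4 */
        return 0;
}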
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h index d8ee7b0e9386..4823c08196ac 100644 --- a/sound/firewire/amdtp.h +++ b/sound/firewire/amdtp.h | |||
@@ -125,6 +125,7 @@ struct amdtp_stream { | |||
125 | unsigned int pcm_buffer_pointer; | 125 | unsigned int pcm_buffer_pointer; |
126 | unsigned int pcm_period_pointer; | 126 | unsigned int pcm_period_pointer; |
127 | bool pointer_flush; | 127 | bool pointer_flush; |
128 | bool double_pcm_frames; | ||
128 | 129 | ||
129 | struct snd_rawmidi_substream *midi[AMDTP_MAX_CHANNELS_FOR_MIDI * 8]; | 130 | struct snd_rawmidi_substream *midi[AMDTP_MAX_CHANNELS_FOR_MIDI * 8]; |
130 | 131 | ||
diff --git a/sound/firewire/dice.c b/sound/firewire/dice.c index a9a30c0161f1..e3a04d69c853 100644 --- a/sound/firewire/dice.c +++ b/sound/firewire/dice.c | |||
@@ -567,10 +567,14 @@ static int dice_hw_params(struct snd_pcm_substream *substream, | |||
567 | return err; | 567 | return err; |
568 | 568 | ||
569 | /* | 569 | /* |
570 | * At rates above 96 kHz, pretend that the stream runs at half the | 570 | * At 176.4/192.0 kHz, Dice has a quirk to transfer two PCM frames in |
571 | * actual sample rate with twice the number of channels; two samples | 571 | * one data block of AMDTP packet. Thus sampling transfer frequency is |
572 | * of a channel are stored consecutively in the packet. Requires | 572 | * a half of PCM sampling frequency, i.e. PCM frames at 192.0 kHz are |
573 | * blocking mode and PCM buffer size should be aligned to SYT_INTERVAL. | 573 | * transferred on AMDTP packets at 96 kHz. Two successive samples of a |
574 | * channel are stored consecutively in the packet. This quirk is called | ||
575 | * as 'Dual Wire'. | ||
576 | * For this quirk, blocking mode is required and PCM buffer size should | ||
577 | * be aligned to SYT_INTERVAL. | ||
574 | */ | 578 | */ |
575 | channels = params_channels(hw_params); | 579 | channels = params_channels(hw_params); |
576 | if (rate_index > 4) { | 580 | if (rate_index > 4) { |
@@ -579,18 +583,25 @@ static int dice_hw_params(struct snd_pcm_substream *substream, | |||
579 | return err; | 583 | return err; |
580 | } | 584 | } |
581 | 585 | ||
582 | for (i = 0; i < channels; i++) { | ||
583 | dice->stream.pcm_positions[i * 2] = i; | ||
584 | dice->stream.pcm_positions[i * 2 + 1] = i + channels; | ||
585 | } | ||
586 | |||
587 | rate /= 2; | 586 | rate /= 2; |
588 | channels *= 2; | 587 | channels *= 2; |
588 | dice->stream.double_pcm_frames = true; | ||
589 | } else { | ||
590 | dice->stream.double_pcm_frames = false; | ||
589 | } | 591 | } |
590 | 592 | ||
591 | mode = rate_index_to_mode(rate_index); | 593 | mode = rate_index_to_mode(rate_index); |
592 | amdtp_stream_set_parameters(&dice->stream, rate, channels, | 594 | amdtp_stream_set_parameters(&dice->stream, rate, channels, |
593 | dice->rx_midi_ports[mode]); | 595 | dice->rx_midi_ports[mode]); |
596 | if (rate_index > 4) { | ||
597 | channels /= 2; | ||
598 | |||
599 | for (i = 0; i < channels; i++) { | ||
600 | dice->stream.pcm_positions[i] = i * 2; | ||
601 | dice->stream.pcm_positions[i + channels] = i * 2 + 1; | ||
602 | } | ||
603 | } | ||
604 | |||
594 | amdtp_stream_set_pcm_format(&dice->stream, | 605 | amdtp_stream_set_pcm_format(&dice->stream, |
595 | params_format(hw_params)); | 606 | params_format(hw_params)); |
596 | 607 | ||
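The dice.c comment above describes the 'Dual Wire' quirk: at 176.4/192 kHz the packet stream runs at half the PCM rate with twice the channels, and the two successive PCM frames of each channel sit next to each other in a data block. A purely illustrative packing of one data block under that layout (not a claim about the driver's exact pcm_positions[] orientation):

#include <stdio.h>

/*
 * Illustrative 'Dual Wire' packing: one data block holds 2 * channels samples,
 * with the two successive PCM frames of each channel stored consecutively.
 */
int main(void)
{
        int channels = 2;                               /* e.g. stereo */
        int pcm[2][2] = { { 10, 20 }, { 11, 21 } };     /* pcm[frame][channel] */
        int block[4];                                   /* one data block */
        int ch, s;

        for (ch = 0; ch < channels; ch++) {
                block[ch * 2]     = pcm[0][ch];         /* frame n   of channel ch */
                block[ch * 2 + 1] = pcm[1][ch];         /* frame n+1 of channel ch */
        }

        for (s = 0; s < 2 * channels; s++)
                printf("%d ", block[s]);                /* 10 11 20 21 */
        printf("\n");
        return 0;
}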
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 6f2fa838b635..6e5d0cb4e3d7 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -217,6 +217,7 @@ enum { | |||
217 | CXT_FIXUP_HEADPHONE_MIC_PIN, | 217 | CXT_FIXUP_HEADPHONE_MIC_PIN, |
218 | CXT_FIXUP_HEADPHONE_MIC, | 218 | CXT_FIXUP_HEADPHONE_MIC, |
219 | CXT_FIXUP_GPIO1, | 219 | CXT_FIXUP_GPIO1, |
220 | CXT_FIXUP_ASPIRE_DMIC, | ||
220 | CXT_FIXUP_THINKPAD_ACPI, | 221 | CXT_FIXUP_THINKPAD_ACPI, |
221 | CXT_FIXUP_OLPC_XO, | 222 | CXT_FIXUP_OLPC_XO, |
222 | CXT_FIXUP_CAP_MIX_AMP, | 223 | CXT_FIXUP_CAP_MIX_AMP, |
@@ -664,6 +665,12 @@ static const struct hda_fixup cxt_fixups[] = { | |||
664 | { } | 665 | { } |
665 | }, | 666 | }, |
666 | }, | 667 | }, |
668 | [CXT_FIXUP_ASPIRE_DMIC] = { | ||
669 | .type = HDA_FIXUP_FUNC, | ||
670 | .v.func = cxt_fixup_stereo_dmic, | ||
671 | .chained = true, | ||
672 | .chain_id = CXT_FIXUP_GPIO1, | ||
673 | }, | ||
667 | [CXT_FIXUP_THINKPAD_ACPI] = { | 674 | [CXT_FIXUP_THINKPAD_ACPI] = { |
668 | .type = HDA_FIXUP_FUNC, | 675 | .type = HDA_FIXUP_FUNC, |
669 | .v.func = hda_fixup_thinkpad_acpi, | 676 | .v.func = hda_fixup_thinkpad_acpi, |
@@ -744,7 +751,7 @@ static const struct hda_model_fixup cxt5051_fixup_models[] = { | |||
744 | 751 | ||
745 | static const struct snd_pci_quirk cxt5066_fixups[] = { | 752 | static const struct snd_pci_quirk cxt5066_fixups[] = { |
746 | SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), | 753 | SND_PCI_QUIRK(0x1025, 0x0543, "Acer Aspire One 522", CXT_FIXUP_STEREO_DMIC), |
747 | SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_GPIO1), | 754 | SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT_FIXUP_ASPIRE_DMIC), |
748 | SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), | 755 | SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN), |
749 | SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), | 756 | SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO), |
750 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), | 757 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410), |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d446ac3137b3..1ba22fb527c2 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -328,6 +328,7 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type) | |||
328 | case 0x10ec0885: | 328 | case 0x10ec0885: |
329 | case 0x10ec0887: | 329 | case 0x10ec0887: |
330 | /*case 0x10ec0889:*/ /* this causes an SPDIF problem */ | 330 | /*case 0x10ec0889:*/ /* this causes an SPDIF problem */ |
331 | case 0x10ec0900: | ||
331 | alc889_coef_init(codec); | 332 | alc889_coef_init(codec); |
332 | break; | 333 | break; |
333 | case 0x10ec0888: | 334 | case 0x10ec0888: |
@@ -2350,6 +2351,7 @@ static int patch_alc882(struct hda_codec *codec) | |||
2350 | switch (codec->vendor_id) { | 2351 | switch (codec->vendor_id) { |
2351 | case 0x10ec0882: | 2352 | case 0x10ec0882: |
2352 | case 0x10ec0885: | 2353 | case 0x10ec0885: |
2354 | case 0x10ec0900: | ||
2353 | break; | 2355 | break; |
2354 | default: | 2356 | default: |
2355 | /* ALC883 and variants */ | 2357 | /* ALC883 and variants */ |
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c index a20b30ca52c0..98523209f739 100644 --- a/sound/soc/codecs/cs4265.c +++ b/sound/soc/codecs/cs4265.c | |||
@@ -282,10 +282,10 @@ static const struct cs4265_clk_para clk_map_table[] = { | |||
282 | 282 | ||
283 | /*64k*/ | 283 | /*64k*/ |
284 | {8192000, 64000, 1, 0}, | 284 | {8192000, 64000, 1, 0}, |
285 | {1228800, 64000, 1, 1}, | 285 | {12288000, 64000, 1, 1}, |
286 | {1693440, 64000, 1, 2}, | 286 | {16934400, 64000, 1, 2}, |
287 | {2457600, 64000, 1, 3}, | 287 | {24576000, 64000, 1, 3}, |
288 | {3276800, 64000, 1, 4}, | 288 | {32768000, 64000, 1, 4}, |
289 | 289 | ||
290 | /* 88.2k */ | 290 | /* 88.2k */ |
291 | {11289600, 88200, 1, 0}, | 291 | {11289600, 88200, 1, 0}, |
@@ -435,10 +435,10 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream, | |||
435 | index = cs4265_get_clk_index(cs4265->sysclk, params_rate(params)); | 435 | index = cs4265_get_clk_index(cs4265->sysclk, params_rate(params)); |
436 | if (index >= 0) { | 436 | if (index >= 0) { |
437 | snd_soc_update_bits(codec, CS4265_ADC_CTL, | 437 | snd_soc_update_bits(codec, CS4265_ADC_CTL, |
438 | CS4265_ADC_FM, clk_map_table[index].fm_mode); | 438 | CS4265_ADC_FM, clk_map_table[index].fm_mode << 6); |
439 | snd_soc_update_bits(codec, CS4265_MCLK_FREQ, | 439 | snd_soc_update_bits(codec, CS4265_MCLK_FREQ, |
440 | CS4265_MCLK_FREQ_MASK, | 440 | CS4265_MCLK_FREQ_MASK, |
441 | clk_map_table[index].mclkdiv); | 441 | clk_map_table[index].mclkdiv << 4); |
442 | 442 | ||
443 | } else { | 443 | } else { |
444 | dev_err(codec->dev, "can't get correct mclk\n"); | 444 | dev_err(codec->dev, "can't get correct mclk\n"); |
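The cs4265.c fix above shifts fm_mode and mclkdiv into their register bit fields (<< 6 and << 4) before the masked update; previously the raw values landed in the low bits and were partly masked away. A generic read-modify-write sketch of such a field update; the field positions and masks below are hypothetical, not the CS4265's documented layout:

#include <stdint.h>
#include <stdio.h>

/* generic read-modify-write of a register bit field */
static uint8_t update_bits(uint8_t reg, uint8_t mask, uint8_t val)
{
        return (uint8_t)((reg & ~mask) | (val & mask));
}

int main(void)
{
        /* hypothetical layout: FM mode in bits 7:6, MCLK divider in bits 6:4 */
        uint8_t adc_ctl  = 0x05;
        uint8_t mclk_reg = 0x01;

        adc_ctl  = update_bits(adc_ctl, 0xC0, 1 << 6);  /* fm_mode = 1 */
        mclk_reg = update_bits(mclk_reg, 0x70, 3 << 4); /* mclkdiv = 3 */

        printf("0x%02x 0x%02x\n", adc_ctl, mclk_reg);   /* 0x45 0x31 */
        return 0;
}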
diff --git a/sound/soc/codecs/da732x.h b/sound/soc/codecs/da732x.h index 1dceafeec415..f586cbd30b77 100644 --- a/sound/soc/codecs/da732x.h +++ b/sound/soc/codecs/da732x.h | |||
@@ -11,7 +11,7 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #ifndef __DA732X_H_ | 13 | #ifndef __DA732X_H_ |
14 | #define __DA732X_H | 14 | #define __DA732X_H_ |
15 | 15 | ||
16 | #include <sound/soc.h> | 16 | #include <sound/soc.h> |
17 | 17 | ||
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c index 6bc6efdec550..f1ec6e6bd08a 100644 --- a/sound/soc/codecs/rt5640.c +++ b/sound/soc/codecs/rt5640.c | |||
@@ -2059,6 +2059,7 @@ static struct snd_soc_codec_driver soc_codec_dev_rt5640 = { | |||
2059 | static const struct regmap_config rt5640_regmap = { | 2059 | static const struct regmap_config rt5640_regmap = { |
2060 | .reg_bits = 8, | 2060 | .reg_bits = 8, |
2061 | .val_bits = 16, | 2061 | .val_bits = 16, |
2062 | .use_single_rw = true, | ||
2062 | 2063 | ||
2063 | .max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) * | 2064 | .max_register = RT5640_VENDOR_ID2 + 1 + (ARRAY_SIZE(rt5640_ranges) * |
2064 | RT5640_PR_SPACING), | 2065 | RT5640_PR_SPACING), |
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c index 67f14556462f..5337c448b5e3 100644 --- a/sound/soc/codecs/rt5677.c +++ b/sound/soc/codecs/rt5677.c | |||
@@ -2135,10 +2135,10 @@ static const struct snd_soc_dapm_route rt5677_dapm_routes[] = { | |||
2135 | { "BST2", NULL, "IN2P" }, | 2135 | { "BST2", NULL, "IN2P" }, |
2136 | { "BST2", NULL, "IN2N" }, | 2136 | { "BST2", NULL, "IN2N" }, |
2137 | 2137 | ||
2138 | { "IN1P", NULL, "micbias1" }, | 2138 | { "IN1P", NULL, "MICBIAS1" }, |
2139 | { "IN1N", NULL, "micbias1" }, | 2139 | { "IN1N", NULL, "MICBIAS1" }, |
2140 | { "IN2P", NULL, "micbias1" }, | 2140 | { "IN2P", NULL, "MICBIAS1" }, |
2141 | { "IN2N", NULL, "micbias1" }, | 2141 | { "IN2N", NULL, "MICBIAS1" }, |
2142 | 2142 | ||
2143 | { "ADC 1", NULL, "BST1" }, | 2143 | { "ADC 1", NULL, "BST1" }, |
2144 | { "ADC 1", NULL, "ADC 1 power" }, | 2144 | { "ADC 1", NULL, "ADC 1 power" }, |
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 159e517fa09a..cef7776b712c 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c | |||
@@ -481,12 +481,19 @@ static int asoc_simple_card_probe(struct platform_device *pdev) | |||
481 | snd_soc_card_set_drvdata(&priv->snd_card, priv); | 481 | snd_soc_card_set_drvdata(&priv->snd_card, priv); |
482 | 482 | ||
483 | ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card); | 483 | ret = devm_snd_soc_register_card(&pdev->dev, &priv->snd_card); |
484 | if (ret >= 0) | ||
485 | return ret; | ||
484 | 486 | ||
485 | err: | 487 | err: |
486 | asoc_simple_card_unref(pdev); | 488 | asoc_simple_card_unref(pdev); |
487 | return ret; | 489 | return ret; |
488 | } | 490 | } |
489 | 491 | ||
492 | static int asoc_simple_card_remove(struct platform_device *pdev) | ||
493 | { | ||
494 | return asoc_simple_card_unref(pdev); | ||
495 | } | ||
496 | |||
490 | static const struct of_device_id asoc_simple_of_match[] = { | 497 | static const struct of_device_id asoc_simple_of_match[] = { |
491 | { .compatible = "simple-audio-card", }, | 498 | { .compatible = "simple-audio-card", }, |
492 | {}, | 499 | {}, |
@@ -500,6 +507,7 @@ static struct platform_driver asoc_simple_card = { | |||
500 | .of_match_table = asoc_simple_of_match, | 507 | .of_match_table = asoc_simple_of_match, |
501 | }, | 508 | }, |
502 | .probe = asoc_simple_card_probe, | 509 | .probe = asoc_simple_card_probe, |
510 | .remove = asoc_simple_card_remove, | ||
503 | }; | 511 | }; |
504 | 512 | ||
505 | module_platform_driver(asoc_simple_card); | 513 | module_platform_driver(asoc_simple_card); |
diff --git a/sound/soc/omap/omap-twl4030.c b/sound/soc/omap/omap-twl4030.c index f8a6adc2d81c..4336d1831485 100644 --- a/sound/soc/omap/omap-twl4030.c +++ b/sound/soc/omap/omap-twl4030.c | |||
@@ -260,7 +260,7 @@ static struct snd_soc_dai_link omap_twl4030_dai_links[] = { | |||
260 | .stream_name = "TWL4030 Voice", | 260 | .stream_name = "TWL4030 Voice", |
261 | .cpu_dai_name = "omap-mcbsp.3", | 261 | .cpu_dai_name = "omap-mcbsp.3", |
262 | .codec_dai_name = "twl4030-voice", | 262 | .codec_dai_name = "twl4030-voice", |
263 | .platform_name = "omap-mcbsp.2", | 263 | .platform_name = "omap-mcbsp.3", |
264 | .codec_name = "twl4030-codec", | 264 | .codec_name = "twl4030-codec", |
265 | .dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF | | 265 | .dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF | |
266 | SND_SOC_DAIFMT_CBM_CFM, | 266 | SND_SOC_DAIFMT_CBM_CFM, |
diff --git a/sound/soc/sh/rcar/gen.c b/sound/soc/sh/rcar/gen.c index 3fdf3be7b99a..f95e7ab135e8 100644 --- a/sound/soc/sh/rcar/gen.c +++ b/sound/soc/sh/rcar/gen.c | |||
@@ -247,7 +247,7 @@ rsnd_gen2_dma_addr(struct rsnd_priv *priv, | |||
247 | }; | 247 | }; |
248 | 248 | ||
249 | /* it shouldn't happen */ | 249 | /* it shouldn't happen */ |
250 | if (use_dvc & !use_src) | 250 | if (use_dvc && !use_src) |
251 | dev_err(dev, "DVC is selected without SRC\n"); | 251 | dev_err(dev, "DVC is selected without SRC\n"); |
252 | 252 | ||
253 | /* use SSIU or SSI ? */ | 253 | /* use SSIU or SSI ? */ |
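The one-character gen.c fix above replaces a bitwise AND with a logical AND. The two only coincide when both operands are already 0 or 1; with any other non-zero use_dvc the buggy form can evaluate to 0 and silently skip the error message. A tiny demonstration:

#include <stdio.h>

int main(void)
{
        int use_dvc = 2;        /* logically true, but bit 0 is clear */
        int use_src = 0;

        /* buggy: 2 & !0 == 2 & 1 == 0, so the condition is silently false */
        if (use_dvc & !use_src)
                printf("bitwise: DVC without SRC detected\n");

        /* fixed: any non-zero use_dvc with use_src == 0 is reported */
        if (use_dvc && !use_src)
                printf("logical: DVC without SRC detected\n");

        return 0;
}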
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index d4bfd4a9076f..889f4e3d35dc 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -1325,7 +1325,7 @@ static int soc_post_component_init(struct snd_soc_pcm_runtime *rtd, | |||
1325 | device_initialize(rtd->dev); | 1325 | device_initialize(rtd->dev); |
1326 | rtd->dev->parent = rtd->card->dev; | 1326 | rtd->dev->parent = rtd->card->dev; |
1327 | rtd->dev->release = rtd_release; | 1327 | rtd->dev->release = rtd_release; |
1328 | rtd->dev->init_name = name; | 1328 | dev_set_name(rtd->dev, "%s", name); |
1329 | dev_set_drvdata(rtd->dev, rtd); | 1329 | dev_set_drvdata(rtd->dev, rtd); |
1330 | mutex_init(&rtd->pcm_mutex); | 1330 | mutex_init(&rtd->pcm_mutex); |
1331 | INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients); | 1331 | INIT_LIST_HEAD(&rtd->dpcm[SNDRV_PCM_STREAM_PLAYBACK].be_clients); |
diff --git a/sound/soc/tegra/tegra_asoc_utils.h b/sound/soc/tegra/tegra_asoc_utils.h index 9577121ce971..ca8037634100 100644 --- a/sound/soc/tegra/tegra_asoc_utils.h +++ b/sound/soc/tegra/tegra_asoc_utils.h | |||
@@ -21,7 +21,7 @@ | |||
21 | */ | 21 | */ |
22 | 22 | ||
23 | #ifndef __TEGRA_ASOC_UTILS_H__ | 23 | #ifndef __TEGRA_ASOC_UTILS_H__ |
24 | #define __TEGRA_ASOC_UTILS_H_ | 24 | #define __TEGRA_ASOC_UTILS_H__ |
25 | 25 | ||
26 | struct clk; | 26 | struct clk; |
27 | struct device; | 27 | struct device; |
diff --git a/tools/usb/usbip/libsrc/usbip_common.h b/tools/usb/usbip/libsrc/usbip_common.h index 5a0e95edf4df..15fe792e1e96 100644 --- a/tools/usb/usbip/libsrc/usbip_common.h +++ b/tools/usb/usbip/libsrc/usbip_common.h | |||
@@ -15,7 +15,7 @@ | |||
15 | #include <syslog.h> | 15 | #include <syslog.h> |
16 | #include <unistd.h> | 16 | #include <unistd.h> |
17 | #include <linux/usb/ch9.h> | 17 | #include <linux/usb/ch9.h> |
18 | #include "../../uapi/usbip.h" | 18 | #include <linux/usbip.h> |
19 | 19 | ||
20 | #ifndef USBIDS_FILE | 20 | #ifndef USBIDS_FILE |
21 | #define USBIDS_FILE "/usr/share/hwdata/usb.ids" | 21 | #define USBIDS_FILE "/usr/share/hwdata/usb.ids" |