author		David S. Miller <davem@davemloft.net>	2017-06-06 22:20:08 -0400
committer	David S. Miller <davem@davemloft.net>	2017-06-06 22:20:08 -0400
commit		216fe8f021e33c36e3b27c49c9f1951f6b037d7f (patch)
tree		a43daec41b4d3955e7a4f8d0ed0654a7c80527ec
parent		9747e2313838ee8f5d8073fd6aa7289255c3c51b (diff)
parent		b29794ec95c6856b316c2295904208bf11ffddd9 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Just some simple overlapping changes in marvell PHY driver
and the DSA core code.
Signed-off-by: David S. Miller <davem@davemloft.net>
436 files changed, 3741 insertions, 2070 deletions
diff --git a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
index 6db22103e2dd..025cf8c9324a 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/edt-ft5x06.txt
@@ -36,7 +36,7 @@ Optional properties:
               control gpios

 - threshold: allows setting the "click"-threshold in the range
-             from 20 to 80.
+             from 0 to 80.

 - gain:      allows setting the sensitivity in the range from 0 to
              31. Note that lower values indicate higher
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index 7ef9dbb08957..1d4d0f49c9d0 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -26,6 +26,10 @@ Optional properties:
 - interrupt-controller : Indicates the switch is itself an interrupt
                          controller. This is used for the PHY interrupts.
 #interrupt-cells = <2> : Controller uses two cells, number and flag
+- eeprom-length        : Set to the length of an EEPROM connected to the
+                         switch. Must be set if the switch can not detect
+                         the presence and/or size of a connected EEPROM,
+                         otherwise optional.
 - mdio                 : Container of PHY and devices on the switches MDIO
                          bus.
 - mdio?                : Container of PHYs and devices on the external MDIO
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
index 71a3c134af1b..f01d154090da 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-bindings.txt
@@ -247,7 +247,6 @@ bias-bus-hold		- latch weakly
 bias-pull-up		- pull up the pin
 bias-pull-down		- pull down the pin
 bias-pull-pin-default	- use pin-default pull state
-bi-directional		- pin supports simultaneous input/output operations
 drive-push-pull		- drive actively high and low
 drive-open-drain	- drive with open drain
 drive-open-source	- drive with open source
@@ -260,7 +259,6 @@ input-debounce		- debounce mode with debound time X
 power-source		- select between different power supplies
 low-power-enable	- enable low power mode
 low-power-disable	- disable low power mode
-output-enable		- enable output on pin regardless of output value
 output-low		- set the pin to output mode with low level
 output-high		- set the pin to output mode with high level
 slew-rate		- set the slew rate
diff --git a/Documentation/input/devices/edt-ft5x06.rst b/Documentation/input/devices/edt-ft5x06.rst
index 2032f0b7a8fa..1ccc94b192b7 100644
--- a/Documentation/input/devices/edt-ft5x06.rst
+++ b/Documentation/input/devices/edt-ft5x06.rst
@@ -15,7 +15,7 @@ It has been tested with the following devices:
 The driver allows configuration of the touch screen via a set of sysfs files:

 /sys/class/input/eventX/device/device/threshold:
-    allows setting the "click"-threshold in the range from 20 to 80.
+    allows setting the "click"-threshold in the range from 0 to 80.

 /sys/class/input/eventX/device/device/gain:
     allows setting the sensitivity in the range from 0 to 31. Note that
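For illustration, the widened threshold range can be exercised through the sysfs files the document describes; a minimal sketch, assuming a hypothetical event node event0 (the real node varies per system):

    cat /sys/class/input/event0/device/device/threshold          # read the current "click"-threshold
    echo 40 > /sys/class/input/event0/device/device/threshold    # any value from 0 to 80 is now accepted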
diff --git a/Documentation/networking/dpaa.txt b/Documentation/networking/dpaa.txt
new file mode 100644
index 000000000000..76e016d4d344
--- /dev/null
+++ b/Documentation/networking/dpaa.txt
@@ -0,0 +1,194 @@
+The QorIQ DPAA Ethernet Driver
+==============================
+
+Authors:
+Madalin Bucur <madalin.bucur@nxp.com>
+Camelia Groza <camelia.groza@nxp.com>
+
+Contents
+========
+
+	- DPAA Ethernet Overview
+	- DPAA Ethernet Supported SoCs
+	- Configuring DPAA Ethernet in your kernel
+	- DPAA Ethernet Frame Processing
+	- DPAA Ethernet Features
+	- Debugging
+
+DPAA Ethernet Overview
+======================
+
+DPAA stands for Data Path Acceleration Architecture and it is a
+set of networking acceleration IPs that are available on several
+generations of SoCs, both on PowerPC and ARM64.
+
+The Freescale DPAA architecture consists of a series of hardware blocks
+that support Ethernet connectivity. The Ethernet driver depends upon the
+following drivers in the Linux kernel:
+
+ - Peripheral Access Memory Unit (PAMU) (* needed only for PPC platforms)
+    drivers/iommu/fsl_*
+ - Frame Manager (FMan)
+    drivers/net/ethernet/freescale/fman
+ - Queue Manager (QMan), Buffer Manager (BMan)
+    drivers/soc/fsl/qbman
+
+A simplified view of the dpaa_eth interfaces mapped to FMan MACs:
+
+  dpaa_eth       /eth0\     ...       /ethN\
+  driver        |      |             |      |
+  -------------   ----   -----------   ----   -------------
+       -Ports  / Tx  Rx \    ...    / Tx  Rx \
+  FMan        |          |         |          |
+       -MACs  |   MAC0   |         |   MACN   |
+             /   dtsec0   \  ...  /   dtsecN   \ (or tgec)
+            /              \     /              \(or memac)
+  ---------  --------------  ---  --------------  ---------
+      FMan, FMan Port, FMan SP, FMan MURAM drivers
+  ---------------------------------------------------------
+      FMan HW blocks: MURAM, MACs, Ports, SP
+  ---------------------------------------------------------
+
+The dpaa_eth relation to the QMan, BMan and FMan:
+              ________________________________
+  dpaa_eth   /            eth0                \
+  driver    /                                  \
+  ---------   -^-   -^-   -^-   ---    ---------
+  QMan driver / \   / \   / \   \ /    | BMan    |
+             |Rx | |Rx | |Tx | |Tx |   | driver  |
+  ---------  |Dfl| |Err| |Cnf| |FQs|   |         |
+  QMan HW    |FQ | |FQ | |FQs| |   |   |         |
+             /   \ /   \ /   \  \ /    |         |
+  ---------   ---   ---   ---   -v-    ---------
+            |        FMan QMI         |
+            | FMan HW       FMan BMI  | BMan HW
+              -----------------------   --------
+
+where the acronyms used above (and in the code) are:
+DPAA = Data Path Acceleration Architecture
+FMan = DPAA Frame Manager
+QMan = DPAA Queue Manager
+BMan = DPAA Buffers Manager
+QMI = QMan interface in FMan
+BMI = BMan interface in FMan
+FMan SP = FMan Storage Profiles
+MURAM = Multi-user RAM in FMan
+FQ = QMan Frame Queue
+Rx Dfl FQ = default reception FQ
+Rx Err FQ = Rx error frames FQ
+Tx Cnf FQ = Tx confirmation FQs
+Tx FQs = transmission frame queues
+dtsec = datapath three speed Ethernet controller (10/100/1000 Mbps)
+tgec = ten gigabit Ethernet controller (10 Gbps)
+memac = multirate Ethernet MAC (10/100/1000/10000)
+
+DPAA Ethernet Supported SoCs
+============================
+
+The DPAA drivers enable the Ethernet controllers present on the following SoCs:
+
+# PPC
+P1023
+P2041
+P3041
+P4080
+P5020
+P5040
+T1023
+T1024
+T1040
+T1042
+T2080
+T4240
+B4860
+
+# ARM
+LS1043A
+LS1046A
+
+Configuring DPAA Ethernet in your kernel
+========================================
+
+To enable the DPAA Ethernet driver, the following Kconfig options are required:
+
+# common for arch/arm64 and arch/powerpc platforms
+CONFIG_FSL_DPAA=y
+CONFIG_FSL_FMAN=y
+CONFIG_FSL_DPAA_ETH=y
+CONFIG_FSL_XGMAC_MDIO=y
+
+# for arch/powerpc only
+CONFIG_FSL_PAMU=y
+
+# common options needed for the PHYs used on the RDBs
+CONFIG_VITESSE_PHY=y
+CONFIG_REALTEK_PHY=y
+CONFIG_AQUANTIA_PHY=y
+
+DPAA Ethernet Frame Processing
+==============================
+
+On Rx, buffers for the incoming frames are retrieved from one of the three
+existing buffers pools. The driver initializes and seeds these, each with
+buffers of different sizes: 1KB, 2KB and 4KB.
+
+On Tx, all transmitted frames are returned to the driver through Tx
+confirmation frame queues. The driver is then responsible for freeing the
+buffers. In order to do this properly, a backpointer is added to the buffer
+before transmission that points to the skb. When the buffer returns to the
+driver on a confirmation FQ, the skb can be correctly consumed.
+
+DPAA Ethernet Features
+======================
+
+Currently the DPAA Ethernet driver enables the basic features required for
+a Linux Ethernet driver. The support for advanced features will be added
+gradually.
+
+The driver has Rx and Tx checksum offloading for UDP and TCP. Currently the Rx
+checksum offload feature is enabled by default and cannot be controlled through
+ethtool.
+
+The driver has support for multiple prioritized Tx traffic classes. Priorities
+range from 0 (lowest) to 3 (highest). These are mapped to HW workqueues with
+strict priority levels. Each traffic class contains NR_CPU TX queues. By
+default, only one traffic class is enabled and the lowest priority Tx queues
+are used. Higher priority traffic classes can be enabled with the mqprio
+qdisc. For example, all four traffic classes are enabled on an interface with
+the following command. Furthermore, skb priority levels are mapped to traffic
+classes as follows:
+
+	* priorities 0 to 3 - traffic class 0 (low priority)
+	* priorities 4 to 7 - traffic class 1 (medium-low priority)
+	* priorities 8 to 11 - traffic class 2 (medium-high priority)
+	* priorities 12 to 15 - traffic class 3 (high priority)
+
+tc qdisc add dev <int> root handle 1: \
+	 mqprio num_tc 4 map 0 0 0 0 1 1 1 1 2 2 2 2 3 3 3 3 hw 1
+
+Debugging
+=========
+
+The following statistics are exported for each interface through ethtool:
+
+	- interrupt count per CPU
+	- Rx packets count per CPU
+	- Tx packets count per CPU
+	- Tx confirmed packets count per CPU
+	- Tx S/G frames count per CPU
+	- Tx error count per CPU
+	- Rx error count per CPU
+	- Rx error count per type
+	- congestion related statistics:
+		- congestion status
+		- time spent in congestion
+		- number of time the device entered congestion
+		- dropped packets count per cause
+
+The driver also exports the following information in sysfs:
+
+	- the FQ IDs for each FQ type
+	/sys/devices/platform/dpaa-ethernet.0/net/<int>/fqids
+
+	- the IDs of the buffer pools in use
+	/sys/devices/platform/dpaa-ethernet.0/net/<int>/bpids
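For illustration, the counters and sysfs entries the new document describes can be inspected with standard tools; a minimal sketch, where the interface name eth0 and the platform device path are assumptions that depend on the board:

    ethtool -S eth0                                            # per-CPU packet, error and congestion statistics
    cat /sys/devices/platform/dpaa-ethernet.0/net/eth0/fqids   # frame queue IDs used by this interface
    cat /sys/devices/platform/dpaa-ethernet.0/net/eth0/bpids   # buffer pool IDs in use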
diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt
index bdc4c0db51e1..9c7139d57e57 100644
--- a/Documentation/networking/tcp.txt
+++ b/Documentation/networking/tcp.txt
@@ -1,7 +1,7 @@
 TCP protocol
 ============

-Last updated: 9 February 2008
+Last updated: 3 June 2017

 Contents
 ========
@@ -29,18 +29,19 @@ As of 2.6.13, Linux supports pluggable congestion control algorithms.
 A congestion control mechanism can be registered through functions in
 tcp_cong.c. The functions used by the congestion control mechanism are
 registered via passing a tcp_congestion_ops struct to
-tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid must be valid.
+tcp_register_congestion_control. As a minimum, the congestion control
+mechanism must provide a valid name and must implement either ssthresh,
+cong_avoid and undo_cwnd hooks or the "omnipotent" cong_control hook.

 Private data for a congestion control mechanism is stored in tp->ca_priv.
 tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
 is important to check the size of your private data will fit this space, or
-alternatively space could be allocated elsewhere and a pointer to it could
+alternatively, space could be allocated elsewhere and a pointer to it could
 be stored here.

 There are three kinds of congestion control algorithms currently: The
 simplest ones are derived from TCP reno (highspeed, scalable) and just
-provide an alternative the congestion window calculation. More complex
+provide an alternative congestion window calculation. More complex
 ones like BIC try to look at other events to provide better
 heuristics. There are also round trip time based algorithms like
 Vegas and Westwood+.
@@ -49,21 +50,15 @@ Good TCP congestion control is a complex problem because the algorithm
 needs to maintain fairness and performance. Please review current
 research and RFC's before developing new modules.

-The method that is used to determine which congestion control mechanism is
-determined by the setting of the sysctl net.ipv4.tcp_congestion_control.
-The default congestion control will be the last one registered (LIFO);
-so if you built everything as modules, the default will be reno. If you
-build with the defaults from Kconfig, then CUBIC will be builtin (not a
-module) and it will end up the default.
+The default congestion control mechanism is chosen based on the
+DEFAULT_TCP_CONG Kconfig parameter. If you really want a particular default
+value then you can set it using sysctl net.ipv4.tcp_congestion_control. The
+module will be autoloaded if needed and you will get the expected protocol. If
+you ask for an unknown congestion method, then the sysctl attempt will fail.

-If you really want a particular default value then you will need
-to set it with the sysctl. If you use a sysctl, the module will be autoloaded
-if needed and you will get the expected protocol. If you ask for an
-unknown congestion method, then the sysctl attempt will fail.
-
-If you remove a tcp congestion control module, then you will get the next
+If you remove a TCP congestion control module, then you will get the next
 available one. Since reno cannot be built as a module, and cannot be
-deleted, it will always be available.
+removed, it will always be available.

 How the new TCP output machine [nyi] works.
 ===========================================
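For illustration, the sysctl behaviour described by the updated text can be exercised from a shell; a minimal sketch, with cubic used only as an example algorithm name:

    sysctl net.ipv4.tcp_congestion_control            # show the current default congestion control
    sysctl -w net.ipv4.tcp_congestion_control=cubic   # request an algorithm; its module is autoloaded if needed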
diff --git a/MAINTAINERS b/MAINTAINERS
index 3cf8b0a22019..8b8249b576bf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7144,7 +7144,7 @@ S:	Maintained
 F:	drivers/media/platform/rcar_jpu.c

 JSM Neo PCI based serial card
-M:	Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
+M:	Guilherme G. Piccoli <gpiccoli@linux.vnet.ibm.com>
 L:	linux-serial@vger.kernel.org
 S:	Maintained
 F:	drivers/tty/serial/jsm/
@@ -8543,7 +8543,7 @@ S:	Odd Fixes
 F:	drivers/media/radio/radio-miropcm20*

 MELLANOX MLX4 core VPI driver
-M:	Yishai Hadas <yishaih@mellanox.com>
+M:	Tariq Toukan <tariqt@mellanox.com>
 L:	netdev@vger.kernel.org
 L:	linux-rdma@vger.kernel.org
 W:	http://www.mellanox.com
@@ -8551,7 +8551,6 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx4/
 F:	include/linux/mlx4/
-F:	include/uapi/rdma/mlx4-abi.h

 MELLANOX MLX4 IB driver
 M:	Yishai Hadas <yishaih@mellanox.com>
@@ -8561,6 +8560,7 @@ Q:	http://patchwork.kernel.org/project/linux-rdma/list/
 S:	Supported
 F:	drivers/infiniband/hw/mlx4/
 F:	include/linux/mlx4/
+F:	include/uapi/rdma/mlx4-abi.h

 MELLANOX MLX5 core VPI driver
 M:	Saeed Mahameed <saeedm@mellanox.com>
@@ -8573,7 +8573,6 @@ Q:	http://patchwork.ozlabs.org/project/netdev/list/
 S:	Supported
 F:	drivers/net/ethernet/mellanox/mlx5/core/
 F:	include/linux/mlx5/
-F:	include/uapi/rdma/mlx5-abi.h

 MELLANOX MLX5 IB driver
 M:	Matan Barak <matanb@mellanox.com>
@@ -8584,6 +8583,7 @@ Q:	http://patchwork.kernel.org/project/linux-rdma/list/
 S:	Supported
 F:	drivers/infiniband/hw/mlx5/
 F:	include/linux/mlx5/
+F:	include/uapi/rdma/mlx5-abi.h

 MELEXIS MLX90614 DRIVER
 M:	Crt Mori <cmo@melexis.com>
@@ -10485,7 +10485,7 @@ S:	Orphan

 PXA RTC DRIVER
 M:	Robert Jarzmik <robert.jarzmik@free.fr>
-L:	rtc-linux@googlegroups.com
+L:	linux-rtc@vger.kernel.org
 S:	Maintained

 QAT DRIVER
@@ -10792,7 +10792,7 @@ X:	kernel/torture.c
 REAL TIME CLOCK (RTC) SUBSYSTEM
 M:	Alessandro Zummo <a.zummo@towertech.it>
 M:	Alexandre Belloni <alexandre.belloni@free-electrons.com>
-L:	rtc-linux@googlegroups.com
+L:	linux-rtc@vger.kernel.org
 Q:	http://patchwork.ozlabs.org/project/rtc-linux/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/abelloni/linux.git
 S:	Maintained
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Fearless Coyote

 # *DOCUMENTATION*
diff --git a/arch/arm/boot/compressed/efi-header.S b/arch/arm/boot/compressed/efi-header.S
index 9d5dc4fda3c1..3f7d1b74c5e0 100644
--- a/arch/arm/boot/compressed/efi-header.S
+++ b/arch/arm/boot/compressed/efi-header.S
@@ -17,14 +17,12 @@
 		@ there.
 		.inst	'M' | ('Z' << 8) | (0x1310 << 16)	@ tstne r0, #0x4d000
 #else
-		mov	r0, r0
+		W(mov)	r0, r0
 #endif
 	.endm

 	.macro	__EFI_HEADER
 #ifdef CONFIG_EFI_STUB
-		b	__efi_start
-
 		.set	start_offset, __efi_start - start
 		.org	start + 0x3c
 		@
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 7c711ba61417..8a756870c238 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -130,19 +130,22 @@ start:
 		.rept	7
 		__nop
 		.endr
- ARM(		mov	r0, r0		)
- ARM(		b	1f		)
- THUMB(		badr	r12, 1f		)
- THUMB(		bx	r12		)
+#ifndef CONFIG_THUMB2_KERNEL
+		mov	r0, r0
+#else
+ AR_CLASS(	sub	pc, pc, #3	)	@ A/R: switch to Thumb2 mode
+ M_CLASS(	nop.w			)	@ M: already in Thumb2 mode
+		.thumb
+#endif
+		W(b)	1f

 		.word	_magic_sig	@ Magic numbers to help the loader
 		.word	_magic_start	@ absolute load/run zImage address
 		.word	_magic_end	@ zImage end address
 		.word	0x04030201	@ endianness flag

- THUMB(		.thumb			)
-1:		__EFI_HEADER
-
+		__EFI_HEADER
+1:
  ARM_BE8(	setend	be		)	@ go BE8 if compiled for BE8
  AR_CLASS(	mrs	r9, cpsr	)
 #ifdef CONFIG_ARM_VIRT_EXT
diff --git a/arch/arm/boot/dts/imx6ul-14x14-evk.dts b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
index f18e1f1d0ce2..d2be8aa3370b 100644
--- a/arch/arm/boot/dts/imx6ul-14x14-evk.dts
+++ b/arch/arm/boot/dts/imx6ul-14x14-evk.dts
@@ -120,10 +120,16 @@

 		ethphy0: ethernet-phy@2 {
 			reg = <2>;
+			micrel,led-mode = <1>;
+			clocks = <&clks IMX6UL_CLK_ENET_REF>;
+			clock-names = "rmii-ref";
 		};

 		ethphy1: ethernet-phy@1 {
 			reg = <1>;
+			micrel,led-mode = <1>;
+			clocks = <&clks IMX6UL_CLK_ENET2_REF>;
+			clock-names = "rmii-ref";
 		};
 	};
 };
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index cf062472e07b..2b913f17d50f 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -235,7 +235,7 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
 	return ret;
 }

-typedef void (*phys_reset_t)(unsigned long);
+typedef typeof(cpu_reset) phys_reset_t;

 void mcpm_cpu_power_down(void)
 {
@@ -300,7 +300,7 @@ void mcpm_cpu_power_down(void)
 	 * on the CPU.
 	 */
 	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-	phys_reset(__pa_symbol(mcpm_entry_point));
+	phys_reset(__pa_symbol(mcpm_entry_point), false);

 	/* should never get here */
 	BUG();
@@ -389,7 +389,7 @@ static int __init nocache_trampoline(unsigned long _arg)
 	__mcpm_cpu_down(cpu, cluster);

 	phys_reset = (phys_reset_t)(unsigned long)__pa_symbol(cpu_reset);
-	phys_reset(__pa_symbol(mcpm_entry_point));
+	phys_reset(__pa_symbol(mcpm_entry_point), false);
 	BUG();
 }

diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h
index 302240c19a5a..a0d726a47c8a 100644
--- a/arch/arm/include/asm/pgtable-nommu.h
+++ b/arch/arm/include/asm/pgtable-nommu.h
@@ -66,6 +66,7 @@ typedef pte_t *pte_addr_t;
 #define pgprot_noncached(prot)	(prot)
 #define pgprot_writecombine(prot)	(prot)
 #define pgprot_dmacoherent(prot)	(prot)
+#define pgprot_device(prot)	(prot)


 /*
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index 0e99978da3f0..59cca1d6ec54 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -23,9 +23,9 @@
 #define ACPI_MADT_GICC_LENGTH	\
 	(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)

 #define BAD_MADT_GICC_ENTRY(entry, end)					\
-	(!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) ||	\
-	 (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+	(!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH ||	\
+	 (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))

 /* Basic configuration for ACPI */
 #ifdef	CONFIG_ACPI
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index 4f0e3ebfea4b..c7e3e6387a49 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -191,8 +191,10 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 		return NULL;

 	root_ops = kzalloc_node(sizeof(*root_ops), GFP_KERNEL, node);
-	if (!root_ops)
+	if (!root_ops) {
+		kfree(ri);
 		return NULL;
+	}

 	ri->cfg = pci_acpi_setup_ecam_mapping(root);
 	if (!ri->cfg) {
diff --git a/arch/frv/include/asm/timex.h b/arch/frv/include/asm/timex.h
index a89bddefdacf..139093fab326 100644
--- a/arch/frv/include/asm/timex.h
+++ b/arch/frv/include/asm/timex.h
@@ -16,5 +16,11 @@ static inline cycles_t get_cycles(void)
 #define vxtime_lock()		do {} while (0)
 #define vxtime_unlock()		do {} while (0)

+/* This attribute is used in include/linux/jiffies.h alongside with
+ * __cacheline_aligned_in_smp. It is assumed that __cacheline_aligned_in_smp
+ * for frv does not contain another section specification.
+ */
+#define __jiffy_arch_data	__attribute__((__section__(".data")))
+
 #endif

diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 918d4c73e951..5351e1f3950d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -120,7 +120,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
 	struct thread_info *ti = task_thread_info(p);
 	struct pt_regs *childregs, *regs = current_pt_regs();
 	unsigned long childksp;
-	p->set_child_tid = p->clear_child_tid = NULL;

 	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;

diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c
index f8da545854f9..106859ae27ff 100644
--- a/arch/openrisc/kernel/process.c
+++ b/arch/openrisc/kernel/process.c
@@ -167,8 +167,6 @@ copy_thread(unsigned long clone_flags, unsigned long usp,

 	top_of_kernel_stack = sp;

-	p->set_child_tid = p->clear_child_tid = NULL;
-
 	/* Locate userspace context on stack... */
 	sp -= STACK_FRAME_OVERHEAD;	/* redzone */
 	sp -= sizeof(struct pt_regs);
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 3e7ce86d5c13..4d877144f377 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -46,6 +46,8 @@
 #define PPC_FEATURE2_HTM_NOSC		0x01000000
 #define PPC_FEATURE2_ARCH_3_00		0x00800000 /* ISA 3.00 */
 #define PPC_FEATURE2_HAS_IEEE128	0x00400000 /* VSX IEEE Binary Float 128-bit */
+#define PPC_FEATURE2_DARN		0x00200000 /* darn random number insn */
+#define PPC_FEATURE2_SCV		0x00100000 /* scv syscall */

 /*
  * IMPORTANT!
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9b3e88b1a9c8..6f849832a669 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -124,7 +124,8 @@ extern void __restore_cpu_e6500(void);
 #define COMMON_USER_POWER9	COMMON_USER_POWER8
 #define COMMON_USER2_POWER9	(COMMON_USER2_POWER8 | \
 				 PPC_FEATURE2_ARCH_3_00 | \
-				 PPC_FEATURE2_HAS_IEEE128)
+				 PPC_FEATURE2_HAS_IEEE128 | \
+				 PPC_FEATURE2_DARN )

 #ifdef CONFIG_PPC_BOOK3E_64
 #define COMMON_USER_BOOKE	(COMMON_USER_PPC64 | PPC_FEATURE_BOOKE)
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 40c4887c27b6..f83056297441 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -161,7 +161,9 @@ static struct ibm_pa_feature {
 	{ .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
 	{ .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
 	{ .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
+#ifdef CONFIG_PPC_RADIX_MMU
 	{ .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX },
+#endif
 	{ .pabyte = 1,  .pabit = 1, .invert = 1, .cpu_features = CPU_FTR_NODSISRALIGN },
 	{ .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
 	  .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 96c2b8a40630..0c45cdbac4cf 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -197,7 +197,9 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	    (REGION_ID(ea) != USER_REGION_ID)) {

 		spin_unlock(&spu->register_lock);
-		ret = hash_page(ea, _PAGE_PRESENT | _PAGE_READ, 0x300, dsisr);
+		ret = hash_page(ea,
+				_PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
+				0x300, dsisr);
 		spin_lock(&spu->register_lock);

 		if (!ret) {
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 067defeea691..78fa9395b8c5 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -714,7 +714,7 @@ static void pnv_npu2_release_context(struct kref *kref)
 void pnv_npu2_destroy_context(struct npu_context *npu_context,
 			struct pci_dev *gpdev)
 {
-	struct pnv_phb *nphb, *phb;
+	struct pnv_phb *nphb;
 	struct npu *npu;
 	struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
 	struct device_node *nvlink_dn;
@@ -728,13 +728,12 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,

 	nphb = pci_bus_to_host(npdev->bus)->private_data;
 	npu = &nphb->npu;
-	phb = pci_bus_to_host(gpdev->bus)->private_data;
 	nvlink_dn = of_parse_phandle(npdev->dev.of_node, "ibm,nvlink", 0);
 	if (WARN_ON(of_property_read_u32(nvlink_dn, "ibm,npu-link-index",
 							&nvlink_index)))
 		return;
 	npu_context->npdev[npu->index][nvlink_index] = NULL;
-	opal_npu_destroy_context(phb->opal_id, npu_context->mm->context.id,
+	opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
 				PCI_DEVID(gpdev->bus->number, gpdev->devfn));
 	kref_put(&npu_context->kref, pnv_npu2_release_context);
 }
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 58243b0d21c0..b558c9e29de3 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -192,9 +192,9 @@ config NR_CPUS
 	int "Maximum number of CPUs"
 	depends on SMP
 	range 2 32 if SPARC32
-	range 2 1024 if SPARC64
+	range 2 4096 if SPARC64
 	default 32 if SPARC32
-	default 64 if SPARC64
+	default 4096 if SPARC64

 source kernel/Kconfig.hz

@@ -295,9 +295,13 @@ config NUMA
 	depends on SPARC64 && SMP

 config NODES_SHIFT
-	int
-	default "4"
+	int "Maximum NUMA Nodes (as a power of 2)"
+	range 4 5 if SPARC64
+	default "5"
 	depends on NEED_MULTIPLE_NODES
+	help
+	  Specify the maximum number of NUMA Nodes available on the target
+	  system.  Increases memory reserved to accommodate various tables.

 # Some NUMA nodes have memory ranges that span
 # other nodes.  Even though a pfn is valid and
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0dbc38af..83b36a5371ff 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
 #define CTX_NR_MASK		TAG_CONTEXT_BITS
 #define CTX_HW_MASK		(CTX_NR_MASK | CTX_PGSZ_MASK)

-#define CTX_FIRST_VERSION	((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION	BIT(CTX_VERSION_SHIFT)
 #define CTX_VALID(__ctx)	\
 	 (!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
 #define CTX_HWBITS(__ctx)	((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 22fede6eba11..2cddcda4f85f 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -19,13 +19,8 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];

+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
 int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 void destroy_context(struct mm_struct *mm);

@@ -76,8 +71,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
-	int cpu;
+	int cpu = smp_processor_id();

+	per_cpu(per_cpu_secondary_mm, cpu) = mm;
 	if (unlikely(mm == &init_mm))
 		return;

@@ -123,7 +119,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * for the first time, we must flush that context out of the
 	 * local TLB.
 	 */
-	cpu = smp_processor_id();
 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -133,26 +128,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 }

 #define deactivate_mm(tsk,mm)	do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
-	unsigned long flags;
-	int cpu;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-	if (!CTX_VALID(mm->context))
-		get_new_mmu_context(mm);
-	cpu = smp_processor_id();
-	if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
-		cpumask_set_cpu(cpu, mm_cpumask(mm));
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(mm);
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
 #endif /* !(__ASSEMBLY__) */

 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 266937030546..522b43db2ed3 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
 #define PIL_SMP_CALL_FUNC	1
 #define PIL_SMP_RECEIVE_SIGNAL	2
 #define PIL_SMP_CAPTURE		3
-#define PIL_SMP_CTX_NEW_VERSION	4
 #define PIL_DEVICE_IRQ		5
 #define PIL_SMP_CALL_FUNC_SNGL	6
 #define PIL_DEFERRED_PCR_WORK	7
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6cdbbbb..9dca7a892978 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@ struct vio_dev {
 	int			compat_len;

 	u64			dev_no;
+	u64			id;

 	unsigned long		channel_id;

diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index b542cc7c8d94..f87265afb175 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -909,7 +909,7 @@ static int register_services(struct ds_info *dp)
 		pbuf.req.handle = cp->handle;
 		pbuf.req.major = 1;
 		pbuf.req.minor = 0;
-		strcpy(pbuf.req.svc_id, cp->service_id);
+		strcpy(pbuf.id_buf, cp->service_id);

 		err = __ds_send(lp, &pbuf, msg_len);
 		if (err > 0)
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index 4d0248aa0928..99dd133a029f 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@ static void __init init_cpu_send_mondo_info(struct trap_per_cpu *tb)
 {
 #ifdef CONFIG_SMP
 	unsigned long page;
+	void *mondo, *p;

-	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+	BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+	/* Make sure mondo block is 64byte aligned */
+	p = kzalloc(127, GFP_KERNEL);
+	if (!p) {
+		prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+		prom_halt();
+	}
+	mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+	tb->cpu_mondo_block_pa = __pa(mondo);

 	page = get_zeroed_page(GFP_KERNEL);
 	if (!page) {
-		prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+		prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
 		prom_halt();
 	}

-	tb->cpu_mondo_block_pa = __pa(page);
-	tb->cpu_list_pa = __pa(page + 64);
+	tb->cpu_list_pa = __pa(page);
 #endif
 }

diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index c9804551262c..6ae1e77be0bf 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr
 /* smp_64.c */
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
 void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
 void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);

diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b3bc0ac757cc..fdf31040a7dc 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -964,37 +964,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	preempt_enable();
 }

-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
-	struct mm_struct *mm;
-	unsigned long flags;
-
-	clear_softint(1 << irq);
-
-	/* See if we need to allocate a new TLB context because
-	 * the version of the one we are using is now out of date.
-	 */
-	mm = current->active_mm;
-	if (unlikely(!mm || (mm == &init_mm)))
-		return;
-
-	spin_lock_irqsave(&mm->context.lock, flags);
-
-	if (unlikely(!CTX_VALID(mm->context)))
-		get_new_mmu_context(mm);
-
-	spin_unlock_irqrestore(&mm->context.lock, flags);
-
-	load_secondary_context(mm);
-	__flush_tlb_mm(CTX_HWBITS(mm->context),
-		       SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
-	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
 #ifdef CONFIG_KGDB
 void kgdb_roundup_cpus(unsigned long flags)
 {
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 10689cfd0ad4..07c0df924960 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -455,13 +455,16 @@ __tsb_context_switch:
 	.type	copy_tsb,#function
 copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 			 * %o2=new_tsb_base, %o3=new_tsb_size
+			 * %o4=page_size_shift
 			 */
 	sethi		%uhi(TSB_PASS_BITS), %g7
 	srlx		%o3, 4, %o3
-	add		%o0, %o1, %g1	/* end of old tsb */
+	add		%o0, %o1, %o1	/* end of old tsb */
 	sllx		%g7, 32, %g7
 	sub		%o3, 1, %o3	/* %o3 == new tsb hash mask */

+	mov		%o4, %g1	/* page_size_shift */
+
 661:	prefetcha	[%o0] ASI_N, #one_read
 	.section	.tsb_phys_patch, "ax"
 	.word		661b
@@ -486,9 +489,9 @@ copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 	/* This can definitely be computed faster... */
 	srlx		%o0, 4, %o5	/* Build index */
 	and		%o5, 511, %o5	/* Mask index */
-	sllx		%o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+	sllx		%o5, %g1, %o5	/* Put into vaddr position */
 	or		%o4, %o5, %o4	/* Full VADDR. */
-	srlx		%o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+	srlx		%o4, %g1, %o4	/* Shift down to create index */
 	and		%o4, %o3, %o4	/* Mask with new_tsb_nents-1 */
 	sllx		%o4, 4, %o4	/* Shift back up into tsb ent offset */
 	TSB_STORE(%o2 + %o4, %g2)	/* Store TAG */
@@ -496,7 +499,7 @@ copy_tsb:		/* %o0=old_tsb_base, %o1=old_tsb_size
 	TSB_STORE(%o2 + %o4, %g3)	/* Store TTE */

 80:	add		%o0, 16, %o0
-	cmp		%o0, %g1
+	cmp		%o0, %o1
 	bne,pt		%xcc, 90b
 	 nop

diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index 7bd8f6556352..efe93ab4a9c0 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@ tl0_resv03e:	BTRAP(0x3e) BTRAP(0x3f) BTRAP(0x40)
 tl0_irq1:	TRAP_IRQ(smp_call_function_client, 1)
 tl0_irq2:	TRAP_IRQ(smp_receive_signal_client, 2)
 tl0_irq3:	TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4:	TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4:	BTRAP(0x44)
 #else
 tl0_irq1:	BTRAP(0x41)
 tl0_irq2:	BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index f6bb857254fc..075d38980dee 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c | |||
@@ -302,13 +302,16 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp, | |||
302 | if (!id) { | 302 | if (!id) { |
303 | dev_set_name(&vdev->dev, "%s", bus_id_name); | 303 | dev_set_name(&vdev->dev, "%s", bus_id_name); |
304 | vdev->dev_no = ~(u64)0; | 304 | vdev->dev_no = ~(u64)0; |
305 | vdev->id = ~(u64)0; | ||
305 | } else if (!cfg_handle) { | 306 | } else if (!cfg_handle) { |
306 | dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); | 307 | dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id); |
307 | vdev->dev_no = *id; | 308 | vdev->dev_no = *id; |
309 | vdev->id = ~(u64)0; | ||
308 | } else { | 310 | } else { |
309 | dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, | 311 | dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name, |
310 | *cfg_handle, *id); | 312 | *cfg_handle, *id); |
311 | vdev->dev_no = *cfg_handle; | 313 | vdev->dev_no = *cfg_handle; |
314 | vdev->id = *id; | ||
312 | } | 315 | } |
313 | 316 | ||
314 | vdev->dev.parent = parent; | 317 | vdev->dev.parent = parent; |
@@ -351,27 +354,84 @@ static void vio_add(struct mdesc_handle *hp, u64 node) | |||
351 | (void) vio_create_one(hp, node, &root_vdev->dev); | 354 | (void) vio_create_one(hp, node, &root_vdev->dev); |
352 | } | 355 | } |
353 | 356 | ||
357 | struct vio_md_node_query { | ||
358 | const char *type; | ||
359 | u64 dev_no; | ||
360 | u64 id; | ||
361 | }; | ||
362 | |||
354 | static int vio_md_node_match(struct device *dev, void *arg) | 363 | static int vio_md_node_match(struct device *dev, void *arg) |
355 | { | 364 | { |
365 | struct vio_md_node_query *query = (struct vio_md_node_query *) arg; | ||
356 | struct vio_dev *vdev = to_vio_dev(dev); | 366 | struct vio_dev *vdev = to_vio_dev(dev); |
357 | 367 | ||
358 | if (vdev->mp == (u64) arg) | 368 | if (vdev->dev_no != query->dev_no) |
359 | return 1; | 369 | return 0; |
370 | if (vdev->id != query->id) | ||
371 | return 0; | ||
372 | if (strcmp(vdev->type, query->type)) | ||
373 | return 0; | ||
360 | 374 | ||
361 | return 0; | 375 | return 1; |
362 | } | 376 | } |
363 | 377 | ||
364 | static void vio_remove(struct mdesc_handle *hp, u64 node) | 378 | static void vio_remove(struct mdesc_handle *hp, u64 node) |
365 | { | 379 | { |
380 | const char *type; | ||
381 | const u64 *id, *cfg_handle; | ||
382 | u64 a; | ||
383 | struct vio_md_node_query query; | ||
366 | struct device *dev; | 384 | struct device *dev; |
367 | 385 | ||
368 | dev = device_find_child(&root_vdev->dev, (void *) node, | 386 | type = mdesc_get_property(hp, node, "device-type", NULL); |
387 | if (!type) { | ||
388 | type = mdesc_get_property(hp, node, "name", NULL); | ||
389 | if (!type) | ||
390 | type = mdesc_node_name(hp, node); | ||
391 | } | ||
392 | |||
393 | query.type = type; | ||
394 | |||
395 | id = mdesc_get_property(hp, node, "id", NULL); | ||
396 | cfg_handle = NULL; | ||
397 | mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) { | ||
398 | u64 target; | ||
399 | |||
400 | target = mdesc_arc_target(hp, a); | ||
401 | cfg_handle = mdesc_get_property(hp, target, | ||
402 | "cfg-handle", NULL); | ||
403 | if (cfg_handle) | ||
404 | break; | ||
405 | } | ||
406 | |||
407 | if (!id) { | ||
408 | query.dev_no = ~(u64)0; | ||
409 | query.id = ~(u64)0; | ||
410 | } else if (!cfg_handle) { | ||
411 | query.dev_no = *id; | ||
412 | query.id = ~(u64)0; | ||
413 | } else { | ||
414 | query.dev_no = *cfg_handle; | ||
415 | query.id = *id; | ||
416 | } | ||
417 | |||
418 | dev = device_find_child(&root_vdev->dev, &query, | ||
369 | vio_md_node_match); | 419 | vio_md_node_match); |
370 | if (dev) { | 420 | if (dev) { |
371 | printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); | 421 | printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); |
372 | 422 | ||
373 | device_unregister(dev); | 423 | device_unregister(dev); |
374 | put_device(dev); | 424 | put_device(dev); |
425 | } else { | ||
426 | if (!id) | ||
427 | printk(KERN_ERR "VIO: Removed unknown %s node.\n", | ||
428 | type); | ||
429 | else if (!cfg_handle) | ||
430 | printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n", | ||
431 | type, *id); | ||
432 | else | ||
433 | printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n", | ||
434 | type, *cfg_handle, *id); | ||
375 | } | 435 | } |
376 | } | 436 | } |
377 | 437 | ||
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index 69912d2f8b54..07c03e72d812 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile | |||
@@ -15,6 +15,7 @@ lib-$(CONFIG_SPARC32) += copy_user.o locks.o | |||
15 | lib-$(CONFIG_SPARC64) += atomic_64.o | 15 | lib-$(CONFIG_SPARC64) += atomic_64.o |
16 | lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o | 16 | lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o |
17 | lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o | 17 | lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o |
18 | lib-$(CONFIG_SPARC64) += multi3.o | ||
18 | 19 | ||
19 | lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o | 20 | lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o |
20 | lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o | 21 | lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o |
diff --git a/arch/sparc/lib/multi3.S b/arch/sparc/lib/multi3.S new file mode 100644 index 000000000000..d6b6c97fe3c7 --- /dev/null +++ b/arch/sparc/lib/multi3.S | |||
@@ -0,0 +1,35 @@ | |||
1 | #include <linux/linkage.h> | ||
2 | #include <asm/export.h> | ||
3 | |||
4 | .text | ||
5 | .align 4 | ||
6 | ENTRY(__multi3) /* %o0 = u, %o1 = v */ | ||
7 | mov %o1, %g1 | ||
8 | srl %o3, 0, %g4 | ||
9 | mulx %g4, %g1, %o1 | ||
10 | srlx %g1, 0x20, %g3 | ||
11 | mulx %g3, %g4, %g5 | ||
12 | sllx %g5, 0x20, %o5 | ||
13 | srl %g1, 0, %g4 | ||
14 | sub %o1, %o5, %o5 | ||
15 | srlx %o5, 0x20, %o5 | ||
16 | addcc %g5, %o5, %g5 | ||
17 | srlx %o3, 0x20, %o5 | ||
18 | mulx %g4, %o5, %g4 | ||
19 | mulx %g3, %o5, %o5 | ||
20 | sethi %hi(0x80000000), %g3 | ||
21 | addcc %g5, %g4, %g5 | ||
22 | srlx %g5, 0x20, %g5 | ||
23 | add %g3, %g3, %g3 | ||
24 | movcc %xcc, %g0, %g3 | ||
25 | addcc %o5, %g5, %o5 | ||
26 | sllx %g4, 0x20, %g4 | ||
27 | add %o1, %g4, %o1 | ||
28 | add %o5, %g3, %g2 | ||
29 | mulx %g1, %o2, %g1 | ||
30 | add %g1, %g2, %g1 | ||
31 | mulx %o0, %o3, %o0 | ||
32 | retl | ||
33 | add %g1, %o0, %o0 | ||
34 | ENDPROC(__multi3) | ||
35 | EXPORT_SYMBOL(__multi3) | ||
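
The new multi3.S provides __multi3, the libgcc helper the compiler emits for 128-bit (TImode) multiplication. As a rough cross-check of what the register sequence computes, here is a user-space C sketch of the same schoolbook decomposition into 64-bit partial products; the struct layout and test values are made up for illustration.

#include <stdint.h>
#include <stdio.h>

struct u128 { uint64_t hi, lo; };

/* 64x64 -> 128 bit multiply built from 32-bit halves. */
static struct u128 mul_64x64(uint64_t a, uint64_t b)
{
    uint64_t a_lo = (uint32_t)a, a_hi = a >> 32;
    uint64_t b_lo = (uint32_t)b, b_hi = b >> 32;
    uint64_t p0 = a_lo * b_lo;
    uint64_t p1 = a_lo * b_hi;
    uint64_t p2 = a_hi * b_lo;
    uint64_t p3 = a_hi * b_hi;
    uint64_t mid  = p1 + (p0 >> 32);          /* cannot overflow 64 bits */
    uint64_t mid2 = p2 + (uint32_t)mid;       /* cannot overflow either */
    struct u128 r;

    r.lo = (mid2 << 32) | (uint32_t)p0;
    r.hi = p3 + (mid >> 32) + (mid2 >> 32);
    return r;
}

/* Equivalent of __multi3: low 128 bits of a 128x128-bit product. */
static struct u128 multi3(struct u128 u, struct u128 v)
{
    struct u128 r = mul_64x64(u.lo, v.lo);

    r.hi += u.hi * v.lo + u.lo * v.hi;        /* cross terms, mod 2^64 */
    return r;
}

int main(void)
{
    struct u128 u = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };
    struct u128 v = { 0x0f0f0f0f0f0f0f0fULL, 0x123456789abcdef0ULL };
    struct u128 r = multi3(u, v);
    unsigned __int128 a = ((unsigned __int128)u.hi << 64) | u.lo;
    unsigned __int128 b = ((unsigned __int128)v.hi << 64) | v.lo;
    unsigned __int128 c = a * b;

    printf("hi ok: %d  lo ok: %d\n",
           r.hi == (uint64_t)(c >> 64), r.lo == (uint64_t)c);
    return 0;
}
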
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 0cda653ae007..3c40ebd50f92 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -358,7 +358,8 @@ static int __init setup_hugepagesz(char *string) | |||
358 | } | 358 | } |
359 | 359 | ||
360 | if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { | 360 | if ((hv_pgsz_mask & cpu_pgsz_mask) == 0U) { |
361 | pr_warn("hugepagesz=%llu not supported by MMU.\n", | 361 | hugetlb_bad_size(); |
362 | pr_err("hugepagesz=%llu not supported by MMU.\n", | ||
362 | hugepage_size); | 363 | hugepage_size); |
363 | goto out; | 364 | goto out; |
364 | } | 365 | } |
@@ -706,10 +707,58 @@ EXPORT_SYMBOL(__flush_dcache_range); | |||
706 | 707 | ||
707 | /* get_new_mmu_context() uses "cache + 1". */ | 708 | /* get_new_mmu_context() uses "cache + 1". */ |
708 | DEFINE_SPINLOCK(ctx_alloc_lock); | 709 | DEFINE_SPINLOCK(ctx_alloc_lock); |
709 | unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1; | 710 | unsigned long tlb_context_cache = CTX_FIRST_VERSION; |
710 | #define MAX_CTX_NR (1UL << CTX_NR_BITS) | 711 | #define MAX_CTX_NR (1UL << CTX_NR_BITS) |
711 | #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) | 712 | #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR) |
712 | DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); | 713 | DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR); |
714 | DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0}; | ||
715 | |||
716 | static void mmu_context_wrap(void) | ||
717 | { | ||
718 | unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK; | ||
719 | unsigned long new_ver, new_ctx, old_ctx; | ||
720 | struct mm_struct *mm; | ||
721 | int cpu; | ||
722 | |||
723 | bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS); | ||
724 | |||
725 | /* Reserve kernel context */ | ||
726 | set_bit(0, mmu_context_bmap); | ||
727 | |||
728 | new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION; | ||
729 | if (unlikely(new_ver == 0)) | ||
730 | new_ver = CTX_FIRST_VERSION; | ||
731 | tlb_context_cache = new_ver; | ||
732 | |||
733 | /* | ||
734 | * Make sure that any new mm that are added into per_cpu_secondary_mm, | ||
735 | * are going to go through get_new_mmu_context() path. | ||
736 | */ | ||
737 | mb(); | ||
738 | |||
739 | /* | ||
740 | * Updated versions to current on those CPUs that had valid secondary | ||
741 | * contexts | ||
742 | */ | ||
743 | for_each_online_cpu(cpu) { | ||
744 | /* | ||
745 | * If a new mm is stored after we took this mm from the array, | ||
746 | * it will go into get_new_mmu_context() path, because we | ||
747 | * already bumped the version in tlb_context_cache. | ||
748 | */ | ||
749 | mm = per_cpu(per_cpu_secondary_mm, cpu); | ||
750 | |||
751 | if (unlikely(!mm || mm == &init_mm)) | ||
752 | continue; | ||
753 | |||
754 | old_ctx = mm->context.sparc64_ctx_val; | ||
755 | if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) { | ||
756 | new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver; | ||
757 | set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap); | ||
758 | mm->context.sparc64_ctx_val = new_ctx; | ||
759 | } | ||
760 | } | ||
761 | } | ||
713 | 762 | ||
714 | /* Caller does TLB context flushing on local CPU if necessary. | 763 | /* Caller does TLB context flushing on local CPU if necessary. |
715 | * The caller also ensures that CTX_VALID(mm->context) is false. | 764 | * The caller also ensures that CTX_VALID(mm->context) is false. |
@@ -725,48 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm) | |||
725 | { | 774 | { |
726 | unsigned long ctx, new_ctx; | 775 | unsigned long ctx, new_ctx; |
727 | unsigned long orig_pgsz_bits; | 776 | unsigned long orig_pgsz_bits; |
728 | int new_version; | ||
729 | 777 | ||
730 | spin_lock(&ctx_alloc_lock); | 778 | spin_lock(&ctx_alloc_lock); |
779 | retry: | ||
780 | /* wrap might have happened, test again if our context became valid */ | ||
781 | if (unlikely(CTX_VALID(mm->context))) | ||
782 | goto out; | ||
731 | orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); | 783 | orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK); |
732 | ctx = (tlb_context_cache + 1) & CTX_NR_MASK; | 784 | ctx = (tlb_context_cache + 1) & CTX_NR_MASK; |
733 | new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); | 785 | new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx); |
734 | new_version = 0; | ||
735 | if (new_ctx >= (1 << CTX_NR_BITS)) { | 786 | if (new_ctx >= (1 << CTX_NR_BITS)) { |
736 | new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); | 787 | new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1); |
737 | if (new_ctx >= ctx) { | 788 | if (new_ctx >= ctx) { |
738 | int i; | 789 | mmu_context_wrap(); |
739 | new_ctx = (tlb_context_cache & CTX_VERSION_MASK) + | 790 | goto retry; |
740 | CTX_FIRST_VERSION; | ||
741 | if (new_ctx == 1) | ||
742 | new_ctx = CTX_FIRST_VERSION; | ||
743 | |||
744 | /* Don't call memset, for 16 entries that's just | ||
745 | * plain silly... | ||
746 | */ | ||
747 | mmu_context_bmap[0] = 3; | ||
748 | mmu_context_bmap[1] = 0; | ||
749 | mmu_context_bmap[2] = 0; | ||
750 | mmu_context_bmap[3] = 0; | ||
751 | for (i = 4; i < CTX_BMAP_SLOTS; i += 4) { | ||
752 | mmu_context_bmap[i + 0] = 0; | ||
753 | mmu_context_bmap[i + 1] = 0; | ||
754 | mmu_context_bmap[i + 2] = 0; | ||
755 | mmu_context_bmap[i + 3] = 0; | ||
756 | } | ||
757 | new_version = 1; | ||
758 | goto out; | ||
759 | } | 791 | } |
760 | } | 792 | } |
793 | if (mm->context.sparc64_ctx_val) | ||
794 | cpumask_clear(mm_cpumask(mm)); | ||
761 | mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); | 795 | mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63)); |
762 | new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); | 796 | new_ctx |= (tlb_context_cache & CTX_VERSION_MASK); |
763 | out: | ||
764 | tlb_context_cache = new_ctx; | 797 | tlb_context_cache = new_ctx; |
765 | mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; | 798 | mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits; |
799 | out: | ||
766 | spin_unlock(&ctx_alloc_lock); | 800 | spin_unlock(&ctx_alloc_lock); |
767 | |||
768 | if (unlikely(new_version)) | ||
769 | smp_new_mmu_context_version(); | ||
770 | } | 801 | } |
771 | 802 | ||
772 | static int numa_enabled = 1; | 803 | static int numa_enabled = 1; |
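
The init_64.c change above replaces the open-coded version bump in get_new_mmu_context() with mmu_context_wrap() plus a retry, so a context that was re-stamped with the new version during the wrap is not reallocated. The following user-space model sketches that allocate/wrap/retry flow with assumed, much smaller sizes; the names mirror the patch but the code is illustrative, not the kernel's.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NR_CTX       16              /* tiny for illustration; kernel uses 1 << CTX_NR_BITS */
#define VERSION_STEP NR_CTX          /* version field lives above the context-number bits */

static bool bitmap[NR_CTX];
static uint64_t ctx_cache = VERSION_STEP;   /* mirrors tlb_context_cache = CTX_FIRST_VERSION */

static uint64_t ctx_version(uint64_t ctx) { return ctx & ~(uint64_t)(NR_CTX - 1); }
static bool ctx_valid(uint64_t ctx) { return ctx_version(ctx) == ctx_version(ctx_cache); }

static void mmu_context_wrap(void)
{
    memset(bitmap, 0, sizeof(bitmap));
    bitmap[0] = true;                        /* reserve the kernel context */
    ctx_cache = ctx_version(ctx_cache) + VERSION_STEP;
}

static uint64_t get_new_mmu_context(uint64_t cur)
{
retry:
    /* analogue of the new "goto retry" path: a wrap may have revalidated us */
    if (ctx_valid(cur))
        return cur;
    for (unsigned int i = 1; i < NR_CTX; i++) {
        if (!bitmap[i]) {
            bitmap[i] = true;
            ctx_cache = ctx_version(ctx_cache) | i;
            return ctx_cache;
        }
    }
    mmu_context_wrap();
    goto retry;
}

int main(void)
{
    uint64_t last = 0;

    for (int i = 0; i < 40; i++)             /* 40 fresh "mm"s; forces two wraps */
        last = get_new_mmu_context(0);
    printf("last allocated context: %#llx\n", (unsigned long long)last);
    return 0;
}
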
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index bedf08b22a47..0d4b998c7d7b 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c | |||
@@ -496,7 +496,8 @@ retry_tsb_alloc: | |||
496 | extern void copy_tsb(unsigned long old_tsb_base, | 496 | extern void copy_tsb(unsigned long old_tsb_base, |
497 | unsigned long old_tsb_size, | 497 | unsigned long old_tsb_size, |
498 | unsigned long new_tsb_base, | 498 | unsigned long new_tsb_base, |
499 | unsigned long new_tsb_size); | 499 | unsigned long new_tsb_size, |
500 | unsigned long page_size_shift); | ||
500 | unsigned long old_tsb_base = (unsigned long) old_tsb; | 501 | unsigned long old_tsb_base = (unsigned long) old_tsb; |
501 | unsigned long new_tsb_base = (unsigned long) new_tsb; | 502 | unsigned long new_tsb_base = (unsigned long) new_tsb; |
502 | 503 | ||
@@ -504,7 +505,9 @@ retry_tsb_alloc: | |||
504 | old_tsb_base = __pa(old_tsb_base); | 505 | old_tsb_base = __pa(old_tsb_base); |
505 | new_tsb_base = __pa(new_tsb_base); | 506 | new_tsb_base = __pa(new_tsb_base); |
506 | } | 507 | } |
507 | copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size); | 508 | copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size, |
509 | tsb_index == MM_TSB_BASE ? | ||
510 | PAGE_SHIFT : REAL_HPAGE_SHIFT); | ||
508 | } | 511 | } |
509 | 512 | ||
510 | mm->context.tsb_block[tsb_index].tsb = new_tsb; | 513 | mm->context.tsb_block[tsb_index].tsb = new_tsb; |
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index 5d2fd6cd3189..fcf4d27a38fb 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S | |||
@@ -971,11 +971,6 @@ xcall_capture: | |||
971 | wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint | 971 | wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint |
972 | retry | 972 | retry |
973 | 973 | ||
974 | .globl xcall_new_mmu_context_version | ||
975 | xcall_new_mmu_context_version: | ||
976 | wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint | ||
977 | retry | ||
978 | |||
979 | #ifdef CONFIG_KGDB | 974 | #ifdef CONFIG_KGDB |
980 | .globl xcall_kgdb_capture | 975 | .globl xcall_kgdb_capture |
981 | xcall_kgdb_capture: | 976 | xcall_kgdb_capture: |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cd18994a9555..4ccfacc7232a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -360,7 +360,7 @@ config SMP | |||
360 | Management" code will be disabled if you say Y here. | 360 | Management" code will be disabled if you say Y here. |
361 | 361 | ||
362 | See also <file:Documentation/x86/i386/IO-APIC.txt>, | 362 | See also <file:Documentation/x86/i386/IO-APIC.txt>, |
363 | <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at | 363 | <file:Documentation/lockup-watchdogs.txt> and the SMP-HOWTO available at |
364 | <http://www.tldp.org/docs.html#howto>. | 364 | <http://www.tldp.org/docs.html#howto>. |
365 | 365 | ||
366 | If you don't know what to do here, say N. | 366 | If you don't know what to do here, say N. |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 5851411e60fb..bf240b920473 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -159,7 +159,7 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER | |||
159 | # If '-Os' is enabled, disable it and print a warning. | 159 | # If '-Os' is enabled, disable it and print a warning. |
160 | ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE | 160 | ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE |
161 | undefine CONFIG_CC_OPTIMIZE_FOR_SIZE | 161 | undefine CONFIG_CC_OPTIMIZE_FOR_SIZE |
162 | $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE. Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.) | 162 | $(warning Disabling CONFIG_CC_OPTIMIZE_FOR_SIZE. Your compiler does not have -mfentry so you cannot optimize for size with CONFIG_FUNCTION_GRAPH_TRACER.) |
163 | endif | 163 | endif |
164 | 164 | ||
165 | endif | 165 | endif |
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 44163e8c3868..2c860ad4fe06 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -94,7 +94,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o | |||
94 | quiet_cmd_check_data_rel = DATAREL $@ | 94 | quiet_cmd_check_data_rel = DATAREL $@ |
95 | define cmd_check_data_rel | 95 | define cmd_check_data_rel |
96 | for obj in $(filter %.o,$^); do \ | 96 | for obj in $(filter %.o,$^); do \ |
97 | readelf -S $$obj | grep -qF .rel.local && { \ | 97 | ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \ |
98 | echo "error: $$obj has data relocations!" >&2; \ | 98 | echo "error: $$obj has data relocations!" >&2; \ |
99 | exit 1; \ | 99 | exit 1; \ |
100 | } || true; \ | 100 | } || true; \ |
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 50bc26949e9e..48ef7bb32c42 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
@@ -252,6 +252,23 @@ ENTRY(__switch_to_asm) | |||
252 | END(__switch_to_asm) | 252 | END(__switch_to_asm) |
253 | 253 | ||
254 | /* | 254 | /* |
255 | * The unwinder expects the last frame on the stack to always be at the same | ||
256 | * offset from the end of the page, which allows it to validate the stack. | ||
257 | * Calling schedule_tail() directly would break that convention because it's an | ||
258 | * asmlinkage function so its argument has to be pushed on the stack. This | ||
259 | * wrapper creates a proper "end of stack" frame header before the call. | ||
260 | */ | ||
261 | ENTRY(schedule_tail_wrapper) | ||
262 | FRAME_BEGIN | ||
263 | |||
264 | pushl %eax | ||
265 | call schedule_tail | ||
266 | popl %eax | ||
267 | |||
268 | FRAME_END | ||
269 | ret | ||
270 | ENDPROC(schedule_tail_wrapper) | ||
271 | /* | ||
255 | * A newly forked process directly context switches into this address. | 272 | * A newly forked process directly context switches into this address. |
256 | * | 273 | * |
257 | * eax: prev task we switched from | 274 | * eax: prev task we switched from |
@@ -259,24 +276,15 @@ END(__switch_to_asm) | |||
259 | * edi: kernel thread arg | 276 | * edi: kernel thread arg |
260 | */ | 277 | */ |
261 | ENTRY(ret_from_fork) | 278 | ENTRY(ret_from_fork) |
262 | FRAME_BEGIN /* help unwinder find end of stack */ | 279 | call schedule_tail_wrapper |
263 | |||
264 | /* | ||
265 | * schedule_tail() is asmlinkage so we have to put its 'prev' argument | ||
266 | * on the stack. | ||
267 | */ | ||
268 | pushl %eax | ||
269 | call schedule_tail | ||
270 | popl %eax | ||
271 | 280 | ||
272 | testl %ebx, %ebx | 281 | testl %ebx, %ebx |
273 | jnz 1f /* kernel threads are uncommon */ | 282 | jnz 1f /* kernel threads are uncommon */ |
274 | 283 | ||
275 | 2: | 284 | 2: |
276 | /* When we fork, we trace the syscall return in the child, too. */ | 285 | /* When we fork, we trace the syscall return in the child, too. */ |
277 | leal FRAME_OFFSET(%esp), %eax | 286 | movl %esp, %eax |
278 | call syscall_return_slowpath | 287 | call syscall_return_slowpath |
279 | FRAME_END | ||
280 | jmp restore_all | 288 | jmp restore_all |
281 | 289 | ||
282 | /* kernel thread */ | 290 | /* kernel thread */ |
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 607d72c4a485..4a4c0834f965 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
@@ -36,7 +36,6 @@ | |||
36 | #include <asm/smap.h> | 36 | #include <asm/smap.h> |
37 | #include <asm/pgtable_types.h> | 37 | #include <asm/pgtable_types.h> |
38 | #include <asm/export.h> | 38 | #include <asm/export.h> |
39 | #include <asm/frame.h> | ||
40 | #include <linux/err.h> | 39 | #include <linux/err.h> |
41 | 40 | ||
42 | .code64 | 41 | .code64 |
@@ -406,19 +405,17 @@ END(__switch_to_asm) | |||
406 | * r12: kernel thread arg | 405 | * r12: kernel thread arg |
407 | */ | 406 | */ |
408 | ENTRY(ret_from_fork) | 407 | ENTRY(ret_from_fork) |
409 | FRAME_BEGIN /* help unwinder find end of stack */ | ||
410 | movq %rax, %rdi | 408 | movq %rax, %rdi |
411 | call schedule_tail /* rdi: 'prev' task parameter */ | 409 | call schedule_tail /* rdi: 'prev' task parameter */ |
412 | 410 | ||
413 | testq %rbx, %rbx /* from kernel_thread? */ | 411 | testq %rbx, %rbx /* from kernel_thread? */ |
414 | jnz 1f /* kernel threads are uncommon */ | 412 | jnz 1f /* kernel threads are uncommon */ |
415 | 413 | ||
416 | 2: | 414 | 2: |
417 | leaq FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */ | 415 | movq %rsp, %rdi |
418 | call syscall_return_slowpath /* returns with IRQs disabled */ | 416 | call syscall_return_slowpath /* returns with IRQs disabled */ |
419 | TRACE_IRQS_ON /* user mode is traced as IRQS on */ | 417 | TRACE_IRQS_ON /* user mode is traced as IRQS on */ |
420 | SWAPGS | 418 | SWAPGS |
421 | FRAME_END | ||
422 | jmp restore_regs_and_iret | 419 | jmp restore_regs_and_iret |
423 | 420 | ||
424 | 1: | 421 | 1: |
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 4fd5195deed0..3f9a3d2a5209 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -266,6 +266,7 @@ static inline int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *s | |||
266 | #endif | 266 | #endif |
267 | 267 | ||
268 | int mce_available(struct cpuinfo_x86 *c); | 268 | int mce_available(struct cpuinfo_x86 *c); |
269 | bool mce_is_memory_error(struct mce *m); | ||
269 | 270 | ||
270 | DECLARE_PER_CPU(unsigned, mce_exception_count); | 271 | DECLARE_PER_CPU(unsigned, mce_exception_count); |
271 | DECLARE_PER_CPU(unsigned, mce_poll_count); | 272 | DECLARE_PER_CPU(unsigned, mce_poll_count); |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c5b8f760473c..32e14d137416 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -409,8 +409,13 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start, | |||
409 | memcpy(insnbuf, replacement, a->replacementlen); | 409 | memcpy(insnbuf, replacement, a->replacementlen); |
410 | insnbuf_sz = a->replacementlen; | 410 | insnbuf_sz = a->replacementlen; |
411 | 411 | ||
412 | /* 0xe8 is a relative jump; fix the offset. */ | 412 | /* |
413 | if (*insnbuf == 0xe8 && a->replacementlen == 5) { | 413 | * 0xe8 is a relative jump; fix the offset. |
414 | * | ||
415 | * Instruction length is checked before the opcode to avoid | ||
416 | * accessing uninitialized bytes for zero-length replacements. | ||
417 | */ | ||
418 | if (a->replacementlen == 5 && *insnbuf == 0xe8) { | ||
414 | *(s32 *)(insnbuf + 1) += replacement - instr; | 419 | *(s32 *)(insnbuf + 1) += replacement - instr; |
415 | DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx", | 420 | DPRINTK("Fix CALL offset: 0x%x, CALL 0x%lx", |
416 | *(s32 *)(insnbuf + 1), | 421 | *(s32 *)(insnbuf + 1), |
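
The apply_alternatives() hunk swaps the operand order so the replacement length is tested before the opcode byte is read. Since && short-circuits left to right, a zero-length replacement then never touches insnbuf; a toy illustration:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool is_relative_call(const unsigned char *insnbuf, unsigned int len)
{
    /* length first: insnbuf[0] is only read when a byte was actually copied */
    return len == 5 && insnbuf[0] == 0xe8;
}

int main(void)
{
    unsigned char buf[5];

    memcpy(buf, "\xe8\x01\x02\x03\x04", 5);
    printf("5-byte e8: %d\n", is_relative_call(buf, 5));
    printf("0 bytes  : %d\n", is_relative_call(buf, 0));
    return 0;
}
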
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 5abd4bf73d6e..5cfbaeb6529a 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -499,16 +499,14 @@ static int mce_usable_address(struct mce *m) | |||
499 | return 1; | 499 | return 1; |
500 | } | 500 | } |
501 | 501 | ||
502 | static bool memory_error(struct mce *m) | 502 | bool mce_is_memory_error(struct mce *m) |
503 | { | 503 | { |
504 | struct cpuinfo_x86 *c = &boot_cpu_data; | 504 | if (m->cpuvendor == X86_VENDOR_AMD) { |
505 | |||
506 | if (c->x86_vendor == X86_VENDOR_AMD) { | ||
507 | /* ErrCodeExt[20:16] */ | 505 | /* ErrCodeExt[20:16] */ |
508 | u8 xec = (m->status >> 16) & 0x1f; | 506 | u8 xec = (m->status >> 16) & 0x1f; |
509 | 507 | ||
510 | return (xec == 0x0 || xec == 0x8); | 508 | return (xec == 0x0 || xec == 0x8); |
511 | } else if (c->x86_vendor == X86_VENDOR_INTEL) { | 509 | } else if (m->cpuvendor == X86_VENDOR_INTEL) { |
512 | /* | 510 | /* |
513 | * Intel SDM Volume 3B - 15.9.2 Compound Error Codes | 511 | * Intel SDM Volume 3B - 15.9.2 Compound Error Codes |
514 | * | 512 | * |
@@ -529,6 +527,7 @@ static bool memory_error(struct mce *m) | |||
529 | 527 | ||
530 | return false; | 528 | return false; |
531 | } | 529 | } |
530 | EXPORT_SYMBOL_GPL(mce_is_memory_error); | ||
532 | 531 | ||
533 | static bool cec_add_mce(struct mce *m) | 532 | static bool cec_add_mce(struct mce *m) |
534 | { | 533 | { |
@@ -536,7 +535,7 @@ static bool cec_add_mce(struct mce *m) | |||
536 | return false; | 535 | return false; |
537 | 536 | ||
538 | /* We eat only correctable DRAM errors with usable addresses. */ | 537 | /* We eat only correctable DRAM errors with usable addresses. */ |
539 | if (memory_error(m) && | 538 | if (mce_is_memory_error(m) && |
540 | !(m->status & MCI_STATUS_UC) && | 539 | !(m->status & MCI_STATUS_UC) && |
541 | mce_usable_address(m)) | 540 | mce_usable_address(m)) |
542 | if (!cec_add_elem(m->addr >> PAGE_SHIFT)) | 541 | if (!cec_add_elem(m->addr >> PAGE_SHIFT)) |
@@ -713,7 +712,7 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b) | |||
713 | 712 | ||
714 | severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); | 713 | severity = mce_severity(&m, mca_cfg.tolerant, NULL, false); |
715 | 714 | ||
716 | if (severity == MCE_DEFERRED_SEVERITY && memory_error(&m)) | 715 | if (severity == MCE_DEFERRED_SEVERITY && mce_is_memory_error(&m)) |
717 | if (m.status & MCI_STATUS_ADDRV) | 716 | if (m.status & MCI_STATUS_ADDRV) |
718 | m.severity = severity; | 717 | m.severity = severity; |
719 | 718 | ||
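
mce_is_memory_error() above is the old memory_error() renamed, exported, and switched to m->cpuvendor so it can classify records from any CPU. The AMD branch keys off ErrCodeExt, bits 20:16 of MCi_STATUS; a small sketch of that extraction with a made-up status value:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool amd_is_memory_error(uint64_t status)
{
    uint8_t xec = (status >> 16) & 0x1f;     /* ErrCodeExt[20:16] */

    return xec == 0x0 || xec == 0x8;         /* the DRAM error codes */
}

int main(void)
{
    uint64_t status = (uint64_t)0x8 << 16;   /* hypothetical DRAM ECC error record */

    printf("memory error: %d\n", amd_is_memory_error(status));
    return 0;
}
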
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 45db4d2ebd01..e9f4d762aa5b 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
@@ -320,7 +320,7 @@ void load_ucode_amd_ap(unsigned int cpuid_1_eax) | |||
320 | } | 320 | } |
321 | 321 | ||
322 | static enum ucode_state | 322 | static enum ucode_state |
323 | load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size); | 323 | load_microcode_amd(bool save, u8 family, const u8 *data, size_t size); |
324 | 324 | ||
325 | int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) | 325 | int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) |
326 | { | 326 | { |
@@ -338,8 +338,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) | |||
338 | if (!desc.mc) | 338 | if (!desc.mc) |
339 | return -EINVAL; | 339 | return -EINVAL; |
340 | 340 | ||
341 | ret = load_microcode_amd(smp_processor_id(), x86_family(cpuid_1_eax), | 341 | ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size); |
342 | desc.data, desc.size); | ||
343 | if (ret != UCODE_OK) | 342 | if (ret != UCODE_OK) |
344 | return -EINVAL; | 343 | return -EINVAL; |
345 | 344 | ||
@@ -675,7 +674,7 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, | |||
675 | } | 674 | } |
676 | 675 | ||
677 | static enum ucode_state | 676 | static enum ucode_state |
678 | load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size) | 677 | load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) |
679 | { | 678 | { |
680 | enum ucode_state ret; | 679 | enum ucode_state ret; |
681 | 680 | ||
@@ -689,8 +688,8 @@ load_microcode_amd(int cpu, u8 family, const u8 *data, size_t size) | |||
689 | 688 | ||
690 | #ifdef CONFIG_X86_32 | 689 | #ifdef CONFIG_X86_32 |
691 | /* save BSP's matching patch for early load */ | 690 | /* save BSP's matching patch for early load */ |
692 | if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { | 691 | if (save) { |
693 | struct ucode_patch *p = find_patch(cpu); | 692 | struct ucode_patch *p = find_patch(0); |
694 | if (p) { | 693 | if (p) { |
695 | memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); | 694 | memset(amd_ucode_patch, 0, PATCH_MAX_SIZE); |
696 | memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), | 695 | memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), |
@@ -722,11 +721,12 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, | |||
722 | { | 721 | { |
723 | char fw_name[36] = "amd-ucode/microcode_amd.bin"; | 722 | char fw_name[36] = "amd-ucode/microcode_amd.bin"; |
724 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 723 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
724 | bool bsp = c->cpu_index == boot_cpu_data.cpu_index; | ||
725 | enum ucode_state ret = UCODE_NFOUND; | 725 | enum ucode_state ret = UCODE_NFOUND; |
726 | const struct firmware *fw; | 726 | const struct firmware *fw; |
727 | 727 | ||
728 | /* reload ucode container only on the boot cpu */ | 728 | /* reload ucode container only on the boot cpu */ |
729 | if (!refresh_fw || c->cpu_index != boot_cpu_data.cpu_index) | 729 | if (!refresh_fw || !bsp) |
730 | return UCODE_OK; | 730 | return UCODE_OK; |
731 | 731 | ||
732 | if (c->x86 >= 0x15) | 732 | if (c->x86 >= 0x15) |
@@ -743,7 +743,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, | |||
743 | goto fw_release; | 743 | goto fw_release; |
744 | } | 744 | } |
745 | 745 | ||
746 | ret = load_microcode_amd(cpu, c->x86, fw->data, fw->size); | 746 | ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size); |
747 | 747 | ||
748 | fw_release: | 748 | fw_release: |
749 | release_firmware(fw); | 749 | release_firmware(fw); |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 0651e974dcb3..9bef1bbeba63 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -689,8 +689,12 @@ static inline void *alloc_tramp(unsigned long size) | |||
689 | { | 689 | { |
690 | return module_alloc(size); | 690 | return module_alloc(size); |
691 | } | 691 | } |
692 | static inline void tramp_free(void *tramp) | 692 | static inline void tramp_free(void *tramp, int size) |
693 | { | 693 | { |
694 | int npages = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
695 | |||
696 | set_memory_nx((unsigned long)tramp, npages); | ||
697 | set_memory_rw((unsigned long)tramp, npages); | ||
694 | module_memfree(tramp); | 698 | module_memfree(tramp); |
695 | } | 699 | } |
696 | #else | 700 | #else |
@@ -699,7 +703,7 @@ static inline void *alloc_tramp(unsigned long size) | |||
699 | { | 703 | { |
700 | return NULL; | 704 | return NULL; |
701 | } | 705 | } |
702 | static inline void tramp_free(void *tramp) { } | 706 | static inline void tramp_free(void *tramp, int size) { } |
703 | #endif | 707 | #endif |
704 | 708 | ||
705 | /* Defined as markers to the end of the ftrace default trampolines */ | 709 | /* Defined as markers to the end of the ftrace default trampolines */ |
@@ -771,7 +775,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) | |||
771 | /* Copy ftrace_caller onto the trampoline memory */ | 775 | /* Copy ftrace_caller onto the trampoline memory */ |
772 | ret = probe_kernel_read(trampoline, (void *)start_offset, size); | 776 | ret = probe_kernel_read(trampoline, (void *)start_offset, size); |
773 | if (WARN_ON(ret < 0)) { | 777 | if (WARN_ON(ret < 0)) { |
774 | tramp_free(trampoline); | 778 | tramp_free(trampoline, *tramp_size); |
775 | return 0; | 779 | return 0; |
776 | } | 780 | } |
777 | 781 | ||
@@ -797,7 +801,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size) | |||
797 | 801 | ||
798 | /* Are we pointing to the reference? */ | 802 | /* Are we pointing to the reference? */ |
799 | if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { | 803 | if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) { |
800 | tramp_free(trampoline); | 804 | tramp_free(trampoline, *tramp_size); |
801 | return 0; | 805 | return 0; |
802 | } | 806 | } |
803 | 807 | ||
@@ -839,7 +843,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) | |||
839 | unsigned long offset; | 843 | unsigned long offset; |
840 | unsigned long ip; | 844 | unsigned long ip; |
841 | unsigned int size; | 845 | unsigned int size; |
842 | int ret; | 846 | int ret, npages; |
843 | 847 | ||
844 | if (ops->trampoline) { | 848 | if (ops->trampoline) { |
845 | /* | 849 | /* |
@@ -848,11 +852,14 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) | |||
848 | */ | 852 | */ |
849 | if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) | 853 | if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) |
850 | return; | 854 | return; |
855 | npages = PAGE_ALIGN(ops->trampoline_size) >> PAGE_SHIFT; | ||
856 | set_memory_rw(ops->trampoline, npages); | ||
851 | } else { | 857 | } else { |
852 | ops->trampoline = create_trampoline(ops, &size); | 858 | ops->trampoline = create_trampoline(ops, &size); |
853 | if (!ops->trampoline) | 859 | if (!ops->trampoline) |
854 | return; | 860 | return; |
855 | ops->trampoline_size = size; | 861 | ops->trampoline_size = size; |
862 | npages = PAGE_ALIGN(size) >> PAGE_SHIFT; | ||
856 | } | 863 | } |
857 | 864 | ||
858 | offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); | 865 | offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS); |
@@ -863,6 +870,7 @@ void arch_ftrace_update_trampoline(struct ftrace_ops *ops) | |||
863 | /* Do a safe modify in case the trampoline is executing */ | 870 | /* Do a safe modify in case the trampoline is executing */ |
864 | new = ftrace_call_replace(ip, (unsigned long)func); | 871 | new = ftrace_call_replace(ip, (unsigned long)func); |
865 | ret = update_ftrace_func(ip, new); | 872 | ret = update_ftrace_func(ip, new); |
873 | set_memory_ro(ops->trampoline, npages); | ||
866 | 874 | ||
867 | /* The update should never fail */ | 875 | /* The update should never fail */ |
868 | WARN_ON(ret); | 876 | WARN_ON(ret); |
@@ -939,7 +947,7 @@ void arch_ftrace_trampoline_free(struct ftrace_ops *ops) | |||
939 | if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) | 947 | if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP)) |
940 | return; | 948 | return; |
941 | 949 | ||
942 | tramp_free((void *)ops->trampoline); | 950 | tramp_free((void *)ops->trampoline, ops->trampoline_size); |
943 | ops->trampoline = 0; | 951 | ops->trampoline = 0; |
944 | } | 952 | } |
945 | 953 | ||
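
The ftrace change marks trampoline pages read-only after patching and flips them back to writable (and non-executable) before freeing. The page count handed to set_memory_rw()/set_memory_nx() is simply the allocation size rounded up to whole pages; a quick check of that arithmetic, assuming a 4 KiB page:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long sizes[] = { 1, 4096, 4097, 12288 };

    for (int i = 0; i < 4; i++)
        printf("size %5lu -> %lu page(s)\n",
               sizes[i], PAGE_ALIGN(sizes[i]) >> PAGE_SHIFT);
    return 0;
}
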
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 5b2bbfbb3712..6b877807598b 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/ftrace.h> | 52 | #include <linux/ftrace.h> |
53 | #include <linux/frame.h> | 53 | #include <linux/frame.h> |
54 | #include <linux/kasan.h> | 54 | #include <linux/kasan.h> |
55 | #include <linux/moduleloader.h> | ||
55 | 56 | ||
56 | #include <asm/text-patching.h> | 57 | #include <asm/text-patching.h> |
57 | #include <asm/cacheflush.h> | 58 | #include <asm/cacheflush.h> |
@@ -417,6 +418,14 @@ static void prepare_boost(struct kprobe *p, struct insn *insn) | |||
417 | } | 418 | } |
418 | } | 419 | } |
419 | 420 | ||
421 | /* Recover page to RW mode before releasing it */ | ||
422 | void free_insn_page(void *page) | ||
423 | { | ||
424 | set_memory_nx((unsigned long)page & PAGE_MASK, 1); | ||
425 | set_memory_rw((unsigned long)page & PAGE_MASK, 1); | ||
426 | module_memfree(page); | ||
427 | } | ||
428 | |||
420 | static int arch_copy_kprobe(struct kprobe *p) | 429 | static int arch_copy_kprobe(struct kprobe *p) |
421 | { | 430 | { |
422 | struct insn insn; | 431 | struct insn insn; |
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index ff40e74c9181..ffeae818aa7a 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
@@ -78,7 +78,7 @@ void __show_regs(struct pt_regs *regs, int all) | |||
78 | 78 | ||
79 | printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip); | 79 | printk(KERN_DEFAULT "EIP: %pS\n", (void *)regs->ip); |
80 | printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags, | 80 | printk(KERN_DEFAULT "EFLAGS: %08lx CPU: %d\n", regs->flags, |
81 | smp_processor_id()); | 81 | raw_smp_processor_id()); |
82 | 82 | ||
83 | printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", | 83 | printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", |
84 | regs->ax, regs->bx, regs->cx, regs->dx); | 84 | regs->ax, regs->bx, regs->cx, regs->dx); |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 0b4d3c686b1e..f81823695014 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -980,8 +980,6 @@ void __init setup_arch(char **cmdline_p) | |||
980 | */ | 980 | */ |
981 | x86_configure_nx(); | 981 | x86_configure_nx(); |
982 | 982 | ||
983 | simple_udelay_calibration(); | ||
984 | |||
985 | parse_early_param(); | 983 | parse_early_param(); |
986 | 984 | ||
987 | #ifdef CONFIG_MEMORY_HOTPLUG | 985 | #ifdef CONFIG_MEMORY_HOTPLUG |
@@ -1041,6 +1039,8 @@ void __init setup_arch(char **cmdline_p) | |||
1041 | */ | 1039 | */ |
1042 | init_hypervisor_platform(); | 1040 | init_hypervisor_platform(); |
1043 | 1041 | ||
1042 | simple_udelay_calibration(); | ||
1043 | |||
1044 | x86_init.resources.probe_roms(); | 1044 | x86_init.resources.probe_roms(); |
1045 | 1045 | ||
1046 | /* after parse_early_param, so could debug it */ | 1046 | /* after parse_early_param, so could debug it */ |
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 82c6d7f1fd73..b9389d72b2f7 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c | |||
@@ -104,6 +104,11 @@ static inline unsigned long *last_frame(struct unwind_state *state) | |||
104 | return (unsigned long *)task_pt_regs(state->task) - 2; | 104 | return (unsigned long *)task_pt_regs(state->task) - 2; |
105 | } | 105 | } |
106 | 106 | ||
107 | static bool is_last_frame(struct unwind_state *state) | ||
108 | { | ||
109 | return state->bp == last_frame(state); | ||
110 | } | ||
111 | |||
107 | #ifdef CONFIG_X86_32 | 112 | #ifdef CONFIG_X86_32 |
108 | #define GCC_REALIGN_WORDS 3 | 113 | #define GCC_REALIGN_WORDS 3 |
109 | #else | 114 | #else |
@@ -115,16 +120,15 @@ static inline unsigned long *last_aligned_frame(struct unwind_state *state) | |||
115 | return last_frame(state) - GCC_REALIGN_WORDS; | 120 | return last_frame(state) - GCC_REALIGN_WORDS; |
116 | } | 121 | } |
117 | 122 | ||
118 | static bool is_last_task_frame(struct unwind_state *state) | 123 | static bool is_last_aligned_frame(struct unwind_state *state) |
119 | { | 124 | { |
120 | unsigned long *last_bp = last_frame(state); | 125 | unsigned long *last_bp = last_frame(state); |
121 | unsigned long *aligned_bp = last_aligned_frame(state); | 126 | unsigned long *aligned_bp = last_aligned_frame(state); |
122 | 127 | ||
123 | /* | 128 | /* |
124 | * We have to check for the last task frame at two different locations | 129 | * GCC can occasionally decide to realign the stack pointer and change |
125 | * because gcc can occasionally decide to realign the stack pointer and | 130 | * the offset of the stack frame in the prologue of a function called |
126 | * change the offset of the stack frame in the prologue of a function | 131 | * by head/entry code. Examples: |
127 | * called by head/entry code. Examples: | ||
128 | * | 132 | * |
129 | * <start_secondary>: | 133 | * <start_secondary>: |
130 | * push %edi | 134 | * push %edi |
@@ -141,11 +145,38 @@ static bool is_last_task_frame(struct unwind_state *state) | |||
141 | * push %rbp | 145 | * push %rbp |
142 | * mov %rsp,%rbp | 146 | * mov %rsp,%rbp |
143 | * | 147 | * |
144 | * Note that after aligning the stack, it pushes a duplicate copy of | 148 | * After aligning the stack, it pushes a duplicate copy of the return |
145 | * the return address before pushing the frame pointer. | 149 | * address before pushing the frame pointer. |
150 | */ | ||
151 | return (state->bp == aligned_bp && *(aligned_bp + 1) == *(last_bp + 1)); | ||
152 | } | ||
153 | |||
154 | static bool is_last_ftrace_frame(struct unwind_state *state) | ||
155 | { | ||
156 | unsigned long *last_bp = last_frame(state); | ||
157 | unsigned long *last_ftrace_bp = last_bp - 3; | ||
158 | |||
159 | /* | ||
160 | * When unwinding from an ftrace handler of a function called by entry | ||
161 | * code, the stack layout of the last frame is: | ||
162 | * | ||
163 | * bp | ||
164 | * parent ret addr | ||
165 | * bp | ||
166 | * function ret addr | ||
167 | * parent ret addr | ||
168 | * pt_regs | ||
169 | * ----------------- | ||
146 | */ | 170 | */ |
147 | return (state->bp == last_bp || | 171 | return (state->bp == last_ftrace_bp && |
148 | (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1))); | 172 | *state->bp == *(state->bp + 2) && |
173 | *(state->bp + 1) == *(state->bp + 4)); | ||
174 | } | ||
175 | |||
176 | static bool is_last_task_frame(struct unwind_state *state) | ||
177 | { | ||
178 | return is_last_frame(state) || is_last_aligned_frame(state) || | ||
179 | is_last_ftrace_frame(state); | ||
149 | } | 180 | } |
150 | 181 | ||
151 | /* | 182 | /* |
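
The unwinder hunk splits the old two-way test into is_last_frame(), is_last_aligned_frame() and the new is_last_ftrace_frame(); the ftrace case recognizes the duplicated bp/return-address pair that the handler's frame leaves three words below the task's last frame. A toy model of that comparison, with a made-up stack:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* indices:          0        1        2        3        4          */
    /* layout:           bp   parent_ret   bp   func_ret  parent_ret    */
    uintptr_t stack[5] = { 0x1000, 0xc0de, 0x1000, 0xbeef, 0xc0de };
    uintptr_t *last_bp = &stack[3];      /* what last_frame() returns in this toy layout */
    uintptr_t *bp = last_bp - 3;         /* current bp inside the ftrace handler */

    bool last_ftrace = (bp == last_bp - 3) &&
                       (bp[0] == bp[2]) &&   /* duplicated saved bp */
                       (bp[1] == bp[4]);     /* duplicated parent return address */

    printf("is last ftrace frame: %d\n", last_ftrace);
    return 0;
}
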
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index c329d2894905..d24c8742d9b0 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1495,8 +1495,10 @@ EXPORT_SYMBOL_GPL(kvm_lapic_hv_timer_in_use); | |||
1495 | 1495 | ||
1496 | static void cancel_hv_timer(struct kvm_lapic *apic) | 1496 | static void cancel_hv_timer(struct kvm_lapic *apic) |
1497 | { | 1497 | { |
1498 | preempt_disable(); | ||
1498 | kvm_x86_ops->cancel_hv_timer(apic->vcpu); | 1499 | kvm_x86_ops->cancel_hv_timer(apic->vcpu); |
1499 | apic->lapic_timer.hv_timer_in_use = false; | 1500 | apic->lapic_timer.hv_timer_in_use = false; |
1501 | preempt_enable(); | ||
1500 | } | 1502 | } |
1501 | 1503 | ||
1502 | static bool start_hv_timer(struct kvm_lapic *apic) | 1504 | static bool start_hv_timer(struct kvm_lapic *apic) |
@@ -1934,7 +1936,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) | |||
1934 | for (i = 0; i < KVM_APIC_LVT_NUM; i++) | 1936 | for (i = 0; i < KVM_APIC_LVT_NUM; i++) |
1935 | kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); | 1937 | kvm_lapic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); |
1936 | apic_update_lvtt(apic); | 1938 | apic_update_lvtt(apic); |
1937 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) | 1939 | if (kvm_vcpu_is_reset_bsp(vcpu) && |
1940 | kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) | ||
1938 | kvm_lapic_set_reg(apic, APIC_LVT0, | 1941 | kvm_lapic_set_reg(apic, APIC_LVT0, |
1939 | SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); | 1942 | SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); |
1940 | apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0)); | 1943 | apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0)); |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 183ddb235fb4..ba9891ac5c56 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -1807,7 +1807,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, | |||
1807 | * AMD's VMCB does not have an explicit unusable field, so emulate it | 1807 | * AMD's VMCB does not have an explicit unusable field, so emulate it |
1808 | * for cross vendor migration purposes by "not present" | 1808 | * for cross vendor migration purposes by "not present" |
1809 | */ | 1809 | */ |
1810 | var->unusable = !var->present || (var->type == 0); | 1810 | var->unusable = !var->present; |
1811 | 1811 | ||
1812 | switch (seg) { | 1812 | switch (seg) { |
1813 | case VCPU_SREG_TR: | 1813 | case VCPU_SREG_TR: |
@@ -1840,6 +1840,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, | |||
1840 | */ | 1840 | */ |
1841 | if (var->unusable) | 1841 | if (var->unusable) |
1842 | var->db = 0; | 1842 | var->db = 0; |
1843 | /* This is symmetric with svm_set_segment() */ | ||
1843 | var->dpl = to_svm(vcpu)->vmcb->save.cpl; | 1844 | var->dpl = to_svm(vcpu)->vmcb->save.cpl; |
1844 | break; | 1845 | break; |
1845 | } | 1846 | } |
@@ -1980,18 +1981,14 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, | |||
1980 | s->base = var->base; | 1981 | s->base = var->base; |
1981 | s->limit = var->limit; | 1982 | s->limit = var->limit; |
1982 | s->selector = var->selector; | 1983 | s->selector = var->selector; |
1983 | if (var->unusable) | 1984 | s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); |
1984 | s->attrib = 0; | 1985 | s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; |
1985 | else { | 1986 | s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; |
1986 | s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK); | 1987 | s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT; |
1987 | s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT; | 1988 | s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; |
1988 | s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT; | 1989 | s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; |
1989 | s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT; | 1990 | s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; |
1990 | s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT; | 1991 | s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; |
1991 | s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT; | ||
1992 | s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT; | ||
1993 | s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT; | ||
1994 | } | ||
1995 | 1992 | ||
1996 | /* | 1993 | /* |
1997 | * This is always accurate, except if SYSRET returned to a segment | 1994 | * This is always accurate, except if SYSRET returned to a segment |
@@ -2000,7 +1997,8 @@ static void svm_set_segment(struct kvm_vcpu *vcpu, | |||
2000 | * would entail passing the CPL to userspace and back. | 1997 | * would entail passing the CPL to userspace and back. |
2001 | */ | 1998 | */ |
2002 | if (seg == VCPU_SREG_SS) | 1999 | if (seg == VCPU_SREG_SS) |
2003 | svm->vmcb->save.cpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3; | 2000 | /* This is symmetric with svm_get_segment() */ |
2001 | svm->vmcb->save.cpl = (var->dpl & 3); | ||
2004 | 2002 | ||
2005 | mark_dirty(svm->vmcb, VMCB_SEG); | 2003 | mark_dirty(svm->vmcb, VMCB_SEG); |
2006 | } | 2004 | } |
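
svm_set_segment() above now always encodes the attribute bits and folds "unusable" into the Present bit instead of zeroing the whole attrib word, keeping it symmetric with svm_get_segment(). A sketch of that packing follows; the struct is a stand-in for kvm_segment and the shift values mirror the SVM_SELECTOR_* constants.

#include <stdint.h>
#include <stdio.h>

#define S_SHIFT   4
#define DPL_SHIFT 5
#define P_SHIFT   7
#define AVL_SHIFT 8
#define L_SHIFT   9
#define DB_SHIFT  10
#define G_SHIFT   11

struct seg { unsigned type:4, s:1, dpl:2, present:1, avl:1, l:1, db:1, g:1, unusable:1; };

static uint16_t pack_attrib(const struct seg *var)
{
    uint16_t a = var->type & 0xf;

    a |= (var->s & 1) << S_SHIFT;
    a |= (var->dpl & 3) << DPL_SHIFT;
    a |= ((var->present & 1) && !var->unusable) << P_SHIFT;  /* unusable clears P only */
    a |= (var->avl & 1) << AVL_SHIFT;
    a |= (var->l & 1) << L_SHIFT;
    a |= (var->db & 1) << DB_SHIFT;
    a |= (var->g & 1) << G_SHIFT;
    return a;
}

int main(void)
{
    struct seg code = { .type = 0xb, .s = 1, .dpl = 0, .present = 1, .db = 1, .g = 1 };

    printf("attrib = %#x\n", pack_attrib(&code));
    return 0;
}
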
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 72f78396bc09..9b4b5d6dcd34 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -6914,97 +6914,21 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu, | |||
6914 | return 0; | 6914 | return 0; |
6915 | } | 6915 | } |
6916 | 6916 | ||
6917 | /* | 6917 | static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer) |
6918 | * This function performs the various checks including | ||
6919 | * - if it's 4KB aligned | ||
6920 | * - No bits beyond the physical address width are set | ||
6921 | * - Returns 0 on success or else 1 | ||
6922 | * (Intel SDM Section 30.3) | ||
6923 | */ | ||
6924 | static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, | ||
6925 | gpa_t *vmpointer) | ||
6926 | { | 6918 | { |
6927 | gva_t gva; | 6919 | gva_t gva; |
6928 | gpa_t vmptr; | ||
6929 | struct x86_exception e; | 6920 | struct x86_exception e; |
6930 | struct page *page; | ||
6931 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
6932 | int maxphyaddr = cpuid_maxphyaddr(vcpu); | ||
6933 | 6921 | ||
6934 | if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), | 6922 | if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), |
6935 | vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) | 6923 | vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva)) |
6936 | return 1; | 6924 | return 1; |
6937 | 6925 | ||
6938 | if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr, | 6926 | if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, vmpointer, |
6939 | sizeof(vmptr), &e)) { | 6927 | sizeof(*vmpointer), &e)) { |
6940 | kvm_inject_page_fault(vcpu, &e); | 6928 | kvm_inject_page_fault(vcpu, &e); |
6941 | return 1; | 6929 | return 1; |
6942 | } | 6930 | } |
6943 | 6931 | ||
6944 | switch (exit_reason) { | ||
6945 | case EXIT_REASON_VMON: | ||
6946 | /* | ||
6947 | * SDM 3: 24.11.5 | ||
6948 | * The first 4 bytes of VMXON region contain the supported | ||
6949 | * VMCS revision identifier | ||
6950 | * | ||
6951 | * Note - IA32_VMX_BASIC[48] will never be 1 | ||
6952 | * for the nested case; | ||
6953 | * which replaces physical address width with 32 | ||
6954 | * | ||
6955 | */ | ||
6956 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { | ||
6957 | nested_vmx_failInvalid(vcpu); | ||
6958 | return kvm_skip_emulated_instruction(vcpu); | ||
6959 | } | ||
6960 | |||
6961 | page = nested_get_page(vcpu, vmptr); | ||
6962 | if (page == NULL) { | ||
6963 | nested_vmx_failInvalid(vcpu); | ||
6964 | return kvm_skip_emulated_instruction(vcpu); | ||
6965 | } | ||
6966 | if (*(u32 *)kmap(page) != VMCS12_REVISION) { | ||
6967 | kunmap(page); | ||
6968 | nested_release_page_clean(page); | ||
6969 | nested_vmx_failInvalid(vcpu); | ||
6970 | return kvm_skip_emulated_instruction(vcpu); | ||
6971 | } | ||
6972 | kunmap(page); | ||
6973 | nested_release_page_clean(page); | ||
6974 | vmx->nested.vmxon_ptr = vmptr; | ||
6975 | break; | ||
6976 | case EXIT_REASON_VMCLEAR: | ||
6977 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { | ||
6978 | nested_vmx_failValid(vcpu, | ||
6979 | VMXERR_VMCLEAR_INVALID_ADDRESS); | ||
6980 | return kvm_skip_emulated_instruction(vcpu); | ||
6981 | } | ||
6982 | |||
6983 | if (vmptr == vmx->nested.vmxon_ptr) { | ||
6984 | nested_vmx_failValid(vcpu, | ||
6985 | VMXERR_VMCLEAR_VMXON_POINTER); | ||
6986 | return kvm_skip_emulated_instruction(vcpu); | ||
6987 | } | ||
6988 | break; | ||
6989 | case EXIT_REASON_VMPTRLD: | ||
6990 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { | ||
6991 | nested_vmx_failValid(vcpu, | ||
6992 | VMXERR_VMPTRLD_INVALID_ADDRESS); | ||
6993 | return kvm_skip_emulated_instruction(vcpu); | ||
6994 | } | ||
6995 | |||
6996 | if (vmptr == vmx->nested.vmxon_ptr) { | ||
6997 | nested_vmx_failValid(vcpu, | ||
6998 | VMXERR_VMPTRLD_VMXON_POINTER); | ||
6999 | return kvm_skip_emulated_instruction(vcpu); | ||
7000 | } | ||
7001 | break; | ||
7002 | default: | ||
7003 | return 1; /* shouldn't happen */ | ||
7004 | } | ||
7005 | |||
7006 | if (vmpointer) | ||
7007 | *vmpointer = vmptr; | ||
7008 | return 0; | 6932 | return 0; |
7009 | } | 6933 | } |
7010 | 6934 | ||
@@ -7066,6 +6990,8 @@ out_msr_bitmap: | |||
7066 | static int handle_vmon(struct kvm_vcpu *vcpu) | 6990 | static int handle_vmon(struct kvm_vcpu *vcpu) |
7067 | { | 6991 | { |
7068 | int ret; | 6992 | int ret; |
6993 | gpa_t vmptr; | ||
6994 | struct page *page; | ||
7069 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 6995 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
7070 | const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED | 6996 | const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED |
7071 | | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; | 6997 | | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; |
@@ -7095,9 +7021,37 @@ static int handle_vmon(struct kvm_vcpu *vcpu) | |||
7095 | return 1; | 7021 | return 1; |
7096 | } | 7022 | } |
7097 | 7023 | ||
7098 | if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL)) | 7024 | if (nested_vmx_get_vmptr(vcpu, &vmptr)) |
7099 | return 1; | 7025 | return 1; |
7100 | 7026 | ||
7027 | /* | ||
7028 | * SDM 3: 24.11.5 | ||
7029 | * The first 4 bytes of VMXON region contain the supported | ||
7030 | * VMCS revision identifier | ||
7031 | * | ||
7032 | * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; | ||
7033 | * which replaces physical address width with 32 | ||
7034 | */ | ||
7035 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { | ||
7036 | nested_vmx_failInvalid(vcpu); | ||
7037 | return kvm_skip_emulated_instruction(vcpu); | ||
7038 | } | ||
7039 | |||
7040 | page = nested_get_page(vcpu, vmptr); | ||
7041 | if (page == NULL) { | ||
7042 | nested_vmx_failInvalid(vcpu); | ||
7043 | return kvm_skip_emulated_instruction(vcpu); | ||
7044 | } | ||
7045 | if (*(u32 *)kmap(page) != VMCS12_REVISION) { | ||
7046 | kunmap(page); | ||
7047 | nested_release_page_clean(page); | ||
7048 | nested_vmx_failInvalid(vcpu); | ||
7049 | return kvm_skip_emulated_instruction(vcpu); | ||
7050 | } | ||
7051 | kunmap(page); | ||
7052 | nested_release_page_clean(page); | ||
7053 | |||
7054 | vmx->nested.vmxon_ptr = vmptr; | ||
7101 | ret = enter_vmx_operation(vcpu); | 7055 | ret = enter_vmx_operation(vcpu); |
7102 | if (ret) | 7056 | if (ret) |
7103 | return ret; | 7057 | return ret; |
@@ -7213,9 +7167,19 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) | |||
7213 | if (!nested_vmx_check_permission(vcpu)) | 7167 | if (!nested_vmx_check_permission(vcpu)) |
7214 | return 1; | 7168 | return 1; |
7215 | 7169 | ||
7216 | if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) | 7170 | if (nested_vmx_get_vmptr(vcpu, &vmptr)) |
7217 | return 1; | 7171 | return 1; |
7218 | 7172 | ||
7173 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { | ||
7174 | nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS); | ||
7175 | return kvm_skip_emulated_instruction(vcpu); | ||
7176 | } | ||
7177 | |||
7178 | if (vmptr == vmx->nested.vmxon_ptr) { | ||
7179 | nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER); | ||
7180 | return kvm_skip_emulated_instruction(vcpu); | ||
7181 | } | ||
7182 | |||
7219 | if (vmptr == vmx->nested.current_vmptr) | 7183 | if (vmptr == vmx->nested.current_vmptr) |
7220 | nested_release_vmcs12(vmx); | 7184 | nested_release_vmcs12(vmx); |
7221 | 7185 | ||
@@ -7545,9 +7509,19 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) | |||
7545 | if (!nested_vmx_check_permission(vcpu)) | 7509 | if (!nested_vmx_check_permission(vcpu)) |
7546 | return 1; | 7510 | return 1; |
7547 | 7511 | ||
7548 | if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr)) | 7512 | if (nested_vmx_get_vmptr(vcpu, &vmptr)) |
7549 | return 1; | 7513 | return 1; |
7550 | 7514 | ||
7515 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> cpuid_maxphyaddr(vcpu))) { | ||
7516 | nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS); | ||
7517 | return kvm_skip_emulated_instruction(vcpu); | ||
7518 | } | ||
7519 | |||
7520 | if (vmptr == vmx->nested.vmxon_ptr) { | ||
7521 | nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_VMXON_POINTER); | ||
7522 | return kvm_skip_emulated_instruction(vcpu); | ||
7523 | } | ||
7524 | |||
7551 | if (vmx->nested.current_vmptr != vmptr) { | 7525 | if (vmx->nested.current_vmptr != vmptr) { |
7552 | struct vmcs12 *new_vmcs12; | 7526 | struct vmcs12 *new_vmcs12; |
7553 | struct page *page; | 7527 | struct page *page; |
@@ -7913,11 +7887,13 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, | |||
7913 | { | 7887 | { |
7914 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 7888 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
7915 | int cr = exit_qualification & 15; | 7889 | int cr = exit_qualification & 15; |
7916 | int reg = (exit_qualification >> 8) & 15; | 7890 | int reg; |
7917 | unsigned long val = kvm_register_readl(vcpu, reg); | 7891 | unsigned long val; |
7918 | 7892 | ||
7919 | switch ((exit_qualification >> 4) & 3) { | 7893 | switch ((exit_qualification >> 4) & 3) { |
7920 | case 0: /* mov to cr */ | 7894 | case 0: /* mov to cr */ |
7895 | reg = (exit_qualification >> 8) & 15; | ||
7896 | val = kvm_register_readl(vcpu, reg); | ||
7921 | switch (cr) { | 7897 | switch (cr) { |
7922 | case 0: | 7898 | case 0: |
7923 | if (vmcs12->cr0_guest_host_mask & | 7899 | if (vmcs12->cr0_guest_host_mask & |
@@ -7972,6 +7948,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, | |||
7972 | * lmsw can change bits 1..3 of cr0, and only set bit 0 of | 7948 | * lmsw can change bits 1..3 of cr0, and only set bit 0 of |
7973 | * cr0. Other attempted changes are ignored, with no exit. | 7949 | * cr0. Other attempted changes are ignored, with no exit. |
7974 | */ | 7950 | */ |
7951 | val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f; | ||
7975 | if (vmcs12->cr0_guest_host_mask & 0xe & | 7952 | if (vmcs12->cr0_guest_host_mask & 0xe & |
7976 | (val ^ vmcs12->cr0_read_shadow)) | 7953 | (val ^ vmcs12->cr0_read_shadow)) |
7977 | return true; | 7954 | return true; |
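
With nested_vmx_check_vmptr() gone, handle_vmon(), handle_vmclear() and handle_vmptrld() each read the pointer via nested_vmx_get_vmptr() and then apply the alignment and physical-address-width checks themselves. A sketch of that validity test, with an assumed MAXPHYADDR of 40:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool vmptr_is_valid(uint64_t vmptr, unsigned int maxphyaddr)
{
    bool aligned  = (vmptr & 0xfffULL) == 0;     /* PAGE_ALIGNED() */
    bool in_range = (vmptr >> maxphyaddr) == 0;  /* no bits beyond the physical width */

    return aligned && in_range;
}

int main(void)
{
    unsigned int maxphyaddr = 40;                /* illustrative value */

    printf("%d\n", vmptr_is_valid(0x12345000ULL, maxphyaddr));   /* ok */
    printf("%d\n", vmptr_is_valid(0x12345010ULL, maxphyaddr));   /* misaligned */
    printf("%d\n", vmptr_is_valid(1ULL << 45, maxphyaddr));      /* beyond width */
    return 0;
}
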
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 02363e37d4a6..a2cd0997343c 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -8394,10 +8394,13 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) | |||
8394 | if (vcpu->arch.pv.pv_unhalted) | 8394 | if (vcpu->arch.pv.pv_unhalted) |
8395 | return true; | 8395 | return true; |
8396 | 8396 | ||
8397 | if (atomic_read(&vcpu->arch.nmi_queued)) | 8397 | if (kvm_test_request(KVM_REQ_NMI, vcpu) || |
8398 | (vcpu->arch.nmi_pending && | ||
8399 | kvm_x86_ops->nmi_allowed(vcpu))) | ||
8398 | return true; | 8400 | return true; |
8399 | 8401 | ||
8400 | if (kvm_test_request(KVM_REQ_SMI, vcpu)) | 8402 | if (kvm_test_request(KVM_REQ_SMI, vcpu) || |
8403 | (vcpu->arch.smi_pending && !is_smm(vcpu))) | ||
8401 | return true; | 8404 | return true; |
8402 | 8405 | ||
8403 | if (kvm_arch_interrupt_allowed(vcpu) && | 8406 | if (kvm_arch_interrupt_allowed(vcpu) && |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 1dcd2be4cce4..c8520b2c62d2 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -186,7 +186,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache) | |||
186 | unsigned int i, level; | 186 | unsigned int i, level; |
187 | unsigned long addr; | 187 | unsigned long addr; |
188 | 188 | ||
189 | BUG_ON(irqs_disabled()); | 189 | BUG_ON(irqs_disabled() && !early_boot_irqs_disabled); |
190 | WARN_ON(PAGE_ALIGN(start) != start); | 190 | WARN_ON(PAGE_ALIGN(start) != start); |
191 | 191 | ||
192 | on_each_cpu(__cpa_flush_range, NULL, 1); | 192 | on_each_cpu(__cpa_flush_range, NULL, 1); |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 7e76a4d8304b..43b96f5f78ba 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -828,9 +828,11 @@ static void __init kexec_enter_virtual_mode(void) | |||
828 | 828 | ||
829 | /* | 829 | /* |
830 | * We don't do virtual mode, since we don't do runtime services, on | 830 | * We don't do virtual mode, since we don't do runtime services, on |
831 | * non-native EFI | 831 | * non-native EFI. With efi=old_map, we don't do runtime services in |
832 | * the kexec kernel because in the initial boot something else might | ||
833 | * have been mapped at these virtual addresses. | ||
832 | */ | 834 | */ |
833 | if (!efi_is_native()) { | 835 | if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) { |
834 | efi_memmap_unmap(); | 836 | efi_memmap_unmap(); |
835 | clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); | 837 | clear_bit(EFI_RUNTIME_SERVICES, &efi.flags); |
836 | return; | 838 | return; |
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index c488625c9712..eb8dff15a7f6 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -71,11 +71,13 @@ static void __init early_code_mapping_set_exec(int executable) | |||
71 | 71 | ||
72 | pgd_t * __init efi_call_phys_prolog(void) | 72 | pgd_t * __init efi_call_phys_prolog(void) |
73 | { | 73 | { |
74 | unsigned long vaddress; | 74 | unsigned long vaddr, addr_pgd, addr_p4d, addr_pud; |
75 | pgd_t *save_pgd; | 75 | pgd_t *save_pgd, *pgd_k, *pgd_efi; |
76 | p4d_t *p4d, *p4d_k, *p4d_efi; | ||
77 | pud_t *pud; | ||
76 | 78 | ||
77 | int pgd; | 79 | int pgd; |
78 | int n_pgds; | 80 | int n_pgds, i, j; |
79 | 81 | ||
80 | if (!efi_enabled(EFI_OLD_MEMMAP)) { | 82 | if (!efi_enabled(EFI_OLD_MEMMAP)) { |
81 | save_pgd = (pgd_t *)read_cr3(); | 83 | save_pgd = (pgd_t *)read_cr3(); |
@@ -88,10 +90,49 @@ pgd_t * __init efi_call_phys_prolog(void) | |||
88 | n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); | 90 | n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE); |
89 | save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL); | 91 | save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL); |
90 | 92 | ||
93 | /* | ||
94 | * Build 1:1 identity mapping for efi=old_map usage. Note that | ||
95 | * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while | ||
96 | * it is PUD_SIZE aligned with KASLR enabled. So for a given physical | ||
97 | * address X, pud_index(X) != pud_index(__va(X)); we can only copy the | ||
98 | * PUD entry of __va(X) into the PUD entry of X to build the 1:1 mapping. | ||
99 | * This means here we can only reuse the PMD tables of the direct mapping. | ||
100 | */ | ||
91 | for (pgd = 0; pgd < n_pgds; pgd++) { | 101 | for (pgd = 0; pgd < n_pgds; pgd++) { |
92 | save_pgd[pgd] = *pgd_offset_k(pgd * PGDIR_SIZE); | 102 | addr_pgd = (unsigned long)(pgd * PGDIR_SIZE); |
93 | vaddress = (unsigned long)__va(pgd * PGDIR_SIZE); | 103 | vaddr = (unsigned long)__va(pgd * PGDIR_SIZE); |
94 | set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress)); | 104 | pgd_efi = pgd_offset_k(addr_pgd); |
105 | save_pgd[pgd] = *pgd_efi; | ||
106 | |||
107 | p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd); | ||
108 | if (!p4d) { | ||
109 | pr_err("Failed to allocate p4d table!\n"); | ||
110 | goto out; | ||
111 | } | ||
112 | |||
113 | for (i = 0; i < PTRS_PER_P4D; i++) { | ||
114 | addr_p4d = addr_pgd + i * P4D_SIZE; | ||
115 | p4d_efi = p4d + p4d_index(addr_p4d); | ||
116 | |||
117 | pud = pud_alloc(&init_mm, p4d_efi, addr_p4d); | ||
118 | if (!pud) { | ||
119 | pr_err("Failed to allocate pud table!\n"); | ||
120 | goto out; | ||
121 | } | ||
122 | |||
123 | for (j = 0; j < PTRS_PER_PUD; j++) { | ||
124 | addr_pud = addr_p4d + j * PUD_SIZE; | ||
125 | |||
126 | if (addr_pud > (max_pfn << PAGE_SHIFT)) | ||
127 | break; | ||
128 | |||
129 | vaddr = (unsigned long)__va(addr_pud); | ||
130 | |||
131 | pgd_k = pgd_offset_k(vaddr); | ||
132 | p4d_k = p4d_offset(pgd_k, vaddr); | ||
133 | pud[j] = *pud_offset(p4d_k, vaddr); | ||
134 | } | ||
135 | } | ||
95 | } | 136 | } |
96 | out: | 137 | out: |
97 | __flush_tlb_all(); | 138 | __flush_tlb_all(); |
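The identity-mapping comment above rests on page-table index arithmetic: with a KASLR-style, PUD-aligned direct-map offset, the indices of X and __va(X) differ, so the patch copies PUD entries (reusing the direct map's PMD tables) instead of whole PGD entries. A standalone sketch of that arithmetic, with the usual x86-64 shift values assumed rather than taken from this patch:

#include <stdint.h>
#include <stdio.h>

/* Assumed x86-64 layout for 4 KiB pages: 512 entries per level. */
#define PUD_SHIFT   30
#define P4D_SHIFT   39
#define PGDIR_SHIFT 48
#define PTRS_PER    512

static unsigned int idx(uint64_t addr, unsigned int shift)
{
	return (unsigned int)((addr >> shift) & (PTRS_PER - 1));
}

int main(void)
{
	uint64_t phys   = 0x40000000ULL;          /* example physical address */
	uint64_t offset = 0xffff888040000000ULL;  /* PUD-aligned, KASLR-style */
	uint64_t virt   = phys + offset;          /* models __va(phys) */

	/* The indices differ, so an identity mapping cannot simply reuse the
	 * kernel's PGD entries; copying the PUD entry of __va(X) into the slot
	 * for X shares the PMD tables that describe the same physical memory. */
	printf("pgd %u vs %u\n", idx(phys, PGDIR_SHIFT), idx(virt, PGDIR_SHIFT));
	printf("p4d %u vs %u\n", idx(phys, P4D_SHIFT),   idx(virt, P4D_SHIFT));
	printf("pud %u vs %u\n", idx(phys, PUD_SHIFT),   idx(virt, PUD_SHIFT));
	return 0;
}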
@@ -104,8 +145,11 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) | |||
104 | /* | 145 | /* |
105 | * After the lock is released, the original page table is restored. | 146 | * After the lock is released, the original page table is restored. |
106 | */ | 147 | */ |
107 | int pgd_idx; | 148 | int pgd_idx, i; |
108 | int nr_pgds; | 149 | int nr_pgds; |
150 | pgd_t *pgd; | ||
151 | p4d_t *p4d; | ||
152 | pud_t *pud; | ||
109 | 153 | ||
110 | if (!efi_enabled(EFI_OLD_MEMMAP)) { | 154 | if (!efi_enabled(EFI_OLD_MEMMAP)) { |
111 | write_cr3((unsigned long)save_pgd); | 155 | write_cr3((unsigned long)save_pgd); |
@@ -115,9 +159,28 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) | |||
115 | 159 | ||
116 | nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); | 160 | nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE); |
117 | 161 | ||
118 | for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) | 162 | for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) { |
163 | pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE); | ||
119 | set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); | 164 | set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]); |
120 | 165 | ||
166 | if (!(pgd_val(*pgd) & _PAGE_PRESENT)) | ||
167 | continue; | ||
168 | |||
169 | for (i = 0; i < PTRS_PER_P4D; i++) { | ||
170 | p4d = p4d_offset(pgd, | ||
171 | pgd_idx * PGDIR_SIZE + i * P4D_SIZE); | ||
172 | |||
173 | if (!(p4d_val(*p4d) & _PAGE_PRESENT)) | ||
174 | continue; | ||
175 | |||
176 | pud = (pud_t *)p4d_page_vaddr(*p4d); | ||
177 | pud_free(&init_mm, pud); | ||
178 | } | ||
179 | |||
180 | p4d = (p4d_t *)pgd_page_vaddr(*pgd); | ||
181 | p4d_free(&init_mm, p4d); | ||
182 | } | ||
183 | |||
121 | kfree(save_pgd); | 184 | kfree(save_pgd); |
122 | 185 | ||
123 | __flush_tlb_all(); | 186 | __flush_tlb_all(); |
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 26615991d69c..e0cf95a83f3f 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c | |||
@@ -360,6 +360,9 @@ void __init efi_free_boot_services(void) | |||
360 | free_bootmem_late(start, size); | 360 | free_bootmem_late(start, size); |
361 | } | 361 | } |
362 | 362 | ||
363 | if (!num_entries) | ||
364 | return; | ||
365 | |||
363 | new_size = efi.memmap.desc_size * num_entries; | 366 | new_size = efi.memmap.desc_size * num_entries; |
364 | new_phys = efi_memmap_alloc(num_entries); | 367 | new_phys = efi_memmap_alloc(num_entries); |
365 | if (!new_phys) { | 368 | if (!new_phys) { |
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 7c2947128f58..0480892e97e5 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -74,7 +74,7 @@ static void blkg_free(struct blkcg_gq *blkg) | |||
74 | blkcg_policy[i]->pd_free_fn(blkg->pd[i]); | 74 | blkcg_policy[i]->pd_free_fn(blkg->pd[i]); |
75 | 75 | ||
76 | if (blkg->blkcg != &blkcg_root) | 76 | if (blkg->blkcg != &blkcg_root) |
77 | blk_exit_rl(&blkg->rl); | 77 | blk_exit_rl(blkg->q, &blkg->rl); |
78 | 78 | ||
79 | blkg_rwstat_exit(&blkg->stat_ios); | 79 | blkg_rwstat_exit(&blkg->stat_ios); |
80 | blkg_rwstat_exit(&blkg->stat_bytes); | 80 | blkg_rwstat_exit(&blkg->stat_bytes); |
diff --git a/block/blk-core.c b/block/blk-core.c index c7068520794b..a7421b772d0e 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -648,13 +648,19 @@ int blk_init_rl(struct request_list *rl, struct request_queue *q, | |||
648 | if (!rl->rq_pool) | 648 | if (!rl->rq_pool) |
649 | return -ENOMEM; | 649 | return -ENOMEM; |
650 | 650 | ||
651 | if (rl != &q->root_rl) | ||
652 | WARN_ON_ONCE(!blk_get_queue(q)); | ||
653 | |||
651 | return 0; | 654 | return 0; |
652 | } | 655 | } |
653 | 656 | ||
654 | void blk_exit_rl(struct request_list *rl) | 657 | void blk_exit_rl(struct request_queue *q, struct request_list *rl) |
655 | { | 658 | { |
656 | if (rl->rq_pool) | 659 | if (rl->rq_pool) { |
657 | mempool_destroy(rl->rq_pool); | 660 | mempool_destroy(rl->rq_pool); |
661 | if (rl != &q->root_rl) | ||
662 | blk_put_queue(q); | ||
663 | } | ||
658 | } | 664 | } |
659 | 665 | ||
660 | struct request_queue *blk_alloc_queue(gfp_t gfp_mask) | 666 | struct request_queue *blk_alloc_queue(gfp_t gfp_mask) |
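The blk_init_rl()/blk_exit_rl() change above pins the request_queue for every externally allocated request_list and drops that reference on teardown, while the queue's own embedded root_rl deliberately takes no reference on its container. A hedged userspace sketch of that ownership pattern (all names invented):

#include <stdio.h>

/* Toy refcounted owner; the embedded child takes no reference so the owner
 * never keeps itself alive, mirroring q->root_rl in the hunk above. */
struct owner {
	int refcount;
};

struct child {
	struct owner *parent;
	int embedded;
};

static void owner_get(struct owner *o) { o->refcount++; }

static void owner_put(struct owner *o)
{
	if (--o->refcount == 0)
		printf("owner released\n");
}

static void child_init(struct child *c, struct owner *o, int embedded)
{
	c->parent = o;
	c->embedded = embedded;
	if (!embedded)
		owner_get(o);          /* external child pins the owner */
}

static void child_exit(struct child *c)
{
	if (!c->embedded)
		owner_put(c->parent);  /* balanced in the exit path */
}

int main(void)
{
	struct owner q = { .refcount = 1 };
	struct child root, extra;

	child_init(&root, &q, 1);
	child_init(&extra, &q, 0);

	child_exit(&extra);
	child_exit(&root);
	owner_put(&q);                 /* final reference: "owner released" */
	return 0;
}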
diff --git a/block/blk-mq.c b/block/blk-mq.c index f2224ffd225d..1bcccedcc74f 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -2641,7 +2641,8 @@ int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr) | |||
2641 | return ret; | 2641 | return ret; |
2642 | } | 2642 | } |
2643 | 2643 | ||
2644 | void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) | 2644 | static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, |
2645 | int nr_hw_queues) | ||
2645 | { | 2646 | { |
2646 | struct request_queue *q; | 2647 | struct request_queue *q; |
2647 | 2648 | ||
@@ -2665,6 +2666,13 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) | |||
2665 | list_for_each_entry(q, &set->tag_list, tag_set_list) | 2666 | list_for_each_entry(q, &set->tag_list, tag_set_list) |
2666 | blk_mq_unfreeze_queue(q); | 2667 | blk_mq_unfreeze_queue(q); |
2667 | } | 2668 | } |
2669 | |||
2670 | void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) | ||
2671 | { | ||
2672 | mutex_lock(&set->tag_list_lock); | ||
2673 | __blk_mq_update_nr_hw_queues(set, nr_hw_queues); | ||
2674 | mutex_unlock(&set->tag_list_lock); | ||
2675 | } | ||
2668 | EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); | 2676 | EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); |
2669 | 2677 | ||
2670 | /* Enable polling stats and return whether they were already enabled. */ | 2678 | /* Enable polling stats and return whether they were already enabled. */ |
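Splitting blk_mq_update_nr_hw_queues() into a locked wrapper over an unlocked __ helper is a common kernel convention; callers that already hold tag_list_lock can use the double-underscore variant directly. A minimal pthread sketch of the same shape (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int value;

/* Internal helper: caller must already hold 'lock'. */
static void __set_value(int v)
{
	value = v;
}

/* Public entry point: takes the lock around the unlocked helper, mirroring
 * blk_mq_update_nr_hw_queues() wrapping __blk_mq_update_nr_hw_queues(). */
static void set_value(int v)
{
	pthread_mutex_lock(&lock);
	__set_value(v);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	set_value(4);
	printf("%d\n", value);
	return 0;
}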
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 712b018e9f54..283da7fbe034 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -809,7 +809,7 @@ static void blk_release_queue(struct kobject *kobj) | |||
809 | 809 | ||
810 | blk_free_queue_stats(q->stats); | 810 | blk_free_queue_stats(q->stats); |
811 | 811 | ||
812 | blk_exit_rl(&q->root_rl); | 812 | blk_exit_rl(q, &q->root_rl); |
813 | 813 | ||
814 | if (q->queue_tags) | 814 | if (q->queue_tags) |
815 | __blk_queue_free_tags(q); | 815 | __blk_queue_free_tags(q); |
diff --git a/block/blk.h b/block/blk.h index 2ed70228e44f..83c8e1100525 100644 --- a/block/blk.h +++ b/block/blk.h | |||
@@ -59,7 +59,7 @@ void blk_free_flush_queue(struct blk_flush_queue *q); | |||
59 | 59 | ||
60 | int blk_init_rl(struct request_list *rl, struct request_queue *q, | 60 | int blk_init_rl(struct request_list *rl, struct request_queue *q, |
61 | gfp_t gfp_mask); | 61 | gfp_t gfp_mask); |
62 | void blk_exit_rl(struct request_list *rl); | 62 | void blk_exit_rl(struct request_queue *q, struct request_list *rl); |
63 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | 63 | void blk_rq_bio_prep(struct request_queue *q, struct request *rq, |
64 | struct bio *bio); | 64 | struct bio *bio); |
65 | void blk_queue_bypass_start(struct request_queue *q); | 65 | void blk_queue_bypass_start(struct request_queue *q); |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index da69b079725f..b7e9c7feeab2 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -38,9 +38,13 @@ static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */ | |||
38 | static const int cfq_hist_divisor = 4; | 38 | static const int cfq_hist_divisor = 4; |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * offset from end of service tree | 41 | * offset from end of queue service tree for idle class |
42 | */ | 42 | */ |
43 | #define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5) | 43 | #define CFQ_IDLE_DELAY (NSEC_PER_SEC / 5) |
44 | /* offset from end of group service tree under time slice mode */ | ||
45 | #define CFQ_SLICE_MODE_GROUP_DELAY (NSEC_PER_SEC / 5) | ||
46 | /* offset from end of group service under IOPS mode */ | ||
47 | #define CFQ_IOPS_MODE_GROUP_DELAY (HZ / 5) | ||
44 | 48 | ||
45 | /* | 49 | /* |
46 | * below this threshold, we consider thinktime immediate | 50 | * below this threshold, we consider thinktime immediate |
@@ -1362,6 +1366,14 @@ cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg) | |||
1362 | cfqg->vfraction = max_t(unsigned, vfr, 1); | 1366 | cfqg->vfraction = max_t(unsigned, vfr, 1); |
1363 | } | 1367 | } |
1364 | 1368 | ||
1369 | static inline u64 cfq_get_cfqg_vdisktime_delay(struct cfq_data *cfqd) | ||
1370 | { | ||
1371 | if (!iops_mode(cfqd)) | ||
1372 | return CFQ_SLICE_MODE_GROUP_DELAY; | ||
1373 | else | ||
1374 | return CFQ_IOPS_MODE_GROUP_DELAY; | ||
1375 | } | ||
1376 | |||
1365 | static void | 1377 | static void |
1366 | cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) | 1378 | cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) |
1367 | { | 1379 | { |
@@ -1381,7 +1393,8 @@ cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg) | |||
1381 | n = rb_last(&st->rb); | 1393 | n = rb_last(&st->rb); |
1382 | if (n) { | 1394 | if (n) { |
1383 | __cfqg = rb_entry_cfqg(n); | 1395 | __cfqg = rb_entry_cfqg(n); |
1384 | cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY; | 1396 | cfqg->vdisktime = __cfqg->vdisktime + |
1397 | cfq_get_cfqg_vdisktime_delay(cfqd); | ||
1385 | } else | 1398 | } else |
1386 | cfqg->vdisktime = st->min_vdisktime; | 1399 | cfqg->vdisktime = st->min_vdisktime; |
1387 | cfq_group_service_tree_add(st, cfqg); | 1400 | cfq_group_service_tree_add(st, cfqg); |
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c index 5a968a78652b..7abe66505739 100644 --- a/drivers/acpi/acpica/tbutils.c +++ b/drivers/acpi/acpica/tbutils.c | |||
@@ -418,11 +418,7 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc, | |||
418 | 418 | ||
419 | table_desc->validation_count++; | 419 | table_desc->validation_count++; |
420 | if (table_desc->validation_count == 0) { | 420 | if (table_desc->validation_count == 0) { |
421 | ACPI_ERROR((AE_INFO, | ||
422 | "Table %p, Validation count is zero after increment\n", | ||
423 | table_desc)); | ||
424 | table_desc->validation_count--; | 421 | table_desc->validation_count--; |
425 | return_ACPI_STATUS(AE_LIMIT); | ||
426 | } | 422 | } |
427 | 423 | ||
428 | *out_table = table_desc->pointer; | 424 | *out_table = table_desc->pointer; |
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 25aba9b107dd..9ad8cdb58743 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c | |||
@@ -113,7 +113,7 @@ struct acpi_button { | |||
113 | 113 | ||
114 | static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); | 114 | static BLOCKING_NOTIFIER_HEAD(acpi_lid_notifier); |
115 | static struct acpi_device *lid_device; | 115 | static struct acpi_device *lid_device; |
116 | static u8 lid_init_state = ACPI_BUTTON_LID_INIT_OPEN; | 116 | static u8 lid_init_state = ACPI_BUTTON_LID_INIT_METHOD; |
117 | 117 | ||
118 | static unsigned long lid_report_interval __read_mostly = 500; | 118 | static unsigned long lid_report_interval __read_mostly = 500; |
119 | module_param(lid_report_interval, ulong, 0644); | 119 | module_param(lid_report_interval, ulong, 0644); |
diff --git a/drivers/acpi/nfit/mce.c b/drivers/acpi/nfit/mce.c index 3ba1c3472cf9..fd86bec98dea 100644 --- a/drivers/acpi/nfit/mce.c +++ b/drivers/acpi/nfit/mce.c | |||
@@ -26,7 +26,7 @@ static int nfit_handle_mce(struct notifier_block *nb, unsigned long val, | |||
26 | struct nfit_spa *nfit_spa; | 26 | struct nfit_spa *nfit_spa; |
27 | 27 | ||
28 | /* We only care about memory errors */ | 28 | /* We only care about memory errors */ |
29 | if (!(mce->status & MCACOD)) | 29 | if (!mce_is_memory_error(mce)) |
30 | return NOTIFY_DONE; | 30 | return NOTIFY_DONE; |
31 | 31 | ||
32 | /* | 32 | /* |
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 1b5ee1e0e5a3..e414fabf7315 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c | |||
@@ -333,14 +333,17 @@ static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj, | |||
333 | container_of(bin_attr, struct acpi_table_attr, attr); | 333 | container_of(bin_attr, struct acpi_table_attr, attr); |
334 | struct acpi_table_header *table_header = NULL; | 334 | struct acpi_table_header *table_header = NULL; |
335 | acpi_status status; | 335 | acpi_status status; |
336 | ssize_t rc; | ||
336 | 337 | ||
337 | status = acpi_get_table(table_attr->name, table_attr->instance, | 338 | status = acpi_get_table(table_attr->name, table_attr->instance, |
338 | &table_header); | 339 | &table_header); |
339 | if (ACPI_FAILURE(status)) | 340 | if (ACPI_FAILURE(status)) |
340 | return -ENODEV; | 341 | return -ENODEV; |
341 | 342 | ||
342 | return memory_read_from_buffer(buf, count, &offset, | 343 | rc = memory_read_from_buffer(buf, count, &offset, table_header, |
343 | table_header, table_header->length); | 344 | table_header->length); |
345 | acpi_put_table(table_header); | ||
346 | return rc; | ||
344 | } | 347 | } |
345 | 348 | ||
346 | static int acpi_table_attr_init(struct kobject *tables_obj, | 349 | static int acpi_table_attr_init(struct kobject *tables_obj, |
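The sysfs fix keeps the read result in a local and calls acpi_put_table() so every successful acpi_get_table() is balanced before returning. The same acquire/use/release shape, sketched in plain C with invented stand-ins for the ACPI calls:

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct table { size_t length; const char *data; };

/* Stand-ins for acpi_get_table()/acpi_put_table(); not the real API. */
static int table_get(struct table **out)
{
	static struct table t = { 5, "hello" };
	*out = &t;
	return 0;
}

static void table_put(struct table *t) { (void)t; /* drop the mapping */ }

static ssize_t table_read(char *buf, size_t count)
{
	struct table *t;
	ssize_t rc;

	if (table_get(&t))
		return -1;                      /* nothing acquired yet */

	rc = (ssize_t)(count < t->length ? count : t->length);
	memcpy(buf, t->data, (size_t)rc);

	table_put(t);                           /* release before returning */
	return rc;
}

int main(void)
{
	char buf[16] = "";

	printf("%zd bytes: %s\n", table_read(buf, sizeof(buf) - 1), buf);
	return 0;
}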
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 2fc52407306c..c69954023c2e 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c | |||
@@ -1364,6 +1364,40 @@ static inline void ahci_gtf_filter_workaround(struct ata_host *host) | |||
1364 | {} | 1364 | {} |
1365 | #endif | 1365 | #endif |
1366 | 1366 | ||
1367 | /* | ||
1368 | * On the Acer Aspire Switch Alpha 12, sometimes all SATA ports are detected | ||
1369 | * as DUMMY, or detected but eventually get a "link down" and never get up | ||
1370 | * again. When this happens, CAP.NP may hold a value of 0x00 or 0x01, and the | ||
1371 | * port_map may hold a value of 0x00. | ||
1372 | * | ||
1373 | * Overriding CAP.NP to 0x02 and the port_map to 0x7 will reveal all 3 ports | ||
1374 | * and can significantly reduce the occurrence of the problem. | ||
1375 | * | ||
1376 | * https://bugzilla.kernel.org/show_bug.cgi?id=189471 | ||
1377 | */ | ||
1378 | static void acer_sa5_271_workaround(struct ahci_host_priv *hpriv, | ||
1379 | struct pci_dev *pdev) | ||
1380 | { | ||
1381 | static const struct dmi_system_id sysids[] = { | ||
1382 | { | ||
1383 | .ident = "Acer Switch Alpha 12", | ||
1384 | .matches = { | ||
1385 | DMI_MATCH(DMI_SYS_VENDOR, "Acer"), | ||
1386 | DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271") | ||
1387 | }, | ||
1388 | }, | ||
1389 | { } | ||
1390 | }; | ||
1391 | |||
1392 | if (dmi_check_system(sysids)) { | ||
1393 | dev_info(&pdev->dev, "enabling Acer Switch Alpha 12 workaround\n"); | ||
1394 | if ((hpriv->saved_cap & 0xC734FF00) == 0xC734FF00) { | ||
1395 | hpriv->port_map = 0x7; | ||
1396 | hpriv->cap = 0xC734FF02; | ||
1397 | } | ||
1398 | } | ||
1399 | } | ||
1400 | |||
1367 | #ifdef CONFIG_ARM64 | 1401 | #ifdef CONFIG_ARM64 |
1368 | /* | 1402 | /* |
1369 | * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently. | 1403 | * Due to ERRATA#22536, ThunderX needs to handle HOST_IRQ_STAT differently. |
@@ -1636,6 +1670,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1636 | "online status unreliable, applying workaround\n"); | 1670 | "online status unreliable, applying workaround\n"); |
1637 | } | 1671 | } |
1638 | 1672 | ||
1673 | |||
1674 | /* Acer SA5-271 workaround modifies private_data */ | ||
1675 | acer_sa5_271_workaround(hpriv, pdev); | ||
1676 | |||
1639 | /* CAP.NP sometimes indicates the index of the last enabled | 1677 | /* CAP.NP sometimes indicates the index of the last enabled |
1640 | * port, at other times, that of the last possible port, so | 1678 | * port, at other times, that of the last possible port, so |
1641 | * determining the maximum port number requires looking at | 1679 | * determining the maximum port number requires looking at |
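The Acer SA5-271 workaround is gated on a dmi_check_system() table so it only fires on the affected machine. A userspace sketch of the table-driven matching idea (the toy below is not the kernel DMI API, only an illustration of the pattern):

#include <stdio.h>
#include <string.h>

struct quirk {
	const char *vendor;
	const char *product;
};

static const struct quirk quirks[] = {
	{ "Acer", "Switch SA5-271" },
	{ NULL, NULL }                  /* terminator, like the empty { } entry */
};

/* Scan the table and report whether the running system matches any entry. */
static int matches(const char *vendor, const char *product)
{
	for (const struct quirk *q = quirks; q->vendor; q++)
		if (!strcmp(q->vendor, vendor) && !strcmp(q->product, product))
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", matches("Acer", "Switch SA5-271"));   /* 1: apply workaround */
	printf("%d\n", matches("Other", "Laptop"));          /* 0: leave defaults   */
	return 0;
}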
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c index aaa761b9081c..cd2eab6aa92e 100644 --- a/drivers/ata/libahci_platform.c +++ b/drivers/ata/libahci_platform.c | |||
@@ -514,8 +514,9 @@ int ahci_platform_init_host(struct platform_device *pdev, | |||
514 | 514 | ||
515 | irq = platform_get_irq(pdev, 0); | 515 | irq = platform_get_irq(pdev, 0); |
516 | if (irq <= 0) { | 516 | if (irq <= 0) { |
517 | dev_err(dev, "no irq\n"); | 517 | if (irq != -EPROBE_DEFER) |
518 | return -EINVAL; | 518 | dev_err(dev, "no irq\n"); |
519 | return irq; | ||
519 | } | 520 | } |
520 | 521 | ||
521 | hpriv->irq = irq; | 522 | hpriv->irq = irq; |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 2d83b8c75965..e157a0e44419 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -6800,7 +6800,7 @@ static int __init ata_parse_force_one(char **cur, | |||
6800 | } | 6800 | } |
6801 | 6801 | ||
6802 | force_ent->port = simple_strtoul(id, &endp, 10); | 6802 | force_ent->port = simple_strtoul(id, &endp, 10); |
6803 | if (p == endp || *endp != '\0') { | 6803 | if (id == endp || *endp != '\0') { |
6804 | *reason = "invalid port/link"; | 6804 | *reason = "invalid port/link"; |
6805 | return -EINVAL; | 6805 | return -EINVAL; |
6806 | } | 6806 | } |
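The libata fix compares endp against id, the string actually passed to simple_strtoul(); comparing an unrelated pointer had made the validation a no-op. The standard-C version of the same endptr check:

#include <stdio.h>
#include <stdlib.h>

/* Parse an unsigned port number, rejecting empty input and trailing junk,
 * the same check pattern as ata_parse_force_one() after the fix. */
static int parse_port(const char *id, unsigned long *out)
{
	char *endp;

	*out = strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0')
		return -1;      /* nothing consumed, or garbage after the number */
	return 0;
}

int main(void)
{
	unsigned long port;

	printf("%d\n", parse_port("3", &port));    /* 0: ok */
	printf("%d\n", parse_port("3x", &port));   /* -1: trailing junk */
	printf("%d\n", parse_port("", &port));     /* -1: empty */
	return 0;
}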
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index b66bcda88320..3b2246dded74 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -4067,7 +4067,6 @@ static int mv_platform_probe(struct platform_device *pdev) | |||
4067 | struct ata_host *host; | 4067 | struct ata_host *host; |
4068 | struct mv_host_priv *hpriv; | 4068 | struct mv_host_priv *hpriv; |
4069 | struct resource *res; | 4069 | struct resource *res; |
4070 | void __iomem *mmio; | ||
4071 | int n_ports = 0, irq = 0; | 4070 | int n_ports = 0, irq = 0; |
4072 | int rc; | 4071 | int rc; |
4073 | int port; | 4072 | int port; |
@@ -4086,9 +4085,8 @@ static int mv_platform_probe(struct platform_device *pdev) | |||
4086 | * Get the register base first | 4085 | * Get the register base first |
4087 | */ | 4086 | */ |
4088 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 4087 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
4089 | mmio = devm_ioremap_resource(&pdev->dev, res); | 4088 | if (res == NULL) |
4090 | if (IS_ERR(mmio)) | 4089 | return -EINVAL; |
4091 | return PTR_ERR(mmio); | ||
4092 | 4090 | ||
4093 | /* allocate host */ | 4091 | /* allocate host */ |
4094 | if (pdev->dev.of_node) { | 4092 | if (pdev->dev.of_node) { |
@@ -4132,7 +4130,12 @@ static int mv_platform_probe(struct platform_device *pdev) | |||
4132 | hpriv->board_idx = chip_soc; | 4130 | hpriv->board_idx = chip_soc; |
4133 | 4131 | ||
4134 | host->iomap = NULL; | 4132 | host->iomap = NULL; |
4135 | hpriv->base = mmio - SATAHC0_REG_BASE; | 4133 | hpriv->base = devm_ioremap(&pdev->dev, res->start, |
4134 | resource_size(res)); | ||
4135 | if (!hpriv->base) | ||
4136 | return -ENOMEM; | ||
4137 | |||
4138 | hpriv->base -= SATAHC0_REG_BASE; | ||
4136 | 4139 | ||
4137 | hpriv->clk = clk_get(&pdev->dev, NULL); | 4140 | hpriv->clk = clk_get(&pdev->dev, NULL); |
4138 | if (IS_ERR(hpriv->clk)) | 4141 | if (IS_ERR(hpriv->clk)) |
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c index 5d38245a7a73..b7939a2c1fab 100644 --- a/drivers/ata/sata_rcar.c +++ b/drivers/ata/sata_rcar.c | |||
@@ -890,7 +890,10 @@ static int sata_rcar_probe(struct platform_device *pdev) | |||
890 | dev_err(&pdev->dev, "failed to get access to sata clock\n"); | 890 | dev_err(&pdev->dev, "failed to get access to sata clock\n"); |
891 | return PTR_ERR(priv->clk); | 891 | return PTR_ERR(priv->clk); |
892 | } | 892 | } |
893 | clk_prepare_enable(priv->clk); | 893 | |
894 | ret = clk_prepare_enable(priv->clk); | ||
895 | if (ret) | ||
896 | return ret; | ||
894 | 897 | ||
895 | host = ata_host_alloc(&pdev->dev, 1); | 898 | host = ata_host_alloc(&pdev->dev, 1); |
896 | if (!host) { | 899 | if (!host) { |
@@ -970,8 +973,11 @@ static int sata_rcar_resume(struct device *dev) | |||
970 | struct ata_host *host = dev_get_drvdata(dev); | 973 | struct ata_host *host = dev_get_drvdata(dev); |
971 | struct sata_rcar_priv *priv = host->private_data; | 974 | struct sata_rcar_priv *priv = host->private_data; |
972 | void __iomem *base = priv->base; | 975 | void __iomem *base = priv->base; |
976 | int ret; | ||
973 | 977 | ||
974 | clk_prepare_enable(priv->clk); | 978 | ret = clk_prepare_enable(priv->clk); |
979 | if (ret) | ||
980 | return ret; | ||
975 | 981 | ||
976 | /* ack and mask */ | 982 | /* ack and mask */ |
977 | iowrite32(0, base + SATAINTSTAT_REG); | 983 | iowrite32(0, base + SATAINTSTAT_REG); |
@@ -988,8 +994,11 @@ static int sata_rcar_restore(struct device *dev) | |||
988 | { | 994 | { |
989 | struct ata_host *host = dev_get_drvdata(dev); | 995 | struct ata_host *host = dev_get_drvdata(dev); |
990 | struct sata_rcar_priv *priv = host->private_data; | 996 | struct sata_rcar_priv *priv = host->private_data; |
997 | int ret; | ||
991 | 998 | ||
992 | clk_prepare_enable(priv->clk); | 999 | ret = clk_prepare_enable(priv->clk); |
1000 | if (ret) | ||
1001 | return ret; | ||
993 | 1002 | ||
994 | sata_rcar_setup_port(host); | 1003 | sata_rcar_setup_port(host); |
995 | 1004 | ||
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 9a7bb2c29447..f3f191ba8ca4 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -937,14 +937,6 @@ static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg) | |||
937 | return -ENOSPC; | 937 | return -ENOSPC; |
938 | } | 938 | } |
939 | 939 | ||
940 | /* Reset all properties of an NBD device */ | ||
941 | static void nbd_reset(struct nbd_device *nbd) | ||
942 | { | ||
943 | nbd->config = NULL; | ||
944 | nbd->tag_set.timeout = 0; | ||
945 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); | ||
946 | } | ||
947 | |||
948 | static void nbd_bdev_reset(struct block_device *bdev) | 940 | static void nbd_bdev_reset(struct block_device *bdev) |
949 | { | 941 | { |
950 | if (bdev->bd_openers > 1) | 942 | if (bdev->bd_openers > 1) |
@@ -1029,7 +1021,11 @@ static void nbd_config_put(struct nbd_device *nbd) | |||
1029 | } | 1021 | } |
1030 | kfree(config->socks); | 1022 | kfree(config->socks); |
1031 | } | 1023 | } |
1032 | nbd_reset(nbd); | 1024 | kfree(nbd->config); |
1025 | nbd->config = NULL; | ||
1026 | |||
1027 | nbd->tag_set.timeout = 0; | ||
1028 | queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); | ||
1033 | 1029 | ||
1034 | mutex_unlock(&nbd->config_lock); | 1030 | mutex_unlock(&nbd->config_lock); |
1035 | nbd_put(nbd); | 1031 | nbd_put(nbd); |
@@ -1483,7 +1479,6 @@ static int nbd_dev_add(int index) | |||
1483 | disk->fops = &nbd_fops; | 1479 | disk->fops = &nbd_fops; |
1484 | disk->private_data = nbd; | 1480 | disk->private_data = nbd; |
1485 | sprintf(disk->disk_name, "nbd%d", index); | 1481 | sprintf(disk->disk_name, "nbd%d", index); |
1486 | nbd_reset(nbd); | ||
1487 | add_disk(disk); | 1482 | add_disk(disk); |
1488 | nbd_total_devices++; | 1483 | nbd_total_devices++; |
1489 | return index; | 1484 | return index; |
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 454bf9c34882..c16f74547804 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c | |||
@@ -4023,6 +4023,7 @@ static void rbd_queue_workfn(struct work_struct *work) | |||
4023 | 4023 | ||
4024 | switch (req_op(rq)) { | 4024 | switch (req_op(rq)) { |
4025 | case REQ_OP_DISCARD: | 4025 | case REQ_OP_DISCARD: |
4026 | case REQ_OP_WRITE_ZEROES: | ||
4026 | op_type = OBJ_OP_DISCARD; | 4027 | op_type = OBJ_OP_DISCARD; |
4027 | break; | 4028 | break; |
4028 | case REQ_OP_WRITE: | 4029 | case REQ_OP_WRITE: |
@@ -4420,6 +4421,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) | |||
4420 | q->limits.discard_granularity = segment_size; | 4421 | q->limits.discard_granularity = segment_size; |
4421 | q->limits.discard_alignment = segment_size; | 4422 | q->limits.discard_alignment = segment_size; |
4422 | blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); | 4423 | blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); |
4424 | blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); | ||
4423 | 4425 | ||
4424 | if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) | 4426 | if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC)) |
4425 | q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; | 4427 | q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES; |
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index d4dbd8d8e524..382c864814d9 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c | |||
@@ -374,7 +374,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, | |||
374 | 374 | ||
375 | rc = write_sync_reg(SCR_HOST_TO_READER_START, dev); | 375 | rc = write_sync_reg(SCR_HOST_TO_READER_START, dev); |
376 | if (rc <= 0) { | 376 | if (rc <= 0) { |
377 | DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); | 377 | DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); |
378 | DEBUGP(2, dev, "<- cm4040_write (failed)\n"); | 378 | DEBUGP(2, dev, "<- cm4040_write (failed)\n"); |
379 | if (rc == -ERESTARTSYS) | 379 | if (rc == -ERESTARTSYS) |
380 | return rc; | 380 | return rc; |
@@ -387,7 +387,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, | |||
387 | for (i = 0; i < bytes_to_write; i++) { | 387 | for (i = 0; i < bytes_to_write; i++) { |
388 | rc = wait_for_bulk_out_ready(dev); | 388 | rc = wait_for_bulk_out_ready(dev); |
389 | if (rc <= 0) { | 389 | if (rc <= 0) { |
390 | DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n", | 390 | DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n", |
391 | rc); | 391 | rc); |
392 | DEBUGP(2, dev, "<- cm4040_write (failed)\n"); | 392 | DEBUGP(2, dev, "<- cm4040_write (failed)\n"); |
393 | if (rc == -ERESTARTSYS) | 393 | if (rc == -ERESTARTSYS) |
@@ -403,7 +403,7 @@ static ssize_t cm4040_write(struct file *filp, const char __user *buf, | |||
403 | rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev); | 403 | rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev); |
404 | 404 | ||
405 | if (rc <= 0) { | 405 | if (rc <= 0) { |
406 | DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc); | 406 | DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc); |
407 | DEBUGP(2, dev, "<- cm4040_write (failed)\n"); | 407 | DEBUGP(2, dev, "<- cm4040_write (failed)\n"); |
408 | if (rc == -ERESTARTSYS) | 408 | if (rc == -ERESTARTSYS) |
409 | return rc; | 409 | return rc; |
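The cm4040 hunk swaps the non-standard %Zx length modifier for C99's %zx. For reference, the z modifier pairs with size_t-sized values:

#include <stdio.h>
#include <sys/types.h>

int main(void)
{
	size_t n = 0x2a;
	ssize_t rc = -1;

	printf("%.2zx\n", n);   /* size_t in hex, as the DEBUGP strings now use */
	printf("%zd\n", rc);    /* signed size type */
	return 0;
}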
diff --git a/drivers/char/random.c b/drivers/char/random.c index 0ab024918907..a561f0c2f428 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -1097,12 +1097,16 @@ static void add_interrupt_bench(cycles_t start) | |||
1097 | static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) | 1097 | static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) |
1098 | { | 1098 | { |
1099 | __u32 *ptr = (__u32 *) regs; | 1099 | __u32 *ptr = (__u32 *) regs; |
1100 | unsigned long flags; | ||
1100 | 1101 | ||
1101 | if (regs == NULL) | 1102 | if (regs == NULL) |
1102 | return 0; | 1103 | return 0; |
1104 | local_irq_save(flags); | ||
1103 | if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) | 1105 | if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) |
1104 | f->reg_idx = 0; | 1106 | f->reg_idx = 0; |
1105 | return *(ptr + f->reg_idx++); | 1107 | ptr += f->reg_idx++; |
1108 | local_irq_restore(flags); | ||
1109 | return *ptr; | ||
1106 | } | 1110 | } |
1107 | 1111 | ||
1108 | void add_interrupt_randomness(int irq, int irq_flags) | 1112 | void add_interrupt_randomness(int irq, int irq_flags) |
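The get_reg() change keeps the read-modify-write of f->reg_idx inside a local_irq_save()/local_irq_restore() section so a nested interrupt cannot race the index update. A hedged userspace analogue, with a mutex standing in for disabling interrupts:

#include <pthread.h>
#include <stdio.h>

#define NWORDS 16

static unsigned int words[NWORDS];
static unsigned int reg_idx;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Pick the next word round-robin; only the index update needs the critical
 * section, mirroring how the patch brackets reg_idx and defers the final
 * dereference until after the section ends. */
static unsigned int next_word(void)
{
	unsigned int *ptr;

	pthread_mutex_lock(&lock);
	if (reg_idx >= NWORDS)
		reg_idx = 0;
	ptr = &words[reg_idx++];
	pthread_mutex_unlock(&lock);

	return *ptr;
}

int main(void)
{
	words[0] = 7;
	printf("%u\n", next_word());
	return 0;
}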
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 0e3f6496524d..26b643d57847 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -2468,6 +2468,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
2468 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && | 2468 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY) && |
2469 | list_empty(&cpufreq_policy_list)) { | 2469 | list_empty(&cpufreq_policy_list)) { |
2470 | /* if all ->init() calls failed, unregister */ | 2470 | /* if all ->init() calls failed, unregister */ |
2471 | ret = -ENODEV; | ||
2471 | pr_debug("%s: No CPU initialized for driver %s\n", __func__, | 2472 | pr_debug("%s: No CPU initialized for driver %s\n", __func__, |
2472 | driver_data->name); | 2473 | driver_data->name); |
2473 | goto err_if_unreg; | 2474 | goto err_if_unreg; |
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c index 1b9bcd76c60e..c2dd43f3f5d8 100644 --- a/drivers/cpufreq/kirkwood-cpufreq.c +++ b/drivers/cpufreq/kirkwood-cpufreq.c | |||
@@ -127,7 +127,12 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) | |||
127 | return PTR_ERR(priv.cpu_clk); | 127 | return PTR_ERR(priv.cpu_clk); |
128 | } | 128 | } |
129 | 129 | ||
130 | clk_prepare_enable(priv.cpu_clk); | 130 | err = clk_prepare_enable(priv.cpu_clk); |
131 | if (err) { | ||
132 | dev_err(priv.dev, "Unable to prepare cpuclk\n"); | ||
133 | return err; | ||
134 | } | ||
135 | |||
131 | kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000; | 136 | kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000; |
132 | 137 | ||
133 | priv.ddr_clk = of_clk_get_by_name(np, "ddrclk"); | 138 | priv.ddr_clk = of_clk_get_by_name(np, "ddrclk"); |
@@ -137,7 +142,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) | |||
137 | goto out_cpu; | 142 | goto out_cpu; |
138 | } | 143 | } |
139 | 144 | ||
140 | clk_prepare_enable(priv.ddr_clk); | 145 | err = clk_prepare_enable(priv.ddr_clk); |
146 | if (err) { | ||
147 | dev_err(priv.dev, "Unable to prepare ddrclk\n"); | ||
148 | goto out_cpu; | ||
149 | } | ||
141 | kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000; | 150 | kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000; |
142 | 151 | ||
143 | priv.powersave_clk = of_clk_get_by_name(np, "powersave"); | 152 | priv.powersave_clk = of_clk_get_by_name(np, "powersave"); |
@@ -146,7 +155,11 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) | |||
146 | err = PTR_ERR(priv.powersave_clk); | 155 | err = PTR_ERR(priv.powersave_clk); |
147 | goto out_ddr; | 156 | goto out_ddr; |
148 | } | 157 | } |
149 | clk_prepare_enable(priv.powersave_clk); | 158 | err = clk_prepare_enable(priv.powersave_clk); |
159 | if (err) { | ||
160 | dev_err(priv.dev, "Unable to prepare powersave clk\n"); | ||
161 | goto out_ddr; | ||
162 | } | ||
150 | 163 | ||
151 | of_node_put(np); | 164 | of_node_put(np); |
152 | np = NULL; | 165 | np = NULL; |
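Each clk_prepare_enable() call in the kirkwood probe path is now checked, and failures unwind through the existing out_cpu/out_ddr labels so only already-enabled clocks get disabled. The generic goto-unwind idiom, sketched with dummy resources:

#include <stdio.h>

static int acquire(const char *name) { printf("acquire %s\n", name); return 0; }
static void release(const char *name) { printf("release %s\n", name); }

/* Acquire three resources in order; on any failure, release only what was
 * already acquired, in reverse order - the same shape as the probe() fix. */
static int probe(void)
{
	int err;

	err = acquire("cpu_clk");
	if (err)
		return err;

	err = acquire("ddr_clk");
	if (err)
		goto out_cpu;

	err = acquire("powersave_clk");
	if (err)
		goto out_ddr;

	return 0;

out_ddr:
	release("ddr_clk");
out_cpu:
	release("cpu_clk");
	return err;
}

int main(void) { return probe(); }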
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index d37e8dda8079..ec240592f5c8 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c | |||
@@ -201,6 +201,7 @@ struct ep93xx_dma_engine { | |||
201 | struct dma_device dma_dev; | 201 | struct dma_device dma_dev; |
202 | bool m2m; | 202 | bool m2m; |
203 | int (*hw_setup)(struct ep93xx_dma_chan *); | 203 | int (*hw_setup)(struct ep93xx_dma_chan *); |
204 | void (*hw_synchronize)(struct ep93xx_dma_chan *); | ||
204 | void (*hw_shutdown)(struct ep93xx_dma_chan *); | 205 | void (*hw_shutdown)(struct ep93xx_dma_chan *); |
205 | void (*hw_submit)(struct ep93xx_dma_chan *); | 206 | void (*hw_submit)(struct ep93xx_dma_chan *); |
206 | int (*hw_interrupt)(struct ep93xx_dma_chan *); | 207 | int (*hw_interrupt)(struct ep93xx_dma_chan *); |
@@ -323,6 +324,8 @@ static int m2p_hw_setup(struct ep93xx_dma_chan *edmac) | |||
323 | | M2P_CONTROL_ENABLE; | 324 | | M2P_CONTROL_ENABLE; |
324 | m2p_set_control(edmac, control); | 325 | m2p_set_control(edmac, control); |
325 | 326 | ||
327 | edmac->buffer = 0; | ||
328 | |||
326 | return 0; | 329 | return 0; |
327 | } | 330 | } |
328 | 331 | ||
@@ -331,21 +334,27 @@ static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac) | |||
331 | return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; | 334 | return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3; |
332 | } | 335 | } |
333 | 336 | ||
334 | static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) | 337 | static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac) |
335 | { | 338 | { |
339 | unsigned long flags; | ||
336 | u32 control; | 340 | u32 control; |
337 | 341 | ||
342 | spin_lock_irqsave(&edmac->lock, flags); | ||
338 | control = readl(edmac->regs + M2P_CONTROL); | 343 | control = readl(edmac->regs + M2P_CONTROL); |
339 | control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); | 344 | control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT); |
340 | m2p_set_control(edmac, control); | 345 | m2p_set_control(edmac, control); |
346 | spin_unlock_irqrestore(&edmac->lock, flags); | ||
341 | 347 | ||
342 | while (m2p_channel_state(edmac) >= M2P_STATE_ON) | 348 | while (m2p_channel_state(edmac) >= M2P_STATE_ON) |
343 | cpu_relax(); | 349 | schedule(); |
350 | } | ||
344 | 351 | ||
352 | static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac) | ||
353 | { | ||
345 | m2p_set_control(edmac, 0); | 354 | m2p_set_control(edmac, 0); |
346 | 355 | ||
347 | while (m2p_channel_state(edmac) == M2P_STATE_STALL) | 356 | while (m2p_channel_state(edmac) != M2P_STATE_IDLE) |
348 | cpu_relax(); | 357 | dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n"); |
349 | } | 358 | } |
350 | 359 | ||
351 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) | 360 | static void m2p_fill_desc(struct ep93xx_dma_chan *edmac) |
@@ -1161,6 +1170,26 @@ fail: | |||
1161 | } | 1170 | } |
1162 | 1171 | ||
1163 | /** | 1172 | /** |
1173 | * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the | ||
1174 | * current context. | ||
1175 | * @chan: channel | ||
1176 | * | ||
1177 | * Synchronizes the DMA channel termination to the current context. When this | ||
1178 | * function returns it is guaranteed that all transfers for previously issued | ||
1179 | * descriptors have stopped and it is safe to free the memory associated | ||
1180 | * with them. Furthermore it is guaranteed that all complete callback functions | ||
1181 | * for a previously submitted descriptor have finished running and it is safe to | ||
1182 | * free resources accessed from within the complete callbacks. | ||
1183 | */ | ||
1184 | static void ep93xx_dma_synchronize(struct dma_chan *chan) | ||
1185 | { | ||
1186 | struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); | ||
1187 | |||
1188 | if (edmac->edma->hw_synchronize) | ||
1189 | edmac->edma->hw_synchronize(edmac); | ||
1190 | } | ||
1191 | |||
1192 | /** | ||
1164 | * ep93xx_dma_terminate_all - terminate all transactions | 1193 | * ep93xx_dma_terminate_all - terminate all transactions |
1165 | * @chan: channel | 1194 | * @chan: channel |
1166 | * | 1195 | * |
@@ -1323,6 +1352,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) | |||
1323 | dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; | 1352 | dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; |
1324 | dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; | 1353 | dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; |
1325 | dma_dev->device_config = ep93xx_dma_slave_config; | 1354 | dma_dev->device_config = ep93xx_dma_slave_config; |
1355 | dma_dev->device_synchronize = ep93xx_dma_synchronize; | ||
1326 | dma_dev->device_terminate_all = ep93xx_dma_terminate_all; | 1356 | dma_dev->device_terminate_all = ep93xx_dma_terminate_all; |
1327 | dma_dev->device_issue_pending = ep93xx_dma_issue_pending; | 1357 | dma_dev->device_issue_pending = ep93xx_dma_issue_pending; |
1328 | dma_dev->device_tx_status = ep93xx_dma_tx_status; | 1358 | dma_dev->device_tx_status = ep93xx_dma_tx_status; |
@@ -1340,6 +1370,7 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) | |||
1340 | } else { | 1370 | } else { |
1341 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); | 1371 | dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask); |
1342 | 1372 | ||
1373 | edma->hw_synchronize = m2p_hw_synchronize; | ||
1343 | edma->hw_setup = m2p_hw_setup; | 1374 | edma->hw_setup = m2p_hw_setup; |
1344 | edma->hw_shutdown = m2p_hw_shutdown; | 1375 | edma->hw_shutdown = m2p_hw_shutdown; |
1345 | edma->hw_submit = m2p_hw_submit; | 1376 | edma->hw_submit = m2p_hw_submit; |
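The new m2p_hw_synchronize() callback gives ep93xx_dma_synchronize() something to wait on, so memory used by in-flight transfers is only freed once the channel has really drained. A compact pthread analogue of the terminate-then-synchronize split (all names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct channel {
	pthread_t worker;
	bool stop;            /* set by terminate(), observed by the worker */
	char *buffer;         /* memory the worker is still touching */
};

static void *work(void *arg)
{
	struct channel *ch = arg;

	while (!__atomic_load_n(&ch->stop, __ATOMIC_ACQUIRE))
		ch->buffer[0]++;          /* pretend DMA traffic */
	return NULL;
}

/* terminate() only requests the stop; synchronize() is what guarantees the
 * worker is gone, so the buffer may be freed afterwards - the split that the
 * dmaengine device_terminate_all()/device_synchronize() pair formalizes. */
static void terminate(struct channel *ch)
{
	__atomic_store_n(&ch->stop, true, __ATOMIC_RELEASE);
}

static void synchronize(struct channel *ch)
{
	pthread_join(ch->worker, NULL);
}

int main(void)
{
	struct channel ch = { .buffer = calloc(1, 16) };

	pthread_create(&ch.worker, NULL, work, &ch);
	terminate(&ch);
	synchronize(&ch);
	free(ch.buffer);              /* safe only after synchronize() */
	puts("done");
	return 0;
}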
diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index a28a01fcba67..f3e211f8f6c5 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c | |||
@@ -161,6 +161,7 @@ struct mv_xor_v2_device { | |||
161 | struct mv_xor_v2_sw_desc *sw_desq; | 161 | struct mv_xor_v2_sw_desc *sw_desq; |
162 | int desc_size; | 162 | int desc_size; |
163 | unsigned int npendings; | 163 | unsigned int npendings; |
164 | unsigned int hw_queue_idx; | ||
164 | }; | 165 | }; |
165 | 166 | ||
166 | /** | 167 | /** |
@@ -214,18 +215,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev, | |||
214 | } | 215 | } |
215 | 216 | ||
216 | /* | 217 | /* |
217 | * Return the next available index in the DESQ. | ||
218 | */ | ||
219 | static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev) | ||
220 | { | ||
221 | /* read the index for the next available descriptor in the DESQ */ | ||
222 | u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF); | ||
223 | |||
224 | return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT) | ||
225 | & MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK); | ||
226 | } | ||
227 | |||
228 | /* | ||
229 | * notify the engine of new descriptors, and update the available index. | 218 | * notify the engine of new descriptors, and update the available index. |
230 | */ | 219 | */ |
231 | static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev, | 220 | static void mv_xor_v2_add_desc_to_desq(struct mv_xor_v2_device *xor_dev, |
@@ -257,22 +246,6 @@ static int mv_xor_v2_set_desc_size(struct mv_xor_v2_device *xor_dev) | |||
257 | return MV_XOR_V2_EXT_DESC_SIZE; | 246 | return MV_XOR_V2_EXT_DESC_SIZE; |
258 | } | 247 | } |
259 | 248 | ||
260 | /* | ||
261 | * Set the IMSG threshold | ||
262 | */ | ||
263 | static inline | ||
264 | void mv_xor_v2_set_imsg_thrd(struct mv_xor_v2_device *xor_dev, int thrd_val) | ||
265 | { | ||
266 | u32 reg; | ||
267 | |||
268 | reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); | ||
269 | |||
270 | reg &= (~MV_XOR_V2_DMA_IMSG_THRD_MASK << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); | ||
271 | reg |= (thrd_val << MV_XOR_V2_DMA_IMSG_THRD_SHIFT); | ||
272 | |||
273 | writel(reg, xor_dev->dma_base + MV_XOR_V2_DMA_IMSG_THRD_OFF); | ||
274 | } | ||
275 | |||
276 | static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) | 249 | static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) |
277 | { | 250 | { |
278 | struct mv_xor_v2_device *xor_dev = data; | 251 | struct mv_xor_v2_device *xor_dev = data; |
@@ -288,12 +261,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) | |||
288 | if (!ndescs) | 261 | if (!ndescs) |
289 | return IRQ_NONE; | 262 | return IRQ_NONE; |
290 | 263 | ||
291 | /* | ||
292 | * Update IMSG threshold, to disable new IMSG interrupts until | ||
293 | * end of the tasklet | ||
294 | */ | ||
295 | mv_xor_v2_set_imsg_thrd(xor_dev, MV_XOR_V2_DESC_NUM); | ||
296 | |||
297 | /* schedule a tasklet to handle descriptors callbacks */ | 264 | /* schedule a tasklet to handle descriptors callbacks */ |
298 | tasklet_schedule(&xor_dev->irq_tasklet); | 265 | tasklet_schedule(&xor_dev->irq_tasklet); |
299 | 266 | ||
@@ -306,7 +273,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data) | |||
306 | static dma_cookie_t | 273 | static dma_cookie_t |
307 | mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) | 274 | mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) |
308 | { | 275 | { |
309 | int desq_ptr; | ||
310 | void *dest_hw_desc; | 276 | void *dest_hw_desc; |
311 | dma_cookie_t cookie; | 277 | dma_cookie_t cookie; |
312 | struct mv_xor_v2_sw_desc *sw_desc = | 278 | struct mv_xor_v2_sw_desc *sw_desc = |
@@ -322,15 +288,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx) | |||
322 | spin_lock_bh(&xor_dev->lock); | 288 | spin_lock_bh(&xor_dev->lock); |
323 | cookie = dma_cookie_assign(tx); | 289 | cookie = dma_cookie_assign(tx); |
324 | 290 | ||
325 | /* get the next available slot in the DESQ */ | ||
326 | desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev); | ||
327 | |||
328 | /* copy the HW descriptor from the SW descriptor to the DESQ */ | 291 | /* copy the HW descriptor from the SW descriptor to the DESQ */ |
329 | dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr; | 292 | dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx; |
330 | 293 | ||
331 | memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size); | 294 | memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size); |
332 | 295 | ||
333 | xor_dev->npendings++; | 296 | xor_dev->npendings++; |
297 | xor_dev->hw_queue_idx++; | ||
298 | if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM) | ||
299 | xor_dev->hw_queue_idx = 0; | ||
334 | 300 | ||
335 | spin_unlock_bh(&xor_dev->lock); | 301 | spin_unlock_bh(&xor_dev->lock); |
336 | 302 | ||
@@ -344,6 +310,7 @@ static struct mv_xor_v2_sw_desc * | |||
344 | mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) | 310 | mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) |
345 | { | 311 | { |
346 | struct mv_xor_v2_sw_desc *sw_desc; | 312 | struct mv_xor_v2_sw_desc *sw_desc; |
313 | bool found = false; | ||
347 | 314 | ||
348 | /* Lock the channel */ | 315 | /* Lock the channel */ |
349 | spin_lock_bh(&xor_dev->lock); | 316 | spin_lock_bh(&xor_dev->lock); |
@@ -355,19 +322,23 @@ mv_xor_v2_prep_sw_desc(struct mv_xor_v2_device *xor_dev) | |||
355 | return NULL; | 322 | return NULL; |
356 | } | 323 | } |
357 | 324 | ||
358 | /* get a free SW descriptor from the SW DESQ */ | 325 | list_for_each_entry(sw_desc, &xor_dev->free_sw_desc, free_list) { |
359 | sw_desc = list_first_entry(&xor_dev->free_sw_desc, | 326 | if (async_tx_test_ack(&sw_desc->async_tx)) { |
360 | struct mv_xor_v2_sw_desc, free_list); | 327 | found = true; |
328 | break; | ||
329 | } | ||
330 | } | ||
331 | |||
332 | if (!found) { | ||
333 | spin_unlock_bh(&xor_dev->lock); | ||
334 | return NULL; | ||
335 | } | ||
336 | |||
361 | list_del(&sw_desc->free_list); | 337 | list_del(&sw_desc->free_list); |
362 | 338 | ||
363 | /* Release the channel */ | 339 | /* Release the channel */ |
364 | spin_unlock_bh(&xor_dev->lock); | 340 | spin_unlock_bh(&xor_dev->lock); |
365 | 341 | ||
366 | /* set the async tx descriptor */ | ||
367 | dma_async_tx_descriptor_init(&sw_desc->async_tx, &xor_dev->dmachan); | ||
368 | sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit; | ||
369 | async_tx_ack(&sw_desc->async_tx); | ||
370 | |||
371 | return sw_desc; | 342 | return sw_desc; |
372 | } | 343 | } |
373 | 344 | ||
@@ -389,6 +360,8 @@ mv_xor_v2_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, | |||
389 | __func__, len, &src, &dest, flags); | 360 | __func__, len, &src, &dest, flags); |
390 | 361 | ||
391 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); | 362 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); |
363 | if (!sw_desc) | ||
364 | return NULL; | ||
392 | 365 | ||
393 | sw_desc->async_tx.flags = flags; | 366 | sw_desc->async_tx.flags = flags; |
394 | 367 | ||
@@ -443,6 +416,8 @@ mv_xor_v2_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, | |||
443 | __func__, src_cnt, len, &dest, flags); | 416 | __func__, src_cnt, len, &dest, flags); |
444 | 417 | ||
445 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); | 418 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); |
419 | if (!sw_desc) | ||
420 | return NULL; | ||
446 | 421 | ||
447 | sw_desc->async_tx.flags = flags; | 422 | sw_desc->async_tx.flags = flags; |
448 | 423 | ||
@@ -491,6 +466,8 @@ mv_xor_v2_prep_dma_interrupt(struct dma_chan *chan, unsigned long flags) | |||
491 | container_of(chan, struct mv_xor_v2_device, dmachan); | 466 | container_of(chan, struct mv_xor_v2_device, dmachan); |
492 | 467 | ||
493 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); | 468 | sw_desc = mv_xor_v2_prep_sw_desc(xor_dev); |
469 | if (!sw_desc) | ||
470 | return NULL; | ||
494 | 471 | ||
495 | /* set the HW descriptor */ | 472 | /* set the HW descriptor */ |
496 | hw_descriptor = &sw_desc->hw_desc; | 473 | hw_descriptor = &sw_desc->hw_desc; |
@@ -554,7 +531,6 @@ static void mv_xor_v2_tasklet(unsigned long data) | |||
554 | { | 531 | { |
555 | struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data; | 532 | struct mv_xor_v2_device *xor_dev = (struct mv_xor_v2_device *) data; |
556 | int pending_ptr, num_of_pending, i; | 533 | int pending_ptr, num_of_pending, i; |
557 | struct mv_xor_v2_descriptor *next_pending_hw_desc = NULL; | ||
558 | struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL; | 534 | struct mv_xor_v2_sw_desc *next_pending_sw_desc = NULL; |
559 | 535 | ||
560 | dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__); | 536 | dev_dbg(xor_dev->dmadev.dev, "%s %d\n", __func__, __LINE__); |
@@ -562,17 +538,10 @@ static void mv_xor_v2_tasklet(unsigned long data) | |||
562 | /* get the pending descriptors parameters */ | 538 | /* get the pending descriptors parameters */ |
563 | num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr); | 539 | num_of_pending = mv_xor_v2_get_pending_params(xor_dev, &pending_ptr); |
564 | 540 | ||
565 | /* next HW descriptor */ | ||
566 | next_pending_hw_desc = xor_dev->hw_desq_virt + pending_ptr; | ||
567 | |||
568 | /* loop over free descriptors */ | 541 | /* loop over free descriptors */ |
569 | for (i = 0; i < num_of_pending; i++) { | 542 | for (i = 0; i < num_of_pending; i++) { |
570 | 543 | struct mv_xor_v2_descriptor *next_pending_hw_desc = | |
571 | if (pending_ptr > MV_XOR_V2_DESC_NUM) | 544 | xor_dev->hw_desq_virt + pending_ptr; |
572 | pending_ptr = 0; | ||
573 | |||
574 | if (next_pending_sw_desc != NULL) | ||
575 | next_pending_hw_desc++; | ||
576 | 545 | ||
577 | /* get the SW descriptor related to the HW descriptor */ | 546 | /* get the SW descriptor related to the HW descriptor */ |
578 | next_pending_sw_desc = | 547 | next_pending_sw_desc = |
@@ -608,15 +577,14 @@ static void mv_xor_v2_tasklet(unsigned long data) | |||
608 | 577 | ||
609 | /* increment the next descriptor */ | 578 | /* increment the next descriptor */ |
610 | pending_ptr++; | 579 | pending_ptr++; |
580 | if (pending_ptr >= MV_XOR_V2_DESC_NUM) | ||
581 | pending_ptr = 0; | ||
611 | } | 582 | } |
612 | 583 | ||
613 | if (num_of_pending != 0) { | 584 | if (num_of_pending != 0) { |
614 | /* free the descriptors */ | 585 | /* free the descriptors */ |
615 | mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending); | 586 | mv_xor_v2_free_desc_from_desq(xor_dev, num_of_pending); |
616 | } | 587 | } |
617 | |||
618 | /* Update IMSG threshold, to enable new IMSG interrupts */ | ||
619 | mv_xor_v2_set_imsg_thrd(xor_dev, 0); | ||
620 | } | 588 | } |
621 | 589 | ||
622 | /* | 590 | /* |
@@ -648,9 +616,6 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) | |||
648 | writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, | 616 | writel((xor_dev->hw_desq & 0xFFFF00000000) >> 32, |
649 | xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); | 617 | xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_BAHR_OFF); |
650 | 618 | ||
651 | /* enable the DMA engine */ | ||
652 | writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); | ||
653 | |||
654 | /* | 619 | /* |
655 | * This is a temporary solution, until we activate the | 620 | * This is a temporary solution, until we activate the |
656 | * SMMU. Set the attributes for reading & writing data buffers | 621 | * SMMU. Set the attributes for reading & writing data buffers |
@@ -694,6 +659,9 @@ static int mv_xor_v2_descq_init(struct mv_xor_v2_device *xor_dev) | |||
694 | reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL; | 659 | reg |= MV_XOR_V2_GLOB_PAUSE_AXI_TIME_DIS_VAL; |
695 | writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); | 660 | writel(reg, xor_dev->glob_base + MV_XOR_V2_GLOB_PAUSE); |
696 | 661 | ||
662 | /* enable the DMA engine */ | ||
663 | writel(0, xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_STOP_OFF); | ||
664 | |||
697 | return 0; | 665 | return 0; |
698 | } | 666 | } |
699 | 667 | ||
@@ -725,6 +693,10 @@ static int mv_xor_v2_probe(struct platform_device *pdev) | |||
725 | 693 | ||
726 | platform_set_drvdata(pdev, xor_dev); | 694 | platform_set_drvdata(pdev, xor_dev); |
727 | 695 | ||
696 | ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)); | ||
697 | if (ret) | ||
698 | return ret; | ||
699 | |||
728 | xor_dev->clk = devm_clk_get(&pdev->dev, NULL); | 700 | xor_dev->clk = devm_clk_get(&pdev->dev, NULL); |
729 | if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) | 701 | if (IS_ERR(xor_dev->clk) && PTR_ERR(xor_dev->clk) == -EPROBE_DEFER) |
730 | return -EPROBE_DEFER; | 702 | return -EPROBE_DEFER; |
@@ -785,8 +757,15 @@ static int mv_xor_v2_probe(struct platform_device *pdev) | |||
785 | 757 | ||
786 | /* add all SW descriptors to the free list */ | 758 | /* add all SW descriptors to the free list */ |
787 | for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) { | 759 | for (i = 0; i < MV_XOR_V2_DESC_NUM; i++) { |
788 | xor_dev->sw_desq[i].idx = i; | 760 | struct mv_xor_v2_sw_desc *sw_desc = |
789 | list_add(&xor_dev->sw_desq[i].free_list, | 761 | xor_dev->sw_desq + i; |
762 | sw_desc->idx = i; | ||
763 | dma_async_tx_descriptor_init(&sw_desc->async_tx, | ||
764 | &xor_dev->dmachan); | ||
765 | sw_desc->async_tx.tx_submit = mv_xor_v2_tx_submit; | ||
766 | async_tx_ack(&sw_desc->async_tx); | ||
767 | |||
768 | list_add(&sw_desc->free_list, | ||
790 | &xor_dev->free_sw_desc); | 769 | &xor_dev->free_sw_desc); |
791 | } | 770 | } |
792 | 771 | ||
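Rather than reading the next DESQ slot back from the hardware allocator register, the driver now keeps hw_queue_idx in software and wraps it at MV_XOR_V2_DESC_NUM, and the tasklet wraps pending_ptr the same way. The wrap logic in isolation:

#include <stdio.h>

#define DESC_NUM 8

/* Post-increment with wrap, as the driver now does for hw_queue_idx and
 * pending_ptr instead of querying the hardware allocator register. */
static unsigned int ring_next(unsigned int *idx)
{
	unsigned int cur = *idx;

	(*idx)++;
	if (*idx >= DESC_NUM)
		*idx = 0;
	return cur;
}

int main(void)
{
	unsigned int idx = 6;

	for (int i = 0; i < 4; i++)
		printf("%u ", ring_next(&idx));   /* 6 7 0 1 */
	printf("\n");
	return 0;
}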
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 8b0da7fa520d..e90a7a0d760a 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c | |||
@@ -3008,7 +3008,8 @@ static int pl330_remove(struct amba_device *adev) | |||
3008 | 3008 | ||
3009 | for (i = 0; i < AMBA_NR_IRQS; i++) { | 3009 | for (i = 0; i < AMBA_NR_IRQS; i++) { |
3010 | irq = adev->irq[i]; | 3010 | irq = adev->irq[i]; |
3011 | devm_free_irq(&adev->dev, irq, pl330); | 3011 | if (irq) |
3012 | devm_free_irq(&adev->dev, irq, pl330); | ||
3012 | } | 3013 | } |
3013 | 3014 | ||
3014 | dma_async_device_unregister(&pl330->ddma); | 3015 | dma_async_device_unregister(&pl330->ddma); |
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index db41795fe42a..bd261c9e9664 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c | |||
@@ -1287,6 +1287,9 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, | |||
1287 | if (desc->hwdescs.use) { | 1287 | if (desc->hwdescs.use) { |
1288 | dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & | 1288 | dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & |
1289 | RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; | 1289 | RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; |
1290 | if (dptr == 0) | ||
1291 | dptr = desc->nchunks; | ||
1292 | dptr--; | ||
1290 | WARN_ON(dptr >= desc->nchunks); | 1293 | WARN_ON(dptr >= desc->nchunks); |
1291 | } else { | 1294 | } else { |
1292 | running = desc->running; | 1295 | running = desc->running; |
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index 72c649713ace..31a145154e9f 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c | |||
@@ -117,7 +117,7 @@ struct usb_dmac { | |||
117 | #define USB_DMASWR 0x0008 | 117 | #define USB_DMASWR 0x0008 |
118 | #define USB_DMASWR_SWR (1 << 0) | 118 | #define USB_DMASWR_SWR (1 << 0) |
119 | #define USB_DMAOR 0x0060 | 119 | #define USB_DMAOR 0x0060 |
120 | #define USB_DMAOR_AE (1 << 2) | 120 | #define USB_DMAOR_AE (1 << 1) |
121 | #define USB_DMAOR_DME (1 << 0) | 121 | #define USB_DMAOR_DME (1 << 0) |
122 | 122 | ||
123 | #define USB_DMASAR 0x0000 | 123 | #define USB_DMASAR 0x0000 |
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c index 44c01390d035..dc269cb288c2 100644 --- a/drivers/firmware/dmi-id.c +++ b/drivers/firmware/dmi-id.c | |||
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME); | |||
47 | DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); | 47 | DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); |
48 | DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); | 48 | DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); |
49 | DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); | 49 | DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); |
50 | DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0400, DMI_PRODUCT_FAMILY); | ||
50 | DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); | 51 | DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); |
51 | DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); | 52 | DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); |
52 | DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); | 53 | DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); |
@@ -191,6 +192,7 @@ static void __init dmi_id_init_attr_table(void) | |||
191 | ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); | 192 | ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); |
192 | ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); | 193 | ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); |
193 | ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); | 194 | ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); |
195 | ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); | ||
194 | ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); | 196 | ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); |
195 | ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); | 197 | ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); |
196 | ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); | 198 | ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); |
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 54be60ead08f..93f7acdaac7a 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -430,6 +430,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy) | |||
430 | dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); | 430 | dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6); |
431 | dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); | 431 | dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7); |
432 | dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); | 432 | dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8); |
433 | dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26); | ||
433 | break; | 434 | break; |
434 | case 2: /* Base Board Information */ | 435 | case 2: /* Base Board Information */ |
435 | dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); | 436 | dmi_save_ident(dm, DMI_BOARD_VENDOR, 4); |
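The dmi-id.c and dmi_scan.c hunks above add a DMI_PRODUCT_FAMILY identifier: the SMBIOS type 1 string at offset 26 is saved during the scan and exposed as a root-readable (0400) sysfs attribute. A hedged kernel-side sketch of consuming it through the existing dmi_get_system_info() accessor (assumes the new enum value introduced by this change is available):

#include <linux/dmi.h>
#include <linux/printk.h>

/* Sketch: another driver querying the newly exported family string. */
static void report_product_family(void)
{
        const char *family = dmi_get_system_info(DMI_PRODUCT_FAMILY);

        if (family)
                pr_info("product family: %s\n", family);
}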
diff --git a/drivers/firmware/efi/efi-bgrt.c b/drivers/firmware/efi/efi-bgrt.c index 04ca8764f0c0..8bf27323f7a3 100644 --- a/drivers/firmware/efi/efi-bgrt.c +++ b/drivers/firmware/efi/efi-bgrt.c | |||
@@ -36,6 +36,9 @@ void __init efi_bgrt_init(struct acpi_table_header *table) | |||
36 | if (acpi_disabled) | 36 | if (acpi_disabled) |
37 | return; | 37 | return; |
38 | 38 | ||
39 | if (!efi_enabled(EFI_BOOT)) | ||
40 | return; | ||
41 | |||
39 | if (table->length < sizeof(bgrt_tab)) { | 42 | if (table->length < sizeof(bgrt_tab)) { |
40 | pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", | 43 | pr_notice("Ignoring BGRT: invalid length %u (expected %zu)\n", |
41 | table->length, sizeof(bgrt_tab)); | 44 | table->length, sizeof(bgrt_tab)); |
diff --git a/drivers/firmware/efi/libstub/secureboot.c b/drivers/firmware/efi/libstub/secureboot.c index 8c34d50a4d80..959777ec8a77 100644 --- a/drivers/firmware/efi/libstub/secureboot.c +++ b/drivers/firmware/efi/libstub/secureboot.c | |||
@@ -16,10 +16,10 @@ | |||
16 | 16 | ||
17 | /* BIOS variables */ | 17 | /* BIOS variables */ |
18 | static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; | 18 | static const efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID; |
19 | static const efi_char16_t const efi_SecureBoot_name[] = { | 19 | static const efi_char16_t efi_SecureBoot_name[] = { |
20 | 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 | 20 | 'S', 'e', 'c', 'u', 'r', 'e', 'B', 'o', 'o', 't', 0 |
21 | }; | 21 | }; |
22 | static const efi_char16_t const efi_SetupMode_name[] = { | 22 | static const efi_char16_t efi_SetupMode_name[] = { |
23 | 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 | 23 | 'S', 'e', 't', 'u', 'p', 'M', 'o', 'd', 'e', 0 |
24 | }; | 24 | }; |
25 | 25 | ||
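The secureboot.c change above drops a duplicated const from the two UEFI variable-name arrays; both qualifiers bind to the element type, so the second is redundant and recent compilers warn about the duplicate specifier. A minimal illustration in plain C (strings chosen only for the example):

/* Both lines declare an array of const char; the first merely says "const"
 * twice, which is legal C99 but draws a duplicate-qualifier warning on
 * newer compilers.
 */
static const char const duplicated[]   = "SecureBoot";
static const char       deduplicated[] = "SecureBoot";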
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index a4831fe0223b..a2c59a08b2bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | |||
@@ -220,9 +220,9 @@ static void amdgpu_vram_mgr_debug(struct ttm_mem_type_manager *man, | |||
220 | } | 220 | } |
221 | 221 | ||
222 | const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { | 222 | const struct ttm_mem_type_manager_func amdgpu_vram_mgr_func = { |
223 | amdgpu_vram_mgr_init, | 223 | .init = amdgpu_vram_mgr_init, |
224 | amdgpu_vram_mgr_fini, | 224 | .takedown = amdgpu_vram_mgr_fini, |
225 | amdgpu_vram_mgr_new, | 225 | .get_node = amdgpu_vram_mgr_new, |
226 | amdgpu_vram_mgr_del, | 226 | .put_node = amdgpu_vram_mgr_del, |
227 | amdgpu_vram_mgr_debug | 227 | .debug = amdgpu_vram_mgr_debug |
228 | }; | 228 | }; |
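The amdgpu_vram_mgr_func table above switches from positional to designated initializers, so each callback is bound to its slot by name rather than by the member order of ttm_mem_type_manager_func. A generic, compilable sketch of the idiom (the struct and functions here are illustrative):

struct mgr_ops {
        int  (*init)(void);
        void (*takedown)(void);
};

static int  my_init(void)     { return 0; }
static void my_takedown(void) { }

/* Positional form: breaks silently if members are reordered or new ones
 * are inserted ahead of the callbacks.
 */
static const struct mgr_ops positional = { my_init, my_takedown };

/* Designated form: each callback is tied to its slot by name. */
static const struct mgr_ops designated = {
        .init     = my_init,
        .takedown = my_takedown,
};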
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index fb0819359909..90332f55cfba 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
@@ -77,13 +77,26 @@ static int vce_v3_0_set_clockgating_state(void *handle, | |||
77 | static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) | 77 | static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) |
78 | { | 78 | { |
79 | struct amdgpu_device *adev = ring->adev; | 79 | struct amdgpu_device *adev = ring->adev; |
80 | u32 v; | ||
81 | |||
82 | mutex_lock(&adev->grbm_idx_mutex); | ||
83 | if (adev->vce.harvest_config == 0 || | ||
84 | adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) | ||
85 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); | ||
86 | else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) | ||
87 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); | ||
80 | 88 | ||
81 | if (ring == &adev->vce.ring[0]) | 89 | if (ring == &adev->vce.ring[0]) |
82 | return RREG32(mmVCE_RB_RPTR); | 90 | v = RREG32(mmVCE_RB_RPTR); |
83 | else if (ring == &adev->vce.ring[1]) | 91 | else if (ring == &adev->vce.ring[1]) |
84 | return RREG32(mmVCE_RB_RPTR2); | 92 | v = RREG32(mmVCE_RB_RPTR2); |
85 | else | 93 | else |
86 | return RREG32(mmVCE_RB_RPTR3); | 94 | v = RREG32(mmVCE_RB_RPTR3); |
95 | |||
96 | WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); | ||
97 | mutex_unlock(&adev->grbm_idx_mutex); | ||
98 | |||
99 | return v; | ||
87 | } | 100 | } |
88 | 101 | ||
89 | /** | 102 | /** |
@@ -96,13 +109,26 @@ static uint64_t vce_v3_0_ring_get_rptr(struct amdgpu_ring *ring) | |||
96 | static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) | 109 | static uint64_t vce_v3_0_ring_get_wptr(struct amdgpu_ring *ring) |
97 | { | 110 | { |
98 | struct amdgpu_device *adev = ring->adev; | 111 | struct amdgpu_device *adev = ring->adev; |
112 | u32 v; | ||
113 | |||
114 | mutex_lock(&adev->grbm_idx_mutex); | ||
115 | if (adev->vce.harvest_config == 0 || | ||
116 | adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) | ||
117 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); | ||
118 | else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) | ||
119 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); | ||
99 | 120 | ||
100 | if (ring == &adev->vce.ring[0]) | 121 | if (ring == &adev->vce.ring[0]) |
101 | return RREG32(mmVCE_RB_WPTR); | 122 | v = RREG32(mmVCE_RB_WPTR); |
102 | else if (ring == &adev->vce.ring[1]) | 123 | else if (ring == &adev->vce.ring[1]) |
103 | return RREG32(mmVCE_RB_WPTR2); | 124 | v = RREG32(mmVCE_RB_WPTR2); |
104 | else | 125 | else |
105 | return RREG32(mmVCE_RB_WPTR3); | 126 | v = RREG32(mmVCE_RB_WPTR3); |
127 | |||
128 | WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); | ||
129 | mutex_unlock(&adev->grbm_idx_mutex); | ||
130 | |||
131 | return v; | ||
106 | } | 132 | } |
107 | 133 | ||
108 | /** | 134 | /** |
@@ -116,12 +142,22 @@ static void vce_v3_0_ring_set_wptr(struct amdgpu_ring *ring) | |||
116 | { | 142 | { |
117 | struct amdgpu_device *adev = ring->adev; | 143 | struct amdgpu_device *adev = ring->adev; |
118 | 144 | ||
145 | mutex_lock(&adev->grbm_idx_mutex); | ||
146 | if (adev->vce.harvest_config == 0 || | ||
147 | adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE1) | ||
148 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0)); | ||
149 | else if (adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) | ||
150 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1)); | ||
151 | |||
119 | if (ring == &adev->vce.ring[0]) | 152 | if (ring == &adev->vce.ring[0]) |
120 | WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); | 153 | WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); |
121 | else if (ring == &adev->vce.ring[1]) | 154 | else if (ring == &adev->vce.ring[1]) |
122 | WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); | 155 | WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); |
123 | else | 156 | else |
124 | WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); | 157 | WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); |
158 | |||
159 | WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT); | ||
160 | mutex_unlock(&adev->grbm_idx_mutex); | ||
125 | } | 161 | } |
126 | 162 | ||
127 | static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) | 163 | static void vce_v3_0_override_vce_clock_gating(struct amdgpu_device *adev, bool override) |
@@ -231,33 +267,38 @@ static int vce_v3_0_start(struct amdgpu_device *adev) | |||
231 | struct amdgpu_ring *ring; | 267 | struct amdgpu_ring *ring; |
232 | int idx, r; | 268 | int idx, r; |
233 | 269 | ||
234 | ring = &adev->vce.ring[0]; | ||
235 | WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr)); | ||
236 | WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); | ||
237 | WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); | ||
238 | WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); | ||
239 | WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); | ||
240 | |||
241 | ring = &adev->vce.ring[1]; | ||
242 | WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr)); | ||
243 | WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); | ||
244 | WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); | ||
245 | WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); | ||
246 | WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); | ||
247 | |||
248 | ring = &adev->vce.ring[2]; | ||
249 | WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr)); | ||
250 | WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); | ||
251 | WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr); | ||
252 | WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr)); | ||
253 | WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4); | ||
254 | |||
255 | mutex_lock(&adev->grbm_idx_mutex); | 270 | mutex_lock(&adev->grbm_idx_mutex); |
256 | for (idx = 0; idx < 2; ++idx) { | 271 | for (idx = 0; idx < 2; ++idx) { |
257 | if (adev->vce.harvest_config & (1 << idx)) | 272 | if (adev->vce.harvest_config & (1 << idx)) |
258 | continue; | 273 | continue; |
259 | 274 | ||
260 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); | 275 | WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx)); |
276 | |||
277 | /* Program the instance 0 register space when both instances (or only | ||
278 | instance 0) are available; program the instance 1 register space when only instance 1 is available */ | ||
279 | if (idx != 1 || adev->vce.harvest_config == AMDGPU_VCE_HARVEST_VCE0) { | ||
280 | ring = &adev->vce.ring[0]; | ||
281 | WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr)); | ||
282 | WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr)); | ||
283 | WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr); | ||
284 | WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); | ||
285 | WREG32(mmVCE_RB_SIZE, ring->ring_size / 4); | ||
286 | |||
287 | ring = &adev->vce.ring[1]; | ||
288 | WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr)); | ||
289 | WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr)); | ||
290 | WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr); | ||
291 | WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr)); | ||
292 | WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4); | ||
293 | |||
294 | ring = &adev->vce.ring[2]; | ||
295 | WREG32(mmVCE_RB_RPTR3, lower_32_bits(ring->wptr)); | ||
296 | WREG32(mmVCE_RB_WPTR3, lower_32_bits(ring->wptr)); | ||
297 | WREG32(mmVCE_RB_BASE_LO3, ring->gpu_addr); | ||
298 | WREG32(mmVCE_RB_BASE_HI3, upper_32_bits(ring->gpu_addr)); | ||
299 | WREG32(mmVCE_RB_SIZE3, ring->ring_size / 4); | ||
300 | } | ||
301 | |||
261 | vce_v3_0_mc_resume(adev, idx); | 302 | vce_v3_0_mc_resume(adev, idx); |
262 | WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); | 303 | WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1); |
263 | 304 | ||
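In the vce_v3_0 hunks above, the ring pointer accessors and the start path now route register accesses to the correct VCE instance: GRBM_GFX_INDEX is pointed at instance 0 or 1 under grbm_idx_mutex, the registers are touched, and the default index is restored before the mutex is released. A condensed outline of that select/access/restore pattern using the same symbols the hunks use (an outline of the flow, not the driver's exact code):

/* Sketch: pick a hardware instance, read one of its registers, then
 * restore the default routing before dropping the lock.
 */
static u32 read_instance_reg(struct amdgpu_device *adev, int instance, u32 reg)
{
        u32 v;

        mutex_lock(&adev->grbm_idx_mutex);
        WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(instance));
        v = RREG32(reg);
        WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
        mutex_unlock(&adev->grbm_idx_mutex);

        return v;
}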
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c index d5f53d04fa08..83e40fe51b62 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c | |||
@@ -709,17 +709,17 @@ static int tf_vega10_thermal_disable_alert(struct pp_hwmgr *hwmgr, | |||
709 | 709 | ||
710 | static struct phm_master_table_item | 710 | static struct phm_master_table_item |
711 | vega10_thermal_start_thermal_controller_master_list[] = { | 711 | vega10_thermal_start_thermal_controller_master_list[] = { |
712 | {NULL, tf_vega10_thermal_initialize}, | 712 | { .tableFunction = tf_vega10_thermal_initialize }, |
713 | {NULL, tf_vega10_thermal_set_temperature_range}, | 713 | { .tableFunction = tf_vega10_thermal_set_temperature_range }, |
714 | {NULL, tf_vega10_thermal_enable_alert}, | 714 | { .tableFunction = tf_vega10_thermal_enable_alert }, |
715 | /* We should restrict performance levels to low before we halt the SMC. | 715 | /* We should restrict performance levels to low before we halt the SMC. |
716 | * On the other hand we are still in boot state when we do this | 716 | * On the other hand we are still in boot state when we do this |
717 | * so it would be pointless. | 717 | * so it would be pointless. |
718 | * If this assumption changes we have to revisit this table. | 718 | * If this assumption changes we have to revisit this table. |
719 | */ | 719 | */ |
720 | {NULL, tf_vega10_thermal_setup_fan_table}, | 720 | { .tableFunction = tf_vega10_thermal_setup_fan_table }, |
721 | {NULL, tf_vega10_thermal_start_smc_fan_control}, | 721 | { .tableFunction = tf_vega10_thermal_start_smc_fan_control }, |
722 | {NULL, NULL} | 722 | { } |
723 | }; | 723 | }; |
724 | 724 | ||
725 | static struct phm_master_table_header | 725 | static struct phm_master_table_header |
@@ -731,10 +731,10 @@ vega10_thermal_start_thermal_controller_master = { | |||
731 | 731 | ||
732 | static struct phm_master_table_item | 732 | static struct phm_master_table_item |
733 | vega10_thermal_set_temperature_range_master_list[] = { | 733 | vega10_thermal_set_temperature_range_master_list[] = { |
734 | {NULL, tf_vega10_thermal_disable_alert}, | 734 | { .tableFunction = tf_vega10_thermal_disable_alert }, |
735 | {NULL, tf_vega10_thermal_set_temperature_range}, | 735 | { .tableFunction = tf_vega10_thermal_set_temperature_range }, |
736 | {NULL, tf_vega10_thermal_enable_alert}, | 736 | { .tableFunction = tf_vega10_thermal_enable_alert }, |
737 | {NULL, NULL} | 737 | { } |
738 | }; | 738 | }; |
739 | 739 | ||
740 | struct phm_master_table_header | 740 | struct phm_master_table_header |
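The vega10 thermal tables above get the same designated-initializer treatment, with the extra detail that the `{NULL, NULL}` terminators become empty braced initializers: an empty `{ }` zero-fills the sentinel entry no matter how many members struct phm_master_table_item grows. A small compilable illustration with made-up names:

struct item {
        void *priv;
        int (*fn)(void);
};

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }

static const struct item table[] = {
        { .fn = step_a },
        { .fn = step_b },
        { }     /* all-zero sentinel terminates the table */
};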
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index 3e5f52110ea1..213fb837e1c4 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c | |||
@@ -1208,3 +1208,86 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux) | |||
1208 | return 0; | 1208 | return 0; |
1209 | } | 1209 | } |
1210 | EXPORT_SYMBOL(drm_dp_stop_crc); | 1210 | EXPORT_SYMBOL(drm_dp_stop_crc); |
1211 | |||
1212 | struct dpcd_quirk { | ||
1213 | u8 oui[3]; | ||
1214 | bool is_branch; | ||
1215 | u32 quirks; | ||
1216 | }; | ||
1217 | |||
1218 | #define OUI(first, second, third) { (first), (second), (third) } | ||
1219 | |||
1220 | static const struct dpcd_quirk dpcd_quirk_list[] = { | ||
1221 | /* Analogix 7737 needs reduced M and N at HBR2 link rates */ | ||
1222 | { OUI(0x00, 0x22, 0xb9), true, BIT(DP_DPCD_QUIRK_LIMITED_M_N) }, | ||
1223 | }; | ||
1224 | |||
1225 | #undef OUI | ||
1226 | |||
1227 | /* | ||
1228 | * Get a bit mask of DPCD quirks for the sink/branch device identified by | ||
1229 | * ident. The quirk data is shared but it's up to the drivers to act on the | ||
1230 | * data. | ||
1231 | * | ||
1232 | * For now, only the OUI (first three bytes) is used, but this may be extended | ||
1233 | * to device identification string and hardware/firmware revisions later. | ||
1234 | */ | ||
1235 | static u32 | ||
1236 | drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch) | ||
1237 | { | ||
1238 | const struct dpcd_quirk *quirk; | ||
1239 | u32 quirks = 0; | ||
1240 | int i; | ||
1241 | |||
1242 | for (i = 0; i < ARRAY_SIZE(dpcd_quirk_list); i++) { | ||
1243 | quirk = &dpcd_quirk_list[i]; | ||
1244 | |||
1245 | if (quirk->is_branch != is_branch) | ||
1246 | continue; | ||
1247 | |||
1248 | if (memcmp(quirk->oui, ident->oui, sizeof(ident->oui)) != 0) | ||
1249 | continue; | ||
1250 | |||
1251 | quirks |= quirk->quirks; | ||
1252 | } | ||
1253 | |||
1254 | return quirks; | ||
1255 | } | ||
1256 | |||
1257 | /** | ||
1258 | * drm_dp_read_desc - read sink/branch descriptor from DPCD | ||
1259 | * @aux: DisplayPort AUX channel | ||
1260 | * @desc: Device descriptor to fill from DPCD | ||
1261 | * @is_branch: true for branch devices, false for sink devices | ||
1262 | * | ||
1263 | * Read DPCD 0x400 (sink) or 0x500 (branch) into @desc. Also debug log the | ||
1264 | * identification. | ||
1265 | * | ||
1266 | * Returns 0 on success or a negative error code on failure. | ||
1267 | */ | ||
1268 | int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, | ||
1269 | bool is_branch) | ||
1270 | { | ||
1271 | struct drm_dp_dpcd_ident *ident = &desc->ident; | ||
1272 | unsigned int offset = is_branch ? DP_BRANCH_OUI : DP_SINK_OUI; | ||
1273 | int ret, dev_id_len; | ||
1274 | |||
1275 | ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident)); | ||
1276 | if (ret < 0) | ||
1277 | return ret; | ||
1278 | |||
1279 | desc->quirks = drm_dp_get_quirks(ident, is_branch); | ||
1280 | |||
1281 | dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id)); | ||
1282 | |||
1283 | DRM_DEBUG_KMS("DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n", | ||
1284 | is_branch ? "branch" : "sink", | ||
1285 | (int)sizeof(ident->oui), ident->oui, | ||
1286 | dev_id_len, ident->device_id, | ||
1287 | ident->hw_rev >> 4, ident->hw_rev & 0xf, | ||
1288 | ident->sw_major_rev, ident->sw_minor_rev, | ||
1289 | desc->quirks); | ||
1290 | |||
1291 | return 0; | ||
1292 | } | ||
1293 | EXPORT_SYMBOL(drm_dp_read_desc); | ||
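The new drm_dp_read_desc() above reads the identification block at DPCD 0x400 (sink) or 0x500 (branch), matches the OUI against the shared dpcd_quirk_list, and records the resulting bits in desc->quirks. A hedged caller-side sketch, mirroring how the i915 hunks later in this diff use it (DP_RECEIVER_CAP_SIZE and the drm_dp_* helpers are existing DRM definitions; the wrapper function itself is illustrative):

#include <drm/drm_dp_helper.h>

/* Sketch: read the descriptor once, then key behaviour off a quirk bit. */
static bool sink_needs_limited_m_n(struct drm_dp_aux *aux,
                                   const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
        struct drm_dp_desc desc;

        if (drm_dp_read_desc(aux, &desc, drm_dp_is_branch(dpcd)) < 0)
                return false;

        return drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_LIMITED_M_N);
}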
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 09d3c4c3c858..50294a7bd29d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -82,14 +82,9 @@ err_file_priv_free: | |||
82 | return ret; | 82 | return ret; |
83 | } | 83 | } |
84 | 84 | ||
85 | static void exynos_drm_preclose(struct drm_device *dev, | ||
86 | struct drm_file *file) | ||
87 | { | ||
88 | exynos_drm_subdrv_close(dev, file); | ||
89 | } | ||
90 | |||
91 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) | 85 | static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file) |
92 | { | 86 | { |
87 | exynos_drm_subdrv_close(dev, file); | ||
93 | kfree(file->driver_priv); | 88 | kfree(file->driver_priv); |
94 | file->driver_priv = NULL; | 89 | file->driver_priv = NULL; |
95 | } | 90 | } |
@@ -145,7 +140,6 @@ static struct drm_driver exynos_drm_driver = { | |||
145 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | 140 | .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
146 | | DRIVER_ATOMIC | DRIVER_RENDER, | 141 | | DRIVER_ATOMIC | DRIVER_RENDER, |
147 | .open = exynos_drm_open, | 142 | .open = exynos_drm_open, |
148 | .preclose = exynos_drm_preclose, | ||
149 | .lastclose = exynos_drm_lastclose, | 143 | .lastclose = exynos_drm_lastclose, |
150 | .postclose = exynos_drm_postclose, | 144 | .postclose = exynos_drm_postclose, |
151 | .gem_free_object_unlocked = exynos_drm_gem_free_object, | 145 | .gem_free_object_unlocked = exynos_drm_gem_free_object, |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h index cb3176930596..39c740572034 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.h +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h | |||
@@ -160,12 +160,9 @@ struct exynos_drm_clk { | |||
160 | * drm framework doesn't support multiple irq yet. | 160 | * drm framework doesn't support multiple irq yet. |
161 | * we can refer to the crtc to current hardware interrupt occurred through | 161 | * we can refer to the crtc to current hardware interrupt occurred through |
162 | * this pipe value. | 162 | * this pipe value. |
163 | * @enabled: if the crtc is enabled or not | ||
164 | * @event: vblank event that is currently queued for flip | ||
165 | * @wait_update: wait all pending planes updates to finish | ||
166 | * @pending_update: number of pending plane updates in this crtc | ||
167 | * @ops: pointer to callbacks for exynos drm specific functionality | 163 | * @ops: pointer to callbacks for exynos drm specific functionality |
168 | * @ctx: A pointer to the crtc's implementation specific context | 164 | * @ctx: A pointer to the crtc's implementation specific context |
165 | * @pipe_clk: A pointer to the crtc's pipeline clock. | ||
169 | */ | 166 | */ |
170 | struct exynos_drm_crtc { | 167 | struct exynos_drm_crtc { |
171 | struct drm_crtc base; | 168 | struct drm_crtc base; |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index fc4fda738906..d404de86d5f9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c | |||
@@ -1633,7 +1633,6 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) | |||
1633 | { | 1633 | { |
1634 | struct device *dev = dsi->dev; | 1634 | struct device *dev = dsi->dev; |
1635 | struct device_node *node = dev->of_node; | 1635 | struct device_node *node = dev->of_node; |
1636 | struct device_node *ep; | ||
1637 | int ret; | 1636 | int ret; |
1638 | 1637 | ||
1639 | ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency", | 1638 | ret = exynos_dsi_of_read_u32(node, "samsung,pll-clock-frequency", |
@@ -1641,32 +1640,21 @@ static int exynos_dsi_parse_dt(struct exynos_dsi *dsi) | |||
1641 | if (ret < 0) | 1640 | if (ret < 0) |
1642 | return ret; | 1641 | return ret; |
1643 | 1642 | ||
1644 | ep = of_graph_get_endpoint_by_regs(node, DSI_PORT_OUT, 0); | 1643 | ret = exynos_dsi_of_read_u32(node, "samsung,burst-clock-frequency", |
1645 | if (!ep) { | ||
1646 | dev_err(dev, "no output port with endpoint specified\n"); | ||
1647 | return -EINVAL; | ||
1648 | } | ||
1649 | |||
1650 | ret = exynos_dsi_of_read_u32(ep, "samsung,burst-clock-frequency", | ||
1651 | &dsi->burst_clk_rate); | 1644 | &dsi->burst_clk_rate); |
1652 | if (ret < 0) | 1645 | if (ret < 0) |
1653 | goto end; | 1646 | return ret; |
1654 | 1647 | ||
1655 | ret = exynos_dsi_of_read_u32(ep, "samsung,esc-clock-frequency", | 1648 | ret = exynos_dsi_of_read_u32(node, "samsung,esc-clock-frequency", |
1656 | &dsi->esc_clk_rate); | 1649 | &dsi->esc_clk_rate); |
1657 | if (ret < 0) | 1650 | if (ret < 0) |
1658 | goto end; | 1651 | return ret; |
1659 | |||
1660 | of_node_put(ep); | ||
1661 | 1652 | ||
1662 | dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0); | 1653 | dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_OUT, 0); |
1663 | if (!dsi->bridge_node) | 1654 | if (!dsi->bridge_node) |
1664 | return -EINVAL; | 1655 | return -EINVAL; |
1665 | 1656 | ||
1666 | end: | 1657 | return 0; |
1667 | of_node_put(ep); | ||
1668 | |||
1669 | return ret; | ||
1670 | } | 1658 | } |
1671 | 1659 | ||
1672 | static int exynos_dsi_bind(struct device *dev, struct device *master, | 1660 | static int exynos_dsi_bind(struct device *dev, struct device *master, |
@@ -1817,6 +1805,10 @@ static int exynos_dsi_probe(struct platform_device *pdev) | |||
1817 | 1805 | ||
1818 | static int exynos_dsi_remove(struct platform_device *pdev) | 1806 | static int exynos_dsi_remove(struct platform_device *pdev) |
1819 | { | 1807 | { |
1808 | struct exynos_dsi *dsi = platform_get_drvdata(pdev); | ||
1809 | |||
1810 | of_node_put(dsi->bridge_node); | ||
1811 | |||
1820 | pm_runtime_disable(&pdev->dev); | 1812 | pm_runtime_disable(&pdev->dev); |
1821 | 1813 | ||
1822 | component_del(&pdev->dev, &exynos_dsi_component_ops); | 1814 | component_del(&pdev->dev, &exynos_dsi_component_ops); |
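The exynos_dsi hunks above read samsung,burst-clock-frequency and samsung,esc-clock-frequency from the DSI controller node itself instead of chasing the output endpoint, and balance the of_graph_get_remote_node() reference by putting the bridge node in the remove path. A minimal sketch of the node-level property read (exynos_dsi_of_read_u32 in the driver presumably wraps something like this; the function below is illustrative):

#include <linux/of.h>

/* Sketch: read a clock-rate property straight from the controller node. */
static int read_burst_clk(struct device_node *node, u32 *rate)
{
        return of_property_read_u32(node, "samsung,burst-clock-frequency", rate);
}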
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index dca989eb2d42..24fe04d6307b 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c | |||
@@ -779,8 +779,26 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id) | |||
779 | vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; | 779 | vgpu_vreg(vgpu, ctx_status_ptr_reg) = ctx_status_ptr.dw; |
780 | } | 780 | } |
781 | 781 | ||
782 | static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask) | ||
783 | { | ||
784 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
785 | struct intel_engine_cs *engine; | ||
786 | struct intel_vgpu_workload *pos, *n; | ||
787 | unsigned int tmp; | ||
788 | |||
789 | /* free the unsubmitted workloads in the queues. */ | ||
790 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { | ||
791 | list_for_each_entry_safe(pos, n, | ||
792 | &vgpu->workload_q_head[engine->id], list) { | ||
793 | list_del_init(&pos->list); | ||
794 | free_workload(pos); | ||
795 | } | ||
796 | } | ||
797 | } | ||
798 | |||
782 | void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) | 799 | void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu) |
783 | { | 800 | { |
801 | clean_workloads(vgpu, ALL_ENGINES); | ||
784 | kmem_cache_destroy(vgpu->workloads); | 802 | kmem_cache_destroy(vgpu->workloads); |
785 | } | 803 | } |
786 | 804 | ||
@@ -811,17 +829,9 @@ void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, | |||
811 | { | 829 | { |
812 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 830 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
813 | struct intel_engine_cs *engine; | 831 | struct intel_engine_cs *engine; |
814 | struct intel_vgpu_workload *pos, *n; | ||
815 | unsigned int tmp; | 832 | unsigned int tmp; |
816 | 833 | ||
817 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { | 834 | clean_workloads(vgpu, engine_mask); |
818 | /* free the unsubmited workload in the queue */ | 835 | for_each_engine_masked(engine, dev_priv, engine_mask, tmp) |
819 | list_for_each_entry_safe(pos, n, | ||
820 | &vgpu->workload_q_head[engine->id], list) { | ||
821 | list_del_init(&pos->list); | ||
822 | free_workload(pos); | ||
823 | } | ||
824 | |||
825 | init_vgpu_execlist(vgpu, engine->id); | 836 | init_vgpu_execlist(vgpu, engine->id); |
826 | } | ||
827 | } | 837 | } |
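The execlist hunks above factor the freeing of queued-but-unsubmitted workloads into clean_workloads(), which both intel_vgpu_clean_execlist() and the per-engine reset path now call instead of open-coding the list walk. The helper relies on the safe list-iteration idiom, since entries are freed while walking; a generic compilable sketch (names are illustrative):

#include <linux/list.h>

/* Sketch: the _safe iterator caches the next node, so the current entry
 * can be unlinked and freed inside the loop body.
 */
static void drain_queue(struct list_head *queue,
                        void (*free_entry)(struct list_head *entry))
{
        struct list_head *pos, *n;

        list_for_each_safe(pos, n, queue) {
                list_del_init(pos);
                free_entry(pos);
        }
}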
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index c995e540ff96..0ffd69654592 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -1366,18 +1366,28 @@ static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
1366 | void *p_data, unsigned int bytes) | 1366 | void *p_data, unsigned int bytes) |
1367 | { | 1367 | { |
1368 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 1368 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
1369 | i915_reg_t reg = {.reg = offset}; | 1369 | u32 v = *(u32 *)p_data; |
1370 | |||
1371 | if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) | ||
1372 | return intel_vgpu_default_mmio_write(vgpu, | ||
1373 | offset, p_data, bytes); | ||
1370 | 1374 | ||
1371 | switch (offset) { | 1375 | switch (offset) { |
1372 | case 0x4ddc: | 1376 | case 0x4ddc: |
1373 | vgpu_vreg(vgpu, offset) = 0x8000003c; | 1377 | /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ |
1374 | /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl */ | 1378 | vgpu_vreg(vgpu, offset) = v & ~(1 << 31); |
1375 | I915_WRITE(reg, vgpu_vreg(vgpu, offset)); | ||
1376 | break; | 1379 | break; |
1377 | case 0x42080: | 1380 | case 0x42080: |
1378 | vgpu_vreg(vgpu, offset) = 0x8000; | 1381 | /* bypass WaCompressedResourceDisplayNewHashMode */ |
1379 | /* WaCompressedResourceDisplayNewHashMode:skl */ | 1382 | vgpu_vreg(vgpu, offset) = v & ~(1 << 15); |
1380 | I915_WRITE(reg, vgpu_vreg(vgpu, offset)); | 1383 | break; |
1384 | case 0xe194: | ||
1385 | /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ | ||
1386 | vgpu_vreg(vgpu, offset) = v & ~(1 << 8); | ||
1387 | break; | ||
1388 | case 0x7014: | ||
1389 | /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ | ||
1390 | vgpu_vreg(vgpu, offset) = v & ~(1 << 13); | ||
1381 | break; | 1391 | break; |
1382 | default: | 1392 | default: |
1383 | return -EINVAL; | 1393 | return -EINVAL; |
@@ -1634,7 +1644,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
1634 | MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1644 | MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1635 | MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, | 1645 | MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, |
1636 | NULL, NULL); | 1646 | NULL, NULL); |
1637 | MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 1647 | MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, |
1648 | skl_misc_ctl_write); | ||
1638 | MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1649 | MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1639 | MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1650 | MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1640 | MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1651 | MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); |
@@ -2568,7 +2579,8 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2568 | MMIO_D(0x6e570, D_BDW_PLUS); | 2579 | MMIO_D(0x6e570, D_BDW_PLUS); |
2569 | MMIO_D(0x65f10, D_BDW_PLUS); | 2580 | MMIO_D(0x65f10, D_BDW_PLUS); |
2570 | 2581 | ||
2571 | MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2582 | MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, |
2583 | skl_misc_ctl_write); | ||
2572 | MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2584 | MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2573 | MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2585 | MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2574 | MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2586 | MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3036d4835b0f..c994fe6e65b2 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -1272,10 +1272,6 @@ int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1272 | 1272 | ||
1273 | dev_priv->ipc_enabled = false; | 1273 | dev_priv->ipc_enabled = false; |
1274 | 1274 | ||
1275 | /* Everything is in place, we can now relax! */ | ||
1276 | DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", | ||
1277 | driver.name, driver.major, driver.minor, driver.patchlevel, | ||
1278 | driver.date, pci_name(pdev), dev_priv->drm.primary->index); | ||
1279 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) | 1275 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG)) |
1280 | DRM_INFO("DRM_I915_DEBUG enabled\n"); | 1276 | DRM_INFO("DRM_I915_DEBUG enabled\n"); |
1281 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) | 1277 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c9b0949f6c1a..963f6d4481f7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -562,7 +562,8 @@ struct intel_link_m_n { | |||
562 | 562 | ||
563 | void intel_link_compute_m_n(int bpp, int nlanes, | 563 | void intel_link_compute_m_n(int bpp, int nlanes, |
564 | int pixel_clock, int link_clock, | 564 | int pixel_clock, int link_clock, |
565 | struct intel_link_m_n *m_n); | 565 | struct intel_link_m_n *m_n, |
566 | bool reduce_m_n); | ||
566 | 567 | ||
567 | /* Interface history: | 568 | /* Interface history: |
568 | * | 569 | * |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index a0563e18d753..50b8f1139ff9 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -2313,7 +2313,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, | |||
2313 | appgtt->base.allocate_va_range) { | 2313 | appgtt->base.allocate_va_range) { |
2314 | ret = appgtt->base.allocate_va_range(&appgtt->base, | 2314 | ret = appgtt->base.allocate_va_range(&appgtt->base, |
2315 | vma->node.start, | 2315 | vma->node.start, |
2316 | vma->node.size); | 2316 | vma->size); |
2317 | if (ret) | 2317 | if (ret) |
2318 | goto err_pages; | 2318 | goto err_pages; |
2319 | } | 2319 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 129ed303a6c4..57d9f7f4ef15 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c | |||
@@ -59,9 +59,6 @@ static void i915_gem_shrinker_unlock(struct drm_device *dev, bool unlock) | |||
59 | return; | 59 | return; |
60 | 60 | ||
61 | mutex_unlock(&dev->struct_mutex); | 61 | mutex_unlock(&dev->struct_mutex); |
62 | |||
63 | /* expedite the RCU grace period to free some request slabs */ | ||
64 | synchronize_rcu_expedited(); | ||
65 | } | 62 | } |
66 | 63 | ||
67 | static bool any_vma_pinned(struct drm_i915_gem_object *obj) | 64 | static bool any_vma_pinned(struct drm_i915_gem_object *obj) |
@@ -274,8 +271,6 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) | |||
274 | I915_SHRINK_ACTIVE); | 271 | I915_SHRINK_ACTIVE); |
275 | intel_runtime_pm_put(dev_priv); | 272 | intel_runtime_pm_put(dev_priv); |
276 | 273 | ||
277 | synchronize_rcu(); /* wait for our earlier RCU delayed slab frees */ | ||
278 | |||
279 | return freed; | 274 | return freed; |
280 | } | 275 | } |
281 | 276 | ||
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index fd97fe00cd0d..190f6aa5d15e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -2953,7 +2953,6 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) | |||
2953 | u32 pipestat_mask; | 2953 | u32 pipestat_mask; |
2954 | u32 enable_mask; | 2954 | u32 enable_mask; |
2955 | enum pipe pipe; | 2955 | enum pipe pipe; |
2956 | u32 val; | ||
2957 | 2956 | ||
2958 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | | 2957 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | |
2959 | PIPE_CRC_DONE_INTERRUPT_STATUS; | 2958 | PIPE_CRC_DONE_INTERRUPT_STATUS; |
@@ -2964,18 +2963,16 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) | |||
2964 | 2963 | ||
2965 | enable_mask = I915_DISPLAY_PORT_INTERRUPT | | 2964 | enable_mask = I915_DISPLAY_PORT_INTERRUPT | |
2966 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | 2965 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
2967 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | 2966 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
2967 | I915_LPE_PIPE_A_INTERRUPT | | ||
2968 | I915_LPE_PIPE_B_INTERRUPT; | ||
2969 | |||
2968 | if (IS_CHERRYVIEW(dev_priv)) | 2970 | if (IS_CHERRYVIEW(dev_priv)) |
2969 | enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; | 2971 | enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT | |
2972 | I915_LPE_PIPE_C_INTERRUPT; | ||
2970 | 2973 | ||
2971 | WARN_ON(dev_priv->irq_mask != ~0); | 2974 | WARN_ON(dev_priv->irq_mask != ~0); |
2972 | 2975 | ||
2973 | val = (I915_LPE_PIPE_A_INTERRUPT | | ||
2974 | I915_LPE_PIPE_B_INTERRUPT | | ||
2975 | I915_LPE_PIPE_C_INTERRUPT); | ||
2976 | |||
2977 | enable_mask |= val; | ||
2978 | |||
2979 | dev_priv->irq_mask = ~enable_mask; | 2976 | dev_priv->irq_mask = ~enable_mask; |
2980 | 2977 | ||
2981 | GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); | 2978 | GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask); |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5a7c63e64381..65b837e96fe6 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -8280,7 +8280,7 @@ enum { | |||
8280 | 8280 | ||
8281 | /* MIPI DSI registers */ | 8281 | /* MIPI DSI registers */ |
8282 | 8282 | ||
8283 | #define _MIPI_PORT(port, a, c) ((port) ? c : a) /* ports A and C only */ | 8283 | #define _MIPI_PORT(port, a, c) (((port) == PORT_A) ? a : c) /* ports A and C only */ |
8284 | #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) | 8284 | #define _MMIO_MIPI(port, a, c) _MMIO(_MIPI_PORT(port, a, c)) |
8285 | 8285 | ||
8286 | #define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) | 8286 | #define MIPIO_TXESC_CLK_DIV1 _MMIO(0x160004) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3617927af269..3cabe52a4e3b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -6101,7 +6101,7 @@ retry: | |||
6101 | pipe_config->fdi_lanes = lane; | 6101 | pipe_config->fdi_lanes = lane; |
6102 | 6102 | ||
6103 | intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, | 6103 | intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, |
6104 | link_bw, &pipe_config->fdi_m_n); | 6104 | link_bw, &pipe_config->fdi_m_n, false); |
6105 | 6105 | ||
6106 | ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); | 6106 | ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); |
6107 | if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { | 6107 | if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) { |
@@ -6277,7 +6277,8 @@ intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den) | |||
6277 | } | 6277 | } |
6278 | 6278 | ||
6279 | static void compute_m_n(unsigned int m, unsigned int n, | 6279 | static void compute_m_n(unsigned int m, unsigned int n, |
6280 | uint32_t *ret_m, uint32_t *ret_n) | 6280 | uint32_t *ret_m, uint32_t *ret_n, |
6281 | bool reduce_m_n) | ||
6281 | { | 6282 | { |
6282 | /* | 6283 | /* |
6283 | * Reduce M/N as much as possible without loss in precision. Several DP | 6284 | * Reduce M/N as much as possible without loss in precision. Several DP |
@@ -6285,9 +6286,11 @@ static void compute_m_n(unsigned int m, unsigned int n, | |||
6285 | * values. The passed in values are more likely to have the least | 6286 | * values. The passed in values are more likely to have the least |
6286 | * significant bits zero than M after rounding below, so do this first. | 6287 | * significant bits zero than M after rounding below, so do this first. |
6287 | */ | 6288 | */ |
6288 | while ((m & 1) == 0 && (n & 1) == 0) { | 6289 | if (reduce_m_n) { |
6289 | m >>= 1; | 6290 | while ((m & 1) == 0 && (n & 1) == 0) { |
6290 | n >>= 1; | 6291 | m >>= 1; |
6292 | n >>= 1; | ||
6293 | } | ||
6291 | } | 6294 | } |
6292 | 6295 | ||
6293 | *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); | 6296 | *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX); |
@@ -6298,16 +6301,19 @@ static void compute_m_n(unsigned int m, unsigned int n, | |||
6298 | void | 6301 | void |
6299 | intel_link_compute_m_n(int bits_per_pixel, int nlanes, | 6302 | intel_link_compute_m_n(int bits_per_pixel, int nlanes, |
6300 | int pixel_clock, int link_clock, | 6303 | int pixel_clock, int link_clock, |
6301 | struct intel_link_m_n *m_n) | 6304 | struct intel_link_m_n *m_n, |
6305 | bool reduce_m_n) | ||
6302 | { | 6306 | { |
6303 | m_n->tu = 64; | 6307 | m_n->tu = 64; |
6304 | 6308 | ||
6305 | compute_m_n(bits_per_pixel * pixel_clock, | 6309 | compute_m_n(bits_per_pixel * pixel_clock, |
6306 | link_clock * nlanes * 8, | 6310 | link_clock * nlanes * 8, |
6307 | &m_n->gmch_m, &m_n->gmch_n); | 6311 | &m_n->gmch_m, &m_n->gmch_n, |
6312 | reduce_m_n); | ||
6308 | 6313 | ||
6309 | compute_m_n(pixel_clock, link_clock, | 6314 | compute_m_n(pixel_clock, link_clock, |
6310 | &m_n->link_m, &m_n->link_n); | 6315 | &m_n->link_m, &m_n->link_n, |
6316 | reduce_m_n); | ||
6311 | } | 6317 | } |
6312 | 6318 | ||
6313 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) | 6319 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
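With the intel_display.c change above, stripping common powers of two from the M/N pair becomes optional: callers pass reduce_m_n, which the DP code derives from the new DP_DPCD_QUIRK_LIMITED_M_N quirk, so only devices flagged in the quirk table get the reduced values. A stand-alone, compilable sketch of the conditional reduction step (user-space, illustrative names):

#include <stdio.h>

/* Strip common factors of two only when the caller asks for it; in the
 * driver this is keyed off the LIMITED_M_N quirk.
 */
static void maybe_reduce_m_n(unsigned int *m, unsigned int *n, int reduce_m_n)
{
        if (!reduce_m_n)
                return;

        while ((*m & 1) == 0 && (*n & 1) == 0) {
                *m >>= 1;
                *n >>= 1;
        }
}

int main(void)
{
        unsigned int m = 1200, n = 1600;

        maybe_reduce_m_n(&m, &n, 1);
        printf("reduced:   m=%u n=%u\n", m, n);   /* 75 / 100 */

        m = 1200; n = 1600;
        maybe_reduce_m_n(&m, &n, 0);
        printf("unreduced: m=%u n=%u\n", m, n);   /* 1200 / 1600 */
        return 0;
}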
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index ee77b519835c..fc691b8b317c 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -1507,37 +1507,6 @@ static void intel_dp_print_rates(struct intel_dp *intel_dp) | |||
1507 | DRM_DEBUG_KMS("common rates: %s\n", str); | 1507 | DRM_DEBUG_KMS("common rates: %s\n", str); |
1508 | } | 1508 | } |
1509 | 1509 | ||
1510 | bool | ||
1511 | __intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc) | ||
1512 | { | ||
1513 | u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI : | ||
1514 | DP_SINK_OUI; | ||
1515 | |||
1516 | return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) == | ||
1517 | sizeof(*desc); | ||
1518 | } | ||
1519 | |||
1520 | bool intel_dp_read_desc(struct intel_dp *intel_dp) | ||
1521 | { | ||
1522 | struct intel_dp_desc *desc = &intel_dp->desc; | ||
1523 | bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & | ||
1524 | DP_OUI_SUPPORT; | ||
1525 | int dev_id_len; | ||
1526 | |||
1527 | if (!__intel_dp_read_desc(intel_dp, desc)) | ||
1528 | return false; | ||
1529 | |||
1530 | dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id)); | ||
1531 | DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n", | ||
1532 | drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink", | ||
1533 | (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)", | ||
1534 | dev_id_len, desc->device_id, | ||
1535 | desc->hw_rev >> 4, desc->hw_rev & 0xf, | ||
1536 | desc->sw_major_rev, desc->sw_minor_rev); | ||
1537 | |||
1538 | return true; | ||
1539 | } | ||
1540 | |||
1541 | static int rate_to_index(int find, const int *rates) | 1510 | static int rate_to_index(int find, const int *rates) |
1542 | { | 1511 | { |
1543 | int i = 0; | 1512 | int i = 0; |
@@ -1624,6 +1593,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, | |||
1624 | int common_rates[DP_MAX_SUPPORTED_RATES] = {}; | 1593 | int common_rates[DP_MAX_SUPPORTED_RATES] = {}; |
1625 | int common_len; | 1594 | int common_len; |
1626 | uint8_t link_bw, rate_select; | 1595 | uint8_t link_bw, rate_select; |
1596 | bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, | ||
1597 | DP_DPCD_QUIRK_LIMITED_M_N); | ||
1627 | 1598 | ||
1628 | common_len = intel_dp_common_rates(intel_dp, common_rates); | 1599 | common_len = intel_dp_common_rates(intel_dp, common_rates); |
1629 | 1600 | ||
@@ -1753,7 +1724,8 @@ found: | |||
1753 | intel_link_compute_m_n(bpp, lane_count, | 1724 | intel_link_compute_m_n(bpp, lane_count, |
1754 | adjusted_mode->crtc_clock, | 1725 | adjusted_mode->crtc_clock, |
1755 | pipe_config->port_clock, | 1726 | pipe_config->port_clock, |
1756 | &pipe_config->dp_m_n); | 1727 | &pipe_config->dp_m_n, |
1728 | reduce_m_n); | ||
1757 | 1729 | ||
1758 | if (intel_connector->panel.downclock_mode != NULL && | 1730 | if (intel_connector->panel.downclock_mode != NULL && |
1759 | dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { | 1731 | dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { |
@@ -1761,7 +1733,8 @@ found: | |||
1761 | intel_link_compute_m_n(bpp, lane_count, | 1733 | intel_link_compute_m_n(bpp, lane_count, |
1762 | intel_connector->panel.downclock_mode->clock, | 1734 | intel_connector->panel.downclock_mode->clock, |
1763 | pipe_config->port_clock, | 1735 | pipe_config->port_clock, |
1764 | &pipe_config->dp_m2_n2); | 1736 | &pipe_config->dp_m2_n2, |
1737 | reduce_m_n); | ||
1765 | } | 1738 | } |
1766 | 1739 | ||
1767 | /* | 1740 | /* |
@@ -3622,7 +3595,8 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) | |||
3622 | if (!intel_dp_read_dpcd(intel_dp)) | 3595 | if (!intel_dp_read_dpcd(intel_dp)) |
3623 | return false; | 3596 | return false; |
3624 | 3597 | ||
3625 | intel_dp_read_desc(intel_dp); | 3598 | drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, |
3599 | drm_dp_is_branch(intel_dp->dpcd)); | ||
3626 | 3600 | ||
3627 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) | 3601 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) |
3628 | dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & | 3602 | dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] & |
@@ -4624,7 +4598,8 @@ intel_dp_long_pulse(struct intel_connector *intel_connector) | |||
4624 | 4598 | ||
4625 | intel_dp_print_rates(intel_dp); | 4599 | intel_dp_print_rates(intel_dp); |
4626 | 4600 | ||
4627 | intel_dp_read_desc(intel_dp); | 4601 | drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc, |
4602 | drm_dp_is_branch(intel_dp->dpcd)); | ||
4628 | 4603 | ||
4629 | intel_dp_configure_mst(intel_dp); | 4604 | intel_dp_configure_mst(intel_dp); |
4630 | 4605 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index c1f62eb07c07..989e25577ac0 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c | |||
@@ -44,6 +44,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
44 | int lane_count, slots; | 44 | int lane_count, slots; |
45 | const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; | 45 | const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; |
46 | int mst_pbn; | 46 | int mst_pbn; |
47 | bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, | ||
48 | DP_DPCD_QUIRK_LIMITED_M_N); | ||
47 | 49 | ||
48 | pipe_config->has_pch_encoder = false; | 50 | pipe_config->has_pch_encoder = false; |
49 | bpp = 24; | 51 | bpp = 24; |
@@ -75,7 +77,8 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, | |||
75 | intel_link_compute_m_n(bpp, lane_count, | 77 | intel_link_compute_m_n(bpp, lane_count, |
76 | adjusted_mode->crtc_clock, | 78 | adjusted_mode->crtc_clock, |
77 | pipe_config->port_clock, | 79 | pipe_config->port_clock, |
78 | &pipe_config->dp_m_n); | 80 | &pipe_config->dp_m_n, |
81 | reduce_m_n); | ||
79 | 82 | ||
80 | pipe_config->dp_m_n.tu = slots; | 83 | pipe_config->dp_m_n.tu = slots; |
81 | 84 | ||
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index aaee3949a422..f630c7af5020 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -906,14 +906,6 @@ enum link_m_n_set { | |||
906 | M2_N2 | 906 | M2_N2 |
907 | }; | 907 | }; |
908 | 908 | ||
909 | struct intel_dp_desc { | ||
910 | u8 oui[3]; | ||
911 | u8 device_id[6]; | ||
912 | u8 hw_rev; | ||
913 | u8 sw_major_rev; | ||
914 | u8 sw_minor_rev; | ||
915 | } __packed; | ||
916 | |||
917 | struct intel_dp_compliance_data { | 909 | struct intel_dp_compliance_data { |
918 | unsigned long edid; | 910 | unsigned long edid; |
919 | uint8_t video_pattern; | 911 | uint8_t video_pattern; |
@@ -957,7 +949,7 @@ struct intel_dp { | |||
957 | /* Max link BW for the sink as per DPCD registers */ | 949 | /* Max link BW for the sink as per DPCD registers */ |
958 | int max_sink_link_bw; | 950 | int max_sink_link_bw; |
959 | /* sink or branch descriptor */ | 951 | /* sink or branch descriptor */ |
960 | struct intel_dp_desc desc; | 952 | struct drm_dp_desc desc; |
961 | struct drm_dp_aux aux; | 953 | struct drm_dp_aux aux; |
962 | enum intel_display_power_domain aux_power_domain; | 954 | enum intel_display_power_domain aux_power_domain; |
963 | uint8_t train_set[4]; | 955 | uint8_t train_set[4]; |
@@ -1532,9 +1524,6 @@ static inline unsigned int intel_dp_unused_lane_mask(int lane_count) | |||
1532 | } | 1524 | } |
1533 | 1525 | ||
1534 | bool intel_dp_read_dpcd(struct intel_dp *intel_dp); | 1526 | bool intel_dp_read_dpcd(struct intel_dp *intel_dp); |
1535 | bool __intel_dp_read_desc(struct intel_dp *intel_dp, | ||
1536 | struct intel_dp_desc *desc); | ||
1537 | bool intel_dp_read_desc(struct intel_dp *intel_dp); | ||
1538 | int intel_dp_link_required(int pixel_clock, int bpp); | 1527 | int intel_dp_link_required(int pixel_clock, int bpp); |
1539 | int intel_dp_max_data_rate(int max_link_clock, int max_lanes); | 1528 | int intel_dp_max_data_rate(int max_link_clock, int max_lanes); |
1540 | bool intel_digital_port_connected(struct drm_i915_private *dev_priv, | 1529 | bool intel_digital_port_connected(struct drm_i915_private *dev_priv, |
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c index 668f00480d97..292fedf30b00 100644 --- a/drivers/gpu/drm/i915/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/intel_lpe_audio.c | |||
@@ -149,44 +149,10 @@ static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv) | |||
149 | 149 | ||
150 | static void lpe_audio_irq_unmask(struct irq_data *d) | 150 | static void lpe_audio_irq_unmask(struct irq_data *d) |
151 | { | 151 | { |
152 | struct drm_i915_private *dev_priv = d->chip_data; | ||
153 | unsigned long irqflags; | ||
154 | u32 val = (I915_LPE_PIPE_A_INTERRUPT | | ||
155 | I915_LPE_PIPE_B_INTERRUPT); | ||
156 | |||
157 | if (IS_CHERRYVIEW(dev_priv)) | ||
158 | val |= I915_LPE_PIPE_C_INTERRUPT; | ||
159 | |||
160 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
161 | |||
162 | dev_priv->irq_mask &= ~val; | ||
163 | I915_WRITE(VLV_IIR, val); | ||
164 | I915_WRITE(VLV_IIR, val); | ||
165 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | ||
166 | POSTING_READ(VLV_IMR); | ||
167 | |||
168 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
169 | } | 152 | } |
170 | 153 | ||
171 | static void lpe_audio_irq_mask(struct irq_data *d) | 154 | static void lpe_audio_irq_mask(struct irq_data *d) |
172 | { | 155 | { |
173 | struct drm_i915_private *dev_priv = d->chip_data; | ||
174 | unsigned long irqflags; | ||
175 | u32 val = (I915_LPE_PIPE_A_INTERRUPT | | ||
176 | I915_LPE_PIPE_B_INTERRUPT); | ||
177 | |||
178 | if (IS_CHERRYVIEW(dev_priv)) | ||
179 | val |= I915_LPE_PIPE_C_INTERRUPT; | ||
180 | |||
181 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | ||
182 | |||
183 | dev_priv->irq_mask |= val; | ||
184 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | ||
185 | I915_WRITE(VLV_IIR, val); | ||
186 | I915_WRITE(VLV_IIR, val); | ||
187 | POSTING_READ(VLV_IIR); | ||
188 | |||
189 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | ||
190 | } | 156 | } |
191 | 157 | ||
192 | static struct irq_chip lpe_audio_irqchip = { | 158 | static struct irq_chip lpe_audio_irqchip = { |
@@ -330,8 +296,6 @@ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) | |||
330 | 296 | ||
331 | desc = irq_to_desc(dev_priv->lpe_audio.irq); | 297 | desc = irq_to_desc(dev_priv->lpe_audio.irq); |
332 | 298 | ||
333 | lpe_audio_irq_mask(&desc->irq_data); | ||
334 | |||
335 | lpe_audio_platdev_destroy(dev_priv); | 299 | lpe_audio_platdev_destroy(dev_priv); |
336 | 300 | ||
337 | irq_free_desc(dev_priv->lpe_audio.irq); | 301 | irq_free_desc(dev_priv->lpe_audio.irq); |
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index c8f7c631fc1f..dac4e003c1f3 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c | |||
@@ -1989,7 +1989,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, | |||
1989 | 1989 | ||
1990 | ce->ring = ring; | 1990 | ce->ring = ring; |
1991 | ce->state = vma; | 1991 | ce->state = vma; |
1992 | ce->initialised = engine->init_context == NULL; | 1992 | ce->initialised |= engine->init_context == NULL; |
1993 | 1993 | ||
1994 | return 0; | 1994 | return 0; |
1995 | 1995 | ||
diff --git a/drivers/gpu/drm/i915/intel_lspcon.c b/drivers/gpu/drm/i915/intel_lspcon.c index 71cbe9c08932..5abef482eacf 100644 --- a/drivers/gpu/drm/i915/intel_lspcon.c +++ b/drivers/gpu/drm/i915/intel_lspcon.c | |||
@@ -240,7 +240,7 @@ bool lspcon_init(struct intel_digital_port *intel_dig_port) | |||
240 | return false; | 240 | return false; |
241 | } | 241 | } |
242 | 242 | ||
243 | intel_dp_read_desc(dp); | 243 | drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd)); |
244 | 244 | ||
245 | DRM_DEBUG_KMS("Success: LSPCON init\n"); | 245 | DRM_DEBUG_KMS("Success: LSPCON init\n"); |
246 | return true; | 246 | return true; |
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c index 1afb8b06e3e1..12b85b3278cd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c | |||
@@ -320,7 +320,7 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj) | |||
320 | static int igt_ctx_exec(void *arg) | 320 | static int igt_ctx_exec(void *arg) |
321 | { | 321 | { |
322 | struct drm_i915_private *i915 = arg; | 322 | struct drm_i915_private *i915 = arg; |
323 | struct drm_i915_gem_object *obj; | 323 | struct drm_i915_gem_object *obj = NULL; |
324 | struct drm_file *file; | 324 | struct drm_file *file; |
325 | IGT_TIMEOUT(end_time); | 325 | IGT_TIMEOUT(end_time); |
326 | LIST_HEAD(objects); | 326 | LIST_HEAD(objects); |
@@ -359,7 +359,7 @@ static int igt_ctx_exec(void *arg) | |||
359 | } | 359 | } |
360 | 360 | ||
361 | for_each_engine(engine, i915, id) { | 361 | for_each_engine(engine, i915, id) { |
362 | if (dw == 0) { | 362 | if (!obj) { |
363 | obj = create_test_object(ctx, file, &objects); | 363 | obj = create_test_object(ctx, file, &objects); |
364 | if (IS_ERR(obj)) { | 364 | if (IS_ERR(obj)) { |
365 | err = PTR_ERR(obj); | 365 | err = PTR_ERR(obj); |
@@ -376,8 +376,10 @@ static int igt_ctx_exec(void *arg) | |||
376 | goto out_unlock; | 376 | goto out_unlock; |
377 | } | 377 | } |
378 | 378 | ||
379 | if (++dw == max_dwords(obj)) | 379 | if (++dw == max_dwords(obj)) { |
380 | obj = NULL; | ||
380 | dw = 0; | 381 | dw = 0; |
382 | } | ||
381 | ndwords++; | 383 | ndwords++; |
382 | } | 384 | } |
383 | ncontexts++; | 385 | ncontexts++; |
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 5b8e23d051f2..0a31cd6d01ce 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig | |||
@@ -13,6 +13,7 @@ config DRM_MSM | |||
13 | select QCOM_SCM | 13 | select QCOM_SCM |
14 | select SND_SOC_HDMI_CODEC if SND_SOC | 14 | select SND_SOC_HDMI_CODEC if SND_SOC |
15 | select SYNC_FILE | 15 | select SYNC_FILE |
16 | select PM_OPP | ||
16 | default y | 17 | default y |
17 | help | 18 | help |
18 | DRM/KMS driver for MSM/snapdragon. | 19 | DRM/KMS driver for MSM/snapdragon. |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c index f8f48d014978..9c34d7824988 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c | |||
@@ -116,7 +116,7 @@ static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq, | |||
116 | return 0; | 116 | return 0; |
117 | } | 117 | } |
118 | 118 | ||
119 | static struct irq_domain_ops mdss_hw_irqdomain_ops = { | 119 | static const struct irq_domain_ops mdss_hw_irqdomain_ops = { |
120 | .map = mdss_hw_irqdomain_map, | 120 | .map = mdss_hw_irqdomain_map, |
121 | .xlate = irq_domain_xlate_onecell, | 121 | .xlate = irq_domain_xlate_onecell, |
122 | }; | 122 | }; |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c index a38c5fe6cc19..7d3741215387 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | |||
@@ -225,9 +225,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane) | |||
225 | 225 | ||
226 | mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), | 226 | mdp5_state = kmemdup(to_mdp5_plane_state(plane->state), |
227 | sizeof(*mdp5_state), GFP_KERNEL); | 227 | sizeof(*mdp5_state), GFP_KERNEL); |
228 | if (!mdp5_state) | ||
229 | return NULL; | ||
228 | 230 | ||
229 | if (mdp5_state && mdp5_state->base.fb) | 231 | __drm_atomic_helper_plane_duplicate_state(plane, &mdp5_state->base); |
230 | drm_framebuffer_reference(mdp5_state->base.fb); | ||
231 | 232 | ||
232 | return &mdp5_state->base; | 233 | return &mdp5_state->base; |
233 | } | 234 | } |
@@ -444,6 +445,10 @@ static int mdp5_plane_atomic_check_with_state(struct drm_crtc_state *crtc_state, | |||
444 | mdp5_pipe_release(state->state, old_hwpipe); | 445 | mdp5_pipe_release(state->state, old_hwpipe); |
445 | mdp5_pipe_release(state->state, old_right_hwpipe); | 446 | mdp5_pipe_release(state->state, old_right_hwpipe); |
446 | } | 447 | } |
448 | } else { | ||
449 | mdp5_pipe_release(state->state, mdp5_state->hwpipe); | ||
450 | mdp5_pipe_release(state->state, mdp5_state->r_hwpipe); | ||
451 | mdp5_state->hwpipe = mdp5_state->r_hwpipe = NULL; | ||
447 | } | 452 | } |
448 | 453 | ||
449 | return 0; | 454 | return 0; |
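The first mdp5_plane hunk above makes the duplicated state allocation failure-safe and lets __drm_atomic_helper_plane_duplicate_state() take the base-state references (including the framebuffer) instead of open-coding the refcount; the second hunk adds an else path that releases the currently assigned hardware pipes and clears the pointers. A hedged sketch of the duplicate-state pattern (the my_plane_state type and its cast helper are made up for illustration):

#include <drm/drm_atomic_helper.h>
#include <linux/slab.h>

struct my_plane_state {
        struct drm_plane_state base;
        int custom;                     /* driver-private extras */
};

static inline struct my_plane_state *to_my_plane_state(struct drm_plane_state *s)
{
        return container_of(s, struct my_plane_state, base);
}

/* Sketch: duplicate driver-private plane state, bail on allocation failure,
 * then let the helper copy and reference the generic base state.
 */
static struct drm_plane_state *my_plane_duplicate_state(struct drm_plane *plane)
{
        struct my_plane_state *state;

        state = kmemdup(to_my_plane_state(plane->state), sizeof(*state),
                        GFP_KERNEL);
        if (!state)
                return NULL;

        __drm_atomic_helper_plane_duplicate_state(plane, &state->base);

        return &state->base;
}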
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 87b5695d4034..9d498eb81906 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c | |||
@@ -830,6 +830,7 @@ static struct drm_driver msm_driver = { | |||
830 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, | 830 | .prime_fd_to_handle = drm_gem_prime_fd_to_handle, |
831 | .gem_prime_export = drm_gem_prime_export, | 831 | .gem_prime_export = drm_gem_prime_export, |
832 | .gem_prime_import = drm_gem_prime_import, | 832 | .gem_prime_import = drm_gem_prime_import, |
833 | .gem_prime_res_obj = msm_gem_prime_res_obj, | ||
833 | .gem_prime_pin = msm_gem_prime_pin, | 834 | .gem_prime_pin = msm_gem_prime_pin, |
834 | .gem_prime_unpin = msm_gem_prime_unpin, | 835 | .gem_prime_unpin = msm_gem_prime_unpin, |
835 | .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, | 836 | .gem_prime_get_sg_table = msm_gem_prime_get_sg_table, |
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 28b6f9ba5066..1b26ca626528 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h | |||
@@ -224,6 +224,7 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); | |||
224 | void *msm_gem_prime_vmap(struct drm_gem_object *obj); | 224 | void *msm_gem_prime_vmap(struct drm_gem_object *obj); |
225 | void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); | 225 | void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); |
226 | int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); | 226 | int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); |
227 | struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj); | ||
227 | struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, | 228 | struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, |
228 | struct dma_buf_attachment *attach, struct sg_table *sg); | 229 | struct dma_buf_attachment *attach, struct sg_table *sg); |
229 | int msm_gem_prime_pin(struct drm_gem_object *obj); | 230 | int msm_gem_prime_pin(struct drm_gem_object *obj); |
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index 3f299c537b77..a2f89bac9c16 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c | |||
@@ -99,8 +99,8 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence) | |||
99 | } | 99 | } |
100 | 100 | ||
101 | struct msm_fence { | 101 | struct msm_fence { |
102 | struct msm_fence_context *fctx; | ||
103 | struct dma_fence base; | 102 | struct dma_fence base; |
103 | struct msm_fence_context *fctx; | ||
104 | }; | 104 | }; |
105 | 105 | ||
106 | static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) | 106 | static inline struct msm_fence *to_msm_fence(struct dma_fence *fence) |
@@ -130,19 +130,13 @@ static bool msm_fence_signaled(struct dma_fence *fence) | |||
130 | return fence_completed(f->fctx, f->base.seqno); | 130 | return fence_completed(f->fctx, f->base.seqno); |
131 | } | 131 | } |
132 | 132 | ||
133 | static void msm_fence_release(struct dma_fence *fence) | ||
134 | { | ||
135 | struct msm_fence *f = to_msm_fence(fence); | ||
136 | kfree_rcu(f, base.rcu); | ||
137 | } | ||
138 | |||
139 | static const struct dma_fence_ops msm_fence_ops = { | 133 | static const struct dma_fence_ops msm_fence_ops = { |
140 | .get_driver_name = msm_fence_get_driver_name, | 134 | .get_driver_name = msm_fence_get_driver_name, |
141 | .get_timeline_name = msm_fence_get_timeline_name, | 135 | .get_timeline_name = msm_fence_get_timeline_name, |
142 | .enable_signaling = msm_fence_enable_signaling, | 136 | .enable_signaling = msm_fence_enable_signaling, |
143 | .signaled = msm_fence_signaled, | 137 | .signaled = msm_fence_signaled, |
144 | .wait = dma_fence_default_wait, | 138 | .wait = dma_fence_default_wait, |
145 | .release = msm_fence_release, | 139 | .release = dma_fence_free, |
146 | }; | 140 | }; |
147 | 141 | ||
148 | struct dma_fence * | 142 | struct dma_fence * |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 68e509b3b9e4..50289a23baf8 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -758,6 +758,8 @@ static int msm_gem_new_impl(struct drm_device *dev, | |||
758 | struct msm_gem_object *msm_obj; | 758 | struct msm_gem_object *msm_obj; |
759 | bool use_vram = false; | 759 | bool use_vram = false; |
760 | 760 | ||
761 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
762 | |||
761 | switch (flags & MSM_BO_CACHE_MASK) { | 763 | switch (flags & MSM_BO_CACHE_MASK) { |
762 | case MSM_BO_UNCACHED: | 764 | case MSM_BO_UNCACHED: |
763 | case MSM_BO_CACHED: | 765 | case MSM_BO_CACHED: |
@@ -853,7 +855,11 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, | |||
853 | 855 | ||
854 | size = PAGE_ALIGN(dmabuf->size); | 856 | size = PAGE_ALIGN(dmabuf->size); |
855 | 857 | ||
858 | /* Take mutex so we can modify the inactive list in msm_gem_new_impl */ | ||
859 | mutex_lock(&dev->struct_mutex); | ||
856 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); | 860 | ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj); |
861 | mutex_unlock(&dev->struct_mutex); | ||
862 | |||
857 | if (ret) | 863 | if (ret) |
858 | goto fail; | 864 | goto fail; |
859 | 865 | ||
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 60bb290700ce..13403c6da6c7 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c | |||
@@ -70,3 +70,10 @@ void msm_gem_prime_unpin(struct drm_gem_object *obj) | |||
70 | if (!obj->import_attach) | 70 | if (!obj->import_attach) |
71 | msm_gem_put_pages(obj); | 71 | msm_gem_put_pages(obj); |
72 | } | 72 | } |
73 | |||
74 | struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj) | ||
75 | { | ||
76 | struct msm_gem_object *msm_obj = to_msm_bo(obj); | ||
77 | |||
78 | return msm_obj->resv; | ||
79 | } | ||
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 1c545ebe6a5a..7832e6421d25 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
@@ -410,12 +410,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
410 | if (!in_fence) | 410 | if (!in_fence) |
411 | return -EINVAL; | 411 | return -EINVAL; |
412 | 412 | ||
413 | /* TODO if we get an array-fence due to userspace merging multiple | 413 | /* |
414 | * fences, we need a way to determine if all the backing fences | 414 | * Wait if the fence is from a foreign context, or if the fence |
415 | * are from our own context.. | 415 | * array contains any fence from a foreign context. |
416 | */ | 416 | */ |
417 | 417 | if (!dma_fence_match_context(in_fence, gpu->fctx->context)) { | |
418 | if (in_fence->context != gpu->fctx->context) { | ||
419 | ret = dma_fence_wait(in_fence, true); | 418 | ret = dma_fence_wait(in_fence, true); |
420 | if (ret) | 419 | if (ret) |
421 | return ret; | 420 | return ret; |
@@ -496,8 +495,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
496 | goto out; | 495 | goto out; |
497 | } | 496 | } |
498 | 497 | ||
499 | if ((submit_cmd.size + submit_cmd.submit_offset) >= | 498 | if (!submit_cmd.size || |
500 | msm_obj->base.size) { | 499 | ((submit_cmd.size + submit_cmd.submit_offset) > |
500 | msm_obj->base.size)) { | ||
501 | DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); | 501 | DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size); |
502 | ret = -EINVAL; | 502 | ret = -EINVAL; |
503 | goto out; | 503 | goto out; |
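Aside on the tightened cmdstream check in the hunk above: the old test rejected any command whose end equalled the object size and let a zero-length command through; the new one rejects empty commands and allows the end to land exactly at the object size. A standalone sketch of the corrected bounds logic (hypothetical helper, written overflow-safe for illustration, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the submit_cmd validation: a command must be
 * non-empty and must end at or before the end of the backing object. */
static bool cmd_in_bounds(uint32_t size, uint32_t offset, uint32_t obj_size)
{
	if (!size)
		return false;		/* empty cmdstream is invalid */
	if (size > obj_size || offset > obj_size - size)
		return false;		/* overflow-safe end check */
	return true;
}

int main(void)
{
	/* end == obj_size is now accepted; zero size is rejected */
	printf("%d %d %d\n",
	       cmd_in_bounds(4096, 0, 4096),	/* 1 */
	       cmd_in_bounds(0, 0, 4096),	/* 0 */
	       cmd_in_bounds(4096, 1, 4096));	/* 0 */
	return 0;
}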
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 97b9c38c6b3f..0fdc88d79ca8 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
@@ -549,9 +549,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) | |||
549 | gpu->grp_clks[i] = get_clock(dev, name); | 549 | gpu->grp_clks[i] = get_clock(dev, name); |
550 | 550 | ||
551 | /* Remember the key clocks that we need to control later */ | 551 | /* Remember the key clocks that we need to control later */ |
552 | if (!strcmp(name, "core")) | 552 | if (!strcmp(name, "core") || !strcmp(name, "core_clk")) |
553 | gpu->core_clk = gpu->grp_clks[i]; | 553 | gpu->core_clk = gpu->grp_clks[i]; |
554 | else if (!strcmp(name, "rbbmtimer")) | 554 | else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk")) |
555 | gpu->rbbmtimer_clk = gpu->grp_clks[i]; | 555 | gpu->rbbmtimer_clk = gpu->grp_clks[i]; |
556 | 556 | ||
557 | ++i; | 557 | ++i; |
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index fe40e5e499dd..687705c50794 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
@@ -275,10 +275,12 @@ config HID_EMS_FF | |||
275 | - Trio Linker Plus II | 275 | - Trio Linker Plus II |
276 | 276 | ||
277 | config HID_ELECOM | 277 | config HID_ELECOM |
278 | tristate "ELECOM BM084 bluetooth mouse" | 278 | tristate "ELECOM HID devices" |
279 | depends on HID | 279 | depends on HID |
280 | ---help--- | 280 | ---help--- |
281 | Support for the ELECOM BM084 (bluetooth mouse). | 281 | Support for ELECOM devices: |
282 | - BM084 Bluetooth Mouse | ||
283 | - DEFT Trackball (Wired and wireless) | ||
282 | 284 | ||
283 | config HID_ELO | 285 | config HID_ELO |
284 | tristate "ELO USB 4000/4500 touchscreen" | 286 | tristate "ELO USB 4000/4500 touchscreen" |
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index 16df6cc90235..a6268f2f7408 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c | |||
@@ -69,6 +69,7 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad"); | |||
69 | #define QUIRK_IS_MULTITOUCH BIT(3) | 69 | #define QUIRK_IS_MULTITOUCH BIT(3) |
70 | #define QUIRK_NO_CONSUMER_USAGES BIT(4) | 70 | #define QUIRK_NO_CONSUMER_USAGES BIT(4) |
71 | #define QUIRK_USE_KBD_BACKLIGHT BIT(5) | 71 | #define QUIRK_USE_KBD_BACKLIGHT BIT(5) |
72 | #define QUIRK_T100_KEYBOARD BIT(6) | ||
72 | 73 | ||
73 | #define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \ | 74 | #define I2C_KEYBOARD_QUIRKS (QUIRK_FIX_NOTEBOOK_REPORT | \ |
74 | QUIRK_NO_INIT_REPORTS | \ | 75 | QUIRK_NO_INIT_REPORTS | \ |
@@ -536,6 +537,8 @@ static void asus_remove(struct hid_device *hdev) | |||
536 | drvdata->kbd_backlight->removed = true; | 537 | drvdata->kbd_backlight->removed = true; |
537 | cancel_work_sync(&drvdata->kbd_backlight->work); | 538 | cancel_work_sync(&drvdata->kbd_backlight->work); |
538 | } | 539 | } |
540 | |||
541 | hid_hw_stop(hdev); | ||
539 | } | 542 | } |
540 | 543 | ||
541 | static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, | 544 | static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
@@ -548,6 +551,12 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
548 | hid_info(hdev, "Fixing up Asus notebook report descriptor\n"); | 551 | hid_info(hdev, "Fixing up Asus notebook report descriptor\n"); |
549 | rdesc[55] = 0xdd; | 552 | rdesc[55] = 0xdd; |
550 | } | 553 | } |
554 | if (drvdata->quirks & QUIRK_T100_KEYBOARD && | ||
555 | *rsize == 76 && rdesc[73] == 0x81 && rdesc[74] == 0x01) { | ||
556 | hid_info(hdev, "Fixing up Asus T100 keyb report descriptor\n"); | ||
557 | rdesc[74] &= ~HID_MAIN_ITEM_CONSTANT; | ||
558 | } | ||
559 | |||
551 | return rdesc; | 560 | return rdesc; |
552 | } | 561 | } |
553 | 562 | ||
@@ -560,6 +569,9 @@ static const struct hid_device_id asus_devices[] = { | |||
560 | USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, | 569 | USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, |
561 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, | 570 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, |
562 | USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT }, | 571 | USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2), QUIRK_USE_KBD_BACKLIGHT }, |
572 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, | ||
573 | USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD), | ||
574 | QUIRK_T100_KEYBOARD | QUIRK_NO_CONSUMER_USAGES }, | ||
563 | { } | 575 | { } |
564 | }; | 576 | }; |
565 | MODULE_DEVICE_TABLE(hid, asus_devices); | 577 | MODULE_DEVICE_TABLE(hid, asus_devices); |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 37084b645785..04cee65531d7 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -1855,6 +1855,7 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1855 | { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, | 1855 | { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, |
1856 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, | 1856 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, |
1857 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, | 1857 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, |
1858 | { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) }, | ||
1858 | { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, | 1859 | { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, |
1859 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, | 1860 | { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, |
1860 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, | 1861 | { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, |
@@ -1891,6 +1892,8 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
1891 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, | 1892 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, |
1892 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, | 1893 | { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) }, |
1893 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 1894 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
1895 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | ||
1896 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | ||
1894 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, | 1897 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, |
1895 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, | 1898 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, |
1896 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, | 1899 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, |
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c index 6e3848a8d8dd..e2c7465df69f 100644 --- a/drivers/hid/hid-elecom.c +++ b/drivers/hid/hid-elecom.c | |||
@@ -1,10 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * HID driver for Elecom BM084 (bluetooth mouse). | 2 | * HID driver for ELECOM devices. |
3 | * Removes a non-existing horizontal wheel from | ||
4 | * the HID descriptor. | ||
5 | * (This module is based on "hid-ortek".) | ||
6 | * | ||
7 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> | 3 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> |
4 | * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> | ||
5 | * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> | ||
8 | */ | 6 | */ |
9 | 7 | ||
10 | /* | 8 | /* |
@@ -23,15 +21,61 @@ | |||
23 | static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, | 21 | static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, |
24 | unsigned int *rsize) | 22 | unsigned int *rsize) |
25 | { | 23 | { |
26 | if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { | 24 | switch (hdev->product) { |
27 | hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n"); | 25 | case USB_DEVICE_ID_ELECOM_BM084: |
28 | rdesc[47] = 0x00; | 26 | /* The BM084 Bluetooth mouse includes a non-existing horizontal |
27 | * wheel in the HID descriptor. */ | ||
28 | if (*rsize >= 48 && rdesc[46] == 0x05 && rdesc[47] == 0x0c) { | ||
29 | hid_info(hdev, "Fixing up Elecom BM084 report descriptor\n"); | ||
30 | rdesc[47] = 0x00; | ||
31 | } | ||
32 | break; | ||
33 | case USB_DEVICE_ID_ELECOM_DEFT_WIRED: | ||
34 | case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: | ||
35 | /* The DEFT trackball has eight buttons, but its descriptor only | ||
36 | * reports five, disabling the three Fn buttons on the top of | ||
37 | * the mouse. | ||
38 | * | ||
39 | * Apply the following diff to the descriptor: | ||
40 | * | ||
41 | * Collection (Physical), Collection (Physical), | ||
42 | * Report ID (1), Report ID (1), | ||
43 | * Report Count (5), -> Report Count (8), | ||
44 | * Report Size (1), Report Size (1), | ||
45 | * Usage Page (Button), Usage Page (Button), | ||
46 | * Usage Minimum (01h), Usage Minimum (01h), | ||
47 | * Usage Maximum (05h), -> Usage Maximum (08h), | ||
48 | * Logical Minimum (0), Logical Minimum (0), | ||
49 | * Logical Maximum (1), Logical Maximum (1), | ||
50 | * Input (Variable), Input (Variable), | ||
51 | * Report Count (1), -> Report Count (0), | ||
52 | * Report Size (3), Report Size (3), | ||
53 | * Input (Constant), Input (Constant), | ||
54 | * Report Size (16), Report Size (16), | ||
55 | * Report Count (2), Report Count (2), | ||
56 | * Usage Page (Desktop), Usage Page (Desktop), | ||
57 | * Usage (X), Usage (X), | ||
58 | * Usage (Y), Usage (Y), | ||
59 | * Logical Minimum (-32768), Logical Minimum (-32768), | ||
60 | * Logical Maximum (32767), Logical Maximum (32767), | ||
61 | * Input (Variable, Relative), Input (Variable, Relative), | ||
62 | * End Collection, End Collection, | ||
63 | */ | ||
64 | if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { | ||
65 | hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); | ||
66 | rdesc[13] = 8; /* Button/Variable Report Count */ | ||
67 | rdesc[21] = 8; /* Button/Variable Usage Maximum */ | ||
68 | rdesc[29] = 0; /* Button/Constant Report Count */ | ||
69 | } | ||
70 | break; | ||
29 | } | 71 | } |
30 | return rdesc; | 72 | return rdesc; |
31 | } | 73 | } |
32 | 74 | ||
33 | static const struct hid_device_id elecom_devices[] = { | 75 | static const struct hid_device_id elecom_devices[] = { |
34 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084)}, | 76 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
77 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | ||
78 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | ||
35 | { } | 79 | { } |
36 | }; | 80 | }; |
37 | MODULE_DEVICE_TABLE(hid, elecom_devices); | 81 | MODULE_DEVICE_TABLE(hid, elecom_devices); |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 643390ba749d..8ca1e8ce0af2 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -173,6 +173,7 @@ | |||
173 | #define USB_VENDOR_ID_ASUSTEK 0x0b05 | 173 | #define USB_VENDOR_ID_ASUSTEK 0x0b05 |
174 | #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 | 174 | #define USB_DEVICE_ID_ASUSTEK_LCM 0x1726 |
175 | #define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b | 175 | #define USB_DEVICE_ID_ASUSTEK_LCM2 0x175b |
176 | #define USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD 0x17e0 | ||
176 | #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585 | 177 | #define USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD 0x8585 |
177 | #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101 | 178 | #define USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD 0x0101 |
178 | #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854 | 179 | #define USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1 0x1854 |
@@ -358,6 +359,8 @@ | |||
358 | 359 | ||
359 | #define USB_VENDOR_ID_ELECOM 0x056e | 360 | #define USB_VENDOR_ID_ELECOM 0x056e |
360 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 | 361 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 |
362 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe | ||
363 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff | ||
361 | 364 | ||
362 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 | 365 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 |
363 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 | 366 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 |
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 20b40ad26325..1d6c997b3001 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c | |||
@@ -349,6 +349,7 @@ static int magicmouse_raw_event(struct hid_device *hdev, | |||
349 | 349 | ||
350 | if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { | 350 | if (input->id.product == USB_DEVICE_ID_APPLE_MAGICMOUSE) { |
351 | magicmouse_emit_buttons(msc, clicks & 3); | 351 | magicmouse_emit_buttons(msc, clicks & 3); |
352 | input_mt_report_pointer_emulation(input, true); | ||
352 | input_report_rel(input, REL_X, x); | 353 | input_report_rel(input, REL_X, x); |
353 | input_report_rel(input, REL_Y, y); | 354 | input_report_rel(input, REL_Y, y); |
354 | } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ | 355 | } else { /* USB_DEVICE_ID_APPLE_MAGICTRACKPAD */ |
@@ -388,16 +389,16 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd | |||
388 | __clear_bit(BTN_RIGHT, input->keybit); | 389 | __clear_bit(BTN_RIGHT, input->keybit); |
389 | __clear_bit(BTN_MIDDLE, input->keybit); | 390 | __clear_bit(BTN_MIDDLE, input->keybit); |
390 | __set_bit(BTN_MOUSE, input->keybit); | 391 | __set_bit(BTN_MOUSE, input->keybit); |
391 | __set_bit(BTN_TOOL_FINGER, input->keybit); | ||
392 | __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); | ||
393 | __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); | ||
394 | __set_bit(BTN_TOOL_QUADTAP, input->keybit); | ||
395 | __set_bit(BTN_TOOL_QUINTTAP, input->keybit); | ||
396 | __set_bit(BTN_TOUCH, input->keybit); | ||
397 | __set_bit(INPUT_PROP_POINTER, input->propbit); | ||
398 | __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); | 392 | __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); |
399 | } | 393 | } |
400 | 394 | ||
395 | __set_bit(BTN_TOOL_FINGER, input->keybit); | ||
396 | __set_bit(BTN_TOOL_DOUBLETAP, input->keybit); | ||
397 | __set_bit(BTN_TOOL_TRIPLETAP, input->keybit); | ||
398 | __set_bit(BTN_TOOL_QUADTAP, input->keybit); | ||
399 | __set_bit(BTN_TOOL_QUINTTAP, input->keybit); | ||
400 | __set_bit(BTN_TOUCH, input->keybit); | ||
401 | __set_bit(INPUT_PROP_POINTER, input->propbit); | ||
401 | 402 | ||
402 | __set_bit(EV_ABS, input->evbit); | 403 | __set_bit(EV_ABS, input->evbit); |
403 | 404 | ||
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 8daa8ce64ebb..fb55fb4c39fc 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c | |||
@@ -897,6 +897,15 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client, | |||
897 | return 0; | 897 | return 0; |
898 | } | 898 | } |
899 | 899 | ||
900 | static void i2c_hid_acpi_fix_up_power(struct device *dev) | ||
901 | { | ||
902 | acpi_handle handle = ACPI_HANDLE(dev); | ||
903 | struct acpi_device *adev; | ||
904 | |||
905 | if (handle && acpi_bus_get_device(handle, &adev) == 0) | ||
906 | acpi_device_fix_up_power(adev); | ||
907 | } | ||
908 | |||
900 | static const struct acpi_device_id i2c_hid_acpi_match[] = { | 909 | static const struct acpi_device_id i2c_hid_acpi_match[] = { |
901 | {"ACPI0C50", 0 }, | 910 | {"ACPI0C50", 0 }, |
902 | {"PNP0C50", 0 }, | 911 | {"PNP0C50", 0 }, |
@@ -909,6 +918,8 @@ static inline int i2c_hid_acpi_pdata(struct i2c_client *client, | |||
909 | { | 918 | { |
910 | return -ENODEV; | 919 | return -ENODEV; |
911 | } | 920 | } |
921 | |||
922 | static inline void i2c_hid_acpi_fix_up_power(struct device *dev) {} | ||
912 | #endif | 923 | #endif |
913 | 924 | ||
914 | #ifdef CONFIG_OF | 925 | #ifdef CONFIG_OF |
@@ -1030,6 +1041,8 @@ static int i2c_hid_probe(struct i2c_client *client, | |||
1030 | if (ret < 0) | 1041 | if (ret < 0) |
1031 | goto err_regulator; | 1042 | goto err_regulator; |
1032 | 1043 | ||
1044 | i2c_hid_acpi_fix_up_power(&client->dev); | ||
1045 | |||
1033 | pm_runtime_get_noresume(&client->dev); | 1046 | pm_runtime_get_noresume(&client->dev); |
1034 | pm_runtime_set_active(&client->dev); | 1047 | pm_runtime_set_active(&client->dev); |
1035 | pm_runtime_enable(&client->dev); | 1048 | pm_runtime_enable(&client->dev); |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 4b225fb19a16..e274c9dc32f3 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
@@ -1571,37 +1571,38 @@ static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len) | |||
1571 | { | 1571 | { |
1572 | unsigned char *data = wacom->data; | 1572 | unsigned char *data = wacom->data; |
1573 | 1573 | ||
1574 | if (wacom->pen_input) | 1574 | if (wacom->pen_input) { |
1575 | dev_dbg(wacom->pen_input->dev.parent, | 1575 | dev_dbg(wacom->pen_input->dev.parent, |
1576 | "%s: received report #%d\n", __func__, data[0]); | 1576 | "%s: received report #%d\n", __func__, data[0]); |
1577 | else if (wacom->touch_input) | 1577 | |
1578 | if (len == WACOM_PKGLEN_PENABLED || | ||
1579 | data[0] == WACOM_REPORT_PENABLED) | ||
1580 | return wacom_tpc_pen(wacom); | ||
1581 | } | ||
1582 | else if (wacom->touch_input) { | ||
1578 | dev_dbg(wacom->touch_input->dev.parent, | 1583 | dev_dbg(wacom->touch_input->dev.parent, |
1579 | "%s: received report #%d\n", __func__, data[0]); | 1584 | "%s: received report #%d\n", __func__, data[0]); |
1580 | 1585 | ||
1581 | switch (len) { | 1586 | switch (len) { |
1582 | case WACOM_PKGLEN_TPC1FG: | 1587 | case WACOM_PKGLEN_TPC1FG: |
1583 | return wacom_tpc_single_touch(wacom, len); | 1588 | return wacom_tpc_single_touch(wacom, len); |
1584 | 1589 | ||
1585 | case WACOM_PKGLEN_TPC2FG: | 1590 | case WACOM_PKGLEN_TPC2FG: |
1586 | return wacom_tpc_mt_touch(wacom); | 1591 | return wacom_tpc_mt_touch(wacom); |
1587 | 1592 | ||
1588 | case WACOM_PKGLEN_PENABLED: | 1593 | default: |
1589 | return wacom_tpc_pen(wacom); | 1594 | switch (data[0]) { |
1595 | case WACOM_REPORT_TPC1FG: | ||
1596 | case WACOM_REPORT_TPCHID: | ||
1597 | case WACOM_REPORT_TPCST: | ||
1598 | case WACOM_REPORT_TPC1FGE: | ||
1599 | return wacom_tpc_single_touch(wacom, len); | ||
1590 | 1600 | ||
1591 | default: | 1601 | case WACOM_REPORT_TPCMT: |
1592 | switch (data[0]) { | 1602 | case WACOM_REPORT_TPCMT2: |
1593 | case WACOM_REPORT_TPC1FG: | 1603 | return wacom_mt_touch(wacom); |
1594 | case WACOM_REPORT_TPCHID: | ||
1595 | case WACOM_REPORT_TPCST: | ||
1596 | case WACOM_REPORT_TPC1FGE: | ||
1597 | return wacom_tpc_single_touch(wacom, len); | ||
1598 | |||
1599 | case WACOM_REPORT_TPCMT: | ||
1600 | case WACOM_REPORT_TPCMT2: | ||
1601 | return wacom_mt_touch(wacom); | ||
1602 | 1604 | ||
1603 | case WACOM_REPORT_PENABLED: | 1605 | } |
1604 | return wacom_tpc_pen(wacom); | ||
1605 | } | 1606 | } |
1606 | } | 1607 | } |
1607 | 1608 | ||
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 22d5eafd6815..5ef2814345ef 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -343,6 +343,7 @@ config SENSORS_ASB100 | |||
343 | 343 | ||
344 | config SENSORS_ASPEED | 344 | config SENSORS_ASPEED |
345 | tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver" | 345 | tristate "ASPEED AST2400/AST2500 PWM and Fan tach driver" |
346 | select REGMAP | ||
346 | help | 347 | help |
347 | This driver provides support for ASPEED AST2400/AST2500 PWM | 348 | This driver provides support for ASPEED AST2400/AST2500 PWM |
348 | and Fan Tacho controllers. | 349 | and Fan Tacho controllers. |
diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index 48403a2115be..9de13d626c68 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c | |||
@@ -7,6 +7,7 @@ | |||
7 | */ | 7 | */ |
8 | 8 | ||
9 | #include <linux/clk.h> | 9 | #include <linux/clk.h> |
10 | #include <linux/errno.h> | ||
10 | #include <linux/gpio/consumer.h> | 11 | #include <linux/gpio/consumer.h> |
11 | #include <linux/delay.h> | 12 | #include <linux/delay.h> |
12 | #include <linux/hwmon.h> | 13 | #include <linux/hwmon.h> |
@@ -494,7 +495,7 @@ static u32 aspeed_get_fan_tach_ch_measure_period(struct aspeed_pwm_tacho_data | |||
494 | return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit); | 495 | return clk / (clk_unit * div_h * div_l * tacho_div * tacho_unit); |
495 | } | 496 | } |
496 | 497 | ||
497 | static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, | 498 | static int aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, |
498 | u8 fan_tach_ch) | 499 | u8 fan_tach_ch) |
499 | { | 500 | { |
500 | u32 raw_data, tach_div, clk_source, sec, val; | 501 | u32 raw_data, tach_div, clk_source, sec, val; |
@@ -510,6 +511,9 @@ static u32 aspeed_get_fan_tach_ch_rpm(struct aspeed_pwm_tacho_data *priv, | |||
510 | msleep(sec); | 511 | msleep(sec); |
511 | 512 | ||
512 | regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val); | 513 | regmap_read(priv->regmap, ASPEED_PTCR_RESULT, &val); |
514 | if (!(val & RESULT_STATUS_MASK)) | ||
515 | return -ETIMEDOUT; | ||
516 | |||
513 | raw_data = val & RESULT_VALUE_MASK; | 517 | raw_data = val & RESULT_VALUE_MASK; |
514 | tach_div = priv->type_fan_tach_clock_division[type]; | 518 | tach_div = priv->type_fan_tach_clock_division[type]; |
515 | tach_div = 0x4 << (tach_div * 2); | 519 | tach_div = 0x4 << (tach_div * 2); |
@@ -561,12 +565,14 @@ static ssize_t show_rpm(struct device *dev, struct device_attribute *attr, | |||
561 | { | 565 | { |
562 | struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); | 566 | struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); |
563 | int index = sensor_attr->index; | 567 | int index = sensor_attr->index; |
564 | u32 rpm; | 568 | int rpm; |
565 | struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev); | 569 | struct aspeed_pwm_tacho_data *priv = dev_get_drvdata(dev); |
566 | 570 | ||
567 | rpm = aspeed_get_fan_tach_ch_rpm(priv, index); | 571 | rpm = aspeed_get_fan_tach_ch_rpm(priv, index); |
572 | if (rpm < 0) | ||
573 | return rpm; | ||
568 | 574 | ||
569 | return sprintf(buf, "%u\n", rpm); | 575 | return sprintf(buf, "%d\n", rpm); |
570 | } | 576 | } |
571 | 577 | ||
572 | static umode_t pwm_is_visible(struct kobject *kobj, | 578 | static umode_t pwm_is_visible(struct kobject *kobj, |
@@ -591,24 +597,23 @@ static umode_t fan_dev_is_visible(struct kobject *kobj, | |||
591 | return a->mode; | 597 | return a->mode; |
592 | } | 598 | } |
593 | 599 | ||
594 | static SENSOR_DEVICE_ATTR(pwm0, 0644, | ||
595 | show_pwm, set_pwm, 0); | ||
596 | static SENSOR_DEVICE_ATTR(pwm1, 0644, | 600 | static SENSOR_DEVICE_ATTR(pwm1, 0644, |
597 | show_pwm, set_pwm, 1); | 601 | show_pwm, set_pwm, 0); |
598 | static SENSOR_DEVICE_ATTR(pwm2, 0644, | 602 | static SENSOR_DEVICE_ATTR(pwm2, 0644, |
599 | show_pwm, set_pwm, 2); | 603 | show_pwm, set_pwm, 1); |
600 | static SENSOR_DEVICE_ATTR(pwm3, 0644, | 604 | static SENSOR_DEVICE_ATTR(pwm3, 0644, |
601 | show_pwm, set_pwm, 3); | 605 | show_pwm, set_pwm, 2); |
602 | static SENSOR_DEVICE_ATTR(pwm4, 0644, | 606 | static SENSOR_DEVICE_ATTR(pwm4, 0644, |
603 | show_pwm, set_pwm, 4); | 607 | show_pwm, set_pwm, 3); |
604 | static SENSOR_DEVICE_ATTR(pwm5, 0644, | 608 | static SENSOR_DEVICE_ATTR(pwm5, 0644, |
605 | show_pwm, set_pwm, 5); | 609 | show_pwm, set_pwm, 4); |
606 | static SENSOR_DEVICE_ATTR(pwm6, 0644, | 610 | static SENSOR_DEVICE_ATTR(pwm6, 0644, |
607 | show_pwm, set_pwm, 6); | 611 | show_pwm, set_pwm, 5); |
608 | static SENSOR_DEVICE_ATTR(pwm7, 0644, | 612 | static SENSOR_DEVICE_ATTR(pwm7, 0644, |
613 | show_pwm, set_pwm, 6); | ||
614 | static SENSOR_DEVICE_ATTR(pwm8, 0644, | ||
609 | show_pwm, set_pwm, 7); | 615 | show_pwm, set_pwm, 7); |
610 | static struct attribute *pwm_dev_attrs[] = { | 616 | static struct attribute *pwm_dev_attrs[] = { |
611 | &sensor_dev_attr_pwm0.dev_attr.attr, | ||
612 | &sensor_dev_attr_pwm1.dev_attr.attr, | 617 | &sensor_dev_attr_pwm1.dev_attr.attr, |
613 | &sensor_dev_attr_pwm2.dev_attr.attr, | 618 | &sensor_dev_attr_pwm2.dev_attr.attr, |
614 | &sensor_dev_attr_pwm3.dev_attr.attr, | 619 | &sensor_dev_attr_pwm3.dev_attr.attr, |
@@ -616,6 +621,7 @@ static struct attribute *pwm_dev_attrs[] = { | |||
616 | &sensor_dev_attr_pwm5.dev_attr.attr, | 621 | &sensor_dev_attr_pwm5.dev_attr.attr, |
617 | &sensor_dev_attr_pwm6.dev_attr.attr, | 622 | &sensor_dev_attr_pwm6.dev_attr.attr, |
618 | &sensor_dev_attr_pwm7.dev_attr.attr, | 623 | &sensor_dev_attr_pwm7.dev_attr.attr, |
624 | &sensor_dev_attr_pwm8.dev_attr.attr, | ||
619 | NULL, | 625 | NULL, |
620 | }; | 626 | }; |
621 | 627 | ||
@@ -624,40 +630,39 @@ static const struct attribute_group pwm_dev_group = { | |||
624 | .is_visible = pwm_is_visible, | 630 | .is_visible = pwm_is_visible, |
625 | }; | 631 | }; |
626 | 632 | ||
627 | static SENSOR_DEVICE_ATTR(fan0_input, 0444, | ||
628 | show_rpm, NULL, 0); | ||
629 | static SENSOR_DEVICE_ATTR(fan1_input, 0444, | 633 | static SENSOR_DEVICE_ATTR(fan1_input, 0444, |
630 | show_rpm, NULL, 1); | 634 | show_rpm, NULL, 0); |
631 | static SENSOR_DEVICE_ATTR(fan2_input, 0444, | 635 | static SENSOR_DEVICE_ATTR(fan2_input, 0444, |
632 | show_rpm, NULL, 2); | 636 | show_rpm, NULL, 1); |
633 | static SENSOR_DEVICE_ATTR(fan3_input, 0444, | 637 | static SENSOR_DEVICE_ATTR(fan3_input, 0444, |
634 | show_rpm, NULL, 3); | 638 | show_rpm, NULL, 2); |
635 | static SENSOR_DEVICE_ATTR(fan4_input, 0444, | 639 | static SENSOR_DEVICE_ATTR(fan4_input, 0444, |
636 | show_rpm, NULL, 4); | 640 | show_rpm, NULL, 3); |
637 | static SENSOR_DEVICE_ATTR(fan5_input, 0444, | 641 | static SENSOR_DEVICE_ATTR(fan5_input, 0444, |
638 | show_rpm, NULL, 5); | 642 | show_rpm, NULL, 4); |
639 | static SENSOR_DEVICE_ATTR(fan6_input, 0444, | 643 | static SENSOR_DEVICE_ATTR(fan6_input, 0444, |
640 | show_rpm, NULL, 6); | 644 | show_rpm, NULL, 5); |
641 | static SENSOR_DEVICE_ATTR(fan7_input, 0444, | 645 | static SENSOR_DEVICE_ATTR(fan7_input, 0444, |
642 | show_rpm, NULL, 7); | 646 | show_rpm, NULL, 6); |
643 | static SENSOR_DEVICE_ATTR(fan8_input, 0444, | 647 | static SENSOR_DEVICE_ATTR(fan8_input, 0444, |
644 | show_rpm, NULL, 8); | 648 | show_rpm, NULL, 7); |
645 | static SENSOR_DEVICE_ATTR(fan9_input, 0444, | 649 | static SENSOR_DEVICE_ATTR(fan9_input, 0444, |
646 | show_rpm, NULL, 9); | 650 | show_rpm, NULL, 8); |
647 | static SENSOR_DEVICE_ATTR(fan10_input, 0444, | 651 | static SENSOR_DEVICE_ATTR(fan10_input, 0444, |
648 | show_rpm, NULL, 10); | 652 | show_rpm, NULL, 9); |
649 | static SENSOR_DEVICE_ATTR(fan11_input, 0444, | 653 | static SENSOR_DEVICE_ATTR(fan11_input, 0444, |
650 | show_rpm, NULL, 11); | 654 | show_rpm, NULL, 10); |
651 | static SENSOR_DEVICE_ATTR(fan12_input, 0444, | 655 | static SENSOR_DEVICE_ATTR(fan12_input, 0444, |
652 | show_rpm, NULL, 12); | 656 | show_rpm, NULL, 11); |
653 | static SENSOR_DEVICE_ATTR(fan13_input, 0444, | 657 | static SENSOR_DEVICE_ATTR(fan13_input, 0444, |
654 | show_rpm, NULL, 13); | 658 | show_rpm, NULL, 12); |
655 | static SENSOR_DEVICE_ATTR(fan14_input, 0444, | 659 | static SENSOR_DEVICE_ATTR(fan14_input, 0444, |
656 | show_rpm, NULL, 14); | 660 | show_rpm, NULL, 13); |
657 | static SENSOR_DEVICE_ATTR(fan15_input, 0444, | 661 | static SENSOR_DEVICE_ATTR(fan15_input, 0444, |
662 | show_rpm, NULL, 14); | ||
663 | static SENSOR_DEVICE_ATTR(fan16_input, 0444, | ||
658 | show_rpm, NULL, 15); | 664 | show_rpm, NULL, 15); |
659 | static struct attribute *fan_dev_attrs[] = { | 665 | static struct attribute *fan_dev_attrs[] = { |
660 | &sensor_dev_attr_fan0_input.dev_attr.attr, | ||
661 | &sensor_dev_attr_fan1_input.dev_attr.attr, | 666 | &sensor_dev_attr_fan1_input.dev_attr.attr, |
662 | &sensor_dev_attr_fan2_input.dev_attr.attr, | 667 | &sensor_dev_attr_fan2_input.dev_attr.attr, |
663 | &sensor_dev_attr_fan3_input.dev_attr.attr, | 668 | &sensor_dev_attr_fan3_input.dev_attr.attr, |
@@ -673,6 +678,7 @@ static struct attribute *fan_dev_attrs[] = { | |||
673 | &sensor_dev_attr_fan13_input.dev_attr.attr, | 678 | &sensor_dev_attr_fan13_input.dev_attr.attr, |
674 | &sensor_dev_attr_fan14_input.dev_attr.attr, | 679 | &sensor_dev_attr_fan14_input.dev_attr.attr, |
675 | &sensor_dev_attr_fan15_input.dev_attr.attr, | 680 | &sensor_dev_attr_fan15_input.dev_attr.attr, |
681 | &sensor_dev_attr_fan16_input.dev_attr.attr, | ||
676 | NULL | 682 | NULL |
677 | }; | 683 | }; |
678 | 684 | ||
@@ -802,7 +808,6 @@ static int aspeed_pwm_tacho_probe(struct platform_device *pdev) | |||
802 | if (ret) | 808 | if (ret) |
803 | return ret; | 809 | return ret; |
804 | } | 810 | } |
805 | of_node_put(np); | ||
806 | 811 | ||
807 | priv->groups[0] = &pwm_dev_group; | 812 | priv->groups[0] = &pwm_dev_group; |
808 | priv->groups[1] = &fan_dev_group; | 813 | priv->groups[1] = &fan_dev_group; |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index 1844770f3ae8..2b4d613a3474 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, | |||
1429 | primary_path->packet_life_time = | 1429 | primary_path->packet_life_time = |
1430 | cm_req_get_primary_local_ack_timeout(req_msg); | 1430 | cm_req_get_primary_local_ack_timeout(req_msg); |
1431 | primary_path->packet_life_time -= (primary_path->packet_life_time > 0); | 1431 | primary_path->packet_life_time -= (primary_path->packet_life_time > 0); |
1432 | sa_path_set_service_id(primary_path, req_msg->service_id); | 1432 | primary_path->service_id = req_msg->service_id; |
1433 | 1433 | ||
1434 | if (req_msg->alt_local_lid) { | 1434 | if (req_msg->alt_local_lid) { |
1435 | alt_path->dgid = req_msg->alt_local_gid; | 1435 | alt_path->dgid = req_msg->alt_local_gid; |
@@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg, | |||
1452 | alt_path->packet_life_time = | 1452 | alt_path->packet_life_time = |
1453 | cm_req_get_alt_local_ack_timeout(req_msg); | 1453 | cm_req_get_alt_local_ack_timeout(req_msg); |
1454 | alt_path->packet_life_time -= (alt_path->packet_life_time > 0); | 1454 | alt_path->packet_life_time -= (alt_path->packet_life_time > 0); |
1455 | sa_path_set_service_id(alt_path, req_msg->service_id); | 1455 | alt_path->service_id = req_msg->service_id; |
1456 | } | 1456 | } |
1457 | } | 1457 | } |
1458 | 1458 | ||
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 91b7a2fe5a55..31bb82d8ecd7 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr, | |||
1140 | ib->sib_pkey = path->pkey; | 1140 | ib->sib_pkey = path->pkey; |
1141 | ib->sib_flowinfo = path->flow_label; | 1141 | ib->sib_flowinfo = path->flow_label; |
1142 | memcpy(&ib->sib_addr, &path->sgid, 16); | 1142 | memcpy(&ib->sib_addr, &path->sgid, 16); |
1143 | ib->sib_sid = sa_path_get_service_id(path); | 1143 | ib->sib_sid = path->service_id; |
1144 | ib->sib_scope_id = 0; | 1144 | ib->sib_scope_id = 0; |
1145 | } else { | 1145 | } else { |
1146 | ib->sib_pkey = listen_ib->sib_pkey; | 1146 | ib->sib_pkey = listen_ib->sib_pkey; |
@@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event, | |||
1274 | memcpy(&req->local_gid, &req_param->primary_path->sgid, | 1274 | memcpy(&req->local_gid, &req_param->primary_path->sgid, |
1275 | sizeof(req->local_gid)); | 1275 | sizeof(req->local_gid)); |
1276 | req->has_gid = true; | 1276 | req->has_gid = true; |
1277 | req->service_id = | 1277 | req->service_id = req_param->primary_path->service_id; |
1278 | sa_path_get_service_id(req_param->primary_path); | ||
1279 | req->pkey = be16_to_cpu(req_param->primary_path->pkey); | 1278 | req->pkey = be16_to_cpu(req_param->primary_path->pkey); |
1280 | if (req->pkey != req_param->bth_pkey) | 1279 | if (req->pkey != req_param->bth_pkey) |
1281 | pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" | 1280 | pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n" |
@@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, | |||
1827 | struct rdma_route *rt; | 1826 | struct rdma_route *rt; |
1828 | const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; | 1827 | const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; |
1829 | struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; | 1828 | struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; |
1830 | const __be64 service_id = sa_path_get_service_id(path); | 1829 | const __be64 service_id = |
1830 | ib_event->param.req_rcvd.primary_path->service_id; | ||
1831 | int ret; | 1831 | int ret; |
1832 | 1832 | ||
1833 | id = rdma_create_id(listen_id->route.addr.dev_addr.net, | 1833 | id = rdma_create_id(listen_id->route.addr.dev_addr.net, |
@@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, | |||
2345 | path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); | 2345 | path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr)); |
2346 | path_rec.numb_path = 1; | 2346 | path_rec.numb_path = 1; |
2347 | path_rec.reversible = 1; | 2347 | path_rec.reversible = 1; |
2348 | sa_path_set_service_id(&path_rec, | 2348 | path_rec.service_id = rdma_get_service_id(&id_priv->id, |
2349 | rdma_get_service_id(&id_priv->id, | 2349 | cma_dst_addr(id_priv)); |
2350 | cma_dst_addr(id_priv))); | ||
2351 | 2350 | ||
2352 | comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | | 2351 | comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID | |
2353 | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | | 2352 | IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH | |
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index cb7d372e4bdf..d92ab4eaa8f3 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h | |||
@@ -169,6 +169,16 @@ void ib_mad_cleanup(void); | |||
169 | int ib_sa_init(void); | 169 | int ib_sa_init(void); |
170 | void ib_sa_cleanup(void); | 170 | void ib_sa_cleanup(void); |
171 | 171 | ||
172 | int ibnl_init(void); | ||
173 | void ibnl_cleanup(void); | ||
174 | |||
175 | /** | ||
176 | * Check if there are any listeners to the netlink group | ||
177 | * @group: the netlink group ID | ||
178 | * Returns 0 on success or a negative value if there are no listeners. | ||
179 | */ | ||
180 | int ibnl_chk_listeners(unsigned int group); | ||
181 | |||
172 | int ib_nl_handle_resolve_resp(struct sk_buff *skb, | 182 | int ib_nl_handle_resolve_resp(struct sk_buff *skb, |
173 | struct netlink_callback *cb); | 183 | struct netlink_callback *cb); |
174 | int ib_nl_handle_set_timeout(struct sk_buff *skb, | 184 | int ib_nl_handle_set_timeout(struct sk_buff *skb, |
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index b784055423c8..94931c474d41 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <net/net_namespace.h> | 37 | #include <net/net_namespace.h> |
38 | #include <net/sock.h> | 38 | #include <net/sock.h> |
39 | #include <rdma/rdma_netlink.h> | 39 | #include <rdma/rdma_netlink.h> |
40 | #include "core_priv.h" | ||
40 | 41 | ||
41 | struct ibnl_client { | 42 | struct ibnl_client { |
42 | struct list_head list; | 43 | struct list_head list; |
@@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group) | |||
55 | return -1; | 56 | return -1; |
56 | return 0; | 57 | return 0; |
57 | } | 58 | } |
58 | EXPORT_SYMBOL(ibnl_chk_listeners); | ||
59 | 59 | ||
60 | int ibnl_add_client(int index, int nops, | 60 | int ibnl_add_client(int index, int nops, |
61 | const struct ibnl_client_cbs cb_table[]) | 61 | const struct ibnl_client_cbs cb_table[]) |
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index e335b09c022e..fb7aec4047c8 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c | |||
@@ -194,7 +194,7 @@ static u32 tid; | |||
194 | .field_name = "sa_path_rec:" #field | 194 | .field_name = "sa_path_rec:" #field |
195 | 195 | ||
196 | static const struct ib_field path_rec_table[] = { | 196 | static const struct ib_field path_rec_table[] = { |
197 | { PATH_REC_FIELD(ib.service_id), | 197 | { PATH_REC_FIELD(service_id), |
198 | .offset_words = 0, | 198 | .offset_words = 0, |
199 | .offset_bits = 0, | 199 | .offset_bits = 0, |
200 | .size_bits = 64 }, | 200 | .size_bits = 64 }, |
@@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = { | |||
296 | .field_name = "sa_path_rec:" #field | 296 | .field_name = "sa_path_rec:" #field |
297 | 297 | ||
298 | static const struct ib_field opa_path_rec_table[] = { | 298 | static const struct ib_field opa_path_rec_table[] = { |
299 | { OPA_PATH_REC_FIELD(opa.service_id), | 299 | { OPA_PATH_REC_FIELD(service_id), |
300 | .offset_words = 0, | 300 | .offset_words = 0, |
301 | .offset_bits = 0, | 301 | .offset_bits = 0, |
302 | .size_bits = 64 }, | 302 | .size_bits = 64 }, |
@@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb, | |||
774 | 774 | ||
775 | /* Now build the attributes */ | 775 | /* Now build the attributes */ |
776 | if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { | 776 | if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) { |
777 | val64 = be64_to_cpu(sa_path_get_service_id(sa_rec)); | 777 | val64 = be64_to_cpu(sa_rec->service_id); |
778 | nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, | 778 | nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID, |
779 | sizeof(val64), &val64); | 779 | sizeof(val64), &val64); |
780 | } | 780 | } |
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 3dbf811d3c51..21e60b1e2ff4 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d | |||
58 | for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { | 58 | for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) { |
59 | 59 | ||
60 | page = sg_page(sg); | 60 | page = sg_page(sg); |
61 | if (umem->writable && dirty) | 61 | if (!PageDirty(page) && umem->writable && dirty) |
62 | set_page_dirty_lock(page); | 62 | set_page_dirty_lock(page); |
63 | put_page(page); | 63 | put_page(page); |
64 | } | 64 | } |
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 0780b1afefa9..8c4ec564e495 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c | |||
@@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem, | |||
321 | struct vm_area_struct *vma; | 321 | struct vm_area_struct *vma; |
322 | struct hstate *h; | 322 | struct hstate *h; |
323 | 323 | ||
324 | down_read(&mm->mmap_sem); | ||
324 | vma = find_vma(mm, ib_umem_start(umem)); | 325 | vma = find_vma(mm, ib_umem_start(umem)); |
325 | if (!vma || !is_vm_hugetlb_page(vma)) | 326 | if (!vma || !is_vm_hugetlb_page(vma)) { |
327 | up_read(&mm->mmap_sem); | ||
326 | return -EINVAL; | 328 | return -EINVAL; |
329 | } | ||
327 | h = hstate_vma(vma); | 330 | h = hstate_vma(vma); |
328 | umem->page_shift = huge_page_shift(h); | 331 | umem->page_shift = huge_page_shift(h); |
332 | up_read(&mm->mmap_sem); | ||
329 | umem->hugetlb = 1; | 333 | umem->hugetlb = 1; |
330 | } else { | 334 | } else { |
331 | umem->hugetlb = 0; | 335 | umem->hugetlb = 0; |
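The hunk above takes mmap_sem around find_vma() and copies the page shift out before dropping the lock, unlocking on the error path as well. A userspace analogue of that take-lock, look-up, copy-out, unlock pattern (pthread-based sketch with made-up names, not kernel code):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static int shared_shift = 21;	/* stand-in for huge_page_shift() */

static int lookup_page_shift(int is_hugetlb)
{
	int shift;

	pthread_rwlock_rdlock(&map_lock);
	if (!is_hugetlb) {
		pthread_rwlock_unlock(&map_lock);	/* drop the lock on the error path too */
		return -1;
	}
	shift = shared_shift;				/* copy out while protected */
	pthread_rwlock_unlock(&map_lock);
	return shift;
}

int main(void)
{
	printf("%d %d\n", lookup_page_shift(1), lookup_page_shift(0));
	return 0;
}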
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index 8b9587fe2303..94fd989c9060 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c | |||
@@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst, | |||
96 | } | 96 | } |
97 | EXPORT_SYMBOL(ib_copy_qp_attr_to_user); | 97 | EXPORT_SYMBOL(ib_copy_qp_attr_to_user); |
98 | 98 | ||
99 | void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, | 99 | static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst, |
100 | struct sa_path_rec *src) | 100 | struct sa_path_rec *src) |
101 | { | 101 | { |
102 | memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid); | 102 | memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid)); |
103 | memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid); | 103 | memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid)); |
104 | 104 | ||
105 | dst->dlid = htons(ntohl(sa_path_get_dlid(src))); | 105 | dst->dlid = htons(ntohl(sa_path_get_dlid(src))); |
106 | dst->slid = htons(ntohl(sa_path_get_slid(src))); | 106 | dst->slid = htons(ntohl(sa_path_get_slid(src))); |
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index b6fe45924c6e..0910faf3587b 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c | |||
@@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) | |||
488 | 488 | ||
489 | ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); | 489 | ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); |
490 | release_ep_resources(ep); | 490 | release_ep_resources(ep); |
491 | kfree_skb(skb); | ||
491 | return 0; | 492 | return 0; |
492 | } | 493 | } |
493 | 494 | ||
@@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) | |||
498 | ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); | 499 | ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); |
499 | c4iw_put_ep(&ep->parent_ep->com); | 500 | c4iw_put_ep(&ep->parent_ep->com); |
500 | release_ep_resources(ep); | 501 | release_ep_resources(ep); |
502 | kfree_skb(skb); | ||
501 | return 0; | 503 | return 0; |
502 | } | 504 | } |
503 | 505 | ||
@@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb) | |||
569 | 571 | ||
570 | pr_debug("%s rdev %p\n", __func__, rdev); | 572 | pr_debug("%s rdev %p\n", __func__, rdev); |
571 | req->cmd = CPL_ABORT_NO_RST; | 573 | req->cmd = CPL_ABORT_NO_RST; |
574 | skb_get(skb); | ||
572 | ret = c4iw_ofld_send(rdev, skb); | 575 | ret = c4iw_ofld_send(rdev, skb); |
573 | if (ret) { | 576 | if (ret) { |
574 | __state_set(&ep->com, DEAD); | 577 | __state_set(&ep->com, DEAD); |
575 | queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); | 578 | queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); |
576 | } | 579 | } else |
580 | kfree_skb(skb); | ||
577 | } | 581 | } |
578 | 582 | ||
579 | static int send_flowc(struct c4iw_ep *ep) | 583 | static int send_flowc(struct c4iw_ep *ep) |
@@ -2517,7 +2521,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) | |||
2517 | goto reject; | 2521 | goto reject; |
2518 | } | 2522 | } |
2519 | 2523 | ||
2520 | hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + | 2524 | hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) + |
2525 | sizeof(struct tcphdr) + | ||
2521 | ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); | 2526 | ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); |
2522 | if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) | 2527 | if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) |
2523 | child_ep->mtu = peer_mss + hdrs; | 2528 | child_ep->mtu = peer_mss + hdrs; |
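For the MTU clamp above, the header overhead is 20 (IPv4) or 40 (IPv6) bytes of IP header plus 20 bytes of TCP header, plus 12 bytes when TCP timestamps are negotiated: 40/52 bytes for IPv4 and 60/72 bytes for IPv6. A minimal sketch of that arithmetic (textbook header sizes assumed; the driver uses sizeof() on the real structures):

#include <stdbool.h>
#include <stdio.h>

#define IPV4_HDR_LEN	20
#define IPV6_HDR_LEN	40
#define TCP_HDR_LEN	20
#define TCP_TS_OPT	12

static int hdr_overhead(bool ipv6, bool timestamps)
{
	return (ipv6 ? IPV6_HDR_LEN : IPV4_HDR_LEN) + TCP_HDR_LEN +
	       (timestamps ? TCP_TS_OPT : 0);
}

int main(void)
{
	/* the child endpoint MTU is clamped to peer MSS plus this overhead */
	printf("IPv4: %d, IPv4+ts: %d, IPv6+ts: %d\n",
	       hdr_overhead(false, false),	/* 40 */
	       hdr_overhead(false, true),	/* 52 */
	       hdr_overhead(true, true));	/* 72 */
	return 0;
}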
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 329fb65e8fb0..f96a96dbcf1f 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -971,7 +971,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop) | |||
971 | devp->rdev.lldi.sge_egrstatuspagesize); | 971 | devp->rdev.lldi.sge_egrstatuspagesize); |
972 | 972 | ||
973 | devp->rdev.hw_queue.t4_eq_status_entries = | 973 | devp->rdev.hw_queue.t4_eq_status_entries = |
974 | devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1; | 974 | devp->rdev.lldi.sge_egrstatuspagesize / 64; |
975 | devp->rdev.hw_queue.t4_max_eq_size = 65520; | 975 | devp->rdev.hw_queue.t4_max_eq_size = 65520; |
976 | devp->rdev.hw_queue.t4_max_iq_size = 65520; | 976 | devp->rdev.hw_queue.t4_max_iq_size = 65520; |
977 | devp->rdev.hw_queue.t4_max_rq_size = 8192 - | 977 | devp->rdev.hw_queue.t4_max_rq_size = 8192 - |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 5d6b1eeaa9a0..2ba00b89df6a 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
@@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd) | |||
6312 | } | 6312 | } |
6313 | } | 6313 | } |
6314 | 6314 | ||
6315 | static void write_global_credit(struct hfi1_devdata *dd, | 6315 | /* |
6316 | u8 vau, u16 total, u16 shared) | 6316 | * Set up allocation unit value. |
6317 | */ | ||
6318 | void set_up_vau(struct hfi1_devdata *dd, u8 vau) | ||
6317 | { | 6319 | { |
6318 | write_csr(dd, SEND_CM_GLOBAL_CREDIT, | 6320 | u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); |
6319 | ((u64)total << | 6321 | |
6320 | SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) | | 6322 | /* do not modify other values in the register */ |
6321 | ((u64)shared << | 6323 | reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK; |
6322 | SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) | | 6324 | reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT; |
6323 | ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT)); | 6325 | write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); |
6324 | } | 6326 | } |
6325 | 6327 | ||
6326 | /* | 6328 | /* |
6327 | * Set up initial VL15 credits of the remote. Assumes the rest of | 6329 | * Set up initial VL15 credits of the remote. Assumes the rest of |
6328 | * the CM credit registers are zero from a previous global or credit reset . | 6330 | * the CM credit registers are zero from a previous global or credit reset. |
6331 | * Shared limit for VL15 will always be 0. | ||
6329 | */ | 6332 | */ |
6330 | void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf) | 6333 | void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf) |
6331 | { | 6334 | { |
6332 | /* leave shared count at zero for both global and VL15 */ | 6335 | u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT); |
6333 | write_global_credit(dd, vau, vl15buf, 0); | 6336 | |
6337 | /* set initial values for total and shared credit limit */ | ||
6338 | reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK | | ||
6339 | SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK); | ||
6340 | |||
6341 | /* | ||
6342 | * Set total limit to be equal to VL15 credits. | ||
6343 | * Leave shared limit at 0. | ||
6344 | */ | ||
6345 | reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT; | ||
6346 | write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg); | ||
6334 | 6347 | ||
6335 | write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf | 6348 | write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf |
6336 | << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); | 6349 | << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); |
@@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd) | |||
6348 | for (i = 0; i < TXE_NUM_DATA_VL; i++) | 6361 | for (i = 0; i < TXE_NUM_DATA_VL; i++) |
6349 | write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); | 6362 | write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0); |
6350 | write_csr(dd, SEND_CM_CREDIT_VL15, 0); | 6363 | write_csr(dd, SEND_CM_CREDIT_VL15, 0); |
6351 | write_global_credit(dd, 0, 0, 0); | 6364 | write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0); |
6352 | /* reset the CM block */ | 6365 | /* reset the CM block */ |
6353 | pio_send_control(dd, PSC_CM_RESET); | 6366 | pio_send_control(dd, PSC_CM_RESET); |
6367 | /* reset cached value */ | ||
6368 | dd->vl15buf_cached = 0; | ||
6354 | } | 6369 | } |
6355 | 6370 | ||
6356 | /* convert a vCU to a CU */ | 6371 | /* convert a vCU to a CU */ |
@@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work) | |||
6839 | { | 6854 | { |
6840 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, | 6855 | struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata, |
6841 | link_up_work); | 6856 | link_up_work); |
6857 | struct hfi1_devdata *dd = ppd->dd; | ||
6858 | |||
6842 | set_link_state(ppd, HLS_UP_INIT); | 6859 | set_link_state(ppd, HLS_UP_INIT); |
6843 | 6860 | ||
6844 | /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ | 6861 | /* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */ |
6845 | read_ltp_rtt(ppd->dd); | 6862 | read_ltp_rtt(dd); |
6846 | /* | 6863 | /* |
6847 | * OPA specifies that certain counters are cleared on a transition | 6864 | * OPA specifies that certain counters are cleared on a transition |
6848 | * to link up, so do that. | 6865 | * to link up, so do that. |
6849 | */ | 6866 | */ |
6850 | clear_linkup_counters(ppd->dd); | 6867 | clear_linkup_counters(dd); |
6851 | /* | 6868 | /* |
6852 | * And (re)set link up default values. | 6869 | * And (re)set link up default values. |
6853 | */ | 6870 | */ |
6854 | set_linkup_defaults(ppd); | 6871 | set_linkup_defaults(ppd); |
6855 | 6872 | ||
6873 | /* | ||
6874 | * Set VL15 credits. Use cached value from verify cap interrupt. | ||
6875 | * In case of quick linkup or simulator, vl15 value will be set by | ||
6876 | * handle_linkup_change. VerifyCap interrupt handler will not be | ||
6877 | * called in those scenarios. | ||
6878 | */ | ||
6879 | if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR)) | ||
6880 | set_up_vl15(dd, dd->vl15buf_cached); | ||
6881 | |||
6856 | /* enforce link speed enabled */ | 6882 | /* enforce link speed enabled */ |
6857 | if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { | 6883 | if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) { |
6858 | /* oops - current speed is not enabled, bounce */ | 6884 | /* oops - current speed is not enabled, bounce */ |
6859 | dd_dev_err(ppd->dd, | 6885 | dd_dev_err(dd, |
6860 | "Link speed active 0x%x is outside enabled 0x%x, downing link\n", | 6886 | "Link speed active 0x%x is outside enabled 0x%x, downing link\n", |
6861 | ppd->link_speed_active, ppd->link_speed_enabled); | 6887 | ppd->link_speed_active, ppd->link_speed_enabled); |
6862 | set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, | 6888 | set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0, |
@@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work) | |||
7357 | */ | 7383 | */ |
7358 | if (vau == 0) | 7384 | if (vau == 0) |
7359 | vau = 1; | 7385 | vau = 1; |
7360 | set_up_vl15(dd, vau, vl15buf); | 7386 | set_up_vau(dd, vau); |
7387 | |||
7388 | /* | ||
7389 | * Set VL15 credits to 0 in global credit register. Cache remote VL15 | ||
7390 | * credits value and wait for link-up interrupt to set it. | ||
7391 | */ | ||
7392 | set_up_vl15(dd, 0); | ||
7393 | dd->vl15buf_cached = vl15buf; | ||
7361 | 7394 | ||
7362 | /* set up the LCB CRC mode */ | 7395 | /* set up the LCB CRC mode */ |
7363 | crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; | 7396 | crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc; |
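
The hfi1 change above splits the old set_up_vl15(dd, vau, vl15buf) into set_up_vau() and set_up_vl15() and defers the credit programming: handle_verify_cap() caches the peer's VL15 buffer count in dd->vl15buf_cached while leaving the credits at 0, and handle_link_up() applies the cached value once the link is actually up (quick-linkup and simulator paths still get it via handle_linkup_change()). A minimal userspace sketch of that ordering, with stand-in types rather than the real driver structures:

    /* Sketch of the deferred VL15 credit programming; types and values are
     * illustrative stand-ins, only the ordering mirrors the driver change. */
    #include <stdio.h>

    struct dev {
        unsigned int vl15buf_cached;   /* remote credits seen at VerifyCap time */
        unsigned int vl15_credits;     /* what would be programmed in the CSR */
    };

    static void set_up_vl15(struct dev *dd, unsigned int vl15buf)
    {
        dd->vl15_credits = vl15buf;            /* stands in for the CSR write */
    }

    static void handle_verify_cap(struct dev *dd, unsigned int remote_vl15buf)
    {
        set_up_vl15(dd, 0);                    /* keep credits at 0 for now */
        dd->vl15buf_cached = remote_vl15buf;   /* remember the peer's value */
    }

    static void handle_link_up(struct dev *dd)
    {
        set_up_vl15(dd, dd->vl15buf_cached);   /* apply only once link is up */
    }

    int main(void)
    {
        struct dev dd = { 0, 0 };

        handle_verify_cap(&dd, 64);
        printf("after verify cap: %u credits\n", dd.vl15_credits);  /* 0 */
        handle_link_up(&dd);
        printf("after link up:    %u credits\n", dd.vl15_credits);  /* 64 */
        return 0;
    }
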
diff --git a/drivers/infiniband/hw/hfi1/chip_registers.h b/drivers/infiniband/hw/hfi1/chip_registers.h index 5bfa839d1c48..793514f1d15f 100644 --- a/drivers/infiniband/hw/hfi1/chip_registers.h +++ b/drivers/infiniband/hw/hfi1/chip_registers.h | |||
@@ -839,7 +839,9 @@ | |||
839 | #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull | 839 | #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull |
840 | #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull | 840 | #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull |
841 | #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508) | 841 | #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508) |
842 | #define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull | ||
842 | #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16 | 843 | #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16 |
844 | #define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull | ||
843 | #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull | 845 | #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull |
844 | #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull | 846 | #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull |
845 | #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0 | 847 | #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0 |
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index da322e6668cc..414a04a481c2 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h | |||
@@ -1045,6 +1045,14 @@ struct hfi1_devdata { | |||
1045 | /* initial vl15 credits to use */ | 1045 | /* initial vl15 credits to use */ |
1046 | u16 vl15_init; | 1046 | u16 vl15_init; |
1047 | 1047 | ||
1048 | /* | ||
1049 | * Cached value for vl15buf, read during verify cap interrupt. VL15 | ||
1050 | * credits are to be kept at 0 and set when handling the link-up | ||
1051 | * interrupt. This removes the possibility of receiving VL15 MAD | ||
1052 | * packets before this HFI is ready. | ||
1053 | */ | ||
1054 | u16 vl15buf_cached; | ||
1055 | |||
1048 | /* Misc small ints */ | 1056 | /* Misc small ints */ |
1049 | u8 n_krcv_queues; | 1057 | u8 n_krcv_queues; |
1050 | u8 qos_shift; | 1058 | u8 qos_shift; |
@@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode); | |||
1598 | int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t); | 1606 | int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t); |
1599 | int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t); | 1607 | int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t); |
1600 | 1608 | ||
1601 | void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); | 1609 | void set_up_vau(struct hfi1_devdata *dd, u8 vau); |
1610 | void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf); | ||
1602 | void reset_link_credits(struct hfi1_devdata *dd); | 1611 | void reset_link_credits(struct hfi1_devdata *dd); |
1603 | void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); | 1612 | void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); |
1604 | 1613 | ||
diff --git a/drivers/infiniband/hw/hfi1/intr.c b/drivers/infiniband/hw/hfi1/intr.c index ba265d0ae93b..04a5082d5ac5 100644 --- a/drivers/infiniband/hw/hfi1/intr.c +++ b/drivers/infiniband/hw/hfi1/intr.c | |||
@@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup) | |||
130 | * the remote values. Both sides must be using the values. | 130 | * the remote values. Both sides must be using the values. |
131 | */ | 131 | */ |
132 | if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { | 132 | if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) { |
133 | set_up_vl15(dd, dd->vau, dd->vl15_init); | 133 | set_up_vau(dd, dd->vau); |
134 | set_up_vl15(dd, dd->vl15_init); | ||
134 | assign_remote_cm_au_table(dd, dd->vcu); | 135 | assign_remote_cm_au_table(dd, dd->vcu); |
135 | } | 136 | } |
136 | 137 | ||
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 93faf86d54b6..6a9f6f9819e1 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c | |||
@@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) | |||
207 | /* | 207 | /* |
208 | * Save BARs and command to rewrite after device reset. | 208 | * Save BARs and command to rewrite after device reset. |
209 | */ | 209 | */ |
210 | dd->pcibar0 = addr; | 210 | pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0); |
211 | dd->pcibar1 = addr >> 32; | 211 | pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1); |
212 | pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom); | 212 | pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom); |
213 | pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command); | 213 | pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command); |
214 | pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl); | 214 | pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl); |
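
The pcie.c hunk stops deriving the saved BAR values from the already-mapped address and reads PCI_BASE_ADDRESS_0/1 from config space instead, so the values restored after a device reset contain exactly what the registers held, including the low type/prefetch flag bits of a 64-bit memory BAR that a shifted CPU address would not carry. An illustration of the difference, with a made-up base address and flag value:

    /* Illustration only: the config-space dword of a 64-bit memory BAR carries
     * flag bits in its low nibble, so saving "(u32)address" and "address >> 32"
     * is not the same as saving the raw register contents. Values are made up. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bar_base  = 0x00000000f8000000ULL;  /* hypothetical BAR base */
        uint32_t type_bits = 0x4;                    /* "64-bit memory BAR" flag */

        uint32_t cfg_bar0 = (uint32_t)bar_base | type_bits;  /* what config space holds */
        uint32_t cfg_bar1 = (uint32_t)(bar_base >> 32);
        uint32_t derived0 = (uint32_t)bar_base;              /* what the old code saved */

        printf("BAR0: config 0x%08" PRIx32 " vs derived 0x%08" PRIx32 "\n",
               cfg_bar0, derived0);
        printf("BAR1: config 0x%08" PRIx32 "\n", cfg_bar1);
        return 0;
    }
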
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 069bdaf061ab..1080778a1f7c 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c | |||
@@ -2159,8 +2159,11 @@ send_last: | |||
2159 | ret = hfi1_rvt_get_rwqe(qp, 1); | 2159 | ret = hfi1_rvt_get_rwqe(qp, 1); |
2160 | if (ret < 0) | 2160 | if (ret < 0) |
2161 | goto nack_op_err; | 2161 | goto nack_op_err; |
2162 | if (!ret) | 2162 | if (!ret) { |
2163 | /* peer will send again */ | ||
2164 | rvt_put_ss(&qp->r_sge); | ||
2163 | goto rnr_nak; | 2165 | goto rnr_nak; |
2166 | } | ||
2164 | wc.ex.imm_data = ohdr->u.rc.imm_data; | 2167 | wc.ex.imm_data = ohdr->u.rc.imm_data; |
2165 | wc.wc_flags = IB_WC_WITH_IMM; | 2168 | wc.wc_flags = IB_WC_WITH_IMM; |
2166 | goto send_last; | 2169 | goto send_last; |
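
In the receive path above, when hfi1_rvt_get_rwqe() finds no receive WQE the responder now drops the SGE-state reference it is already holding before answering with an RNR NAK; the peer will resend the request, so keeping the reference across the retry would leak it. The same fix is applied to qib_rc.c further down. A small sketch of the pattern, with a bare counter standing in for rvt_put_ss():

    /* Sketch of "release held state before asking the peer to retry";
     * the counter stands in for the SGE references dropped by rvt_put_ss(). */
    #include <stdio.h>

    struct sge_state { int refs; };

    static void put_ss(struct sge_state *ss)
    {
        if (ss->refs > 0)
            ss->refs--;
    }

    static int handle_request(struct sge_state *ss, int have_rwqe)
    {
        ss->refs++;                     /* taken while parsing the request */
        if (!have_rwqe) {
            put_ss(ss);                 /* peer will send again: drop it now */
            return -1;                  /* stands in for "goto rnr_nak" */
        }
        put_ss(ss);                     /* normal completion consumes it too */
        return 0;
    }

    int main(void)
    {
        struct sge_state ss = { 0 };

        handle_request(&ss, 0);
        printf("refs after RNR path: %d\n", ss.refs);   /* 0, nothing leaked */
        return 0;
    }
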
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index 50d140d25e38..2f3bbcac1e34 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c | |||
@@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = { | |||
196 | }; | 196 | }; |
197 | 197 | ||
198 | static struct attribute *port_cc_default_attributes[] = { | 198 | static struct attribute *port_cc_default_attributes[] = { |
199 | &cc_prescan_attr.attr | 199 | &cc_prescan_attr.attr, |
200 | NULL | ||
200 | }; | 201 | }; |
201 | 202 | ||
202 | static struct kobj_type port_cc_ktype = { | 203 | static struct kobj_type port_cc_ktype = { |
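
The sysfs fix above adds the missing NULL sentinel to port_cc_default_attributes: the kobject core walks default attribute arrays until it reaches a NULL entry, so a one-element array without the terminator makes it read past the end. The idiom, reduced to plain C:

    /* Sketch of iterating a NULL-terminated attribute array, the convention
     * the kobject core relies on for default attributes. */
    #include <stdio.h>

    struct attribute { const char *name; };

    static struct attribute cc_prescan_attr = { "cc_prescan" };

    static struct attribute *port_cc_default_attributes[] = {
        &cc_prescan_attr,
        NULL,                           /* sentinel that stops the walk */
    };

    int main(void)
    {
        struct attribute **a;

        for (a = port_cc_default_attributes; *a; a++)
            printf("creating sysfs file: %s\n", (*a)->name);
        return 0;
    }
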
diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index f3bc01bce483..6ae98aa7f74e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c | |||
@@ -784,7 +784,6 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node, | |||
784 | } | 784 | } |
785 | 785 | ||
786 | ctrl_ird |= IETF_PEER_TO_PEER; | 786 | ctrl_ird |= IETF_PEER_TO_PEER; |
787 | ctrl_ird |= IETF_FLPDU_ZERO_LEN; | ||
788 | 787 | ||
789 | switch (mpa_key) { | 788 | switch (mpa_key) { |
790 | case MPA_KEY_REQUEST: | 789 | case MPA_KEY_REQUEST: |
@@ -2446,8 +2445,8 @@ static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node, | |||
2446 | } else { | 2445 | } else { |
2447 | type = I40IW_CM_EVENT_CONNECTED; | 2446 | type = I40IW_CM_EVENT_CONNECTED; |
2448 | cm_node->state = I40IW_CM_STATE_OFFLOADED; | 2447 | cm_node->state = I40IW_CM_STATE_OFFLOADED; |
2449 | i40iw_send_ack(cm_node); | ||
2450 | } | 2448 | } |
2449 | i40iw_send_ack(cm_node); | ||
2451 | break; | 2450 | break; |
2452 | default: | 2451 | default: |
2453 | pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state); | 2452 | pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index f82483b3d1e7..a027e2072477 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c | |||
@@ -285,28 +285,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa | |||
285 | struct i40iw_sc_dev *dev = vsi->dev; | 285 | struct i40iw_sc_dev *dev = vsi->dev; |
286 | struct i40iw_sc_qp *qp = NULL; | 286 | struct i40iw_sc_qp *qp = NULL; |
287 | bool qs_handle_change = false; | 287 | bool qs_handle_change = false; |
288 | bool mss_change = false; | ||
289 | unsigned long flags; | 288 | unsigned long flags; |
290 | u16 qs_handle; | 289 | u16 qs_handle; |
291 | int i; | 290 | int i; |
292 | 291 | ||
293 | if (vsi->mss != l2params->mss) { | 292 | vsi->mss = l2params->mss; |
294 | mss_change = true; | ||
295 | vsi->mss = l2params->mss; | ||
296 | } | ||
297 | 293 | ||
298 | i40iw_fill_qos_list(l2params->qs_handle_list); | 294 | i40iw_fill_qos_list(l2params->qs_handle_list); |
299 | for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { | 295 | for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) { |
300 | qs_handle = l2params->qs_handle_list[i]; | 296 | qs_handle = l2params->qs_handle_list[i]; |
301 | if (vsi->qos[i].qs_handle != qs_handle) | 297 | if (vsi->qos[i].qs_handle != qs_handle) |
302 | qs_handle_change = true; | 298 | qs_handle_change = true; |
303 | else if (!mss_change) | ||
304 | continue; /* no MSS nor qs handle change */ | ||
305 | spin_lock_irqsave(&vsi->qos[i].lock, flags); | 299 | spin_lock_irqsave(&vsi->qos[i].lock, flags); |
306 | qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); | 300 | qp = i40iw_get_qp(&vsi->qos[i].qplist, qp); |
307 | while (qp) { | 301 | while (qp) { |
308 | if (mss_change) | ||
309 | i40iw_qp_mss_modify(dev, qp); | ||
310 | if (qs_handle_change) { | 302 | if (qs_handle_change) { |
311 | qp->qs_handle = qs_handle; | 303 | qp->qs_handle = qs_handle; |
312 | /* issue cqp suspend command */ | 304 | /* issue cqp suspend command */ |
@@ -2395,7 +2387,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify( | |||
2395 | 2387 | ||
2396 | set_64bit_val(wqe, | 2388 | set_64bit_val(wqe, |
2397 | 8, | 2389 | 8, |
2398 | LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) | | ||
2399 | LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); | 2390 | LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN)); |
2400 | 2391 | ||
2401 | set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); | 2392 | set_64bit_val(wqe, 16, qp->hw_host_ctx_pa); |
@@ -2410,7 +2401,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify( | |||
2410 | LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | | 2401 | LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) | |
2411 | LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | | 2402 | LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) | |
2412 | LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | | 2403 | LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) | |
2413 | LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) | | ||
2414 | LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | | 2404 | LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) | |
2415 | LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | | 2405 | LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) | |
2416 | LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) | | 2406 | LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) | |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index 2728af3103ce..a3f18a22f5ed 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c | |||
@@ -1319,13 +1319,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, | |||
1319 | status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, | 1319 | status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE, |
1320 | I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); | 1320 | I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK); |
1321 | if (status) | 1321 | if (status) |
1322 | goto exit; | 1322 | goto error; |
1323 | info.fpm_query_buf_pa = mem.pa; | 1323 | info.fpm_query_buf_pa = mem.pa; |
1324 | info.fpm_query_buf = mem.va; | 1324 | info.fpm_query_buf = mem.va; |
1325 | status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, | 1325 | status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE, |
1326 | I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); | 1326 | I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK); |
1327 | if (status) | 1327 | if (status) |
1328 | goto exit; | 1328 | goto error; |
1329 | info.fpm_commit_buf_pa = mem.pa; | 1329 | info.fpm_commit_buf_pa = mem.pa; |
1330 | info.fpm_commit_buf = mem.va; | 1330 | info.fpm_commit_buf = mem.va; |
1331 | info.hmc_fn_id = ldev->fid; | 1331 | info.hmc_fn_id = ldev->fid; |
@@ -1347,11 +1347,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev, | |||
1347 | info.exception_lan_queue = 1; | 1347 | info.exception_lan_queue = 1; |
1348 | info.vchnl_send = i40iw_virtchnl_send; | 1348 | info.vchnl_send = i40iw_virtchnl_send; |
1349 | status = i40iw_device_init(&iwdev->sc_dev, &info); | 1349 | status = i40iw_device_init(&iwdev->sc_dev, &info); |
1350 | exit: | 1350 | |
1351 | if (status) { | 1351 | if (status) |
1352 | kfree(iwdev->hmc_info_mem); | 1352 | goto error; |
1353 | iwdev->hmc_info_mem = NULL; | ||
1354 | } | ||
1355 | memset(&vsi_info, 0, sizeof(vsi_info)); | 1353 | memset(&vsi_info, 0, sizeof(vsi_info)); |
1356 | vsi_info.dev = &iwdev->sc_dev; | 1354 | vsi_info.dev = &iwdev->sc_dev; |
1357 | vsi_info.back_vsi = (void *)iwdev; | 1355 | vsi_info.back_vsi = (void *)iwdev; |
@@ -1362,11 +1360,19 @@ exit: | |||
1362 | memset(&stats_info, 0, sizeof(stats_info)); | 1360 | memset(&stats_info, 0, sizeof(stats_info)); |
1363 | stats_info.fcn_id = ldev->fid; | 1361 | stats_info.fcn_id = ldev->fid; |
1364 | stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); | 1362 | stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL); |
1363 | if (!stats_info.pestat) { | ||
1364 | status = I40IW_ERR_NO_MEMORY; | ||
1365 | goto error; | ||
1366 | } | ||
1365 | stats_info.stats_initialize = true; | 1367 | stats_info.stats_initialize = true; |
1366 | if (stats_info.pestat) | 1368 | if (stats_info.pestat) |
1367 | i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); | 1369 | i40iw_vsi_stats_init(&iwdev->vsi, &stats_info); |
1368 | } | 1370 | } |
1369 | return status; | 1371 | return status; |
1372 | error: | ||
1373 | kfree(iwdev->hmc_info_mem); | ||
1374 | iwdev->hmc_info_mem = NULL; | ||
1375 | return status; | ||
1370 | } | 1376 | } |
1371 | 1377 | ||
1372 | /** | 1378 | /** |
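
i40iw_initialize_dev() above is reworked so every failure, including the new allocation check for stats_info.pestat, funnels through one error: label that frees hmc_info_mem, instead of the old exit: label that only some paths reached. The single-cleanup-label idiom, reduced to a userspace sketch:

    /* Sketch of the single error-label cleanup idiom used in the rework. */
    #include <stdlib.h>

    struct ctx { void *hmc_info_mem; void *pestat; };

    static int initialize(struct ctx *c)
    {
        int status = 0;

        c->hmc_info_mem = malloc(64);
        if (!c->hmc_info_mem)
            return -1;

        c->pestat = malloc(32);
        if (!c->pestat) {
            status = -1;
            goto error;                 /* every later failure lands here */
        }
        return 0;

    error:
        free(c->hmc_info_mem);          /* one place undoes the earlier setup */
        c->hmc_info_mem = NULL;
        return status;
    }

    int main(void)
    {
        struct ctx c = { 0, 0 };
        int ret = initialize(&c);

        free(c.pestat);
        free(c.hmc_info_mem);
        return ret ? 1 : 0;
    }
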
diff --git a/drivers/infiniband/hw/i40iw/i40iw_osdep.h b/drivers/infiniband/hw/i40iw/i40iw_osdep.h index aa66c1c63dfa..f27be3e7830b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_osdep.h +++ b/drivers/infiniband/hw/i40iw/i40iw_osdep.h | |||
@@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev, | |||
199 | struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); | 199 | struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx); |
200 | void *i40iw_remove_head(struct list_head *list); | 200 | void *i40iw_remove_head(struct list_head *list); |
201 | void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend); | 201 | void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend); |
202 | void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp); | ||
203 | 202 | ||
204 | void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); | 203 | void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len); |
205 | void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred); | 204 | void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred); |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_type.h b/drivers/infiniband/hw/i40iw/i40iw_type.h index 7b76259752b0..959ec81fba99 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_type.h +++ b/drivers/infiniband/hw/i40iw/i40iw_type.h | |||
@@ -541,7 +541,6 @@ struct i40iw_create_qp_info { | |||
541 | struct i40iw_modify_qp_info { | 541 | struct i40iw_modify_qp_info { |
542 | u64 rx_win0; | 542 | u64 rx_win0; |
543 | u64 rx_win1; | 543 | u64 rx_win1; |
544 | u16 new_mss; | ||
545 | u8 next_iwarp_state; | 544 | u8 next_iwarp_state; |
546 | u8 termlen; | 545 | u8 termlen; |
547 | bool ord_valid; | 546 | bool ord_valid; |
@@ -554,7 +553,6 @@ struct i40iw_modify_qp_info { | |||
554 | bool dont_send_term; | 553 | bool dont_send_term; |
555 | bool dont_send_fin; | 554 | bool dont_send_fin; |
556 | bool cached_var_valid; | 555 | bool cached_var_valid; |
557 | bool mss_change; | ||
558 | bool force_loopback; | 556 | bool force_loopback; |
559 | }; | 557 | }; |
560 | 558 | ||
diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 409a3781e735..56d986924a4c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c | |||
@@ -757,23 +757,6 @@ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b | |||
757 | } | 757 | } |
758 | 758 | ||
759 | /** | 759 | /** |
760 | * i40iw_qp_mss_modify - modify mss for qp | ||
761 | * @dev: hardware control device structure | ||
762 | * @qp: hardware control qp | ||
763 | */ | ||
764 | void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp) | ||
765 | { | ||
766 | struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev; | ||
767 | struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp; | ||
768 | struct i40iw_modify_qp_info info; | ||
769 | |||
770 | memset(&info, 0, sizeof(info)); | ||
771 | info.mss_change = true; | ||
772 | info.new_mss = qp->vsi->mss; | ||
773 | i40iw_hw_modify_qp(iwdev, iwqp, &info, false); | ||
774 | } | ||
775 | |||
776 | /** | ||
777 | * i40iw_term_modify_qp - modify qp for term message | 760 | * i40iw_term_modify_qp - modify qp for term message |
778 | * @qp: hardware control qp | 761 | * @qp: hardware control qp |
779 | * @next_state: qp's next state | 762 | * @next_state: qp's next state |
diff --git a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c index f4d13683a403..48fd327f876b 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_virtchnl.c | |||
@@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev, | |||
443 | if (!dev->vchnl_up) | 443 | if (!dev->vchnl_up) |
444 | return I40IW_ERR_NOT_READY; | 444 | return I40IW_ERR_NOT_READY; |
445 | if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { | 445 | if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) { |
446 | if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0) | 446 | vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); |
447 | vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); | ||
448 | else | ||
449 | vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg); | ||
450 | return I40IW_SUCCESS; | 447 | return I40IW_SUCCESS; |
451 | } | 448 | } |
452 | for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) { | 449 | for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) { |
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index b4694717f6f3..21d31cb1325f 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -1578,6 +1578,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc | |||
1578 | if (port < 0) | 1578 | if (port < 0) |
1579 | return; | 1579 | return; |
1580 | ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); | 1580 | ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff)); |
1581 | ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port); | ||
1581 | 1582 | ||
1582 | mlx4_ib_query_ah(&ah.ibah, &ah_attr); | 1583 | mlx4_ib_query_ah(&ah.ibah, &ah_attr); |
1583 | if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH) | 1584 | if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH) |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 42defaa0d6c6..852a6a75db98 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -2978,6 +2978,18 @@ error_0: | |||
2978 | return ret; | 2978 | return ret; |
2979 | } | 2979 | } |
2980 | 2980 | ||
2981 | static u8 mlx5_get_umr_fence(u8 umr_fence_cap) | ||
2982 | { | ||
2983 | switch (umr_fence_cap) { | ||
2984 | case MLX5_CAP_UMR_FENCE_NONE: | ||
2985 | return MLX5_FENCE_MODE_NONE; | ||
2986 | case MLX5_CAP_UMR_FENCE_SMALL: | ||
2987 | return MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
2988 | default: | ||
2989 | return MLX5_FENCE_MODE_STRONG_ORDERING; | ||
2990 | } | ||
2991 | } | ||
2992 | |||
2981 | static int create_dev_resources(struct mlx5_ib_resources *devr) | 2993 | static int create_dev_resources(struct mlx5_ib_resources *devr) |
2982 | { | 2994 | { |
2983 | struct ib_srq_init_attr attr; | 2995 | struct ib_srq_init_attr attr; |
@@ -3692,6 +3704,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
3692 | 3704 | ||
3693 | mlx5_ib_internal_fill_odp_caps(dev); | 3705 | mlx5_ib_internal_fill_odp_caps(dev); |
3694 | 3706 | ||
3707 | dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); | ||
3708 | |||
3695 | if (MLX5_CAP_GEN(mdev, imaicl)) { | 3709 | if (MLX5_CAP_GEN(mdev, imaicl)) { |
3696 | dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; | 3710 | dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw; |
3697 | dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; | 3711 | dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw; |
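
mlx5_get_umr_fence() above maps the firmware's umr_fence capability onto the fence mode the driver will request for UMR-related work requests, and the result is cached in dev->umr_fence at probe time so the send path does not re-read the capability for every WQE. A sketch of the mapping with placeholder enumerators:

    /* Sketch of the capability-to-fence-mode translation; the enumerators are
     * placeholders, only the shape of the mapping mirrors the driver. */
    #include <stdio.h>

    enum cap  { CAP_FENCE_NONE, CAP_FENCE_SMALL, CAP_FENCE_STRONG };
    enum mode { MODE_NONE, MODE_INITIATOR_SMALL, MODE_STRONG_ORDERING };

    static enum mode get_umr_fence(enum cap c)
    {
        switch (c) {
        case CAP_FENCE_NONE:
            return MODE_NONE;
        case CAP_FENCE_SMALL:
            return MODE_INITIATOR_SMALL;
        default:                        /* unknown caps fall back to strongest */
            return MODE_STRONG_ORDERING;
        }
    }

    int main(void)
    {
        printf("none=%d small=%d strong=%d\n",
               get_umr_fence(CAP_FENCE_NONE),
               get_umr_fence(CAP_FENCE_SMALL),
               get_umr_fence(CAP_FENCE_STRONG));
        return 0;
    }
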
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index 38c877bc45e5..bdcf25410c99 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -349,7 +349,7 @@ struct mlx5_ib_qp { | |||
349 | struct mlx5_ib_wq rq; | 349 | struct mlx5_ib_wq rq; |
350 | 350 | ||
351 | u8 sq_signal_bits; | 351 | u8 sq_signal_bits; |
352 | u8 fm_cache; | 352 | u8 next_fence; |
353 | struct mlx5_ib_wq sq; | 353 | struct mlx5_ib_wq sq; |
354 | 354 | ||
355 | /* serialize qp state modifications | 355 | /* serialize qp state modifications |
@@ -654,6 +654,7 @@ struct mlx5_ib_dev { | |||
654 | struct mlx5_ib_port *port; | 654 | struct mlx5_ib_port *port; |
655 | struct mlx5_sq_bfreg bfreg; | 655 | struct mlx5_sq_bfreg bfreg; |
656 | struct mlx5_sq_bfreg fp_bfreg; | 656 | struct mlx5_sq_bfreg fp_bfreg; |
657 | u8 umr_fence; | ||
657 | }; | 658 | }; |
658 | 659 | ||
659 | static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) | 660 | static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq) |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index d17aad0f54c0..0889ff367c86 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16) | |||
3738 | } | 3738 | } |
3739 | } | 3739 | } |
3740 | 3740 | ||
3741 | static u8 get_fence(u8 fence, struct ib_send_wr *wr) | ||
3742 | { | ||
3743 | if (unlikely(wr->opcode == IB_WR_LOCAL_INV && | ||
3744 | wr->send_flags & IB_SEND_FENCE)) | ||
3745 | return MLX5_FENCE_MODE_STRONG_ORDERING; | ||
3746 | |||
3747 | if (unlikely(fence)) { | ||
3748 | if (wr->send_flags & IB_SEND_FENCE) | ||
3749 | return MLX5_FENCE_MODE_SMALL_AND_FENCE; | ||
3750 | else | ||
3751 | return fence; | ||
3752 | } else if (unlikely(wr->send_flags & IB_SEND_FENCE)) { | ||
3753 | return MLX5_FENCE_MODE_FENCE; | ||
3754 | } | ||
3755 | |||
3756 | return 0; | ||
3757 | } | ||
3758 | |||
3759 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, | 3741 | static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, |
3760 | struct mlx5_wqe_ctrl_seg **ctrl, | 3742 | struct mlx5_wqe_ctrl_seg **ctrl, |
3761 | struct ib_send_wr *wr, unsigned *idx, | 3743 | struct ib_send_wr *wr, unsigned *idx, |
@@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg, | |||
3784 | static void finish_wqe(struct mlx5_ib_qp *qp, | 3766 | static void finish_wqe(struct mlx5_ib_qp *qp, |
3785 | struct mlx5_wqe_ctrl_seg *ctrl, | 3767 | struct mlx5_wqe_ctrl_seg *ctrl, |
3786 | u8 size, unsigned idx, u64 wr_id, | 3768 | u8 size, unsigned idx, u64 wr_id, |
3787 | int nreq, u8 fence, u8 next_fence, | 3769 | int nreq, u8 fence, u32 mlx5_opcode) |
3788 | u32 mlx5_opcode) | ||
3789 | { | 3770 | { |
3790 | u8 opmod = 0; | 3771 | u8 opmod = 0; |
3791 | 3772 | ||
@@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp, | |||
3793 | mlx5_opcode | ((u32)opmod << 24)); | 3774 | mlx5_opcode | ((u32)opmod << 24)); |
3794 | ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); | 3775 | ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8)); |
3795 | ctrl->fm_ce_se |= fence; | 3776 | ctrl->fm_ce_se |= fence; |
3796 | qp->fm_cache = next_fence; | ||
3797 | if (unlikely(qp->wq_sig)) | 3777 | if (unlikely(qp->wq_sig)) |
3798 | ctrl->signature = wq_sig(ctrl); | 3778 | ctrl->signature = wq_sig(ctrl); |
3799 | 3779 | ||
@@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3853 | goto out; | 3833 | goto out; |
3854 | } | 3834 | } |
3855 | 3835 | ||
3856 | fence = qp->fm_cache; | ||
3857 | num_sge = wr->num_sge; | 3836 | num_sge = wr->num_sge; |
3858 | if (unlikely(num_sge > qp->sq.max_gs)) { | 3837 | if (unlikely(num_sge > qp->sq.max_gs)) { |
3859 | mlx5_ib_warn(dev, "\n"); | 3838 | mlx5_ib_warn(dev, "\n"); |
@@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3870 | goto out; | 3849 | goto out; |
3871 | } | 3850 | } |
3872 | 3851 | ||
3852 | if (wr->opcode == IB_WR_LOCAL_INV || | ||
3853 | wr->opcode == IB_WR_REG_MR) { | ||
3854 | fence = dev->umr_fence; | ||
3855 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
3856 | } else if (wr->send_flags & IB_SEND_FENCE) { | ||
3857 | if (qp->next_fence) | ||
3858 | fence = MLX5_FENCE_MODE_SMALL_AND_FENCE; | ||
3859 | else | ||
3860 | fence = MLX5_FENCE_MODE_FENCE; | ||
3861 | } else { | ||
3862 | fence = qp->next_fence; | ||
3863 | } | ||
3864 | |||
3873 | switch (ibqp->qp_type) { | 3865 | switch (ibqp->qp_type) { |
3874 | case IB_QPT_XRC_INI: | 3866 | case IB_QPT_XRC_INI: |
3875 | xrc = seg; | 3867 | xrc = seg; |
@@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3896 | goto out; | 3888 | goto out; |
3897 | 3889 | ||
3898 | case IB_WR_LOCAL_INV: | 3890 | case IB_WR_LOCAL_INV: |
3899 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
3900 | qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; | 3891 | qp->sq.wr_data[idx] = IB_WR_LOCAL_INV; |
3901 | ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); | 3892 | ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey); |
3902 | set_linv_wr(qp, &seg, &size); | 3893 | set_linv_wr(qp, &seg, &size); |
@@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3904 | break; | 3895 | break; |
3905 | 3896 | ||
3906 | case IB_WR_REG_MR: | 3897 | case IB_WR_REG_MR: |
3907 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
3908 | qp->sq.wr_data[idx] = IB_WR_REG_MR; | 3898 | qp->sq.wr_data[idx] = IB_WR_REG_MR; |
3909 | ctrl->imm = cpu_to_be32(reg_wr(wr)->key); | 3899 | ctrl->imm = cpu_to_be32(reg_wr(wr)->key); |
3910 | err = set_reg_wr(qp, reg_wr(wr), &seg, &size); | 3900 | err = set_reg_wr(qp, reg_wr(wr), &seg, &size); |
@@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3927 | goto out; | 3917 | goto out; |
3928 | } | 3918 | } |
3929 | 3919 | ||
3930 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, | 3920 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, |
3931 | nreq, get_fence(fence, wr), | 3921 | fence, MLX5_OPCODE_UMR); |
3932 | next_fence, MLX5_OPCODE_UMR); | ||
3933 | /* | 3922 | /* |
3934 | * SET_PSV WQEs are not signaled and solicited | 3923 | * SET_PSV WQEs are not signaled and solicited |
3935 | * on error | 3924 | * on error |
@@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3954 | goto out; | 3943 | goto out; |
3955 | } | 3944 | } |
3956 | 3945 | ||
3957 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, | 3946 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, |
3958 | nreq, get_fence(fence, wr), | 3947 | fence, MLX5_OPCODE_SET_PSV); |
3959 | next_fence, MLX5_OPCODE_SET_PSV); | ||
3960 | err = begin_wqe(qp, &seg, &ctrl, wr, | 3948 | err = begin_wqe(qp, &seg, &ctrl, wr, |
3961 | &idx, &size, nreq); | 3949 | &idx, &size, nreq); |
3962 | if (err) { | 3950 | if (err) { |
@@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3966 | goto out; | 3954 | goto out; |
3967 | } | 3955 | } |
3968 | 3956 | ||
3969 | next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; | ||
3970 | err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, | 3957 | err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire, |
3971 | mr->sig->psv_wire.psv_idx, &seg, | 3958 | mr->sig->psv_wire.psv_idx, &seg, |
3972 | &size); | 3959 | &size); |
@@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
3976 | goto out; | 3963 | goto out; |
3977 | } | 3964 | } |
3978 | 3965 | ||
3979 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, | 3966 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, |
3980 | nreq, get_fence(fence, wr), | 3967 | fence, MLX5_OPCODE_SET_PSV); |
3981 | next_fence, MLX5_OPCODE_SET_PSV); | 3968 | qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL; |
3982 | num_sge = 0; | 3969 | num_sge = 0; |
3983 | goto skip_psv; | 3970 | goto skip_psv; |
3984 | 3971 | ||
@@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
4089 | } | 4076 | } |
4090 | } | 4077 | } |
4091 | 4078 | ||
4092 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, | 4079 | qp->next_fence = next_fence; |
4093 | get_fence(fence, wr), next_fence, | 4080 | finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence, |
4094 | mlx5_ib_opcode[wr->opcode]); | 4081 | mlx5_ib_opcode[wr->opcode]); |
4095 | skip_psv: | 4082 | skip_psv: |
4096 | if (0) | 4083 | if (0) |
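
The qp.c rework drops get_fence() and the fm_cache field and instead picks the fence bits once, at the top of the per-WR loop in mlx5_ib_post_send(): LOCAL_INV and REG_MR use the device-wide umr_fence and force the next request to an initiator-small fence, a request with IB_SEND_FENCE escalates depending on whether a fence is already pending, and everything else simply inherits qp->next_fence, which is written back after finish_wqe(). A simplified, hedged sketch of that selection for a short stream of work requests:

    /* Simplified sketch of the per-WR fence selection; the enumerators and the
     * surrounding loop are illustrative, only the decision order follows the
     * rework above. */
    #include <stdio.h>

    enum { F_NONE = 0, F_INITIATOR_SMALL, F_SMALL_AND_FENCE, F_FENCE };
    enum { WR_SEND, WR_LOCAL_INV, WR_REG_MR };

    static int pick_fence(int opcode, int send_fence_flag, int umr_fence,
                          int qp_next_fence, int *next_fence)
    {
        if (opcode == WR_LOCAL_INV || opcode == WR_REG_MR) {
            *next_fence = F_INITIATOR_SMALL;  /* fence whatever comes next */
            return umr_fence;                 /* device-wide UMR fence mode */
        }
        if (send_fence_flag)
            return qp_next_fence ? F_SMALL_AND_FENCE : F_FENCE;
        return qp_next_fence;                 /* inherit any pending fence */
    }

    int main(void)
    {
        int qp_next_fence = F_NONE, next_fence = F_NONE;
        int opcodes[] = { WR_REG_MR, WR_SEND, WR_SEND };
        int flags[]   = { 0,         1,       0       };
        int i;

        for (i = 0; i < 3; i++) {
            int fence = pick_fence(opcodes[i], flags[i], F_INITIATOR_SMALL,
                                   qp_next_fence, &next_fence);
            qp_next_fence = next_fence;       /* qp->next_fence = next_fence */
            printf("wr %d: fence=%d next=%d\n", i, fence, qp_next_fence);
        }
        return 0;
    }
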
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index fb983df7c157..30b256a2c54e 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c | |||
@@ -610,7 +610,6 @@ static void build_mpa_v2(struct nes_cm_node *cm_node, | |||
610 | ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD; | 610 | ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD; |
611 | } | 611 | } |
612 | ctrl_ird |= IETF_PEER_TO_PEER; | 612 | ctrl_ird |= IETF_PEER_TO_PEER; |
613 | ctrl_ird |= IETF_FLPDU_ZERO_LEN; | ||
614 | 613 | ||
615 | switch (mpa_key) { | 614 | switch (mpa_key) { |
616 | case MPA_KEY_REQUEST: | 615 | case MPA_KEY_REQUEST: |
@@ -1826,7 +1825,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb) | |||
1826 | type = NES_CM_EVENT_CONNECTED; | 1825 | type = NES_CM_EVENT_CONNECTED; |
1827 | cm_node->state = NES_CM_STATE_TSA; | 1826 | cm_node->state = NES_CM_STATE_TSA; |
1828 | } | 1827 | } |
1829 | 1828 | send_ack(cm_node, NULL); | |
1830 | break; | 1829 | break; |
1831 | default: | 1830 | default: |
1832 | WARN_ON(1); | 1831 | WARN_ON(1); |
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c index 3d7705cec770..d86dbe814d98 100644 --- a/drivers/infiniband/hw/qedr/qedr_cm.c +++ b/drivers/infiniband/hw/qedr/qedr_cm.c | |||
@@ -270,11 +270,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev, | |||
270 | return rc; | 270 | return rc; |
271 | } | 271 | } |
272 | 272 | ||
273 | vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); | 273 | if (sgid_attr.ndev) { |
274 | if (vlan_id < VLAN_CFI_MASK) | 274 | vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev); |
275 | has_vlan = true; | 275 | if (vlan_id < VLAN_CFI_MASK) |
276 | if (sgid_attr.ndev) | 276 | has_vlan = true; |
277 | |||
277 | dev_put(sgid_attr.ndev); | 278 | dev_put(sgid_attr.ndev); |
279 | } | ||
278 | 280 | ||
279 | if (!memcmp(&sgid, &zgid, sizeof(sgid))) { | 281 | if (!memcmp(&sgid, &zgid, sizeof(sgid))) { |
280 | DP_ERR(dev, "gsi post send: GID not found GID index %d\n", | 282 | DP_ERR(dev, "gsi post send: GID not found GID index %d\n", |
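
The qedr_cm.c fix guards the VLAN lookup: sgid_attr.ndev may be NULL when the GID entry has no netdevice attached, so both rdma_vlan_dev_vlan_id() and the dev_put() now sit inside the NULL check. The shape of the fix as a stand-alone sketch with stand-in helpers:

    /* Sketch: query the VLAN id and drop the reference only when the GID
     * entry actually has a netdev; names and the 4096 limit are stand-ins
     * for the driver helpers and VLAN_CFI_MASK. */
    #include <stdbool.h>
    #include <stdio.h>

    struct netdev { int vlan_id; int refs; };

    static int  vlan_dev_vlan_id(struct netdev *nd) { return nd->vlan_id; }
    static void dev_put(struct netdev *nd)          { nd->refs--; }

    static bool pick_vlan(struct netdev *ndev, int *vlan_id)
    {
        bool has_vlan = false;

        if (ndev) {                         /* entry may have no netdev */
            *vlan_id = vlan_dev_vlan_id(ndev);
            if (*vlan_id < 4096)
                has_vlan = true;
            dev_put(ndev);
        }
        return has_vlan;
    }

    int main(void)
    {
        struct netdev nd = { 100, 1 };
        int vid = 0;

        printf("no ndev: has_vlan=%d\n", pick_vlan(NULL, &vid));
        printf("ndev:    has_vlan=%d vid=%d refs=%d\n",
               pick_vlan(&nd, &vid), vid, nd.refs);
        return 0;
    }
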
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index fc8b88514da5..4ddbcac5eabe 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -1956,8 +1956,10 @@ send_last: | |||
1956 | ret = qib_get_rwqe(qp, 1); | 1956 | ret = qib_get_rwqe(qp, 1); |
1957 | if (ret < 0) | 1957 | if (ret < 0) |
1958 | goto nack_op_err; | 1958 | goto nack_op_err; |
1959 | if (!ret) | 1959 | if (!ret) { |
1960 | rvt_put_ss(&qp->r_sge); | ||
1960 | goto rnr_nak; | 1961 | goto rnr_nak; |
1962 | } | ||
1961 | wc.ex.imm_data = ohdr->u.rc.imm_data; | 1963 | wc.ex.imm_data = ohdr->u.rc.imm_data; |
1962 | hdrsize += 4; | 1964 | hdrsize += 4; |
1963 | wc.wc_flags = IB_WC_WITH_IMM; | 1965 | wc.wc_flags = IB_WC_WITH_IMM; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c index 874b24366e4d..7871379342f4 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c | |||
@@ -178,7 +178,7 @@ static inline int ib_speed_enum_to_int(int speed) | |||
178 | static int ipoib_get_link_ksettings(struct net_device *netdev, | 178 | static int ipoib_get_link_ksettings(struct net_device *netdev, |
179 | struct ethtool_link_ksettings *cmd) | 179 | struct ethtool_link_ksettings *cmd) |
180 | { | 180 | { |
181 | struct ipoib_dev_priv *priv = netdev_priv(netdev); | 181 | struct ipoib_dev_priv *priv = ipoib_priv(netdev); |
182 | struct ib_port_attr attr; | 182 | struct ib_port_attr attr; |
183 | int ret, speed, width; | 183 | int ret, speed, width; |
184 | 184 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 2869d1adb1de..a115c0b7a310 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -1590,7 +1590,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev) | |||
1590 | wait_for_completion(&priv->ntbl.deleted); | 1590 | wait_for_completion(&priv->ntbl.deleted); |
1591 | } | 1591 | } |
1592 | 1592 | ||
1593 | void ipoib_dev_uninit_default(struct net_device *dev) | 1593 | static void ipoib_dev_uninit_default(struct net_device *dev) |
1594 | { | 1594 | { |
1595 | struct ipoib_dev_priv *priv = ipoib_priv(dev); | 1595 | struct ipoib_dev_priv *priv = ipoib_priv(dev); |
1596 | 1596 | ||
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index def723a5df29..2354c742caa1 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch) | |||
320 | ch->path.sgid = target->sgid; | 320 | ch->path.sgid = target->sgid; |
321 | ch->path.dgid = target->orig_dgid; | 321 | ch->path.dgid = target->orig_dgid; |
322 | ch->path.pkey = target->pkey; | 322 | ch->path.pkey = target->pkey; |
323 | sa_path_set_service_id(&ch->path, target->service_id); | 323 | ch->path.service_id = target->service_id; |
324 | 324 | ||
325 | return 0; | 325 | return 0; |
326 | } | 326 | } |
@@ -575,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch) | |||
575 | return 0; | 575 | return 0; |
576 | 576 | ||
577 | err_qp: | 577 | err_qp: |
578 | srp_destroy_qp(ch, qp); | 578 | ib_destroy_qp(qp); |
579 | 579 | ||
580 | err_send_cq: | 580 | err_send_cq: |
581 | ib_free_cq(send_cq); | 581 | ib_free_cq(send_cq); |
diff --git a/drivers/input/keyboard/tm2-touchkey.c b/drivers/input/keyboard/tm2-touchkey.c index 485900f953e0..abc266e40e17 100644 --- a/drivers/input/keyboard/tm2-touchkey.c +++ b/drivers/input/keyboard/tm2-touchkey.c | |||
@@ -213,7 +213,7 @@ static int tm2_touchkey_probe(struct i2c_client *client, | |||
213 | /* led device */ | 213 | /* led device */ |
214 | touchkey->led_dev.name = TM2_TOUCHKEY_DEV_NAME; | 214 | touchkey->led_dev.name = TM2_TOUCHKEY_DEV_NAME; |
215 | touchkey->led_dev.brightness = LED_FULL; | 215 | touchkey->led_dev.brightness = LED_FULL; |
216 | touchkey->led_dev.max_brightness = LED_FULL; | 216 | touchkey->led_dev.max_brightness = LED_ON; |
217 | touchkey->led_dev.brightness_set = tm2_touchkey_led_brightness_set; | 217 | touchkey->led_dev.brightness_set = tm2_touchkey_led_brightness_set; |
218 | 218 | ||
219 | error = devm_led_classdev_register(&client->dev, &touchkey->led_dev); | 219 | error = devm_led_classdev_register(&client->dev, &touchkey->led_dev); |
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index f11807db6979..400869e61a06 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c | |||
@@ -256,6 +256,42 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek, | |||
256 | return 0; | 256 | return 0; |
257 | } | 257 | } |
258 | 258 | ||
259 | #ifdef CONFIG_ACPI | ||
260 | static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek, | ||
261 | struct platform_device *pdev) | ||
262 | { | ||
263 | unsigned long long hrv = 0; | ||
264 | acpi_status status; | ||
265 | |||
266 | if (IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY) && | ||
267 | axp20x_pek->axp20x->variant == AXP288_ID) { | ||
268 | status = acpi_evaluate_integer(ACPI_HANDLE(pdev->dev.parent), | ||
269 | "_HRV", NULL, &hrv); | ||
270 | if (ACPI_FAILURE(status)) | ||
271 | dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n"); | ||
272 | |||
273 | /* | ||
274 | * On Cherry Trail platforms (hrv == 3), do not register the | ||
275 | * input device if there is an "INTCFD9" or "ACPI0011" gpio | ||
276 | * button ACPI device, as that handles the power button too, | ||
277 | * and otherwise we end up reporting all presses twice. | ||
278 | */ | ||
279 | if (hrv == 3 && (acpi_dev_present("INTCFD9", NULL, -1) || | ||
280 | acpi_dev_present("ACPI0011", NULL, -1))) | ||
281 | return false; | ||
282 | |||
283 | } | ||
284 | |||
285 | return true; | ||
286 | } | ||
287 | #else | ||
288 | static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek, | ||
289 | struct platform_device *pdev) | ||
290 | { | ||
291 | return true; | ||
292 | } | ||
293 | #endif | ||
294 | |||
259 | static int axp20x_pek_probe(struct platform_device *pdev) | 295 | static int axp20x_pek_probe(struct platform_device *pdev) |
260 | { | 296 | { |
261 | struct axp20x_pek *axp20x_pek; | 297 | struct axp20x_pek *axp20x_pek; |
@@ -268,13 +304,7 @@ static int axp20x_pek_probe(struct platform_device *pdev) | |||
268 | 304 | ||
269 | axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent); | 305 | axp20x_pek->axp20x = dev_get_drvdata(pdev->dev.parent); |
270 | 306 | ||
271 | /* | 307 | if (axp20x_pek_should_register_input(axp20x_pek, pdev)) { |
272 | * Do not register the input device if there is an "INTCFD9" | ||
273 | * gpio button ACPI device, that handles the power button too, | ||
274 | * and otherwise we end up reporting all presses twice. | ||
275 | */ | ||
276 | if (!acpi_dev_found("INTCFD9") || | ||
277 | !IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY)) { | ||
278 | error = axp20x_pek_probe_input_device(axp20x_pek, pdev); | 308 | error = axp20x_pek_probe_input_device(axp20x_pek, pdev); |
279 | if (error) | 309 | if (error) |
280 | return error; | 310 | return error; |
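
The axp20x-pek change moves the "should an input device be registered at all?" decision into axp20x_pek_should_register_input(): on an AXP288 PMIC it evaluates the ACPI _HRV object and, on Cherry Trail (hrv == 3) systems that also expose an INTCFD9 or ACPI0011 GPIO-button device, skips registration so power-button presses are not reported twice. A reduced sketch of the decision, with the ACPI lookups replaced by plain parameters:

    /* Reduced sketch of the registration decision; the parameters stand in
     * for the acpi_evaluate_integer("_HRV") and acpi_dev_present() lookups. */
    #include <stdbool.h>
    #include <stdio.h>

    #define CHERRY_TRAIL_HRV 3

    static bool should_register_input(bool is_axp288, unsigned long hrv,
                                      bool soc_button_dev_present)
    {
        if (is_axp288 && hrv == CHERRY_TRAIL_HRV && soc_button_dev_present)
            return false;       /* the GPIO button device reports it already */
        return true;
    }

    int main(void)
    {
        printf("CHT with soc button: %d\n",
               should_register_input(true, CHERRY_TRAIL_HRV, true));   /* 0 */
        printf("older PMIC revision: %d\n",
               should_register_input(true, 2, true));                  /* 1 */
        return 0;
    }
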
diff --git a/drivers/input/mouse/elan_i2c_i2c.c b/drivers/input/mouse/elan_i2c_i2c.c index a679e56c44cd..f431da07f861 100644 --- a/drivers/input/mouse/elan_i2c_i2c.c +++ b/drivers/input/mouse/elan_i2c_i2c.c | |||
@@ -554,32 +554,34 @@ static int elan_i2c_finish_fw_update(struct i2c_client *client, | |||
554 | struct completion *completion) | 554 | struct completion *completion) |
555 | { | 555 | { |
556 | struct device *dev = &client->dev; | 556 | struct device *dev = &client->dev; |
557 | long ret; | ||
558 | int error; | 557 | int error; |
559 | int len; | 558 | int len; |
560 | u8 buffer[ETP_I2C_INF_LENGTH]; | 559 | u8 buffer[ETP_I2C_REPORT_LEN]; |
560 | |||
561 | len = i2c_master_recv(client, buffer, ETP_I2C_REPORT_LEN); | ||
562 | if (len != ETP_I2C_REPORT_LEN) { | ||
563 | error = len < 0 ? len : -EIO; | ||
564 | dev_warn(dev, "failed to read I2C data after FW WDT reset: %d (%d)\n", | ||
565 | error, len); | ||
566 | } | ||
561 | 567 | ||
562 | reinit_completion(completion); | 568 | reinit_completion(completion); |
563 | enable_irq(client->irq); | 569 | enable_irq(client->irq); |
564 | 570 | ||
565 | error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET); | 571 | error = elan_i2c_write_cmd(client, ETP_I2C_STAND_CMD, ETP_I2C_RESET); |
566 | if (!error) | ||
567 | ret = wait_for_completion_interruptible_timeout(completion, | ||
568 | msecs_to_jiffies(300)); | ||
569 | disable_irq(client->irq); | ||
570 | |||
571 | if (error) { | 572 | if (error) { |
572 | dev_err(dev, "device reset failed: %d\n", error); | 573 | dev_err(dev, "device reset failed: %d\n", error); |
573 | return error; | 574 | } else if (!wait_for_completion_timeout(completion, |
574 | } else if (ret == 0) { | 575 | msecs_to_jiffies(300))) { |
575 | dev_err(dev, "timeout waiting for device reset\n"); | 576 | dev_err(dev, "timeout waiting for device reset\n"); |
576 | return -ETIMEDOUT; | 577 | error = -ETIMEDOUT; |
577 | } else if (ret < 0) { | ||
578 | error = ret; | ||
579 | dev_err(dev, "error waiting for device reset: %d\n", error); | ||
580 | return error; | ||
581 | } | 578 | } |
582 | 579 | ||
580 | disable_irq(client->irq); | ||
581 | |||
582 | if (error) | ||
583 | return error; | ||
584 | |||
583 | len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH); | 585 | len = i2c_master_recv(client, buffer, ETP_I2C_INF_LENGTH); |
584 | if (len != ETP_I2C_INF_LENGTH) { | 586 | if (len != ETP_I2C_INF_LENGTH) { |
585 | error = len < 0 ? len : -EIO; | 587 | error = len < 0 ? len : -EIO; |
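
elan_i2c_finish_fw_update() is restructured: it first reads one full report to drain whatever the firmware's watchdog reset left queued in the device, then issues the reset command and waits with the non-interruptible wait_for_completion_timeout() so a pending signal cannot abort the wait, and only disables the IRQ once the outcome is known. A control-flow sketch with the bus, IRQ and completion calls replaced by stubs:

    /* Control-flow sketch of the reworked finish-FW-update path; the I2C,
     * IRQ and completion primitives below are stubs, not the real API. */
    #include <stdio.h>

    static int drain_stale_report(void) { return 0; }  /* read one full report */
    static int send_reset_cmd(void)     { return 0; }
    static int wait_reset_done(void)    { return 1; }  /* 1 = done, 0 = timeout */

    static int finish_fw_update(void)
    {
        int error = 0;

        if (drain_stale_report() < 0)
            fprintf(stderr, "no stale report to drain (warn only)\n");

        /* enable_irq() would happen here */
        if (send_reset_cmd() != 0) {
            error = -1;
            fprintf(stderr, "device reset failed\n");
        } else if (!wait_reset_done()) {        /* plain timeout, no signals */
            error = -1;
            fprintf(stderr, "timeout waiting for device reset\n");
        }
        /* disable_irq() only after the outcome is known */

        return error;
    }

    int main(void)
    {
        return finish_fw_update() ? 1 : 0;
    }
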
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 131df9d3660f..16c30460ef04 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -176,6 +176,12 @@ static const char * const smbus_pnp_ids[] = { | |||
176 | NULL | 176 | NULL |
177 | }; | 177 | }; |
178 | 178 | ||
179 | static const char * const forcepad_pnp_ids[] = { | ||
180 | "SYN300D", | ||
181 | "SYN3014", | ||
182 | NULL | ||
183 | }; | ||
184 | |||
179 | /* | 185 | /* |
180 | * Send a command to the synaptics touchpad by special commands | 186 |
181 | */ | 187 | */ |
@@ -397,6 +403,8 @@ static int synaptics_query_hardware(struct psmouse *psmouse, | |||
397 | { | 403 | { |
398 | int error; | 404 | int error; |
399 | 405 | ||
406 | memset(info, 0, sizeof(*info)); | ||
407 | |||
400 | error = synaptics_identify(psmouse, info); | 408 | error = synaptics_identify(psmouse, info); |
401 | if (error) | 409 | if (error) |
402 | return error; | 410 | return error; |
@@ -480,13 +488,6 @@ static const struct min_max_quirk min_max_pnpid_table[] = { | |||
480 | { } | 488 | { } |
481 | }; | 489 | }; |
482 | 490 | ||
483 | /* This list has been kindly provided by Synaptics. */ | ||
484 | static const char * const forcepad_pnp_ids[] = { | ||
485 | "SYN300D", | ||
486 | "SYN3014", | ||
487 | NULL | ||
488 | }; | ||
489 | |||
490 | /***************************************************************************** | 491 | /***************************************************************************** |
491 | * Synaptics communications functions | 492 | * Synaptics communications functions |
492 | ****************************************************************************/ | 493 | ****************************************************************************/ |
@@ -1687,7 +1688,8 @@ enum { | |||
1687 | SYNAPTICS_INTERTOUCH_ON, | 1688 | SYNAPTICS_INTERTOUCH_ON, |
1688 | }; | 1689 | }; |
1689 | 1690 | ||
1690 | static int synaptics_intertouch = SYNAPTICS_INTERTOUCH_NOT_SET; | 1691 | static int synaptics_intertouch = IS_ENABLED(CONFIG_RMI4_SMB) ? |
1692 | SYNAPTICS_INTERTOUCH_NOT_SET : SYNAPTICS_INTERTOUCH_OFF; | ||
1691 | module_param_named(synaptics_intertouch, synaptics_intertouch, int, 0644); | 1693 | module_param_named(synaptics_intertouch, synaptics_intertouch, int, 0644); |
1692 | MODULE_PARM_DESC(synaptics_intertouch, "Use a secondary bus for the Synaptics device."); | 1694 | MODULE_PARM_DESC(synaptics_intertouch, "Use a secondary bus for the Synaptics device."); |
1693 | 1695 | ||
@@ -1737,8 +1739,16 @@ static int synaptics_setup_intertouch(struct psmouse *psmouse, | |||
1737 | 1739 | ||
1738 | if (synaptics_intertouch == SYNAPTICS_INTERTOUCH_NOT_SET) { | 1740 | if (synaptics_intertouch == SYNAPTICS_INTERTOUCH_NOT_SET) { |
1739 | if (!psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && | 1741 | if (!psmouse_matches_pnp_id(psmouse, topbuttonpad_pnp_ids) && |
1740 | !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids)) | 1742 | !psmouse_matches_pnp_id(psmouse, smbus_pnp_ids)) { |
1743 | |||
1744 | if (!psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids)) | ||
1745 | psmouse_info(psmouse, | ||
1746 | "Your touchpad (%s) says it can support a different bus. " | ||
1747 | "If i2c-hid and hid-rmi are not used, you might want to try setting psmouse.synaptics_intertouch to 1 and report this to linux-input@vger.kernel.org.\n", | ||
1748 | psmouse->ps2dev.serio->firmware_id); | ||
1749 | |||
1741 | return -ENXIO; | 1750 | return -ENXIO; |
1751 | } | ||
1742 | } | 1752 | } |
1743 | 1753 | ||
1744 | psmouse_info(psmouse, "Trying to set up SMBus access\n"); | 1754 | psmouse_info(psmouse, "Trying to set up SMBus access\n"); |
@@ -1810,6 +1820,15 @@ int synaptics_init(struct psmouse *psmouse) | |||
1810 | } | 1820 | } |
1811 | 1821 | ||
1812 | if (SYN_CAP_INTERTOUCH(info.ext_cap_0c)) { | 1822 | if (SYN_CAP_INTERTOUCH(info.ext_cap_0c)) { |
1823 | if ((!IS_ENABLED(CONFIG_RMI4_SMB) || | ||
1824 | !IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS)) && | ||
1825 | /* Forcepads need F21, which is not ready */ | ||
1826 | !psmouse_matches_pnp_id(psmouse, forcepad_pnp_ids)) { | ||
1827 | psmouse_warn(psmouse, | ||
1828 | "The touchpad can support a better bus than the too old PS/2 protocol. " | ||
1829 | "Make sure MOUSE_PS2_SYNAPTICS_SMBUS and RMI4_SMB are enabled to get a better touchpad experience.\n"); | ||
1830 | } | ||
1831 | |||
1813 | error = synaptics_setup_intertouch(psmouse, &info, true); | 1832 | error = synaptics_setup_intertouch(psmouse, &info, true); |
1814 | if (!error) | 1833 | if (!error) |
1815 | return PSMOUSE_SYNAPTICS_SMBUS; | 1834 | return PSMOUSE_SYNAPTICS_SMBUS; |
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c index 2302aef2b2d4..dd042a9b0aaa 100644 --- a/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/drivers/input/touchscreen/atmel_mxt_ts.c | |||
@@ -350,6 +350,7 @@ static bool mxt_object_readable(unsigned int type) | |||
350 | case MXT_TOUCH_KEYARRAY_T15: | 350 | case MXT_TOUCH_KEYARRAY_T15: |
351 | case MXT_TOUCH_PROXIMITY_T23: | 351 | case MXT_TOUCH_PROXIMITY_T23: |
352 | case MXT_TOUCH_PROXKEY_T52: | 352 | case MXT_TOUCH_PROXKEY_T52: |
353 | case MXT_TOUCH_MULTITOUCHSCREEN_T100: | ||
353 | case MXT_PROCI_GRIPFACE_T20: | 354 | case MXT_PROCI_GRIPFACE_T20: |
354 | case MXT_PROCG_NOISE_T22: | 355 | case MXT_PROCG_NOISE_T22: |
355 | case MXT_PROCI_ONETOUCH_T24: | 356 | case MXT_PROCI_ONETOUCH_T24: |
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c index 8cf8d8d5d4ef..f872817e81e4 100644 --- a/drivers/input/touchscreen/edt-ft5x06.c +++ b/drivers/input/touchscreen/edt-ft5x06.c | |||
@@ -471,7 +471,7 @@ static EDT_ATTR(gain, S_IWUSR | S_IRUGO, WORK_REGISTER_GAIN, | |||
471 | static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, | 471 | static EDT_ATTR(offset, S_IWUSR | S_IRUGO, WORK_REGISTER_OFFSET, |
472 | M09_REGISTER_OFFSET, 0, 31); | 472 | M09_REGISTER_OFFSET, 0, 31); |
473 | static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD, | 473 | static EDT_ATTR(threshold, S_IWUSR | S_IRUGO, WORK_REGISTER_THRESHOLD, |
474 | M09_REGISTER_THRESHOLD, 20, 80); | 474 | M09_REGISTER_THRESHOLD, 0, 80); |
475 | static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE, | 475 | static EDT_ATTR(report_rate, S_IWUSR | S_IRUGO, WORK_REGISTER_REPORT_RATE, |
476 | NO_REGISTER, 3, 14); | 476 | NO_REGISTER, 3, 14); |
477 | 477 | ||
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c index 813dd68a5c82..0dbcf105f7db 100644 --- a/drivers/input/touchscreen/silead.c +++ b/drivers/input/touchscreen/silead.c | |||
@@ -526,6 +526,7 @@ static int __maybe_unused silead_ts_suspend(struct device *dev) | |||
526 | { | 526 | { |
527 | struct i2c_client *client = to_i2c_client(dev); | 527 | struct i2c_client *client = to_i2c_client(dev); |
528 | 528 | ||
529 | disable_irq(client->irq); | ||
529 | silead_ts_set_power(client, SILEAD_POWER_OFF); | 530 | silead_ts_set_power(client, SILEAD_POWER_OFF); |
530 | return 0; | 531 | return 0; |
531 | } | 532 | } |
@@ -551,6 +552,8 @@ static int __maybe_unused silead_ts_resume(struct device *dev) | |||
551 | return -ENODEV; | 552 | return -ENODEV; |
552 | } | 553 | } |
553 | 554 | ||
555 | enable_irq(client->irq); | ||
556 | |||
554 | return 0; | 557 | return 0; |
555 | } | 558 | } |
556 | 559 | ||
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c index d07dd5196ffc..8aa158a09180 100644 --- a/drivers/isdn/i4l/isdn_ppp.c +++ b/drivers/isdn/i4l/isdn_ppp.c | |||
@@ -2364,7 +2364,7 @@ static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_s | |||
2364 | id); | 2364 | id); |
2365 | return NULL; | 2365 | return NULL; |
2366 | } else { | 2366 | } else { |
2367 | rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL); | 2367 | rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC); |
2368 | if (!rs) | 2368 | if (!rs) |
2369 | return NULL; | 2369 | return NULL; |
2370 | rs->state = CCPResetIdle; | 2370 | rs->state = CCPResetIdle; |
diff --git a/drivers/isdn/mISDN/stack.c b/drivers/isdn/mISDN/stack.c index 8b7faea2ddf8..422dced7c90a 100644 --- a/drivers/isdn/mISDN/stack.c +++ b/drivers/isdn/mISDN/stack.c | |||
@@ -75,7 +75,7 @@ send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb) | |||
75 | if (sk->sk_state != MISDN_BOUND) | 75 | if (sk->sk_state != MISDN_BOUND) |
76 | continue; | 76 | continue; |
77 | if (!cskb) | 77 | if (!cskb) |
78 | cskb = skb_copy(skb, GFP_KERNEL); | 78 | cskb = skb_copy(skb, GFP_ATOMIC); |
79 | if (!cskb) { | 79 | if (!cskb) { |
80 | printk(KERN_WARNING "%s no skb\n", __func__); | 80 | printk(KERN_WARNING "%s no skb\n", __func__); |
81 | break; | 81 | break; |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index bf7419a56454..f4eace5ea184 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -485,10 +485,10 @@ void bitmap_print_sb(struct bitmap *bitmap) | |||
485 | pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); | 485 | pr_debug(" magic: %08x\n", le32_to_cpu(sb->magic)); |
486 | pr_debug(" version: %d\n", le32_to_cpu(sb->version)); | 486 | pr_debug(" version: %d\n", le32_to_cpu(sb->version)); |
487 | pr_debug(" uuid: %08x.%08x.%08x.%08x\n", | 487 | pr_debug(" uuid: %08x.%08x.%08x.%08x\n", |
488 | *(__u32 *)(sb->uuid+0), | 488 | le32_to_cpu(*(__u32 *)(sb->uuid+0)), |
489 | *(__u32 *)(sb->uuid+4), | 489 | le32_to_cpu(*(__u32 *)(sb->uuid+4)), |
490 | *(__u32 *)(sb->uuid+8), | 490 | le32_to_cpu(*(__u32 *)(sb->uuid+8)), |
491 | *(__u32 *)(sb->uuid+12)); | 491 | le32_to_cpu(*(__u32 *)(sb->uuid+12))); |
492 | pr_debug(" events: %llu\n", | 492 | pr_debug(" events: %llu\n", |
493 | (unsigned long long) le64_to_cpu(sb->events)); | 493 | (unsigned long long) le64_to_cpu(sb->events)); |
494 | pr_debug("events cleared: %llu\n", | 494 | pr_debug("events cleared: %llu\n", |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index cd8139593ccd..840c1496b2b1 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
@@ -1334,7 +1334,7 @@ int dm_bufio_issue_flush(struct dm_bufio_client *c) | |||
1334 | { | 1334 | { |
1335 | struct dm_io_request io_req = { | 1335 | struct dm_io_request io_req = { |
1336 | .bi_op = REQ_OP_WRITE, | 1336 | .bi_op = REQ_OP_WRITE, |
1337 | .bi_op_flags = REQ_PREFLUSH, | 1337 | .bi_op_flags = REQ_PREFLUSH | REQ_SYNC, |
1338 | .mem.type = DM_IO_KMEM, | 1338 | .mem.type = DM_IO_KMEM, |
1339 | .mem.ptr.addr = NULL, | 1339 | .mem.ptr.addr = NULL, |
1340 | .client = c->dm_io, | 1340 | .client = c->dm_io, |
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index c7f7c8d76576..7910bfe50da4 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c | |||
@@ -783,7 +783,8 @@ static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsi | |||
783 | for (i = 0; i < commit_sections; i++) | 783 | for (i = 0; i < commit_sections; i++) |
784 | rw_section_mac(ic, commit_start + i, true); | 784 | rw_section_mac(ic, commit_start + i, true); |
785 | } | 785 | } |
786 | rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, commit_sections, &io_comp); | 786 | rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start, |
787 | commit_sections, &io_comp); | ||
787 | } else { | 788 | } else { |
788 | unsigned to_end; | 789 | unsigned to_end; |
789 | io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); | 790 | io_comp.in_flight = (atomic_t)ATOMIC_INIT(2); |
@@ -2374,21 +2375,6 @@ static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic) | |||
2374 | blk_queue_max_integrity_segments(disk->queue, UINT_MAX); | 2375 | blk_queue_max_integrity_segments(disk->queue, UINT_MAX); |
2375 | } | 2376 | } |
2376 | 2377 | ||
2377 | /* FIXME: use new kvmalloc */ | ||
2378 | static void *dm_integrity_kvmalloc(size_t size, gfp_t gfp) | ||
2379 | { | ||
2380 | void *ptr = NULL; | ||
2381 | |||
2382 | if (size <= PAGE_SIZE) | ||
2383 | ptr = kmalloc(size, GFP_KERNEL | gfp); | ||
2384 | if (!ptr && size <= KMALLOC_MAX_SIZE) | ||
2385 | ptr = kmalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | gfp); | ||
2386 | if (!ptr) | ||
2387 | ptr = __vmalloc(size, GFP_KERNEL | gfp, PAGE_KERNEL); | ||
2388 | |||
2389 | return ptr; | ||
2390 | } | ||
2391 | |||
2392 | static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl) | 2378 | static void dm_integrity_free_page_list(struct dm_integrity_c *ic, struct page_list *pl) |
2393 | { | 2379 | { |
2394 | unsigned i; | 2380 | unsigned i; |
@@ -2407,7 +2393,7 @@ static struct page_list *dm_integrity_alloc_page_list(struct dm_integrity_c *ic) | |||
2407 | struct page_list *pl; | 2393 | struct page_list *pl; |
2408 | unsigned i; | 2394 | unsigned i; |
2409 | 2395 | ||
2410 | pl = dm_integrity_kvmalloc(page_list_desc_size, __GFP_ZERO); | 2396 | pl = kvmalloc(page_list_desc_size, GFP_KERNEL | __GFP_ZERO); |
2411 | if (!pl) | 2397 | if (!pl) |
2412 | return NULL; | 2398 | return NULL; |
2413 | 2399 | ||
@@ -2437,7 +2423,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int | |||
2437 | struct scatterlist **sl; | 2423 | struct scatterlist **sl; |
2438 | unsigned i; | 2424 | unsigned i; |
2439 | 2425 | ||
2440 | sl = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), __GFP_ZERO); | 2426 | sl = kvmalloc(ic->journal_sections * sizeof(struct scatterlist *), GFP_KERNEL | __GFP_ZERO); |
2441 | if (!sl) | 2427 | if (!sl) |
2442 | return NULL; | 2428 | return NULL; |
2443 | 2429 | ||
@@ -2453,7 +2439,7 @@ static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_int | |||
2453 | 2439 | ||
2454 | n_pages = (end_index - start_index + 1); | 2440 | n_pages = (end_index - start_index + 1); |
2455 | 2441 | ||
2456 | s = dm_integrity_kvmalloc(n_pages * sizeof(struct scatterlist), 0); | 2442 | s = kvmalloc(n_pages * sizeof(struct scatterlist), GFP_KERNEL); |
2457 | if (!s) { | 2443 | if (!s) { |
2458 | dm_integrity_free_journal_scatterlist(ic, sl); | 2444 | dm_integrity_free_journal_scatterlist(ic, sl); |
2459 | return NULL; | 2445 | return NULL; |
@@ -2617,7 +2603,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) | |||
2617 | goto bad; | 2603 | goto bad; |
2618 | } | 2604 | } |
2619 | 2605 | ||
2620 | sg = dm_integrity_kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), 0); | 2606 | sg = kvmalloc((ic->journal_pages + 1) * sizeof(struct scatterlist), GFP_KERNEL); |
2621 | if (!sg) { | 2607 | if (!sg) { |
2622 | *error = "Unable to allocate sg list"; | 2608 | *error = "Unable to allocate sg list"; |
2623 | r = -ENOMEM; | 2609 | r = -ENOMEM; |
@@ -2673,7 +2659,7 @@ static int create_journal(struct dm_integrity_c *ic, char **error) | |||
2673 | r = -ENOMEM; | 2659 | r = -ENOMEM; |
2674 | goto bad; | 2660 | goto bad; |
2675 | } | 2661 | } |
2676 | ic->sk_requests = dm_integrity_kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), __GFP_ZERO); | 2662 | ic->sk_requests = kvmalloc(ic->journal_sections * sizeof(struct skcipher_request *), GFP_KERNEL | __GFP_ZERO); |
2677 | if (!ic->sk_requests) { | 2663 | if (!ic->sk_requests) { |
2678 | *error = "Unable to allocate sk requests"; | 2664 | *error = "Unable to allocate sk requests"; |
2679 | r = -ENOMEM; | 2665 | r = -ENOMEM; |
@@ -2740,7 +2726,7 @@ retest_commit_id: | |||
2740 | r = -ENOMEM; | 2726 | r = -ENOMEM; |
2741 | goto bad; | 2727 | goto bad; |
2742 | } | 2728 | } |
2743 | ic->journal_tree = dm_integrity_kvmalloc(journal_tree_size, 0); | 2729 | ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL); |
2744 | if (!ic->journal_tree) { | 2730 | if (!ic->journal_tree) { |
2745 | *error = "Could not allocate memory for journal tree"; | 2731 | *error = "Could not allocate memory for journal tree"; |
2746 | r = -ENOMEM; | 2732 | r = -ENOMEM; |
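The removed dm_integrity_kvmalloc() open-coded a kmalloc-then-vmalloc fallback; the hunks above replace every call site with the generic kvmalloc() helper, which implements the same policy. A small hedged sketch of the calling convention, with illustrative names and sizes:

#include <linux/mm.h>		/* kvmalloc(), kvfree() */
#include <linux/scatterlist.h>

/* Sketch: allocate a zeroed array that may be too large for the slab
 * allocator.  kvmalloc() tries kmalloc() first and transparently falls
 * back to vmalloc(); kvfree() releases either outcome.
 */
static struct scatterlist **alloc_section_sl(unsigned int sections)
{
	struct scatterlist **sl;

	sl = kvmalloc(sections * sizeof(*sl), GFP_KERNEL | __GFP_ZERO);
	if (!sl)
		return NULL;

	/* ... fill in per-section scatterlists ... */
	return sl;
}

static void free_section_sl(struct scatterlist **sl)
{
	kvfree(sl);
}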
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 0555b4410e05..41852ae287a5 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c | |||
@@ -1710,12 +1710,13 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern | |||
1710 | } | 1710 | } |
1711 | 1711 | ||
1712 | /* | 1712 | /* |
1713 | * Try to avoid low memory issues when a device is suspended. | 1713 | * Use __GFP_HIGH to avoid low memory issues when a device is |
1714 | * suspended and the ioctl is needed to resume it. | ||
1714 | * Use kmalloc() rather than vmalloc() when we can. | 1715 | * Use kmalloc() rather than vmalloc() when we can. |
1715 | */ | 1716 | */ |
1716 | dmi = NULL; | 1717 | dmi = NULL; |
1717 | noio_flag = memalloc_noio_save(); | 1718 | noio_flag = memalloc_noio_save(); |
1718 | dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL); | 1719 | dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH); |
1719 | memalloc_noio_restore(noio_flag); | 1720 | memalloc_noio_restore(noio_flag); |
1720 | 1721 | ||
1721 | if (!dmi) { | 1722 | if (!dmi) { |
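The new comment explains why the ioctl buffer is allocated with __GFP_HIGH: the ioctl may be the only way to resume a suspended device, so the allocation must not recurse into I/O and is allowed to dip into reserves. A hedged sketch of the pattern as used above, with an illustrative helper name:

#include <linux/sched/mm.h>	/* memalloc_noio_save()/restore() */
#include <linux/mm.h>		/* kvmalloc(), kvfree() */

/* Sketch: allocate a buffer while device I/O may be blocked.  The NOIO
 * scope keeps reclaim from issuing I/O that could deadlock against the
 * suspended device; __GFP_HIGH lets the allocation use memory reserves
 * because it is needed to make forward progress.
 */
static void *alloc_resume_buffer(size_t size)
{
	unsigned int noio_flag;
	void *buf;

	noio_flag = memalloc_noio_save();
	buf = kvmalloc(size, GFP_KERNEL | __GFP_HIGH);
	memalloc_noio_restore(noio_flag);

	return buf;	/* caller frees with kvfree() */
}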
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index a95cbb80fb34..e61c45047c25 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -260,7 +260,7 @@ static int mirror_flush(struct dm_target *ti) | |||
260 | struct mirror *m; | 260 | struct mirror *m; |
261 | struct dm_io_request io_req = { | 261 | struct dm_io_request io_req = { |
262 | .bi_op = REQ_OP_WRITE, | 262 | .bi_op = REQ_OP_WRITE, |
263 | .bi_op_flags = REQ_PREFLUSH, | 263 | .bi_op_flags = REQ_PREFLUSH | REQ_SYNC, |
264 | .mem.type = DM_IO_KMEM, | 264 | .mem.type = DM_IO_KMEM, |
265 | .mem.ptr.addr = NULL, | 265 | .mem.ptr.addr = NULL, |
266 | .client = ms->io_client, | 266 | .client = ms->io_client, |
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index b93476c3ba3f..c5534d294773 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c | |||
@@ -741,7 +741,8 @@ static void persistent_commit_exception(struct dm_exception_store *store, | |||
741 | /* | 741 | /* |
742 | * Commit exceptions to disk. | 742 | * Commit exceptions to disk. |
743 | */ | 743 | */ |
744 | if (ps->valid && area_io(ps, REQ_OP_WRITE, REQ_PREFLUSH | REQ_FUA)) | 744 | if (ps->valid && area_io(ps, REQ_OP_WRITE, |
745 | REQ_PREFLUSH | REQ_FUA | REQ_SYNC)) | ||
745 | ps->valid = 0; | 746 | ps->valid = 0; |
746 | 747 | ||
747 | /* | 748 | /* |
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 97de961a3bfc..1ec9b2c51c07 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c | |||
@@ -166,7 +166,7 @@ static int verity_hash_init(struct dm_verity *v, struct ahash_request *req, | |||
166 | return r; | 166 | return r; |
167 | } | 167 | } |
168 | 168 | ||
169 | if (likely(v->version >= 1)) | 169 | if (likely(v->salt_size && (v->version >= 1))) |
170 | r = verity_hash_update(v, req, v->salt, v->salt_size, res); | 170 | r = verity_hash_update(v, req, v->salt, v->salt_size, res); |
171 | 171 | ||
172 | return r; | 172 | return r; |
@@ -177,7 +177,7 @@ static int verity_hash_final(struct dm_verity *v, struct ahash_request *req, | |||
177 | { | 177 | { |
178 | int r; | 178 | int r; |
179 | 179 | ||
180 | if (unlikely(!v->version)) { | 180 | if (unlikely(v->salt_size && (!v->version))) { |
181 | r = verity_hash_update(v, req, v->salt, v->salt_size, res); | 181 | r = verity_hash_update(v, req, v->salt, v->salt_size, res); |
182 | 182 | ||
183 | if (r < 0) { | 183 | if (r < 0) { |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6ef9500226c0..37ccd73c79ec 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -1657,7 +1657,7 @@ static struct mapped_device *alloc_dev(int minor) | |||
1657 | 1657 | ||
1658 | bio_init(&md->flush_bio, NULL, 0); | 1658 | bio_init(&md->flush_bio, NULL, 0); |
1659 | md->flush_bio.bi_bdev = md->bdev; | 1659 | md->flush_bio.bi_bdev = md->bdev; |
1660 | md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; | 1660 | md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; |
1661 | 1661 | ||
1662 | dm_stats_init(&md->stats); | 1662 | dm_stats_init(&md->stats); |
1663 | 1663 | ||
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index 7299ce2f08a8..03082e17c65c 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c | |||
@@ -1311,8 +1311,10 @@ static int add_new_disk(struct mddev *mddev, struct md_rdev *rdev) | |||
1311 | cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); | 1311 | cmsg.raid_slot = cpu_to_le32(rdev->desc_nr); |
1312 | lock_comm(cinfo, 1); | 1312 | lock_comm(cinfo, 1); |
1313 | ret = __sendmsg(cinfo, &cmsg); | 1313 | ret = __sendmsg(cinfo, &cmsg); |
1314 | if (ret) | 1314 | if (ret) { |
1315 | unlock_comm(cinfo); | ||
1315 | return ret; | 1316 | return ret; |
1317 | } | ||
1316 | cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; | 1318 | cinfo->no_new_dev_lockres->flags |= DLM_LKF_NOQUEUE; |
1317 | ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); | 1319 | ret = dlm_lock_sync(cinfo->no_new_dev_lockres, DLM_LOCK_EX); |
1318 | cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; | 1320 | cinfo->no_new_dev_lockres->flags &= ~DLM_LKF_NOQUEUE; |
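The fix above releases the communication lock when __sendmsg() fails instead of returning with it held. A generic hedged sketch of the rule being applied, with illustrative names (the driver's lock_comm()/unlock_comm() are not plain mutex calls):

#include <linux/mutex.h>

/* Sketch: keep lock acquire/release balanced on every return path. */
static int send_with_lock(struct mutex *comm_lock,
			  int (*sendmsg)(void *msg), void *msg)
{
	int ret;

	mutex_lock(comm_lock);
	ret = sendmsg(msg);
	if (ret) {
		mutex_unlock(comm_lock);	/* was missing on the error path */
		return ret;
	}

	/* ... further work under the lock ... */
	mutex_unlock(comm_lock);
	return 0;
}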
diff --git a/drivers/md/md.c b/drivers/md/md.c index 10367ffe92e3..212a6777ff31 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -765,7 +765,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, | |||
765 | test_bit(FailFast, &rdev->flags) && | 765 | test_bit(FailFast, &rdev->flags) && |
766 | !test_bit(LastDev, &rdev->flags)) | 766 | !test_bit(LastDev, &rdev->flags)) |
767 | ff = MD_FAILFAST; | 767 | ff = MD_FAILFAST; |
768 | bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_FUA | ff; | 768 | bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH | REQ_FUA | ff; |
769 | 769 | ||
770 | atomic_inc(&mddev->pending_writes); | 770 | atomic_inc(&mddev->pending_writes); |
771 | submit_bio(bio); | 771 | submit_bio(bio); |
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index 4c00bc248287..0a7af8b0a80a 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c | |||
@@ -1782,7 +1782,7 @@ static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, | |||
1782 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, | 1782 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, |
1783 | mb, PAGE_SIZE)); | 1783 | mb, PAGE_SIZE)); |
1784 | if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, | 1784 | if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, |
1785 | REQ_FUA, false)) { | 1785 | REQ_SYNC | REQ_FUA, false)) { |
1786 | __free_page(page); | 1786 | __free_page(page); |
1787 | return -EIO; | 1787 | return -EIO; |
1788 | } | 1788 | } |
@@ -2388,7 +2388,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, | |||
2388 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, | 2388 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, |
2389 | mb, PAGE_SIZE)); | 2389 | mb, PAGE_SIZE)); |
2390 | sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, | 2390 | sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, |
2391 | REQ_OP_WRITE, REQ_FUA, false); | 2391 | REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); |
2392 | sh->log_start = ctx->pos; | 2392 | sh->log_start = ctx->pos; |
2393 | list_add_tail(&sh->r5c, &log->stripe_in_journal_list); | 2393 | list_add_tail(&sh->r5c, &log->stripe_in_journal_list); |
2394 | atomic_inc(&log->stripe_in_journal_count); | 2394 | atomic_inc(&log->stripe_in_journal_count); |
diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 5d25bebf3328..ccce92e68d7f 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c | |||
@@ -907,8 +907,8 @@ static int ppl_write_empty_header(struct ppl_log *log) | |||
907 | pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE)); | 907 | pplhdr->checksum = cpu_to_le32(~crc32c_le(~0, pplhdr, PAGE_SIZE)); |
908 | 908 | ||
909 | if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, | 909 | if (!sync_page_io(rdev, rdev->ppl.sector - rdev->data_offset, |
910 | PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_FUA, 0, | 910 | PPL_HEADER_SIZE, page, REQ_OP_WRITE | REQ_SYNC | |
911 | false)) { | 911 | REQ_FUA, 0, false)) { |
912 | md_error(rdev->mddev, rdev); | 912 | md_error(rdev->mddev, rdev); |
913 | ret = -EIO; | 913 | ret = -EIO; |
914 | } | 914 | } |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 9c4f7659f8b1..722064689e82 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -4085,10 +4085,15 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, | |||
4085 | set_bit(STRIPE_INSYNC, &sh->state); | 4085 | set_bit(STRIPE_INSYNC, &sh->state); |
4086 | else { | 4086 | else { |
4087 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); | 4087 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); |
4088 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) | 4088 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { |
4089 | /* don't try to repair!! */ | 4089 | /* don't try to repair!! */ |
4090 | set_bit(STRIPE_INSYNC, &sh->state); | 4090 | set_bit(STRIPE_INSYNC, &sh->state); |
4091 | else { | 4091 | pr_warn_ratelimited("%s: mismatch sector in range " |
4092 | "%llu-%llu\n", mdname(conf->mddev), | ||
4093 | (unsigned long long) sh->sector, | ||
4094 | (unsigned long long) sh->sector + | ||
4095 | STRIPE_SECTORS); | ||
4096 | } else { | ||
4092 | sh->check_state = check_state_compute_run; | 4097 | sh->check_state = check_state_compute_run; |
4093 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); | 4098 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
4094 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); | 4099 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
@@ -4237,10 +4242,15 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, | |||
4237 | } | 4242 | } |
4238 | } else { | 4243 | } else { |
4239 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); | 4244 | atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); |
4240 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) | 4245 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) { |
4241 | /* don't try to repair!! */ | 4246 | /* don't try to repair!! */ |
4242 | set_bit(STRIPE_INSYNC, &sh->state); | 4247 | set_bit(STRIPE_INSYNC, &sh->state); |
4243 | else { | 4248 | pr_warn_ratelimited("%s: mismatch sector in range " |
4249 | "%llu-%llu\n", mdname(conf->mddev), | ||
4250 | (unsigned long long) sh->sector, | ||
4251 | (unsigned long long) sh->sector + | ||
4252 | STRIPE_SECTORS); | ||
4253 | } else { | ||
4244 | int *target = &sh->ops.target; | 4254 | int *target = &sh->ops.target; |
4245 | 4255 | ||
4246 | sh->ops.target = -1; | 4256 | sh->ops.target = -1; |
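Both hunks above now emit a rate-limited warning when a check pass finds a parity mismatch but MD_RECOVERY_CHECK forbids repair, so the affected sector range is at least visible in the log. A minimal hedged sketch of that logging call; 'mdname' here stands in for the array name string the driver gets from mdname(conf->mddev):

#include <linux/types.h>
#include <linux/printk.h>

/* Sketch: report a mismatched stripe without flooding the log. */
static void report_mismatch(const char *mdname, sector_t sector,
			    unsigned int sectors)
{
	pr_warn_ratelimited("%s: mismatch sector in range %llu-%llu\n",
			    mdname,
			    (unsigned long long)sector,
			    (unsigned long long)(sector + sectors));
}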
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index b72edd27f880..55d9c2b82b7e 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig | |||
@@ -2,6 +2,12 @@ | |||
2 | # Multimedia device configuration | 2 | # Multimedia device configuration |
3 | # | 3 | # |
4 | 4 | ||
5 | config CEC_CORE | ||
6 | tristate | ||
7 | |||
8 | config CEC_NOTIFIER | ||
9 | bool | ||
10 | |||
5 | menuconfig MEDIA_SUPPORT | 11 | menuconfig MEDIA_SUPPORT |
6 | tristate "Multimedia support" | 12 | tristate "Multimedia support" |
7 | depends on HAS_IOMEM | 13 | depends on HAS_IOMEM |
diff --git a/drivers/media/Makefile b/drivers/media/Makefile index 523fea3648ad..044503aa8801 100644 --- a/drivers/media/Makefile +++ b/drivers/media/Makefile | |||
@@ -4,8 +4,6 @@ | |||
4 | 4 | ||
5 | media-objs := media-device.o media-devnode.o media-entity.o | 5 | media-objs := media-device.o media-devnode.o media-entity.o |
6 | 6 | ||
7 | obj-$(CONFIG_CEC_CORE) += cec/ | ||
8 | |||
9 | # | 7 | # |
10 | # I2C drivers should come before other drivers, otherwise they'll fail | 8 | # I2C drivers should come before other drivers, otherwise they'll fail |
11 | # when compiled as builtin drivers | 9 | # when compiled as builtin drivers |
@@ -26,6 +24,8 @@ obj-$(CONFIG_DVB_CORE) += dvb-core/ | |||
26 | # There are both core and drivers at RC subtree - merge before drivers | 24 | # There are both core and drivers at RC subtree - merge before drivers |
27 | obj-y += rc/ | 25 | obj-y += rc/ |
28 | 26 | ||
27 | obj-$(CONFIG_CEC_CORE) += cec/ | ||
28 | |||
29 | # | 29 | # |
30 | # Finally, merge the drivers that require the core | 30 | # Finally, merge the drivers that require the core |
31 | # | 31 | # |
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig index f944d93e3167..4e25a950ae6f 100644 --- a/drivers/media/cec/Kconfig +++ b/drivers/media/cec/Kconfig | |||
@@ -1,19 +1,5 @@ | |||
1 | config CEC_CORE | ||
2 | tristate | ||
3 | depends on MEDIA_CEC_SUPPORT | ||
4 | default y | ||
5 | |||
6 | config MEDIA_CEC_NOTIFIER | ||
7 | bool | ||
8 | |||
9 | config MEDIA_CEC_RC | 1 | config MEDIA_CEC_RC |
10 | bool "HDMI CEC RC integration" | 2 | bool "HDMI CEC RC integration" |
11 | depends on CEC_CORE && RC_CORE | 3 | depends on CEC_CORE && RC_CORE |
12 | ---help--- | 4 | ---help--- |
13 | Pass on CEC remote control messages to the RC framework. | 5 | Pass on CEC remote control messages to the RC framework. |
14 | |||
15 | config MEDIA_CEC_DEBUG | ||
16 | bool "HDMI CEC debugfs interface" | ||
17 | depends on CEC_CORE && DEBUG_FS | ||
18 | ---help--- | ||
19 | Turns on the DebugFS interface for CEC devices. | ||
diff --git a/drivers/media/cec/Makefile b/drivers/media/cec/Makefile index 402a6c62a3e8..eaf408e64669 100644 --- a/drivers/media/cec/Makefile +++ b/drivers/media/cec/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o | 1 | cec-objs := cec-core.o cec-adap.o cec-api.o cec-edid.o |
2 | 2 | ||
3 | ifeq ($(CONFIG_MEDIA_CEC_NOTIFIER),y) | 3 | ifeq ($(CONFIG_CEC_NOTIFIER),y) |
4 | cec-objs += cec-notifier.o | 4 | cec-objs += cec-notifier.o |
5 | endif | 5 | endif |
6 | 6 | ||
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index f5fe01c9da8a..9dfc79800c71 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c | |||
@@ -1864,7 +1864,7 @@ void cec_monitor_all_cnt_dec(struct cec_adapter *adap) | |||
1864 | WARN_ON(call_op(adap, adap_monitor_all_enable, 0)); | 1864 | WARN_ON(call_op(adap, adap_monitor_all_enable, 0)); |
1865 | } | 1865 | } |
1866 | 1866 | ||
1867 | #ifdef CONFIG_MEDIA_CEC_DEBUG | 1867 | #ifdef CONFIG_DEBUG_FS |
1868 | /* | 1868 | /* |
1869 | * Log the current state of the CEC adapter. | 1869 | * Log the current state of the CEC adapter. |
1870 | * Very useful for debugging. | 1870 | * Very useful for debugging. |
diff --git a/drivers/media/cec/cec-core.c b/drivers/media/cec/cec-core.c index f9ebff90f8eb..2f87748ba4fc 100644 --- a/drivers/media/cec/cec-core.c +++ b/drivers/media/cec/cec-core.c | |||
@@ -187,7 +187,7 @@ static void cec_devnode_unregister(struct cec_devnode *devnode) | |||
187 | put_device(&devnode->dev); | 187 | put_device(&devnode->dev); |
188 | } | 188 | } |
189 | 189 | ||
190 | #ifdef CONFIG_MEDIA_CEC_NOTIFIER | 190 | #ifdef CONFIG_CEC_NOTIFIER |
191 | static void cec_cec_notify(struct cec_adapter *adap, u16 pa) | 191 | static void cec_cec_notify(struct cec_adapter *adap, u16 pa) |
192 | { | 192 | { |
193 | cec_s_phys_addr(adap, pa, false); | 193 | cec_s_phys_addr(adap, pa, false); |
@@ -323,7 +323,7 @@ int cec_register_adapter(struct cec_adapter *adap, | |||
323 | } | 323 | } |
324 | 324 | ||
325 | dev_set_drvdata(&adap->devnode.dev, adap); | 325 | dev_set_drvdata(&adap->devnode.dev, adap); |
326 | #ifdef CONFIG_MEDIA_CEC_DEBUG | 326 | #ifdef CONFIG_DEBUG_FS |
327 | if (!top_cec_dir) | 327 | if (!top_cec_dir) |
328 | return 0; | 328 | return 0; |
329 | 329 | ||
@@ -355,7 +355,7 @@ void cec_unregister_adapter(struct cec_adapter *adap) | |||
355 | adap->rc = NULL; | 355 | adap->rc = NULL; |
356 | #endif | 356 | #endif |
357 | debugfs_remove_recursive(adap->cec_dir); | 357 | debugfs_remove_recursive(adap->cec_dir); |
358 | #ifdef CONFIG_MEDIA_CEC_NOTIFIER | 358 | #ifdef CONFIG_CEC_NOTIFIER |
359 | if (adap->notifier) | 359 | if (adap->notifier) |
360 | cec_notifier_unregister(adap->notifier); | 360 | cec_notifier_unregister(adap->notifier); |
361 | #endif | 361 | #endif |
@@ -395,7 +395,7 @@ static int __init cec_devnode_init(void) | |||
395 | return ret; | 395 | return ret; |
396 | } | 396 | } |
397 | 397 | ||
398 | #ifdef CONFIG_MEDIA_CEC_DEBUG | 398 | #ifdef CONFIG_DEBUG_FS |
399 | top_cec_dir = debugfs_create_dir("cec", NULL); | 399 | top_cec_dir = debugfs_create_dir("cec", NULL); |
400 | if (IS_ERR_OR_NULL(top_cec_dir)) { | 400 | if (IS_ERR_OR_NULL(top_cec_dir)) { |
401 | pr_warn("cec: Failed to create debugfs cec dir\n"); | 401 | pr_warn("cec: Failed to create debugfs cec dir\n"); |
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig index fd181c99ce11..aaa9471c7d11 100644 --- a/drivers/media/i2c/Kconfig +++ b/drivers/media/i2c/Kconfig | |||
@@ -220,7 +220,8 @@ config VIDEO_ADV7604 | |||
220 | 220 | ||
221 | config VIDEO_ADV7604_CEC | 221 | config VIDEO_ADV7604_CEC |
222 | bool "Enable Analog Devices ADV7604 CEC support" | 222 | bool "Enable Analog Devices ADV7604 CEC support" |
223 | depends on VIDEO_ADV7604 && CEC_CORE | 223 | depends on VIDEO_ADV7604 |
224 | select CEC_CORE | ||
224 | ---help--- | 225 | ---help--- |
225 | When selected the adv7604 will support the optional | 226 | When selected the adv7604 will support the optional |
226 | HDMI CEC feature. | 227 | HDMI CEC feature. |
@@ -240,7 +241,8 @@ config VIDEO_ADV7842 | |||
240 | 241 | ||
241 | config VIDEO_ADV7842_CEC | 242 | config VIDEO_ADV7842_CEC |
242 | bool "Enable Analog Devices ADV7842 CEC support" | 243 | bool "Enable Analog Devices ADV7842 CEC support" |
243 | depends on VIDEO_ADV7842 && CEC_CORE | 244 | depends on VIDEO_ADV7842 |
245 | select CEC_CORE | ||
244 | ---help--- | 246 | ---help--- |
245 | When selected the adv7842 will support the optional | 247 | When selected the adv7842 will support the optional |
246 | HDMI CEC feature. | 248 | HDMI CEC feature. |
@@ -478,7 +480,8 @@ config VIDEO_ADV7511 | |||
478 | 480 | ||
479 | config VIDEO_ADV7511_CEC | 481 | config VIDEO_ADV7511_CEC |
480 | bool "Enable Analog Devices ADV7511 CEC support" | 482 | bool "Enable Analog Devices ADV7511 CEC support" |
481 | depends on VIDEO_ADV7511 && CEC_CORE | 483 | depends on VIDEO_ADV7511 |
484 | select CEC_CORE | ||
482 | ---help--- | 485 | ---help--- |
483 | When selected the adv7511 will support the optional | 486 | When selected the adv7511 will support the optional |
484 | HDMI CEC feature. | 487 | HDMI CEC feature. |
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index ac026ee1ca07..041cb80a26b1 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
@@ -501,8 +501,9 @@ if CEC_PLATFORM_DRIVERS | |||
501 | 501 | ||
502 | config VIDEO_SAMSUNG_S5P_CEC | 502 | config VIDEO_SAMSUNG_S5P_CEC |
503 | tristate "Samsung S5P CEC driver" | 503 | tristate "Samsung S5P CEC driver" |
504 | depends on CEC_CORE && (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) | 504 | depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST |
505 | select MEDIA_CEC_NOTIFIER | 505 | select CEC_CORE |
506 | select CEC_NOTIFIER | ||
506 | ---help--- | 507 | ---help--- |
507 | This is a driver for Samsung S5P HDMI CEC interface. It uses the | 508 | This is a driver for Samsung S5P HDMI CEC interface. It uses the |
508 | generic CEC framework interface. | 509 | generic CEC framework interface. |
@@ -511,8 +512,9 @@ config VIDEO_SAMSUNG_S5P_CEC | |||
511 | 512 | ||
512 | config VIDEO_STI_HDMI_CEC | 513 | config VIDEO_STI_HDMI_CEC |
513 | tristate "STMicroelectronics STiH4xx HDMI CEC driver" | 514 | tristate "STMicroelectronics STiH4xx HDMI CEC driver" |
514 | depends on CEC_CORE && (ARCH_STI || COMPILE_TEST) | 515 | depends on ARCH_STI || COMPILE_TEST |
515 | select MEDIA_CEC_NOTIFIER | 516 | select CEC_CORE |
517 | select CEC_NOTIFIER | ||
516 | ---help--- | 518 | ---help--- |
517 | This is a driver for STIH4xx HDMI CEC interface. It uses the | 519 | This is a driver for STIH4xx HDMI CEC interface. It uses the |
518 | generic CEC framework interface. | 520 | generic CEC framework interface. |
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c index 57a842ff3097..b7731b18ecae 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_if.c | |||
@@ -493,10 +493,10 @@ static int vdec_h264_get_param(unsigned long h_vdec, | |||
493 | } | 493 | } |
494 | 494 | ||
495 | static struct vdec_common_if vdec_h264_if = { | 495 | static struct vdec_common_if vdec_h264_if = { |
496 | vdec_h264_init, | 496 | .init = vdec_h264_init, |
497 | vdec_h264_decode, | 497 | .decode = vdec_h264_decode, |
498 | vdec_h264_get_param, | 498 | .get_param = vdec_h264_get_param, |
499 | vdec_h264_deinit, | 499 | .deinit = vdec_h264_deinit, |
500 | }; | 500 | }; |
501 | 501 | ||
502 | struct vdec_common_if *get_h264_dec_comm_if(void); | 502 | struct vdec_common_if *get_h264_dec_comm_if(void); |
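This hunk (and the vp8/vp9 hunks that follow) convert positional struct initializers to designated ones, so the ops table no longer depends on member order. A self-contained hedged sketch of the idiom with illustrative names:

/* Sketch: an ops table with designated initializers.  Unnamed members
 * are zero-initialized, and reordering the struct definition can no
 * longer silently bind callbacks to the wrong slots.
 */
struct decoder_ops {
	int  (*init)(unsigned long ctx);
	int  (*decode)(unsigned long ctx, void *bs);
	int  (*get_param)(unsigned long ctx, int type, void *out);
	void (*deinit)(unsigned long ctx);
};

static int  demo_init(unsigned long ctx)                      { return 0; }
static int  demo_decode(unsigned long ctx, void *bs)          { return 0; }
static int  demo_get_param(unsigned long ctx, int t, void *o) { return 0; }
static void demo_deinit(unsigned long ctx)                    { }

static const struct decoder_ops demo_ops = {
	.init      = demo_init,
	.decode    = demo_decode,
	.get_param = demo_get_param,
	.deinit    = demo_deinit,
};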
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c index 6e7a62ae0842..b9fad6a48879 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp8_if.c | |||
@@ -620,10 +620,10 @@ static void vdec_vp8_deinit(unsigned long h_vdec) | |||
620 | } | 620 | } |
621 | 621 | ||
622 | static struct vdec_common_if vdec_vp8_if = { | 622 | static struct vdec_common_if vdec_vp8_if = { |
623 | vdec_vp8_init, | 623 | .init = vdec_vp8_init, |
624 | vdec_vp8_decode, | 624 | .decode = vdec_vp8_decode, |
625 | vdec_vp8_get_param, | 625 | .get_param = vdec_vp8_get_param, |
626 | vdec_vp8_deinit, | 626 | .deinit = vdec_vp8_deinit, |
627 | }; | 627 | }; |
628 | 628 | ||
629 | struct vdec_common_if *get_vp8_dec_comm_if(void); | 629 | struct vdec_common_if *get_vp8_dec_comm_if(void); |
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c index 5539b1853f16..1daee1207469 100644 --- a/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c +++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_vp9_if.c | |||
@@ -979,10 +979,10 @@ static int vdec_vp9_get_param(unsigned long h_vdec, | |||
979 | } | 979 | } |
980 | 980 | ||
981 | static struct vdec_common_if vdec_vp9_if = { | 981 | static struct vdec_common_if vdec_vp9_if = { |
982 | vdec_vp9_init, | 982 | .init = vdec_vp9_init, |
983 | vdec_vp9_decode, | 983 | .decode = vdec_vp9_decode, |
984 | vdec_vp9_get_param, | 984 | .get_param = vdec_vp9_get_param, |
985 | vdec_vp9_deinit, | 985 | .deinit = vdec_vp9_deinit, |
986 | }; | 986 | }; |
987 | 987 | ||
988 | struct vdec_common_if *get_vp9_dec_comm_if(void); | 988 | struct vdec_common_if *get_vp9_dec_comm_if(void); |
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig index b36ac19dc6e4..154de92dd809 100644 --- a/drivers/media/platform/vivid/Kconfig +++ b/drivers/media/platform/vivid/Kconfig | |||
@@ -26,7 +26,8 @@ config VIDEO_VIVID | |||
26 | 26 | ||
27 | config VIDEO_VIVID_CEC | 27 | config VIDEO_VIVID_CEC |
28 | bool "Enable CEC emulation support" | 28 | bool "Enable CEC emulation support" |
29 | depends on VIDEO_VIVID && CEC_CORE | 29 | depends on VIDEO_VIVID |
30 | select CEC_CORE | ||
30 | ---help--- | 31 | ---help--- |
31 | When selected the vivid module will emulate the optional | 32 | When selected the vivid module will emulate the optional |
32 | HDMI CEC feature. | 33 | HDMI CEC feature. |
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c index 90f66dc7c0d7..a2fc1a1d58b0 100644 --- a/drivers/media/rc/rc-ir-raw.c +++ b/drivers/media/rc/rc-ir-raw.c | |||
@@ -211,7 +211,7 @@ EXPORT_SYMBOL_GPL(ir_raw_event_set_idle); | |||
211 | */ | 211 | */ |
212 | void ir_raw_event_handle(struct rc_dev *dev) | 212 | void ir_raw_event_handle(struct rc_dev *dev) |
213 | { | 213 | { |
214 | if (!dev->raw) | 214 | if (!dev->raw || !dev->raw->thread) |
215 | return; | 215 | return; |
216 | 216 | ||
217 | wake_up_process(dev->raw->thread); | 217 | wake_up_process(dev->raw->thread); |
@@ -490,6 +490,7 @@ int ir_raw_event_register(struct rc_dev *dev) | |||
490 | { | 490 | { |
491 | int rc; | 491 | int rc; |
492 | struct ir_raw_handler *handler; | 492 | struct ir_raw_handler *handler; |
493 | struct task_struct *thread; | ||
493 | 494 | ||
494 | if (!dev) | 495 | if (!dev) |
495 | return -EINVAL; | 496 | return -EINVAL; |
@@ -507,13 +508,15 @@ int ir_raw_event_register(struct rc_dev *dev) | |||
507 | * because the event is coming from userspace | 508 | * because the event is coming from userspace |
508 | */ | 509 | */ |
509 | if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { | 510 | if (dev->driver_type != RC_DRIVER_IR_RAW_TX) { |
510 | dev->raw->thread = kthread_run(ir_raw_event_thread, dev->raw, | 511 | thread = kthread_run(ir_raw_event_thread, dev->raw, "rc%u", |
511 | "rc%u", dev->minor); | 512 | dev->minor); |
512 | 513 | ||
513 | if (IS_ERR(dev->raw->thread)) { | 514 | if (IS_ERR(thread)) { |
514 | rc = PTR_ERR(dev->raw->thread); | 515 | rc = PTR_ERR(thread); |
515 | goto out; | 516 | goto out; |
516 | } | 517 | } |
518 | |||
519 | dev->raw->thread = thread; | ||
517 | } | 520 | } |
518 | 521 | ||
519 | mutex_lock(&ir_raw_handler_lock); | 522 | mutex_lock(&ir_raw_handler_lock); |
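The hunks above stop writing the kthread_run() result straight into dev->raw->thread: the pointer is published only after the IS_ERR() check, and ir_raw_event_handle() additionally refuses to wake a thread that was never created (e.g. on TX-only devices). A hedged sketch of that ordering, with illustrative helper names:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/err.h>

/* Sketch: validate before publish.  'worker_fn' and 'ctx' stand in for
 * the driver's thread function and its argument.
 */
static int start_worker(struct task_struct **slot,
			int (*worker_fn)(void *), void *ctx, unsigned int id)
{
	struct task_struct *thread;

	thread = kthread_run(worker_fn, ctx, "rc%u", id);
	if (IS_ERR(thread))
		return PTR_ERR(thread);	/* *slot stays NULL, never an ERR_PTR */

	*slot = thread;			/* publish only a valid thread */
	return 0;
}

static void kick_worker(struct task_struct *thread)
{
	if (!thread)			/* worker was never started */
		return;
	wake_up_process(thread);
}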
diff --git a/drivers/media/usb/pulse8-cec/Kconfig b/drivers/media/usb/pulse8-cec/Kconfig index 8937f3986a01..18ead44824ba 100644 --- a/drivers/media/usb/pulse8-cec/Kconfig +++ b/drivers/media/usb/pulse8-cec/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config USB_PULSE8_CEC | 1 | config USB_PULSE8_CEC |
2 | tristate "Pulse Eight HDMI CEC" | 2 | tristate "Pulse Eight HDMI CEC" |
3 | depends on USB_ACM && CEC_CORE | 3 | depends on USB_ACM |
4 | select CEC_CORE | ||
4 | select SERIO | 5 | select SERIO |
5 | select SERIO_SERPORT | 6 | select SERIO_SERPORT |
6 | ---help--- | 7 | ---help--- |
diff --git a/drivers/media/usb/rainshadow-cec/Kconfig b/drivers/media/usb/rainshadow-cec/Kconfig index 3eb86607efb8..030ef01b1ff0 100644 --- a/drivers/media/usb/rainshadow-cec/Kconfig +++ b/drivers/media/usb/rainshadow-cec/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | config USB_RAINSHADOW_CEC | 1 | config USB_RAINSHADOW_CEC |
2 | tristate "RainShadow Tech HDMI CEC" | 2 | tristate "RainShadow Tech HDMI CEC" |
3 | depends on USB_ACM && CEC_CORE | 3 | depends on USB_ACM |
4 | select CEC_CORE | ||
4 | select SERIO | 5 | select SERIO |
5 | select SERIO_SERPORT | 6 | select SERIO_SERPORT |
6 | ---help--- | 7 | ---help--- |
diff --git a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c index 541ca543f71f..71bd68548c9c 100644 --- a/drivers/media/usb/rainshadow-cec/rainshadow-cec.c +++ b/drivers/media/usb/rainshadow-cec/rainshadow-cec.c | |||
@@ -119,7 +119,7 @@ static void rain_irq_work_handler(struct work_struct *work) | |||
119 | 119 | ||
120 | while (true) { | 120 | while (true) { |
121 | unsigned long flags; | 121 | unsigned long flags; |
122 | bool exit_loop; | 122 | bool exit_loop = false; |
123 | char data; | 123 | char data; |
124 | 124 | ||
125 | spin_lock_irqsave(&rain->buf_lock, flags); | 125 | spin_lock_irqsave(&rain->buf_lock, flags); |
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h index c862cd4583cc..b8069eec18cb 100644 --- a/drivers/misc/sgi-xp/xp.h +++ b/drivers/misc/sgi-xp/xp.h | |||
@@ -309,6 +309,9 @@ static inline enum xp_retval | |||
309 | xpc_send(short partid, int ch_number, u32 flags, void *payload, | 309 | xpc_send(short partid, int ch_number, u32 flags, void *payload, |
310 | u16 payload_size) | 310 | u16 payload_size) |
311 | { | 311 | { |
312 | if (!xpc_interface.send) | ||
313 | return xpNotLoaded; | ||
314 | |||
312 | return xpc_interface.send(partid, ch_number, flags, payload, | 315 | return xpc_interface.send(partid, ch_number, flags, payload, |
313 | payload_size); | 316 | payload_size); |
314 | } | 317 | } |
@@ -317,6 +320,9 @@ static inline enum xp_retval | |||
317 | xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, | 320 | xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, |
318 | u16 payload_size, xpc_notify_func func, void *key) | 321 | u16 payload_size, xpc_notify_func func, void *key) |
319 | { | 322 | { |
323 | if (!xpc_interface.send_notify) | ||
324 | return xpNotLoaded; | ||
325 | |||
320 | return xpc_interface.send_notify(partid, ch_number, flags, payload, | 326 | return xpc_interface.send_notify(partid, ch_number, flags, payload, |
321 | payload_size, func, key); | 327 | payload_size, func, key); |
322 | } | 328 | } |
@@ -324,12 +330,16 @@ xpc_send_notify(short partid, int ch_number, u32 flags, void *payload, | |||
324 | static inline void | 330 | static inline void |
325 | xpc_received(short partid, int ch_number, void *payload) | 331 | xpc_received(short partid, int ch_number, void *payload) |
326 | { | 332 | { |
327 | return xpc_interface.received(partid, ch_number, payload); | 333 | if (xpc_interface.received) |
334 | xpc_interface.received(partid, ch_number, payload); | ||
328 | } | 335 | } |
329 | 336 | ||
330 | static inline enum xp_retval | 337 | static inline enum xp_retval |
331 | xpc_partid_to_nasids(short partid, void *nasids) | 338 | xpc_partid_to_nasids(short partid, void *nasids) |
332 | { | 339 | { |
340 | if (!xpc_interface.partid_to_nasids) | ||
341 | return xpNotLoaded; | ||
342 | |||
333 | return xpc_interface.partid_to_nasids(partid, nasids); | 343 | return xpc_interface.partid_to_nasids(partid, nasids); |
334 | } | 344 | } |
335 | 345 | ||
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c index 01be66d02ca8..6d7f557fd1c1 100644 --- a/drivers/misc/sgi-xp/xp_main.c +++ b/drivers/misc/sgi-xp/xp_main.c | |||
@@ -69,23 +69,9 @@ struct xpc_registration xpc_registrations[XPC_MAX_NCHANNELS]; | |||
69 | EXPORT_SYMBOL_GPL(xpc_registrations); | 69 | EXPORT_SYMBOL_GPL(xpc_registrations); |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Initialize the XPC interface to indicate that XPC isn't loaded. | 72 | * Initialize the XPC interface to NULL to indicate that XPC isn't loaded. |
73 | */ | 73 | */ |
74 | static enum xp_retval | 74 | struct xpc_interface xpc_interface = { }; |
75 | xpc_notloaded(void) | ||
76 | { | ||
77 | return xpNotLoaded; | ||
78 | } | ||
79 | |||
80 | struct xpc_interface xpc_interface = { | ||
81 | (void (*)(int))xpc_notloaded, | ||
82 | (void (*)(int))xpc_notloaded, | ||
83 | (enum xp_retval(*)(short, int, u32, void *, u16))xpc_notloaded, | ||
84 | (enum xp_retval(*)(short, int, u32, void *, u16, xpc_notify_func, | ||
85 | void *))xpc_notloaded, | ||
86 | (void (*)(short, int, void *))xpc_notloaded, | ||
87 | (enum xp_retval(*)(short, void *))xpc_notloaded | ||
88 | }; | ||
89 | EXPORT_SYMBOL_GPL(xpc_interface); | 75 | EXPORT_SYMBOL_GPL(xpc_interface); |
90 | 76 | ||
91 | /* | 77 | /* |
@@ -115,17 +101,7 @@ EXPORT_SYMBOL_GPL(xpc_set_interface); | |||
115 | void | 101 | void |
116 | xpc_clear_interface(void) | 102 | xpc_clear_interface(void) |
117 | { | 103 | { |
118 | xpc_interface.connect = (void (*)(int))xpc_notloaded; | 104 | memset(&xpc_interface, 0, sizeof(xpc_interface)); |
119 | xpc_interface.disconnect = (void (*)(int))xpc_notloaded; | ||
120 | xpc_interface.send = (enum xp_retval(*)(short, int, u32, void *, u16)) | ||
121 | xpc_notloaded; | ||
122 | xpc_interface.send_notify = (enum xp_retval(*)(short, int, u32, void *, | ||
123 | u16, xpc_notify_func, | ||
124 | void *))xpc_notloaded; | ||
125 | xpc_interface.received = (void (*)(short, int, void *)) | ||
126 | xpc_notloaded; | ||
127 | xpc_interface.partid_to_nasids = (enum xp_retval(*)(short, void *)) | ||
128 | xpc_notloaded; | ||
129 | } | 105 | } |
130 | EXPORT_SYMBOL_GPL(xpc_clear_interface); | 106 | EXPORT_SYMBOL_GPL(xpc_clear_interface); |
131 | 107 | ||
@@ -188,7 +164,8 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, | |||
188 | 164 | ||
189 | mutex_unlock(®istration->mutex); | 165 | mutex_unlock(®istration->mutex); |
190 | 166 | ||
191 | xpc_interface.connect(ch_number); | 167 | if (xpc_interface.connect) |
168 | xpc_interface.connect(ch_number); | ||
192 | 169 | ||
193 | return xpSuccess; | 170 | return xpSuccess; |
194 | } | 171 | } |
@@ -237,7 +214,8 @@ xpc_disconnect(int ch_number) | |||
237 | registration->assigned_limit = 0; | 214 | registration->assigned_limit = 0; |
238 | registration->idle_limit = 0; | 215 | registration->idle_limit = 0; |
239 | 216 | ||
240 | xpc_interface.disconnect(ch_number); | 217 | if (xpc_interface.disconnect) |
218 | xpc_interface.disconnect(ch_number); | ||
241 | 219 | ||
242 | mutex_unlock(®istration->mutex); | 220 | mutex_unlock(®istration->mutex); |
243 | 221 | ||
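The xp.h and xp_main.c hunks drop the casted xpc_notloaded() stubs and leave xpc_interface zero-filled; every wrapper now tests the function pointer and returns xpNotLoaded (or does nothing) when XPC is not loaded. A standalone hedged sketch of the idea, reduced to two illustrative operations:

#include <stdio.h>
#include <string.h>

enum xp_retval { xpSuccess = 0, xpNotLoaded = 1 };

struct xp_ops {
	enum xp_retval (*send)(short partid, int ch, const void *payload);
	void (*received)(short partid, int ch, void *payload);
};

/* Zero-filled by default: "not loaded" is simply a NULL pointer,
 * instead of a stub function called through a mismatched cast. */
static struct xp_ops xp_ops;

static enum xp_retval xp_send(short partid, int ch, const void *payload)
{
	if (!xp_ops.send)
		return xpNotLoaded;
	return xp_ops.send(partid, ch, payload);
}

static void xp_clear_ops(void)
{
	memset(&xp_ops, 0, sizeof(xp_ops));
}

int main(void)
{
	printf("send -> %d (expect %d, not loaded)\n",
	       xp_send(0, 0, NULL), xpNotLoaded);
	xp_clear_ops();
	return 0;
}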
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index d474378ed810..b1dd12729f19 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c | |||
@@ -202,7 +202,7 @@ static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section, | |||
202 | return 0; | 202 | return 0; |
203 | } | 203 | } |
204 | 204 | ||
205 | const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { | 205 | static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = { |
206 | .ecc = nand_ooblayout_ecc_lp_hamming, | 206 | .ecc = nand_ooblayout_ecc_lp_hamming, |
207 | .free = nand_ooblayout_free_lp_hamming, | 207 | .free = nand_ooblayout_free_lp_hamming, |
208 | }; | 208 | }; |
@@ -4361,7 +4361,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, | |||
4361 | /* Initialize the ->data_interface field. */ | 4361 | /* Initialize the ->data_interface field. */ |
4362 | ret = nand_init_data_interface(chip); | 4362 | ret = nand_init_data_interface(chip); |
4363 | if (ret) | 4363 | if (ret) |
4364 | return ret; | 4364 | goto err_nand_init; |
4365 | 4365 | ||
4366 | /* | 4366 | /* |
4367 | * Setup the data interface correctly on the chip and controller side. | 4367 | * Setup the data interface correctly on the chip and controller side. |
@@ -4373,7 +4373,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, | |||
4373 | */ | 4373 | */ |
4374 | ret = nand_setup_data_interface(chip); | 4374 | ret = nand_setup_data_interface(chip); |
4375 | if (ret) | 4375 | if (ret) |
4376 | return ret; | 4376 | goto err_nand_init; |
4377 | 4377 | ||
4378 | nand_maf_id = chip->id.data[0]; | 4378 | nand_maf_id = chip->id.data[0]; |
4379 | nand_dev_id = chip->id.data[1]; | 4379 | nand_dev_id = chip->id.data[1]; |
@@ -4404,6 +4404,12 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, | |||
4404 | mtd->size = i * chip->chipsize; | 4404 | mtd->size = i * chip->chipsize; |
4405 | 4405 | ||
4406 | return 0; | 4406 | return 0; |
4407 | |||
4408 | err_nand_init: | ||
4409 | /* Free manufacturer priv data. */ | ||
4410 | nand_manufacturer_cleanup(chip); | ||
4411 | |||
4412 | return ret; | ||
4407 | } | 4413 | } |
4408 | EXPORT_SYMBOL(nand_scan_ident); | 4414 | EXPORT_SYMBOL(nand_scan_ident); |
4409 | 4415 | ||
@@ -4574,18 +4580,23 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
4574 | 4580 | ||
4575 | /* New bad blocks should be marked in OOB, flash-based BBT, or both */ | 4581 | /* New bad blocks should be marked in OOB, flash-based BBT, or both */ |
4576 | if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && | 4582 | if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) && |
4577 | !(chip->bbt_options & NAND_BBT_USE_FLASH))) | 4583 | !(chip->bbt_options & NAND_BBT_USE_FLASH))) { |
4578 | return -EINVAL; | 4584 | ret = -EINVAL; |
4585 | goto err_ident; | ||
4586 | } | ||
4579 | 4587 | ||
4580 | if (invalid_ecc_page_accessors(chip)) { | 4588 | if (invalid_ecc_page_accessors(chip)) { |
4581 | pr_err("Invalid ECC page accessors setup\n"); | 4589 | pr_err("Invalid ECC page accessors setup\n"); |
4582 | return -EINVAL; | 4590 | ret = -EINVAL; |
4591 | goto err_ident; | ||
4583 | } | 4592 | } |
4584 | 4593 | ||
4585 | if (!(chip->options & NAND_OWN_BUFFERS)) { | 4594 | if (!(chip->options & NAND_OWN_BUFFERS)) { |
4586 | nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL); | 4595 | nbuf = kzalloc(sizeof(*nbuf), GFP_KERNEL); |
4587 | if (!nbuf) | 4596 | if (!nbuf) { |
4588 | return -ENOMEM; | 4597 | ret = -ENOMEM; |
4598 | goto err_ident; | ||
4599 | } | ||
4589 | 4600 | ||
4590 | nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL); | 4601 | nbuf->ecccalc = kmalloc(mtd->oobsize, GFP_KERNEL); |
4591 | if (!nbuf->ecccalc) { | 4602 | if (!nbuf->ecccalc) { |
@@ -4608,8 +4619,10 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
4608 | 4619 | ||
4609 | chip->buffers = nbuf; | 4620 | chip->buffers = nbuf; |
4610 | } else { | 4621 | } else { |
4611 | if (!chip->buffers) | 4622 | if (!chip->buffers) { |
4612 | return -ENOMEM; | 4623 | ret = -ENOMEM; |
4624 | goto err_ident; | ||
4625 | } | ||
4613 | } | 4626 | } |
4614 | 4627 | ||
4615 | /* Set the internal oob buffer location, just after the page data */ | 4628 | /* Set the internal oob buffer location, just after the page data */ |
@@ -4842,7 +4855,11 @@ int nand_scan_tail(struct mtd_info *mtd) | |||
4842 | return 0; | 4855 | return 0; |
4843 | 4856 | ||
4844 | /* Build bad block table */ | 4857 | /* Build bad block table */ |
4845 | return chip->scan_bbt(mtd); | 4858 | ret = chip->scan_bbt(mtd); |
4859 | if (ret) | ||
4860 | goto err_free; | ||
4861 | return 0; | ||
4862 | |||
4846 | err_free: | 4863 | err_free: |
4847 | if (nbuf) { | 4864 | if (nbuf) { |
4848 | kfree(nbuf->databuf); | 4865 | kfree(nbuf->databuf); |
@@ -4850,6 +4867,13 @@ err_free: | |||
4850 | kfree(nbuf->ecccalc); | 4867 | kfree(nbuf->ecccalc); |
4851 | kfree(nbuf); | 4868 | kfree(nbuf); |
4852 | } | 4869 | } |
4870 | |||
4871 | err_ident: | ||
4872 | /* Clean up nand_scan_ident(). */ | ||
4873 | |||
4874 | /* Free manufacturer priv data. */ | ||
4875 | nand_manufacturer_cleanup(chip); | ||
4876 | |||
4853 | return ret; | 4877 | return ret; |
4854 | } | 4878 | } |
4855 | EXPORT_SYMBOL(nand_scan_tail); | 4879 | EXPORT_SYMBOL(nand_scan_tail); |
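The nand_scan_ident()/nand_scan_tail() hunks convert direct error returns into goto-based unwinding so that manufacturer private data allocated earlier is freed on every failure path. A standalone hedged sketch of the pattern, with setup_a()/setup_b() standing in for the identify and tail stages:

#include <stdlib.h>

/* Sketch: a single exit path that unwinds everything acquired so far. */
struct chip { void *priv_a; void *priv_b; };

static int setup_a(struct chip *c) { c->priv_a = malloc(32); return c->priv_a ? 0 : -1; }
static int setup_b(struct chip *c) { c->priv_b = malloc(64); return c->priv_b ? 0 : -1; }

static int chip_scan(struct chip *c)
{
	int ret;

	ret = setup_a(c);
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = setup_b(c);
	if (ret)
		goto err_free_a;	/* don't leak what setup_a() allocated */

	return 0;

err_free_a:
	free(c->priv_a);
	c->priv_a = NULL;
	return ret;
}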
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c index 9d5ca0e540b5..92e2cf8e9ff9 100644 --- a/drivers/mtd/nand/nand_ids.c +++ b/drivers/mtd/nand/nand_ids.c | |||
@@ -6,7 +6,6 @@ | |||
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | * | 7 | * |
8 | */ | 8 | */ |
9 | #include <linux/module.h> | ||
10 | #include <linux/mtd/nand.h> | 9 | #include <linux/mtd/nand.h> |
11 | #include <linux/sizes.h> | 10 | #include <linux/sizes.h> |
12 | 11 | ||
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c index 9cfc4035a420..1e0755997762 100644 --- a/drivers/mtd/nand/nand_samsung.c +++ b/drivers/mtd/nand/nand_samsung.c | |||
@@ -84,6 +84,9 @@ static void samsung_nand_decode_id(struct nand_chip *chip) | |||
84 | case 7: | 84 | case 7: |
85 | chip->ecc_strength_ds = 60; | 85 | chip->ecc_strength_ds = 60; |
86 | break; | 86 | break; |
87 | default: | ||
88 | WARN(1, "Could not decode ECC info"); | ||
89 | chip->ecc_step_ds = 0; | ||
87 | } | 90 | } |
88 | } | 91 | } |
89 | } else { | 92 | } else { |
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index 05b6e1065203..49b286c6c10f 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c | |||
@@ -55,10 +55,10 @@ | |||
55 | * byte 1 for other packets in the page (PKT_N, for N > 0) | 55 | * byte 1 for other packets in the page (PKT_N, for N > 0) |
56 | * ERR_COUNT_PKT_N is the max error count over all but the first packet. | 56 | * ERR_COUNT_PKT_N is the max error count over all but the first packet. |
57 | */ | 57 | */ |
58 | #define DECODE_OK_PKT_0(v) ((v) & BIT(7)) | ||
59 | #define DECODE_OK_PKT_N(v) ((v) & BIT(15)) | ||
60 | #define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f) | 58 | #define ERR_COUNT_PKT_0(v) (((v) >> 0) & 0x3f) |
61 | #define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f) | 59 | #define ERR_COUNT_PKT_N(v) (((v) >> 8) & 0x3f) |
60 | #define DECODE_FAIL_PKT_0(v) (((v) & BIT(7)) == 0) | ||
61 | #define DECODE_FAIL_PKT_N(v) (((v) & BIT(15)) == 0) | ||
62 | 62 | ||
63 | /* Offsets relative to pbus_base */ | 63 | /* Offsets relative to pbus_base */ |
64 | #define PBUS_CS_CTRL 0x83c | 64 | #define PBUS_CS_CTRL 0x83c |
@@ -193,6 +193,8 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf) | |||
193 | chip->ecc.strength); | 193 | chip->ecc.strength); |
194 | if (res < 0) | 194 | if (res < 0) |
195 | mtd->ecc_stats.failed++; | 195 | mtd->ecc_stats.failed++; |
196 | else | ||
197 | mtd->ecc_stats.corrected += res; | ||
196 | 198 | ||
197 | bitflips = max(res, bitflips); | 199 | bitflips = max(res, bitflips); |
198 | buf += pkt_size; | 200 | buf += pkt_size; |
@@ -202,9 +204,11 @@ static int check_erased_page(struct nand_chip *chip, u8 *buf) | |||
202 | return bitflips; | 204 | return bitflips; |
203 | } | 205 | } |
204 | 206 | ||
205 | static int decode_error_report(struct tango_nfc *nfc) | 207 | static int decode_error_report(struct nand_chip *chip) |
206 | { | 208 | { |
207 | u32 status, res; | 209 | u32 status, res; |
210 | struct mtd_info *mtd = nand_to_mtd(chip); | ||
211 | struct tango_nfc *nfc = to_tango_nfc(chip->controller); | ||
208 | 212 | ||
209 | status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS); | 213 | status = readl_relaxed(nfc->reg_base + NFC_XFER_STATUS); |
210 | if (status & PAGE_IS_EMPTY) | 214 | if (status & PAGE_IS_EMPTY) |
@@ -212,10 +216,14 @@ static int decode_error_report(struct tango_nfc *nfc) | |||
212 | 216 | ||
213 | res = readl_relaxed(nfc->mem_base + ERROR_REPORT); | 217 | res = readl_relaxed(nfc->mem_base + ERROR_REPORT); |
214 | 218 | ||
215 | if (DECODE_OK_PKT_0(res) && DECODE_OK_PKT_N(res)) | 219 | if (DECODE_FAIL_PKT_0(res) || DECODE_FAIL_PKT_N(res)) |
216 | return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); | 220 | return -EBADMSG; |
221 | |||
222 | /* ERR_COUNT_PKT_N is max, not sum, but that's all we have */ | ||
223 | mtd->ecc_stats.corrected += | ||
224 | ERR_COUNT_PKT_0(res) + ERR_COUNT_PKT_N(res); | ||
217 | 225 | ||
218 | return -EBADMSG; | 226 | return max(ERR_COUNT_PKT_0(res), ERR_COUNT_PKT_N(res)); |
219 | } | 227 | } |
220 | 228 | ||
221 | static void tango_dma_callback(void *arg) | 229 | static void tango_dma_callback(void *arg) |
@@ -282,7 +290,7 @@ static int tango_read_page(struct mtd_info *mtd, struct nand_chip *chip, | |||
282 | if (err) | 290 | if (err) |
283 | return err; | 291 | return err; |
284 | 292 | ||
285 | res = decode_error_report(nfc); | 293 | res = decode_error_report(chip); |
286 | if (res < 0) { | 294 | if (res < 0) { |
287 | chip->ecc.read_oob_raw(mtd, chip, page); | 295 | chip->ecc.read_oob_raw(mtd, chip, page); |
288 | res = check_erased_page(chip, buf); | 296 | res = check_erased_page(chip, buf); |
@@ -663,6 +671,7 @@ static const struct of_device_id tango_nand_ids[] = { | |||
663 | { .compatible = "sigma,smp8758-nand" }, | 671 | { .compatible = "sigma,smp8758-nand" }, |
664 | { /* sentinel */ } | 672 | { /* sentinel */ } |
665 | }; | 673 | }; |
674 | MODULE_DEVICE_TABLE(of, tango_nand_ids); | ||
666 | 675 | ||
667 | static struct platform_driver tango_nand_driver = { | 676 | static struct platform_driver tango_nand_driver = { |
668 | .probe = tango_nand_probe, | 677 | .probe = tango_nand_probe, |
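decode_error_report() now takes the nand_chip so it can account corrected bitflips in mtd->ecc_stats, and the inverted DECODE_FAIL macros bail out with -EBADMSG before any counting. A hedged sketch of that decision flow using the macros from this hunk; the register layout is assumed to be exactly as shown above, and the caller is expected to fold the return value into its bitflip count:

#include <linux/kernel.h>	/* max() */
#include <linux/types.h>
#include <linux/bitops.h>	/* BIT() */
#include <linux/errno.h>

#define ERR_COUNT_PKT_0(v)	(((v) >> 0) & 0x3f)
#define ERR_COUNT_PKT_N(v)	(((v) >> 8) & 0x3f)
#define DECODE_FAIL_PKT_0(v)	(((v) & BIT(7)) == 0)
#define DECODE_FAIL_PKT_N(v)	(((v) & BIT(15)) == 0)

/* Sketch: interpret one ERROR_REPORT word.  An uncorrectable packet
 * returns -EBADMSG so the caller can fall back to the erased-page
 * check; otherwise the per-packet counts are added to 'corrected'
 * (ERR_COUNT_PKT_N is a max over packets, not a sum, so this is an
 * approximation) and the max is returned as the bitflip count.
 */
static int decode_report(u32 report, unsigned int *corrected)
{
	if (DECODE_FAIL_PKT_0(report) || DECODE_FAIL_PKT_N(report))
		return -EBADMSG;

	*corrected += ERR_COUNT_PKT_0(report) + ERR_COUNT_PKT_N(report);
	return max(ERR_COUNT_PKT_0(report), ERR_COUNT_PKT_N(report));
}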
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h index 84a5ea69a4ac..479d42971706 100644 --- a/drivers/net/dsa/mv88e6xxx/global2.h +++ b/drivers/net/dsa/mv88e6xxx/global2.h | |||
@@ -217,13 +217,13 @@ static inline int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip, | |||
217 | return -EOPNOTSUPP; | 217 | return -EOPNOTSUPP; |
218 | } | 218 | } |
219 | 219 | ||
220 | int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, int src_dev, | 220 | static inline int mv88e6xxx_g2_pvt_write(struct mv88e6xxx_chip *chip, |
221 | int src_port, u16 data) | 221 | int src_dev, int src_port, u16 data) |
222 | { | 222 | { |
223 | return -EOPNOTSUPP; | 223 | return -EOPNOTSUPP; |
224 | } | 224 | } |
225 | 225 | ||
226 | int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) | 226 | static inline int mv88e6xxx_g2_misc_4_bit_port(struct mv88e6xxx_chip *chip) |
227 | { | 227 | { |
228 | return -EOPNOTSUPP; | 228 | return -EOPNOTSUPP; |
229 | } | 229 | } |
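The two stubs above were missing 'static inline', so every file including the header when GLOBAL2 support is compiled out got its own external definition; the fix makes them match the other stubs in the file. A hedged generic sketch of this compile-out convention, with a hypothetical CONFIG symbol and function name:

#include <linux/errno.h>

/* Sketch: header-provided fallbacks when a feature is compiled out.
 * 'static inline' keeps each stub local to the including translation
 * unit and avoids multiple-definition or missing-symbol link errors.
 */
#ifdef CONFIG_MY_FEATURE
int my_feature_write(int port, unsigned short data);
#else
static inline int my_feature_write(int port, unsigned short data)
{
	return -EOPNOTSUPP;	/* feature not built in */
}
#endif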
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c index b3bc87fe3764..0a98c369df20 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c | |||
@@ -324,7 +324,7 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, | |||
324 | struct xgbe_ring *ring, | 324 | struct xgbe_ring *ring, |
325 | struct xgbe_ring_data *rdata) | 325 | struct xgbe_ring_data *rdata) |
326 | { | 326 | { |
327 | int order, ret; | 327 | int ret; |
328 | 328 | ||
329 | if (!ring->rx_hdr_pa.pages) { | 329 | if (!ring->rx_hdr_pa.pages) { |
330 | ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); | 330 | ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); |
@@ -333,9 +333,8 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, | |||
333 | } | 333 | } |
334 | 334 | ||
335 | if (!ring->rx_buf_pa.pages) { | 335 | if (!ring->rx_buf_pa.pages) { |
336 | order = max_t(int, PAGE_ALLOC_COSTLY_ORDER - 1, 0); | ||
337 | ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, | 336 | ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, |
338 | order); | 337 | PAGE_ALLOC_COSTLY_ORDER); |
339 | if (ret) | 338 | if (ret) |
340 | return ret; | 339 | return ret; |
341 | } | 340 | } |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 099b374c1b17..5274501428e4 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -2026,9 +2026,12 @@ static int bcm_sysport_probe(struct platform_device *pdev) | |||
2026 | priv->num_rx_desc_words = params->num_rx_desc_words; | 2026 | priv->num_rx_desc_words = params->num_rx_desc_words; |
2027 | 2027 | ||
2028 | priv->irq0 = platform_get_irq(pdev, 0); | 2028 | priv->irq0 = platform_get_irq(pdev, 0); |
2029 | if (!priv->is_lite) | 2029 | if (!priv->is_lite) { |
2030 | priv->irq1 = platform_get_irq(pdev, 1); | 2030 | priv->irq1 = platform_get_irq(pdev, 1); |
2031 | priv->wol_irq = platform_get_irq(pdev, 2); | 2031 | priv->wol_irq = platform_get_irq(pdev, 2); |
2032 | } else { | ||
2033 | priv->wol_irq = platform_get_irq(pdev, 1); | ||
2034 | } | ||
2032 | if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { | 2035 | if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) { |
2033 | dev_err(&pdev->dev, "invalid interrupts\n"); | 2036 | dev_err(&pdev->dev, "invalid interrupts\n"); |
2034 | ret = -EINVAL; | 2037 | ret = -EINVAL; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index eccb3d1b6abb..5f49334dcad5 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | |||
@@ -1926,7 +1926,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, | |||
1926 | } | 1926 | } |
1927 | 1927 | ||
1928 | /* select a non-FCoE queue */ | 1928 | /* select a non-FCoE queue */ |
1929 | return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp); | 1929 | return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos); |
1930 | } | 1930 | } |
1931 | 1931 | ||
1932 | void bnx2x_set_num_queues(struct bnx2x *bp) | 1932 | void bnx2x_set_num_queues(struct bnx2x *bp) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 64af40662b3e..6c463703e072 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | |||
@@ -2196,10 +2196,14 @@ static int cxgb_up(struct adapter *adap) | |||
2196 | if (err) | 2196 | if (err) |
2197 | goto irq_err; | 2197 | goto irq_err; |
2198 | } | 2198 | } |
2199 | |||
2200 | mutex_lock(&uld_mutex); | ||
2199 | enable_rx(adap); | 2201 | enable_rx(adap); |
2200 | t4_sge_start(adap); | 2202 | t4_sge_start(adap); |
2201 | t4_intr_enable(adap); | 2203 | t4_intr_enable(adap); |
2202 | adap->flags |= FULL_INIT_DONE; | 2204 | adap->flags |= FULL_INIT_DONE; |
2205 | mutex_unlock(&uld_mutex); | ||
2206 | |||
2203 | notify_ulds(adap, CXGB4_STATE_UP); | 2207 | notify_ulds(adap, CXGB4_STATE_UP); |
2204 | #if IS_ENABLED(CONFIG_IPV6) | 2208 | #if IS_ENABLED(CONFIG_IPV6) |
2205 | update_clip(adap); | 2209 | update_clip(adap); |
@@ -2895,6 +2899,9 @@ void t4_fatal_err(struct adapter *adap) | |||
2895 | { | 2899 | { |
2896 | int port; | 2900 | int port; |
2897 | 2901 | ||
2902 | if (pci_channel_offline(adap->pdev)) | ||
2903 | return; | ||
2904 | |||
2898 | /* Disable the SGE since ULDs are going to free resources that | 2905 | /* Disable the SGE since ULDs are going to free resources that |
2899 | * could be exposed to the adapter. RDMA MWs for example... | 2906 | * could be exposed to the adapter. RDMA MWs for example... |
2900 | */ | 2907 | */ |
@@ -4006,9 +4013,10 @@ static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev, | |||
4006 | spin_lock(&adap->stats_lock); | 4013 | spin_lock(&adap->stats_lock); |
4007 | for_each_port(adap, i) { | 4014 | for_each_port(adap, i) { |
4008 | struct net_device *dev = adap->port[i]; | 4015 | struct net_device *dev = adap->port[i]; |
4009 | 4016 | if (dev) { | |
4010 | netif_device_detach(dev); | 4017 | netif_device_detach(dev); |
4011 | netif_carrier_off(dev); | 4018 | netif_carrier_off(dev); |
4019 | } | ||
4012 | } | 4020 | } |
4013 | spin_unlock(&adap->stats_lock); | 4021 | spin_unlock(&adap->stats_lock); |
4014 | disable_interrupts(adap); | 4022 | disable_interrupts(adap); |
@@ -4087,12 +4095,13 @@ static void eeh_resume(struct pci_dev *pdev) | |||
4087 | rtnl_lock(); | 4095 | rtnl_lock(); |
4088 | for_each_port(adap, i) { | 4096 | for_each_port(adap, i) { |
4089 | struct net_device *dev = adap->port[i]; | 4097 | struct net_device *dev = adap->port[i]; |
4090 | 4098 | if (dev) { | |
4091 | if (netif_running(dev)) { | 4099 | if (netif_running(dev)) { |
4092 | link_start(dev); | 4100 | link_start(dev); |
4093 | cxgb_set_rxmode(dev); | 4101 | cxgb_set_rxmode(dev); |
4102 | } | ||
4103 | netif_device_attach(dev); | ||
4094 | } | 4104 | } |
4095 | netif_device_attach(dev); | ||
4096 | } | 4105 | } |
4097 | rtnl_unlock(); | 4106 | rtnl_unlock(); |
4098 | } | 4107 | } |
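The cxgb4 hunks above take uld_mutex around the final bring-up so upper-layer drivers never observe a half-initialized adapter, skip register access once the PCI channel is offline, and NULL-check each port netdev in the EEH paths. A hedged sketch of the mutex-protected state publication only; 'adap_flags' and 'start_hw' are illustrative stand-ins for the adapter flag word and the enable sequence:

#include <linux/mutex.h>

static DEFINE_MUTEX(uld_mutex);

/* Sketch: publish an "adapter is up" flag only while holding the same
 * mutex the ULD attach path takes, so readers see either fully-down or
 * fully-up state, never something in between.
 */
static void bring_up(unsigned long *adap_flags, void (*start_hw)(void))
{
	mutex_lock(&uld_mutex);
	start_hw();			/* enable rx, SGE, interrupts */
	*adap_flags |= 1UL;		/* FULL_INIT_DONE equivalent */
	mutex_unlock(&uld_mutex);
}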
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 822c560fb310..4618185d6bc2 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | |||
@@ -4557,8 +4557,13 @@ void t4_intr_enable(struct adapter *adapter) | |||
4557 | */ | 4557 | */ |
4558 | void t4_intr_disable(struct adapter *adapter) | 4558 | void t4_intr_disable(struct adapter *adapter) |
4559 | { | 4559 | { |
4560 | u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A); | 4560 | u32 whoami, pf; |
4561 | u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? | 4561 | |
4562 | if (pci_channel_offline(adapter->pdev)) | ||
4563 | return; | ||
4564 | |||
4565 | whoami = t4_read_reg(adapter, PL_WHOAMI_A); | ||
4566 | pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ? | ||
4562 | SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); | 4567 | SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami); |
4563 | 4568 | ||
4564 | t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0); | 4569 | t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0); |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h index 3549d3876278..f2d623a7aee0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | |||
@@ -37,7 +37,7 @@ | |||
37 | 37 | ||
38 | #define T4FW_VERSION_MAJOR 0x01 | 38 | #define T4FW_VERSION_MAJOR 0x01 |
39 | #define T4FW_VERSION_MINOR 0x10 | 39 | #define T4FW_VERSION_MINOR 0x10 |
40 | #define T4FW_VERSION_MICRO 0x2B | 40 | #define T4FW_VERSION_MICRO 0x2D |
41 | #define T4FW_VERSION_BUILD 0x00 | 41 | #define T4FW_VERSION_BUILD 0x00 |
42 | 42 | ||
43 | #define T4FW_MIN_VERSION_MAJOR 0x01 | 43 | #define T4FW_MIN_VERSION_MAJOR 0x01 |
@@ -46,7 +46,7 @@ | |||
46 | 46 | ||
47 | #define T5FW_VERSION_MAJOR 0x01 | 47 | #define T5FW_VERSION_MAJOR 0x01 |
48 | #define T5FW_VERSION_MINOR 0x10 | 48 | #define T5FW_VERSION_MINOR 0x10 |
49 | #define T5FW_VERSION_MICRO 0x2B | 49 | #define T5FW_VERSION_MICRO 0x2D |
50 | #define T5FW_VERSION_BUILD 0x00 | 50 | #define T5FW_VERSION_BUILD 0x00 |
51 | 51 | ||
52 | #define T5FW_MIN_VERSION_MAJOR 0x00 | 52 | #define T5FW_MIN_VERSION_MAJOR 0x00 |
@@ -55,7 +55,7 @@ | |||
55 | 55 | ||
56 | #define T6FW_VERSION_MAJOR 0x01 | 56 | #define T6FW_VERSION_MAJOR 0x01 |
57 | #define T6FW_VERSION_MINOR 0x10 | 57 | #define T6FW_VERSION_MINOR 0x10 |
58 | #define T6FW_VERSION_MICRO 0x2B | 58 | #define T6FW_VERSION_MICRO 0x2D |
59 | #define T6FW_VERSION_BUILD 0x00 | 59 | #define T6FW_VERSION_BUILD 0x00 |
60 | 60 | ||
61 | #define T6FW_MIN_VERSION_MAJOR 0x00 | 61 | #define T6FW_MIN_VERSION_MAJOR 0x00 |
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index e863ba74d005..8bb0db990c8f 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c | |||
@@ -739,6 +739,8 @@ static int ethoc_open(struct net_device *dev) | |||
739 | if (ret) | 739 | if (ret) |
740 | return ret; | 740 | return ret; |
741 | 741 | ||
742 | napi_enable(&priv->napi); | ||
743 | |||
742 | ethoc_init_ring(priv, dev->mem_start); | 744 | ethoc_init_ring(priv, dev->mem_start); |
743 | ethoc_reset(priv); | 745 | ethoc_reset(priv); |
744 | 746 | ||
@@ -754,7 +756,6 @@ static int ethoc_open(struct net_device *dev) | |||
754 | priv->old_duplex = -1; | 756 | priv->old_duplex = -1; |
755 | 757 | ||
756 | phy_start(dev->phydev); | 758 | phy_start(dev->phydev); |
757 | napi_enable(&priv->napi); | ||
758 | 759 | ||
759 | if (netif_msg_ifup(priv)) { | 760 | if (netif_msg_ifup(priv)) { |
760 | dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", | 761 | dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n", |
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 446c7b374ff5..a10de1e9c157 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c | |||
@@ -381,7 +381,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) | |||
381 | { | 381 | { |
382 | const struct of_device_id *id = | 382 | const struct of_device_id *id = |
383 | of_match_device(fsl_pq_mdio_match, &pdev->dev); | 383 | of_match_device(fsl_pq_mdio_match, &pdev->dev); |
384 | const struct fsl_pq_mdio_data *data = id->data; | 384 | const struct fsl_pq_mdio_data *data; |
385 | struct device_node *np = pdev->dev.of_node; | 385 | struct device_node *np = pdev->dev.of_node; |
386 | struct resource res; | 386 | struct resource res; |
387 | struct device_node *tbi; | 387 | struct device_node *tbi; |
@@ -389,6 +389,13 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) | |||
389 | struct mii_bus *new_bus; | 389 | struct mii_bus *new_bus; |
390 | int err; | 390 | int err; |
391 | 391 | ||
392 | if (!id) { | ||
393 | dev_err(&pdev->dev, "Failed to match device\n"); | ||
394 | return -ENODEV; | ||
395 | } | ||
396 | |||
397 | data = id->data; | ||
398 | |||
392 | dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); | 399 | dev_dbg(&pdev->dev, "found %s compatible node\n", id->compatible); |
393 | 400 | ||
394 | new_bus = mdiobus_alloc_size(sizeof(*priv)); | 401 | new_bus = mdiobus_alloc_size(sizeof(*priv)); |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 8dcf58088178..7d84e20b4887 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -81,7 +81,7 @@ | |||
81 | static const char ibmvnic_driver_name[] = "ibmvnic"; | 81 | static const char ibmvnic_driver_name[] = "ibmvnic"; |
82 | static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; | 82 | static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver"; |
83 | 83 | ||
84 | MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>"); | 84 | MODULE_AUTHOR("Santiago Leon"); |
85 | MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); | 85 | MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver"); |
86 | MODULE_LICENSE("GPL"); | 86 | MODULE_LICENSE("GPL"); |
87 | MODULE_VERSION(IBMVNIC_DRIVER_VERSION); | 87 | MODULE_VERSION(IBMVNIC_DRIVER_VERSION); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e4eb97832413..5fef27ebfa52 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -295,7 +295,7 @@ struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) | |||
295 | **/ | 295 | **/ |
296 | void i40e_service_event_schedule(struct i40e_pf *pf) | 296 | void i40e_service_event_schedule(struct i40e_pf *pf) |
297 | { | 297 | { |
298 | if (!test_bit(__I40E_VSI_DOWN, pf->state) && | 298 | if (!test_bit(__I40E_DOWN, pf->state) && |
299 | !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) | 299 | !test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) |
300 | queue_work(i40e_wq, &pf->service_task); | 300 | queue_work(i40e_wq, &pf->service_task); |
301 | } | 301 | } |
@@ -3611,7 +3611,7 @@ static irqreturn_t i40e_intr(int irq, void *data) | |||
3611 | * this is not a performance path and napi_schedule() | 3611 | * this is not a performance path and napi_schedule() |
3612 | * can deal with rescheduling. | 3612 | * can deal with rescheduling. |
3613 | */ | 3613 | */ |
3614 | if (!test_bit(__I40E_VSI_DOWN, pf->state)) | 3614 | if (!test_bit(__I40E_DOWN, pf->state)) |
3615 | napi_schedule_irqoff(&q_vector->napi); | 3615 | napi_schedule_irqoff(&q_vector->napi); |
3616 | } | 3616 | } |
3617 | 3617 | ||
@@ -3687,7 +3687,7 @@ static irqreturn_t i40e_intr(int irq, void *data) | |||
3687 | enable_intr: | 3687 | enable_intr: |
3688 | /* re-enable interrupt causes */ | 3688 | /* re-enable interrupt causes */ |
3689 | wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); | 3689 | wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask); |
3690 | if (!test_bit(__I40E_VSI_DOWN, pf->state)) { | 3690 | if (!test_bit(__I40E_DOWN, pf->state)) { |
3691 | i40e_service_event_schedule(pf); | 3691 | i40e_service_event_schedule(pf); |
3692 | i40e_irq_dynamic_enable_icr0(pf, false); | 3692 | i40e_irq_dynamic_enable_icr0(pf, false); |
3693 | } | 3693 | } |
@@ -6203,7 +6203,7 @@ static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) | |||
6203 | { | 6203 | { |
6204 | 6204 | ||
6205 | /* if interface is down do nothing */ | 6205 | /* if interface is down do nothing */ |
6206 | if (test_bit(__I40E_VSI_DOWN, pf->state)) | 6206 | if (test_bit(__I40E_DOWN, pf->state)) |
6207 | return; | 6207 | return; |
6208 | 6208 | ||
6209 | if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) | 6209 | if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) |
@@ -6344,7 +6344,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf) | |||
6344 | int i; | 6344 | int i; |
6345 | 6345 | ||
6346 | /* if interface is down do nothing */ | 6346 | /* if interface is down do nothing */ |
6347 | if (test_bit(__I40E_VSI_DOWN, pf->state) || | 6347 | if (test_bit(__I40E_DOWN, pf->state) || |
6348 | test_bit(__I40E_CONFIG_BUSY, pf->state)) | 6348 | test_bit(__I40E_CONFIG_BUSY, pf->state)) |
6349 | return; | 6349 | return; |
6350 | 6350 | ||
@@ -6400,9 +6400,9 @@ static void i40e_reset_subtask(struct i40e_pf *pf) | |||
6400 | reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); | 6400 | reset_flags |= BIT(__I40E_GLOBAL_RESET_REQUESTED); |
6401 | clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); | 6401 | clear_bit(__I40E_GLOBAL_RESET_REQUESTED, pf->state); |
6402 | } | 6402 | } |
6403 | if (test_bit(__I40E_VSI_DOWN_REQUESTED, pf->state)) { | 6403 | if (test_bit(__I40E_DOWN_REQUESTED, pf->state)) { |
6404 | reset_flags |= BIT(__I40E_VSI_DOWN_REQUESTED); | 6404 | reset_flags |= BIT(__I40E_DOWN_REQUESTED); |
6405 | clear_bit(__I40E_VSI_DOWN_REQUESTED, pf->state); | 6405 | clear_bit(__I40E_DOWN_REQUESTED, pf->state); |
6406 | } | 6406 | } |
6407 | 6407 | ||
6408 | /* If there's a recovery already waiting, it takes | 6408 | /* If there's a recovery already waiting, it takes |
@@ -6416,7 +6416,7 @@ static void i40e_reset_subtask(struct i40e_pf *pf) | |||
6416 | 6416 | ||
6417 | /* If we're already down or resetting, just bail */ | 6417 | /* If we're already down or resetting, just bail */ |
6418 | if (reset_flags && | 6418 | if (reset_flags && |
6419 | !test_bit(__I40E_VSI_DOWN, pf->state) && | 6419 | !test_bit(__I40E_DOWN, pf->state) && |
6420 | !test_bit(__I40E_CONFIG_BUSY, pf->state)) { | 6420 | !test_bit(__I40E_CONFIG_BUSY, pf->state)) { |
6421 | rtnl_lock(); | 6421 | rtnl_lock(); |
6422 | i40e_do_reset(pf, reset_flags, true); | 6422 | i40e_do_reset(pf, reset_flags, true); |
@@ -7003,7 +7003,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) | |||
7003 | u32 val; | 7003 | u32 val; |
7004 | int v; | 7004 | int v; |
7005 | 7005 | ||
7006 | if (test_bit(__I40E_VSI_DOWN, pf->state)) | 7006 | if (test_bit(__I40E_DOWN, pf->state)) |
7007 | goto clear_recovery; | 7007 | goto clear_recovery; |
7008 | dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); | 7008 | dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); |
7009 | 7009 | ||
@@ -9768,7 +9768,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) | |||
9768 | return -ENODEV; | 9768 | return -ENODEV; |
9769 | } | 9769 | } |
9770 | if (vsi == pf->vsi[pf->lan_vsi] && | 9770 | if (vsi == pf->vsi[pf->lan_vsi] && |
9771 | !test_bit(__I40E_VSI_DOWN, pf->state)) { | 9771 | !test_bit(__I40E_DOWN, pf->state)) { |
9772 | dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); | 9772 | dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); |
9773 | return -ENODEV; | 9773 | return -ENODEV; |
9774 | } | 9774 | } |
@@ -11004,7 +11004,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
11004 | } | 11004 | } |
11005 | pf->next_vsi = 0; | 11005 | pf->next_vsi = 0; |
11006 | pf->pdev = pdev; | 11006 | pf->pdev = pdev; |
11007 | set_bit(__I40E_VSI_DOWN, pf->state); | 11007 | set_bit(__I40E_DOWN, pf->state); |
11008 | 11008 | ||
11009 | hw = &pf->hw; | 11009 | hw = &pf->hw; |
11010 | hw->back = pf; | 11010 | hw->back = pf; |
@@ -11294,7 +11294,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
11294 | * before setting up the misc vector or we get a race and the vector | 11294 | * before setting up the misc vector or we get a race and the vector |
11295 | * ends up disabled forever. | 11295 | * ends up disabled forever. |
11296 | */ | 11296 | */ |
11297 | clear_bit(__I40E_VSI_DOWN, pf->state); | 11297 | clear_bit(__I40E_DOWN, pf->state); |
11298 | 11298 | ||
11299 | /* In case of MSIX we are going to setup the misc vector right here | 11299 | /* In case of MSIX we are going to setup the misc vector right here |
11300 | * to handle admin queue events etc. In case of legacy and MSI | 11300 | * to handle admin queue events etc. In case of legacy and MSI |
@@ -11449,7 +11449,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
11449 | 11449 | ||
11450 | /* Unwind what we've done if something failed in the setup */ | 11450 | /* Unwind what we've done if something failed in the setup */ |
11451 | err_vsis: | 11451 | err_vsis: |
11452 | set_bit(__I40E_VSI_DOWN, pf->state); | 11452 | set_bit(__I40E_DOWN, pf->state); |
11453 | i40e_clear_interrupt_scheme(pf); | 11453 | i40e_clear_interrupt_scheme(pf); |
11454 | kfree(pf->vsi); | 11454 | kfree(pf->vsi); |
11455 | err_switch_setup: | 11455 | err_switch_setup: |
@@ -11501,7 +11501,7 @@ static void i40e_remove(struct pci_dev *pdev) | |||
11501 | 11501 | ||
11502 | /* no more scheduling of any task */ | 11502 | /* no more scheduling of any task */ |
11503 | set_bit(__I40E_SUSPENDED, pf->state); | 11503 | set_bit(__I40E_SUSPENDED, pf->state); |
11504 | set_bit(__I40E_VSI_DOWN, pf->state); | 11504 | set_bit(__I40E_DOWN, pf->state); |
11505 | if (pf->service_timer.data) | 11505 | if (pf->service_timer.data) |
11506 | del_timer_sync(&pf->service_timer); | 11506 | del_timer_sync(&pf->service_timer); |
11507 | if (pf->service_task.func) | 11507 | if (pf->service_task.func) |
@@ -11741,7 +11741,7 @@ static void i40e_shutdown(struct pci_dev *pdev) | |||
11741 | struct i40e_hw *hw = &pf->hw; | 11741 | struct i40e_hw *hw = &pf->hw; |
11742 | 11742 | ||
11743 | set_bit(__I40E_SUSPENDED, pf->state); | 11743 | set_bit(__I40E_SUSPENDED, pf->state); |
11744 | set_bit(__I40E_VSI_DOWN, pf->state); | 11744 | set_bit(__I40E_DOWN, pf->state); |
11745 | rtnl_lock(); | 11745 | rtnl_lock(); |
11746 | i40e_prep_for_reset(pf, true); | 11746 | i40e_prep_for_reset(pf, true); |
11747 | rtnl_unlock(); | 11747 | rtnl_unlock(); |
@@ -11790,7 +11790,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state) | |||
11790 | int retval = 0; | 11790 | int retval = 0; |
11791 | 11791 | ||
11792 | set_bit(__I40E_SUSPENDED, pf->state); | 11792 | set_bit(__I40E_SUSPENDED, pf->state); |
11793 | set_bit(__I40E_VSI_DOWN, pf->state); | 11793 | set_bit(__I40E_DOWN, pf->state); |
11794 | 11794 | ||
11795 | if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) | 11795 | if (pf->wol_en && (pf->flags & I40E_FLAG_WOL_MC_MAGIC_PKT_WAKE)) |
11796 | i40e_enable_mc_magic_wake(pf); | 11796 | i40e_enable_mc_magic_wake(pf); |
@@ -11842,7 +11842,7 @@ static int i40e_resume(struct pci_dev *pdev) | |||
11842 | 11842 | ||
11843 | /* handling the reset will rebuild the device state */ | 11843 | /* handling the reset will rebuild the device state */ |
11844 | if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { | 11844 | if (test_and_clear_bit(__I40E_SUSPENDED, pf->state)) { |
11845 | clear_bit(__I40E_VSI_DOWN, pf->state); | 11845 | clear_bit(__I40E_DOWN, pf->state); |
11846 | rtnl_lock(); | 11846 | rtnl_lock(); |
11847 | i40e_reset_and_rebuild(pf, false, true); | 11847 | i40e_reset_and_rebuild(pf, false, true); |
11848 | rtnl_unlock(); | 11848 | rtnl_unlock(); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index c2e9013d05eb..ddf885084c77 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -1854,7 +1854,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, | |||
1854 | #if (PAGE_SIZE < 8192) | 1854 | #if (PAGE_SIZE < 8192) |
1855 | unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; | 1855 | unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; |
1856 | #else | 1856 | #else |
1857 | unsigned int truesize = SKB_DATA_ALIGN(size); | 1857 | unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + |
1858 | SKB_DATA_ALIGN(I40E_SKB_PAD + size); | ||
1858 | #endif | 1859 | #endif |
1859 | struct sk_buff *skb; | 1860 | struct sk_buff *skb; |
1860 | 1861 | ||
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index dfe241a12ad0..12b02e530503 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c | |||
@@ -1190,7 +1190,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring, | |||
1190 | #if (PAGE_SIZE < 8192) | 1190 | #if (PAGE_SIZE < 8192) |
1191 | unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; | 1191 | unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2; |
1192 | #else | 1192 | #else |
1193 | unsigned int truesize = SKB_DATA_ALIGN(size); | 1193 | unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + |
1194 | SKB_DATA_ALIGN(I40E_SKB_PAD + size); | ||
1194 | #endif | 1195 | #endif |
1195 | struct sk_buff *skb; | 1196 | struct sk_buff *skb; |
1196 | 1197 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ae5fdc2df654..ffbcb27c05e5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | |||
@@ -1562,11 +1562,6 @@ static int mlx4_en_flow_replace(struct net_device *dev, | |||
1562 | qpn = priv->drop_qp.qpn; | 1562 | qpn = priv->drop_qp.qpn; |
1563 | else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { | 1563 | else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { |
1564 | qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); | 1564 | qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); |
1565 | if (qpn < priv->rss_map.base_qpn || | ||
1566 | qpn >= priv->rss_map.base_qpn + priv->rx_ring_num) { | ||
1567 | en_warn(priv, "rxnfc: QP (0x%x) doesn't exist\n", qpn); | ||
1568 | return -EINVAL; | ||
1569 | } | ||
1570 | } else { | 1565 | } else { |
1571 | if (cmd->fs.ring_cookie >= priv->rx_ring_num) { | 1566 | if (cmd->fs.ring_cookie >= priv->rx_ring_num) { |
1572 | en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", | 1567 | en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n", |
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index 1a670b681555..0710b3677464 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/etherdevice.h> | 35 | #include <linux/etherdevice.h> |
36 | 36 | ||
37 | #include <linux/mlx4/cmd.h> | 37 | #include <linux/mlx4/cmd.h> |
38 | #include <linux/mlx4/qp.h> | ||
38 | #include <linux/export.h> | 39 | #include <linux/export.h> |
39 | 40 | ||
40 | #include "mlx4.h" | 41 | #include "mlx4.h" |
@@ -985,16 +986,21 @@ int mlx4_flow_attach(struct mlx4_dev *dev, | |||
985 | if (IS_ERR(mailbox)) | 986 | if (IS_ERR(mailbox)) |
986 | return PTR_ERR(mailbox); | 987 | return PTR_ERR(mailbox); |
987 | 988 | ||
989 | if (!mlx4_qp_lookup(dev, rule->qpn)) { | ||
990 | mlx4_err_rule(dev, "QP doesn't exist\n", rule); | ||
991 | ret = -EINVAL; | ||
992 | goto out; | ||
993 | } | ||
994 | |||
988 | trans_rule_ctrl_to_hw(rule, mailbox->buf); | 995 | trans_rule_ctrl_to_hw(rule, mailbox->buf); |
989 | 996 | ||
990 | size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); | 997 | size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); |
991 | 998 | ||
992 | list_for_each_entry(cur, &rule->list, list) { | 999 | list_for_each_entry(cur, &rule->list, list) { |
993 | ret = parse_trans_rule(dev, cur, mailbox->buf + size); | 1000 | ret = parse_trans_rule(dev, cur, mailbox->buf + size); |
994 | if (ret < 0) { | 1001 | if (ret < 0) |
995 | mlx4_free_cmd_mailbox(dev, mailbox); | 1002 | goto out; |
996 | return ret; | 1003 | |
997 | } | ||
998 | size += ret; | 1004 | size += ret; |
999 | } | 1005 | } |
1000 | 1006 | ||
@@ -1021,6 +1027,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev, | |||
1021 | } | 1027 | } |
1022 | } | 1028 | } |
1023 | 1029 | ||
1030 | out: | ||
1024 | mlx4_free_cmd_mailbox(dev, mailbox); | 1031 | mlx4_free_cmd_mailbox(dev, mailbox); |
1025 | 1032 | ||
1026 | return ret; | 1033 | return ret; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 2d6abd4662b1..5a310d313e94 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c | |||
@@ -384,6 +384,19 @@ static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) | |||
384 | __mlx4_qp_free_icm(dev, qpn); | 384 | __mlx4_qp_free_icm(dev, qpn); |
385 | } | 385 | } |
386 | 386 | ||
387 | struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) | ||
388 | { | ||
389 | struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table; | ||
390 | struct mlx4_qp *qp; | ||
391 | |||
392 | spin_lock(&qp_table->lock); | ||
393 | |||
394 | qp = __mlx4_qp_lookup(dev, qpn); | ||
395 | |||
396 | spin_unlock(&qp_table->lock); | ||
397 | return qp; | ||
398 | } | ||
399 | |||
387 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) | 400 | int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) |
388 | { | 401 | { |
389 | struct mlx4_priv *priv = mlx4_priv(dev); | 402 | struct mlx4_priv *priv = mlx4_priv(dev); |
@@ -471,6 +484,12 @@ int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, | |||
471 | } | 484 | } |
472 | 485 | ||
473 | if (attr & MLX4_UPDATE_QP_QOS_VPORT) { | 486 | if (attr & MLX4_UPDATE_QP_QOS_VPORT) { |
487 | if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_QOS_VPP)) { | ||
488 | mlx4_warn(dev, "Granular QoS per VF is not enabled\n"); | ||
489 | err = -EOPNOTSUPP; | ||
490 | goto out; | ||
491 | } | ||
492 | |||
474 | qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; | 493 | qp_mask |= 1ULL << MLX4_UPD_QP_MASK_QOS_VPP; |
475 | cmd->qp_context.qos_vport = params->qos_vport; | 494 | cmd->qp_context.qos_vport = params->qos_vport; |
476 | } | 495 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 07516545474f..812783865205 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
@@ -5255,6 +5255,13 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) | |||
5255 | mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); | 5255 | mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); |
5256 | } | 5256 | } |
5257 | 5257 | ||
5258 | static void update_qos_vpp(struct mlx4_update_qp_context *ctx, | ||
5259 | struct mlx4_vf_immed_vlan_work *work) | ||
5260 | { | ||
5261 | ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP); | ||
5262 | ctx->qp_context.qos_vport = work->qos_vport; | ||
5263 | } | ||
5264 | |||
5258 | void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) | 5265 | void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) |
5259 | { | 5266 | { |
5260 | struct mlx4_vf_immed_vlan_work *work = | 5267 | struct mlx4_vf_immed_vlan_work *work = |
@@ -5369,11 +5376,10 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work) | |||
5369 | qp->sched_queue & 0xC7; | 5376 | qp->sched_queue & 0xC7; |
5370 | upd_context->qp_context.pri_path.sched_queue |= | 5377 | upd_context->qp_context.pri_path.sched_queue |= |
5371 | ((work->qos & 0x7) << 3); | 5378 | ((work->qos & 0x7) << 3); |
5372 | upd_context->qp_mask |= | 5379 | |
5373 | cpu_to_be64(1ULL << | 5380 | if (dev->caps.flags2 & |
5374 | MLX4_UPD_QP_MASK_QOS_VPP); | 5381 | MLX4_DEV_CAP_FLAG2_QOS_VPP) |
5375 | upd_context->qp_context.qos_vport = | 5382 | update_qos_vpp(upd_context, work); |
5376 | work->qos_vport; | ||
5377 | } | 5383 | } |
5378 | 5384 | ||
5379 | err = mlx4_cmd(dev, mailbox->dma, | 5385 | err = mlx4_cmd(dev, mailbox->dma, |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 361cd112bb5b..9274d93d3183 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -622,10 +622,9 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) | |||
622 | cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), | 622 | cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), |
623 | priv->irq_info[i].mask); | 623 | priv->irq_info[i].mask); |
624 | 624 | ||
625 | #ifdef CONFIG_SMP | 625 | if (IS_ENABLED(CONFIG_SMP) && |
626 | if (irq_set_affinity_hint(irq, priv->irq_info[i].mask)) | 626 | irq_set_affinity_hint(irq, priv->irq_info[i].mask)) |
627 | mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); | 627 | mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); |
628 | #endif | ||
629 | 628 | ||
630 | return 0; | 629 | return 0; |
631 | } | 630 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 9877d3e762fe..16cc30b11cce 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
@@ -1769,7 +1769,8 @@ void qed_get_protocol_stats(struct qed_dev *cdev, | |||
1769 | qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); | 1769 | qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); |
1770 | break; | 1770 | break; |
1771 | default: | 1771 | default: |
1772 | DP_ERR(cdev, "Invalid protocol type = %d\n", type); | 1772 | DP_VERBOSE(cdev, QED_MSG_SP, |
1773 | "Invalid protocol type = %d\n", type); | ||
1773 | return; | 1774 | return; |
1774 | } | 1775 | } |
1775 | } | 1776 | } |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 7245b1072518..81312924df14 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | |||
@@ -1824,22 +1824,44 @@ struct qlcnic_hardware_ops { | |||
1824 | u32 (*get_cap_size)(void *, int); | 1824 | u32 (*get_cap_size)(void *, int); |
1825 | void (*set_sys_info)(void *, int, u32); | 1825 | void (*set_sys_info)(void *, int, u32); |
1826 | void (*store_cap_mask)(void *, u32); | 1826 | void (*store_cap_mask)(void *, u32); |
1827 | bool (*encap_rx_offload) (struct qlcnic_adapter *adapter); | ||
1828 | bool (*encap_tx_offload) (struct qlcnic_adapter *adapter); | ||
1827 | }; | 1829 | }; |
1828 | 1830 | ||
1829 | extern struct qlcnic_nic_template qlcnic_vf_ops; | 1831 | extern struct qlcnic_nic_template qlcnic_vf_ops; |
1830 | 1832 | ||
1831 | static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) | 1833 | static inline bool qlcnic_83xx_encap_tx_offload(struct qlcnic_adapter *adapter) |
1832 | { | 1834 | { |
1833 | return adapter->ahw->extra_capability[0] & | 1835 | return adapter->ahw->extra_capability[0] & |
1834 | QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD; | 1836 | QLCNIC_83XX_FW_CAPAB_ENCAP_TX_OFFLOAD; |
1835 | } | 1837 | } |
1836 | 1838 | ||
1837 | static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) | 1839 | static inline bool qlcnic_83xx_encap_rx_offload(struct qlcnic_adapter *adapter) |
1838 | { | 1840 | { |
1839 | return adapter->ahw->extra_capability[0] & | 1841 | return adapter->ahw->extra_capability[0] & |
1840 | QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD; | 1842 | QLCNIC_83XX_FW_CAPAB_ENCAP_RX_OFFLOAD; |
1841 | } | 1843 | } |
1842 | 1844 | ||
1845 | static inline bool qlcnic_82xx_encap_tx_offload(struct qlcnic_adapter *adapter) | ||
1846 | { | ||
1847 | return false; | ||
1848 | } | ||
1849 | |||
1850 | static inline bool qlcnic_82xx_encap_rx_offload(struct qlcnic_adapter *adapter) | ||
1851 | { | ||
1852 | return false; | ||
1853 | } | ||
1854 | |||
1855 | static inline bool qlcnic_encap_rx_offload(struct qlcnic_adapter *adapter) | ||
1856 | { | ||
1857 | return adapter->ahw->hw_ops->encap_rx_offload(adapter); | ||
1858 | } | ||
1859 | |||
1860 | static inline bool qlcnic_encap_tx_offload(struct qlcnic_adapter *adapter) | ||
1861 | { | ||
1862 | return adapter->ahw->hw_ops->encap_tx_offload(adapter); | ||
1863 | } | ||
1864 | |||
1843 | static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) | 1865 | static inline int qlcnic_start_firmware(struct qlcnic_adapter *adapter) |
1844 | { | 1866 | { |
1845 | return adapter->nic_ops->start_firmware(adapter); | 1867 | return adapter->nic_ops->start_firmware(adapter); |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 4fb68797630e..f7080d0ab874 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -242,6 +242,8 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = { | |||
242 | .get_cap_size = qlcnic_83xx_get_cap_size, | 242 | .get_cap_size = qlcnic_83xx_get_cap_size, |
243 | .set_sys_info = qlcnic_83xx_set_sys_info, | 243 | .set_sys_info = qlcnic_83xx_set_sys_info, |
244 | .store_cap_mask = qlcnic_83xx_store_cap_mask, | 244 | .store_cap_mask = qlcnic_83xx_store_cap_mask, |
245 | .encap_rx_offload = qlcnic_83xx_encap_rx_offload, | ||
246 | .encap_tx_offload = qlcnic_83xx_encap_tx_offload, | ||
245 | }; | 247 | }; |
246 | 248 | ||
247 | static struct qlcnic_nic_template qlcnic_83xx_ops = { | 249 | static struct qlcnic_nic_template qlcnic_83xx_ops = { |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 838cc0ceafd8..7848cf04b29a 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | |||
@@ -341,7 +341,7 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg) | |||
341 | } | 341 | } |
342 | return -EIO; | 342 | return -EIO; |
343 | } | 343 | } |
344 | usleep_range(1000, 1500); | 344 | udelay(1200); |
345 | } | 345 | } |
346 | 346 | ||
347 | if (id_reg) | 347 | if (id_reg) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index b6628aaa6e4a..1b5f7d57b6f8 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -632,6 +632,8 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = { | |||
632 | .get_cap_size = qlcnic_82xx_get_cap_size, | 632 | .get_cap_size = qlcnic_82xx_get_cap_size, |
633 | .set_sys_info = qlcnic_82xx_set_sys_info, | 633 | .set_sys_info = qlcnic_82xx_set_sys_info, |
634 | .store_cap_mask = qlcnic_82xx_store_cap_mask, | 634 | .store_cap_mask = qlcnic_82xx_store_cap_mask, |
635 | .encap_rx_offload = qlcnic_82xx_encap_rx_offload, | ||
636 | .encap_tx_offload = qlcnic_82xx_encap_tx_offload, | ||
635 | }; | 637 | }; |
636 | 638 | ||
637 | static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) | 639 | static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter) |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 2f656f395f39..c58180f40844 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | |||
@@ -77,6 +77,8 @@ static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = { | |||
77 | .free_mac_list = qlcnic_sriov_vf_free_mac_list, | 77 | .free_mac_list = qlcnic_sriov_vf_free_mac_list, |
78 | .enable_sds_intr = qlcnic_83xx_enable_sds_intr, | 78 | .enable_sds_intr = qlcnic_83xx_enable_sds_intr, |
79 | .disable_sds_intr = qlcnic_83xx_disable_sds_intr, | 79 | .disable_sds_intr = qlcnic_83xx_disable_sds_intr, |
80 | .encap_rx_offload = qlcnic_83xx_encap_rx_offload, | ||
81 | .encap_tx_offload = qlcnic_83xx_encap_tx_offload, | ||
80 | }; | 82 | }; |
81 | 83 | ||
82 | static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { | 84 | static struct qlcnic_nic_template qlcnic_sriov_vf_ops = { |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index cc065ffbe4b5..bcd4708b3745 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c | |||
@@ -931,7 +931,7 @@ int emac_mac_up(struct emac_adapter *adpt) | |||
931 | emac_mac_config(adpt); | 931 | emac_mac_config(adpt); |
932 | emac_mac_rx_descs_refill(adpt, &adpt->rx_q); | 932 | emac_mac_rx_descs_refill(adpt, &adpt->rx_q); |
933 | 933 | ||
934 | adpt->phydev->irq = PHY_IGNORE_INTERRUPT; | 934 | adpt->phydev->irq = PHY_POLL; |
935 | ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, | 935 | ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, |
936 | PHY_INTERFACE_MODE_SGMII); | 936 | PHY_INTERFACE_MODE_SGMII); |
937 | if (ret) { | 937 | if (ret) { |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 441c19366489..18461fcb9815 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c | |||
@@ -13,15 +13,11 @@ | |||
13 | /* Qualcomm Technologies, Inc. EMAC PHY Controller driver. | 13 | /* Qualcomm Technologies, Inc. EMAC PHY Controller driver. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/of.h> | ||
18 | #include <linux/of_net.h> | ||
19 | #include <linux/of_mdio.h> | 16 | #include <linux/of_mdio.h> |
20 | #include <linux/phy.h> | 17 | #include <linux/phy.h> |
21 | #include <linux/iopoll.h> | 18 | #include <linux/iopoll.h> |
22 | #include <linux/acpi.h> | 19 | #include <linux/acpi.h> |
23 | #include "emac.h" | 20 | #include "emac.h" |
24 | #include "emac-mac.h" | ||
25 | 21 | ||
26 | /* EMAC base register offsets */ | 22 | /* EMAC base register offsets */ |
27 | #define EMAC_MDIO_CTRL 0x001414 | 23 | #define EMAC_MDIO_CTRL 0x001414 |
@@ -52,62 +48,10 @@ | |||
52 | 48 | ||
53 | #define MDIO_WAIT_TIMES 1000 | 49 | #define MDIO_WAIT_TIMES 1000 |
54 | 50 | ||
55 | #define EMAC_LINK_SPEED_DEFAULT (\ | ||
56 | EMAC_LINK_SPEED_10_HALF |\ | ||
57 | EMAC_LINK_SPEED_10_FULL |\ | ||
58 | EMAC_LINK_SPEED_100_HALF |\ | ||
59 | EMAC_LINK_SPEED_100_FULL |\ | ||
60 | EMAC_LINK_SPEED_1GB_FULL) | ||
61 | |||
62 | /** | ||
63 | * emac_phy_mdio_autopoll_disable() - disable mdio autopoll | ||
64 | * @adpt: the emac adapter | ||
65 | * | ||
66 | * The autopoll feature takes over the MDIO bus. In order for | ||
67 | * the PHY driver to be able to talk to the PHY over the MDIO | ||
68 | * bus, we need to temporarily disable the autopoll feature. | ||
69 | */ | ||
70 | static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt) | ||
71 | { | ||
72 | u32 val; | ||
73 | |||
74 | /* disable autopoll */ | ||
75 | emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0); | ||
76 | |||
77 | /* wait for any mdio polling to complete */ | ||
78 | if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val, | ||
79 | !(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100)) | ||
80 | return 0; | ||
81 | |||
82 | /* failed to disable; ensure it is enabled before returning */ | ||
83 | emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN); | ||
84 | |||
85 | return -EBUSY; | ||
86 | } | ||
87 | |||
88 | /** | ||
89 | * emac_phy_mdio_autopoll_enable() - enable mdio autopoll | ||
90 | * @adpt: the emac adapter | ||
91 | * | ||
92 | * The EMAC has the ability to poll the external PHY on the MDIO | ||
93 | * bus for link state changes. This eliminates the need for the | ||
94 | * driver to poll the phy. If the link state does change, | ||
95 | * the EMAC issues an interrupt on behalf of the PHY. | ||
96 | */ | ||
97 | static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt) | ||
98 | { | ||
99 | emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN); | ||
100 | } | ||
101 | |||
102 | static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) | 51 | static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) |
103 | { | 52 | { |
104 | struct emac_adapter *adpt = bus->priv; | 53 | struct emac_adapter *adpt = bus->priv; |
105 | u32 reg; | 54 | u32 reg; |
106 | int ret; | ||
107 | |||
108 | ret = emac_phy_mdio_autopoll_disable(adpt); | ||
109 | if (ret) | ||
110 | return ret; | ||
111 | 55 | ||
112 | emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, | 56 | emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, |
113 | (addr << PHY_ADDR_SHFT)); | 57 | (addr << PHY_ADDR_SHFT)); |
@@ -122,24 +66,15 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) | |||
122 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, | 66 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, |
123 | !(reg & (MDIO_START | MDIO_BUSY)), | 67 | !(reg & (MDIO_START | MDIO_BUSY)), |
124 | 100, MDIO_WAIT_TIMES * 100)) | 68 | 100, MDIO_WAIT_TIMES * 100)) |
125 | ret = -EIO; | 69 | return -EIO; |
126 | else | ||
127 | ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; | ||
128 | 70 | ||
129 | emac_phy_mdio_autopoll_enable(adpt); | 71 | return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; |
130 | |||
131 | return ret; | ||
132 | } | 72 | } |
133 | 73 | ||
134 | static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) | 74 | static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) |
135 | { | 75 | { |
136 | struct emac_adapter *adpt = bus->priv; | 76 | struct emac_adapter *adpt = bus->priv; |
137 | u32 reg; | 77 | u32 reg; |
138 | int ret; | ||
139 | |||
140 | ret = emac_phy_mdio_autopoll_disable(adpt); | ||
141 | if (ret) | ||
142 | return ret; | ||
143 | 78 | ||
144 | emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, | 79 | emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK, |
145 | (addr << PHY_ADDR_SHFT)); | 80 | (addr << PHY_ADDR_SHFT)); |
@@ -155,11 +90,9 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) | |||
155 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, | 90 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, |
156 | !(reg & (MDIO_START | MDIO_BUSY)), 100, | 91 | !(reg & (MDIO_START | MDIO_BUSY)), 100, |
157 | MDIO_WAIT_TIMES * 100)) | 92 | MDIO_WAIT_TIMES * 100)) |
158 | ret = -EIO; | 93 | return -EIO; |
159 | 94 | ||
160 | emac_phy_mdio_autopoll_enable(adpt); | 95 | return 0; |
161 | |||
162 | return ret; | ||
163 | } | 96 | } |
164 | 97 | ||
165 | /* Configure the MDIO bus and connect the external PHY */ | 98 | /* Configure the MDIO bus and connect the external PHY */ |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 28a8cdc36485..98a326faea29 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c | |||
@@ -50,19 +50,7 @@ | |||
50 | #define DMAR_DLY_CNT_DEF 15 | 50 | #define DMAR_DLY_CNT_DEF 15 |
51 | #define DMAW_DLY_CNT_DEF 4 | 51 | #define DMAW_DLY_CNT_DEF 4 |
52 | 52 | ||
53 | #define IMR_NORMAL_MASK (\ | 53 | #define IMR_NORMAL_MASK (ISR_ERROR | ISR_OVER | ISR_TX_PKT) |
54 | ISR_ERROR |\ | ||
55 | ISR_GPHY_LINK |\ | ||
56 | ISR_TX_PKT |\ | ||
57 | GPHY_WAKEUP_INT) | ||
58 | |||
59 | #define IMR_EXTENDED_MASK (\ | ||
60 | SW_MAN_INT |\ | ||
61 | ISR_OVER |\ | ||
62 | ISR_ERROR |\ | ||
63 | ISR_GPHY_LINK |\ | ||
64 | ISR_TX_PKT |\ | ||
65 | GPHY_WAKEUP_INT) | ||
66 | 54 | ||
67 | #define ISR_TX_PKT (\ | 55 | #define ISR_TX_PKT (\ |
68 | TX_PKT_INT |\ | 56 | TX_PKT_INT |\ |
@@ -70,10 +58,6 @@ | |||
70 | TX_PKT_INT2 |\ | 58 | TX_PKT_INT2 |\ |
71 | TX_PKT_INT3) | 59 | TX_PKT_INT3) |
72 | 60 | ||
73 | #define ISR_GPHY_LINK (\ | ||
74 | GPHY_LINK_UP_INT |\ | ||
75 | GPHY_LINK_DOWN_INT) | ||
76 | |||
77 | #define ISR_OVER (\ | 61 | #define ISR_OVER (\ |
78 | RFD0_UR_INT |\ | 62 | RFD0_UR_INT |\ |
79 | RFD1_UR_INT |\ | 63 | RFD1_UR_INT |\ |
@@ -187,10 +171,6 @@ irqreturn_t emac_isr(int _irq, void *data) | |||
187 | if (status & ISR_OVER) | 171 | if (status & ISR_OVER) |
188 | net_warn_ratelimited("warning: TX/RX overflow\n"); | 172 | net_warn_ratelimited("warning: TX/RX overflow\n"); |
189 | 173 | ||
190 | /* link event */ | ||
191 | if (status & ISR_GPHY_LINK) | ||
192 | phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT)); | ||
193 | |||
194 | exit: | 174 | exit: |
195 | /* enable the interrupt */ | 175 | /* enable the interrupt */ |
196 | writel(irq->mask, adpt->base + EMAC_INT_MASK); | 176 | writel(irq->mask, adpt->base + EMAC_INT_MASK); |
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 3cd7989c007d..784782da3a85 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
@@ -230,18 +230,6 @@ static void ravb_ring_free(struct net_device *ndev, int q) | |||
230 | int ring_size; | 230 | int ring_size; |
231 | int i; | 231 | int i; |
232 | 232 | ||
233 | /* Free RX skb ringbuffer */ | ||
234 | if (priv->rx_skb[q]) { | ||
235 | for (i = 0; i < priv->num_rx_ring[q]; i++) | ||
236 | dev_kfree_skb(priv->rx_skb[q][i]); | ||
237 | } | ||
238 | kfree(priv->rx_skb[q]); | ||
239 | priv->rx_skb[q] = NULL; | ||
240 | |||
241 | /* Free aligned TX buffers */ | ||
242 | kfree(priv->tx_align[q]); | ||
243 | priv->tx_align[q] = NULL; | ||
244 | |||
245 | if (priv->rx_ring[q]) { | 233 | if (priv->rx_ring[q]) { |
246 | for (i = 0; i < priv->num_rx_ring[q]; i++) { | 234 | for (i = 0; i < priv->num_rx_ring[q]; i++) { |
247 | struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; | 235 | struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; |
@@ -270,6 +258,18 @@ static void ravb_ring_free(struct net_device *ndev, int q) | |||
270 | priv->tx_ring[q] = NULL; | 258 | priv->tx_ring[q] = NULL; |
271 | } | 259 | } |
272 | 260 | ||
261 | /* Free RX skb ringbuffer */ | ||
262 | if (priv->rx_skb[q]) { | ||
263 | for (i = 0; i < priv->num_rx_ring[q]; i++) | ||
264 | dev_kfree_skb(priv->rx_skb[q][i]); | ||
265 | } | ||
266 | kfree(priv->rx_skb[q]); | ||
267 | priv->rx_skb[q] = NULL; | ||
268 | |||
269 | /* Free aligned TX buffers */ | ||
270 | kfree(priv->tx_align[q]); | ||
271 | priv->tx_align[q] = NULL; | ||
272 | |||
273 | /* Free TX skb ringbuffer. | 273 | /* Free TX skb ringbuffer. |
274 | * SKBs are freed by ravb_tx_free() call above. | 274 | * SKBs are freed by ravb_tx_free() call above. |
275 | */ | 275 | */ |
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c index 489ef146201e..6a9c954492f2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) | 37 | #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12) |
38 | #define TSE_PCS_CONTROL_REG 0x00 | 38 | #define TSE_PCS_CONTROL_REG 0x00 |
39 | #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) | 39 | #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9) |
40 | #define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140 | ||
40 | #define TSE_PCS_IF_MODE_REG 0x28 | 41 | #define TSE_PCS_IF_MODE_REG 0x28 |
41 | #define TSE_PCS_LINK_TIMER_0_REG 0x24 | 42 | #define TSE_PCS_LINK_TIMER_0_REG 0x24 |
42 | #define TSE_PCS_LINK_TIMER_1_REG 0x26 | 43 | #define TSE_PCS_LINK_TIMER_1_REG 0x26 |
@@ -65,6 +66,7 @@ | |||
65 | #define TSE_PCS_SW_RESET_TIMEOUT 100 | 66 | #define TSE_PCS_SW_RESET_TIMEOUT 100 |
66 | #define TSE_PCS_USE_SGMII_AN_MASK BIT(1) | 67 | #define TSE_PCS_USE_SGMII_AN_MASK BIT(1) |
67 | #define TSE_PCS_USE_SGMII_ENA BIT(0) | 68 | #define TSE_PCS_USE_SGMII_ENA BIT(0) |
69 | #define TSE_PCS_IF_USE_SGMII 0x03 | ||
68 | 70 | ||
69 | #define SGMII_ADAPTER_CTRL_REG 0x00 | 71 | #define SGMII_ADAPTER_CTRL_REG 0x00 |
70 | #define SGMII_ADAPTER_DISABLE 0x0001 | 72 | #define SGMII_ADAPTER_DISABLE 0x0001 |
@@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs) | |||
101 | { | 103 | { |
102 | int ret = 0; | 104 | int ret = 0; |
103 | 105 | ||
104 | writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG); | 106 | writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG); |
107 | |||
108 | writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG); | ||
105 | 109 | ||
106 | writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); | 110 | writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG); |
107 | writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); | 111 | writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 68a188e74c54..f446f368dd20 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -1207,7 +1207,7 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags) | |||
1207 | u32 rx_count = priv->plat->rx_queues_to_use; | 1207 | u32 rx_count = priv->plat->rx_queues_to_use; |
1208 | unsigned int bfsize = 0; | 1208 | unsigned int bfsize = 0; |
1209 | int ret = -ENOMEM; | 1209 | int ret = -ENOMEM; |
1210 | u32 queue; | 1210 | int queue; |
1211 | int i; | 1211 | int i; |
1212 | 1212 | ||
1213 | if (priv->hw->mode->set_16kib_bfsize) | 1213 | if (priv->hw->mode->set_16kib_bfsize) |
@@ -2723,7 +2723,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des, | |||
2723 | 2723 | ||
2724 | priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, | 2724 | priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size, |
2725 | 0, 1, | 2725 | 0, 1, |
2726 | (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE), | 2726 | (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE), |
2727 | 0, 0); | 2727 | 0, 0); |
2728 | 2728 | ||
2729 | tmp_len -= TSO_MAX_BUFF_SIZE; | 2729 | tmp_len -= TSO_MAX_BUFF_SIZE; |
@@ -2945,7 +2945,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2945 | int i, csum_insertion = 0, is_jumbo = 0; | 2945 | int i, csum_insertion = 0, is_jumbo = 0; |
2946 | u32 queue = skb_get_queue_mapping(skb); | 2946 | u32 queue = skb_get_queue_mapping(skb); |
2947 | int nfrags = skb_shinfo(skb)->nr_frags; | 2947 | int nfrags = skb_shinfo(skb)->nr_frags; |
2948 | unsigned int entry, first_entry; | 2948 | int entry; |
2949 | unsigned int first_entry; | ||
2949 | struct dma_desc *desc, *first; | 2950 | struct dma_desc *desc, *first; |
2950 | struct stmmac_tx_queue *tx_q; | 2951 | struct stmmac_tx_queue *tx_q; |
2951 | unsigned int enh_desc; | 2952 | unsigned int enh_desc; |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 959fd12d2e67..6ebb0f559a42 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -1133,7 +1133,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, | |||
1133 | 1133 | ||
1134 | /* make enough headroom for basic scenario */ | 1134 | /* make enough headroom for basic scenario */ |
1135 | encap_len = GENEVE_BASE_HLEN + ETH_HLEN; | 1135 | encap_len = GENEVE_BASE_HLEN + ETH_HLEN; |
1136 | if (ip_tunnel_info_af(info) == AF_INET) { | 1136 | if (!metadata && ip_tunnel_info_af(info) == AF_INET) { |
1137 | encap_len += sizeof(struct iphdr); | 1137 | encap_len += sizeof(struct iphdr); |
1138 | dev->max_mtu -= sizeof(struct iphdr); | 1138 | dev->max_mtu -= sizeof(struct iphdr); |
1139 | } else { | 1139 | } else { |
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 8c3633c1d078..97e3bc60c3e7 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c | |||
@@ -576,6 +576,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
576 | case HDLCDRVCTL_CALIBRATE: | 576 | case HDLCDRVCTL_CALIBRATE: |
577 | if(!capable(CAP_SYS_RAWIO)) | 577 | if(!capable(CAP_SYS_RAWIO)) |
578 | return -EPERM; | 578 | return -EPERM; |
579 | if (s->par.bitrate <= 0) | ||
580 | return -EINVAL; | ||
579 | if (bi.data.calibrate > INT_MAX / s->par.bitrate) | 581 | if (bi.data.calibrate > INT_MAX / s->par.bitrate) |
580 | return -EINVAL; | 582 | return -EINVAL; |
581 | s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; | 583 | s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index d4782e902e2e..4c169dbf9138 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -647,6 +647,18 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) | |||
647 | return 0; | 647 | return 0; |
648 | } | 648 | } |
649 | 649 | ||
650 | static int mdio_uevent(struct device *dev, struct kobj_uevent_env *env) | ||
651 | { | ||
652 | int rc; | ||
653 | |||
654 | /* Some devices have extra OF data and an OF-style MODALIAS */ | ||
655 | rc = of_device_uevent_modalias(dev, env); | ||
656 | if (rc != -ENODEV) | ||
657 | return rc; | ||
658 | |||
659 | return 0; | ||
660 | } | ||
661 | |||
650 | #ifdef CONFIG_PM | 662 | #ifdef CONFIG_PM |
651 | static int mdio_bus_suspend(struct device *dev) | 663 | static int mdio_bus_suspend(struct device *dev) |
652 | { | 664 | { |
@@ -697,6 +709,7 @@ static const struct dev_pm_ops mdio_bus_pm_ops = { | |||
697 | struct bus_type mdio_bus_type = { | 709 | struct bus_type mdio_bus_type = { |
698 | .name = "mdio_bus", | 710 | .name = "mdio_bus", |
699 | .match = mdio_bus_match, | 711 | .match = mdio_bus_match, |
712 | .uevent = mdio_uevent, | ||
700 | .pm = MDIO_BUS_PM_OPS, | 713 | .pm = MDIO_BUS_PM_OPS, |
701 | }; | 714 | }; |
702 | EXPORT_SYMBOL(mdio_bus_type); | 715 | EXPORT_SYMBOL(mdio_bus_type); |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 46e80bcc7a8a..9365b0792309 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -269,23 +269,12 @@ out: | |||
269 | return ret; | 269 | return ret; |
270 | } | 270 | } |
271 | 271 | ||
272 | static int kszphy_config_init(struct phy_device *phydev) | 272 | /* Some config bits need to be set again on resume, handle them here. */ |
273 | static int kszphy_config_reset(struct phy_device *phydev) | ||
273 | { | 274 | { |
274 | struct kszphy_priv *priv = phydev->priv; | 275 | struct kszphy_priv *priv = phydev->priv; |
275 | const struct kszphy_type *type; | ||
276 | int ret; | 276 | int ret; |
277 | 277 | ||
278 | if (!priv) | ||
279 | return 0; | ||
280 | |||
281 | type = priv->type; | ||
282 | |||
283 | if (type->has_broadcast_disable) | ||
284 | kszphy_broadcast_disable(phydev); | ||
285 | |||
286 | if (type->has_nand_tree_disable) | ||
287 | kszphy_nand_tree_disable(phydev); | ||
288 | |||
289 | if (priv->rmii_ref_clk_sel) { | 278 | if (priv->rmii_ref_clk_sel) { |
290 | ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); | 279 | ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val); |
291 | if (ret) { | 280 | if (ret) { |
@@ -296,11 +285,30 @@ static int kszphy_config_init(struct phy_device *phydev) | |||
296 | } | 285 | } |
297 | 286 | ||
298 | if (priv->led_mode >= 0) | 287 | if (priv->led_mode >= 0) |
299 | kszphy_setup_led(phydev, type->led_mode_reg, priv->led_mode); | 288 | kszphy_setup_led(phydev, priv->type->led_mode_reg, priv->led_mode); |
300 | 289 | ||
301 | return 0; | 290 | return 0; |
302 | } | 291 | } |
303 | 292 | ||
293 | static int kszphy_config_init(struct phy_device *phydev) | ||
294 | { | ||
295 | struct kszphy_priv *priv = phydev->priv; | ||
296 | const struct kszphy_type *type; | ||
297 | |||
298 | if (!priv) | ||
299 | return 0; | ||
300 | |||
301 | type = priv->type; | ||
302 | |||
303 | if (type->has_broadcast_disable) | ||
304 | kszphy_broadcast_disable(phydev); | ||
305 | |||
306 | if (type->has_nand_tree_disable) | ||
307 | kszphy_nand_tree_disable(phydev); | ||
308 | |||
309 | return kszphy_config_reset(phydev); | ||
310 | } | ||
311 | |||
304 | static int ksz8041_config_init(struct phy_device *phydev) | 312 | static int ksz8041_config_init(struct phy_device *phydev) |
305 | { | 313 | { |
306 | struct device_node *of_node = phydev->mdio.dev.of_node; | 314 | struct device_node *of_node = phydev->mdio.dev.of_node; |
@@ -701,8 +709,14 @@ static int kszphy_suspend(struct phy_device *phydev) | |||
701 | 709 | ||
702 | static int kszphy_resume(struct phy_device *phydev) | 710 | static int kszphy_resume(struct phy_device *phydev) |
703 | { | 711 | { |
712 | int ret; | ||
713 | |||
704 | genphy_resume(phydev); | 714 | genphy_resume(phydev); |
705 | 715 | ||
716 | ret = kszphy_config_reset(phydev); | ||
717 | if (ret) | ||
718 | return ret; | ||
719 | |||
706 | /* Enable PHY Interrupts */ | 720 | /* Enable PHY Interrupts */ |
707 | if (phy_interrupt_is_valid(phydev)) { | 721 | if (phy_interrupt_is_valid(phydev)) { |
708 | phydev->interrupts = PHY_INTERRUPT_ENABLED; | 722 | phydev->interrupts = PHY_INTERRUPT_ENABLED; |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 12548e5b6037..14fc5bc75cd1 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -266,7 +266,7 @@ static const struct phy_setting settings[] = { | |||
266 | * phy_lookup_setting - lookup a PHY setting | 266 | * phy_lookup_setting - lookup a PHY setting |
267 | * @speed: speed to match | 267 | * @speed: speed to match |
268 | * @duplex: duplex to match | 268 | * @duplex: duplex to match |
269 | * @feature: allowed link modes | 269 | * @features: allowed link modes |
270 | * @exact: an exact match is required | 270 | * @exact: an exact match is required |
271 | * | 271 | * |
272 | * Search the settings array for a setting that matches the speed and | 272 | * Search the settings array for a setting that matches the speed and |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 57763d30cabb..1f8c15cb63b0 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -869,7 +869,7 @@ static unsigned int get_mergeable_buf_len(struct receive_queue *rq, | |||
869 | unsigned int len; | 869 | unsigned int len; |
870 | 870 | ||
871 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), | 871 | len = hdr_len + clamp_t(unsigned int, ewma_pkt_len_read(avg_pkt_len), |
872 | rq->min_buf_len - hdr_len, PAGE_SIZE - hdr_len); | 872 | rq->min_buf_len, PAGE_SIZE - hdr_len); |
873 | return ALIGN(len, L1_CACHE_BYTES); | 873 | return ALIGN(len, L1_CACHE_BYTES); |
874 | } | 874 | } |
875 | 875 | ||
@@ -2149,7 +2149,8 @@ static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqu | |||
2149 | unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; | 2149 | unsigned int buf_len = hdr_len + ETH_HLEN + VLAN_HLEN + packet_len; |
2150 | unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); | 2150 | unsigned int min_buf_len = DIV_ROUND_UP(buf_len, rq_size); |
2151 | 2151 | ||
2152 | return max(min_buf_len, hdr_len); | 2152 | return max(max(min_buf_len, hdr_len) - hdr_len, |
2153 | (unsigned int)GOOD_PACKET_LEN); | ||
2153 | } | 2154 | } |
2154 | 2155 | ||
2155 | static int virtnet_find_vqs(struct virtnet_info *vi) | 2156 | static int virtnet_find_vqs(struct virtnet_info *vi) |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 328b4712683c..a6b5052c1d36 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -59,6 +59,8 @@ static const u8 all_zeros_mac[ETH_ALEN + 2]; | |||
59 | 59 | ||
60 | static int vxlan_sock_add(struct vxlan_dev *vxlan); | 60 | static int vxlan_sock_add(struct vxlan_dev *vxlan); |
61 | 61 | ||
62 | static void vxlan_vs_del_dev(struct vxlan_dev *vxlan); | ||
63 | |||
62 | /* per-network namespace private data for this module */ | 64 | /* per-network namespace private data for this module */ |
63 | struct vxlan_net { | 65 | struct vxlan_net { |
64 | struct list_head vxlan_list; | 66 | struct list_head vxlan_list; |
@@ -740,6 +742,22 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f) | |||
740 | call_rcu(&f->rcu, vxlan_fdb_free); | 742 | call_rcu(&f->rcu, vxlan_fdb_free); |
741 | } | 743 | } |
742 | 744 | ||
745 | static void vxlan_dst_free(struct rcu_head *head) | ||
746 | { | ||
747 | struct vxlan_rdst *rd = container_of(head, struct vxlan_rdst, rcu); | ||
748 | |||
749 | dst_cache_destroy(&rd->dst_cache); | ||
750 | kfree(rd); | ||
751 | } | ||
752 | |||
753 | static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, | ||
754 | struct vxlan_rdst *rd) | ||
755 | { | ||
756 | list_del_rcu(&rd->list); | ||
757 | vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); | ||
758 | call_rcu(&rd->rcu, vxlan_dst_free); | ||
759 | } | ||
760 | |||
743 | static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, | 761 | static int vxlan_fdb_parse(struct nlattr *tb[], struct vxlan_dev *vxlan, |
744 | union vxlan_addr *ip, __be16 *port, __be32 *src_vni, | 762 | union vxlan_addr *ip, __be16 *port, __be32 *src_vni, |
745 | __be32 *vni, u32 *ifindex) | 763 | __be32 *vni, u32 *ifindex) |
@@ -864,9 +882,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, | |||
864 | * otherwise destroy the fdb entry | 882 | * otherwise destroy the fdb entry |
865 | */ | 883 | */ |
866 | if (rd && !list_is_singular(&f->remotes)) { | 884 | if (rd && !list_is_singular(&f->remotes)) { |
867 | list_del_rcu(&rd->list); | 885 | vxlan_fdb_dst_destroy(vxlan, f, rd); |
868 | vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); | ||
869 | kfree_rcu(rd, rcu); | ||
870 | goto out; | 886 | goto out; |
871 | } | 887 | } |
872 | 888 | ||
@@ -1067,6 +1083,8 @@ static void vxlan_sock_release(struct vxlan_dev *vxlan) | |||
1067 | rcu_assign_pointer(vxlan->vn4_sock, NULL); | 1083 | rcu_assign_pointer(vxlan->vn4_sock, NULL); |
1068 | synchronize_net(); | 1084 | synchronize_net(); |
1069 | 1085 | ||
1086 | vxlan_vs_del_dev(vxlan); | ||
1087 | |||
1070 | if (__vxlan_sock_release_prep(sock4)) { | 1088 | if (__vxlan_sock_release_prep(sock4)) { |
1071 | udp_tunnel_sock_release(sock4->sock); | 1089 | udp_tunnel_sock_release(sock4->sock); |
1072 | kfree(sock4); | 1090 | kfree(sock4); |
@@ -2342,6 +2360,15 @@ static void vxlan_cleanup(unsigned long arg) | |||
2342 | mod_timer(&vxlan->age_timer, next_timer); | 2360 | mod_timer(&vxlan->age_timer, next_timer); |
2343 | } | 2361 | } |
2344 | 2362 | ||
2363 | static void vxlan_vs_del_dev(struct vxlan_dev *vxlan) | ||
2364 | { | ||
2365 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); | ||
2366 | |||
2367 | spin_lock(&vn->sock_lock); | ||
2368 | hlist_del_init_rcu(&vxlan->hlist); | ||
2369 | spin_unlock(&vn->sock_lock); | ||
2370 | } | ||
2371 | |||
2345 | static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) | 2372 | static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan) |
2346 | { | 2373 | { |
2347 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); | 2374 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); |
@@ -3286,15 +3313,9 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], | |||
3286 | static void vxlan_dellink(struct net_device *dev, struct list_head *head) | 3313 | static void vxlan_dellink(struct net_device *dev, struct list_head *head) |
3287 | { | 3314 | { |
3288 | struct vxlan_dev *vxlan = netdev_priv(dev); | 3315 | struct vxlan_dev *vxlan = netdev_priv(dev); |
3289 | struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); | ||
3290 | 3316 | ||
3291 | vxlan_flush(vxlan, true); | 3317 | vxlan_flush(vxlan, true); |
3292 | 3318 | ||
3293 | spin_lock(&vn->sock_lock); | ||
3294 | if (!hlist_unhashed(&vxlan->hlist)) | ||
3295 | hlist_del_rcu(&vxlan->hlist); | ||
3296 | spin_unlock(&vn->sock_lock); | ||
3297 | |||
3298 | gro_cells_destroy(&vxlan->gro_cells); | 3319 | gro_cells_destroy(&vxlan->gro_cells); |
3299 | list_del(&vxlan->next); | 3320 | list_del(&vxlan->next); |
3300 | unregister_netdevice_queue(dev, head); | 3321 | unregister_netdevice_queue(dev, head); |
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index d5e993dc9b23..517a315e259b 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c | |||
@@ -1271,6 +1271,8 @@ static int wcn36xx_remove(struct platform_device *pdev) | |||
1271 | qcom_smem_state_put(wcn->tx_enable_state); | 1271 | qcom_smem_state_put(wcn->tx_enable_state); |
1272 | qcom_smem_state_put(wcn->tx_rings_empty_state); | 1272 | qcom_smem_state_put(wcn->tx_rings_empty_state); |
1273 | 1273 | ||
1274 | rpmsg_destroy_ept(wcn->smd_channel); | ||
1275 | |||
1274 | iounmap(wcn->dxe_base); | 1276 | iounmap(wcn->dxe_base); |
1275 | iounmap(wcn->ccu_base); | 1277 | iounmap(wcn->ccu_base); |
1276 | 1278 | ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index fc64b8913aa6..e03450059b06 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c | |||
@@ -3422,7 +3422,7 @@ static int brcmf_sdio_bus_preinit(struct device *dev) | |||
3422 | /* otherwise, set txglomalign */ | 3422 | /* otherwise, set txglomalign */ |
3423 | value = sdiodev->settings->bus.sdio.sd_sgentry_align; | 3423 | value = sdiodev->settings->bus.sdio.sd_sgentry_align; |
3424 | /* SDIO ADMA requires at least 32 bit alignment */ | 3424 | /* SDIO ADMA requires at least 32 bit alignment */ |
3425 | value = max_t(u32, value, 4); | 3425 | value = max_t(u32, value, ALIGNMENT); |
3426 | err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value, | 3426 | err = brcmf_iovar_data_set(dev, "bus:txglomalign", &value, |
3427 | sizeof(u32)); | 3427 | sizeof(u32)); |
3428 | } | 3428 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c index 3b3e076571d6..45e2efc70d19 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-7000.c | |||
@@ -79,8 +79,8 @@ | |||
79 | /* Lowest firmware API version supported */ | 79 | /* Lowest firmware API version supported */ |
80 | #define IWL7260_UCODE_API_MIN 17 | 80 | #define IWL7260_UCODE_API_MIN 17 |
81 | #define IWL7265_UCODE_API_MIN 17 | 81 | #define IWL7265_UCODE_API_MIN 17 |
82 | #define IWL7265D_UCODE_API_MIN 17 | 82 | #define IWL7265D_UCODE_API_MIN 22 |
83 | #define IWL3168_UCODE_API_MIN 20 | 83 | #define IWL3168_UCODE_API_MIN 22 |
84 | 84 | ||
85 | /* NVM versions */ | 85 | /* NVM versions */ |
86 | #define IWL7260_NVM_VERSION 0x0a1d | 86 | #define IWL7260_NVM_VERSION 0x0a1d |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c index b9718c0cf174..89137717c1fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c | |||
@@ -74,8 +74,8 @@ | |||
74 | #define IWL8265_UCODE_API_MAX 30 | 74 | #define IWL8265_UCODE_API_MAX 30 |
75 | 75 | ||
76 | /* Lowest firmware API version supported */ | 76 | /* Lowest firmware API version supported */ |
77 | #define IWL8000_UCODE_API_MIN 17 | 77 | #define IWL8000_UCODE_API_MIN 22 |
78 | #define IWL8265_UCODE_API_MIN 20 | 78 | #define IWL8265_UCODE_API_MIN 22 |
79 | 79 | ||
80 | /* NVM versions */ | 80 | /* NVM versions */ |
81 | #define IWL8000_NVM_VERSION 0x0a1d | 81 | #define IWL8000_NVM_VERSION 0x0a1d |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 306bc967742e..77efbb78e867 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h | |||
@@ -370,6 +370,7 @@ | |||
370 | #define MON_DMARB_RD_DATA_ADDR (0xa03c5c) | 370 | #define MON_DMARB_RD_DATA_ADDR (0xa03c5c) |
371 | 371 | ||
372 | #define DBGC_IN_SAMPLE (0xa03c00) | 372 | #define DBGC_IN_SAMPLE (0xa03c00) |
373 | #define DBGC_OUT_CTRL (0xa03c0c) | ||
373 | 374 | ||
374 | /* enable the ID buf for read */ | 375 | /* enable the ID buf for read */ |
375 | #define WFPM_PS_CTL_CLR 0xA0300C | 376 | #define WFPM_PS_CTL_CLR 0xA0300C |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h index 1b7d265ffb0a..a10c6aae9ab9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-rs.h | |||
@@ -307,6 +307,11 @@ enum { | |||
307 | /* Bit 1-3: LQ command color. Used to match responses to LQ commands */ | 307 | /* Bit 1-3: LQ command color. Used to match responses to LQ commands */ |
308 | #define LQ_FLAG_COLOR_POS 1 | 308 | #define LQ_FLAG_COLOR_POS 1 |
309 | #define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) | 309 | #define LQ_FLAG_COLOR_MSK (7 << LQ_FLAG_COLOR_POS) |
310 | #define LQ_FLAG_COLOR_GET(_f) (((_f) & LQ_FLAG_COLOR_MSK) >>\ | ||
311 | LQ_FLAG_COLOR_POS) | ||
312 | #define LQ_FLAGS_COLOR_INC(_c) ((((_c) + 1) << LQ_FLAG_COLOR_POS) &\ | ||
313 | LQ_FLAG_COLOR_MSK) | ||
314 | #define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK)) | ||
310 | 315 | ||
311 | /* Bit 4-5: Tx RTS BW Signalling | 316 | /* Bit 4-5: Tx RTS BW Signalling |
312 | * (0) No RTS BW signalling | 317 | * (0) No RTS BW signalling |
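Note on the new color macros: LQ_FLAG_COLOR_GET/INC/SET treat bits 1-3 of the LQ command flags as a small wrap-around counter ("color") used to match firmware responses to the LQ command that produced them. A minimal standalone sketch of how the counter behaves; the macro bodies are copied from the hunk above, while main() and the loop are purely illustrative:

#include <stdio.h>
#include <stdint.h>

#define LQ_FLAG_COLOR_POS         1
#define LQ_FLAG_COLOR_MSK         (7 << LQ_FLAG_COLOR_POS)
#define LQ_FLAG_COLOR_GET(_f)     (((_f) & LQ_FLAG_COLOR_MSK) >> LQ_FLAG_COLOR_POS)
#define LQ_FLAGS_COLOR_INC(_c)    ((((_c) + 1) << LQ_FLAG_COLOR_POS) & LQ_FLAG_COLOR_MSK)
#define LQ_FLAG_COLOR_SET(_f, _c) ((_c) | ((_f) & ~LQ_FLAG_COLOR_MSK))

int main(void)
{
    uint16_t flags = 0;   /* stands in for lq_cmd->flags */
    int i;

    for (i = 0; i < 10; i++) {
        /* same get -> increment -> set sequence used when building an LQ command */
        uint16_t color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(flags));

        flags = LQ_FLAG_COLOR_SET(flags, color);
        printf("LQ command %d carries color %u\n", i, LQ_FLAG_COLOR_GET(flags));
    }
    return 0;   /* the color runs 1..7 and wraps to 0: a modulo-8 generation tag */
}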
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h index 81b98915b1a4..1360ebfdc51b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-api-tx.h | |||
@@ -519,8 +519,11 @@ struct agg_tx_status { | |||
519 | * bit-7 invalid rate indication | 519 | * bit-7 invalid rate indication |
520 | */ | 520 | */ |
521 | #define TX_RES_INIT_RATE_INDEX_MSK 0x0f | 521 | #define TX_RES_INIT_RATE_INDEX_MSK 0x0f |
522 | #define TX_RES_RATE_TABLE_COLOR_POS 4 | ||
522 | #define TX_RES_RATE_TABLE_COLOR_MSK 0x70 | 523 | #define TX_RES_RATE_TABLE_COLOR_MSK 0x70 |
523 | #define TX_RES_INV_RATE_INDEX_MSK 0x80 | 524 | #define TX_RES_INV_RATE_INDEX_MSK 0x80 |
525 | #define TX_RES_RATE_TABLE_COL_GET(_f) (((_f) & TX_RES_RATE_TABLE_COLOR_MSK) >>\ | ||
526 | TX_RES_RATE_TABLE_COLOR_POS) | ||
524 | 527 | ||
525 | #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) | 528 | #define IWL_MVM_TX_RES_GET_TID(_ra_tid) ((_ra_tid) & 0x0f) |
526 | #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) | 529 | #define IWL_MVM_TX_RES_GET_RA(_ra_tid) ((_ra_tid) >> 4) |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c index 7b86a4f1b574..c8712e6eea74 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw-dbg.c | |||
@@ -1002,14 +1002,6 @@ int iwl_mvm_fw_dbg_collect_trig(struct iwl_mvm *mvm, | |||
1002 | return 0; | 1002 | return 0; |
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | static inline void iwl_mvm_restart_early_start(struct iwl_mvm *mvm) | ||
1006 | { | ||
1007 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) | ||
1008 | iwl_clear_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); | ||
1009 | else | ||
1010 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 1); | ||
1011 | } | ||
1012 | |||
1013 | int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) | 1005 | int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) |
1014 | { | 1006 | { |
1015 | u8 *ptr; | 1007 | u8 *ptr; |
@@ -1023,10 +1015,8 @@ int iwl_mvm_start_fw_dbg_conf(struct iwl_mvm *mvm, u8 conf_id) | |||
1023 | /* EARLY START - firmware's configuration is hard coded */ | 1015 | /* EARLY START - firmware's configuration is hard coded */ |
1024 | if ((!mvm->fw->dbg_conf_tlv[conf_id] || | 1016 | if ((!mvm->fw->dbg_conf_tlv[conf_id] || |
1025 | !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && | 1017 | !mvm->fw->dbg_conf_tlv[conf_id]->num_of_hcmds) && |
1026 | conf_id == FW_DBG_START_FROM_ALIVE) { | 1018 | conf_id == FW_DBG_START_FROM_ALIVE) |
1027 | iwl_mvm_restart_early_start(mvm); | ||
1028 | return 0; | 1019 | return 0; |
1029 | } | ||
1030 | 1020 | ||
1031 | if (!mvm->fw->dbg_conf_tlv[conf_id]) | 1021 | if (!mvm->fw->dbg_conf_tlv[conf_id]) |
1032 | return -EINVAL; | 1022 | return -EINVAL; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 0f1831b41915..fd2fc46e2fe5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c | |||
@@ -1040,7 +1040,7 @@ static int iwl_mvm_mac_ctxt_send_beacon(struct iwl_mvm *mvm, | |||
1040 | struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; | 1040 | struct iwl_mac_beacon_cmd_v6 beacon_cmd_v6; |
1041 | struct iwl_mac_beacon_cmd_v7 beacon_cmd; | 1041 | struct iwl_mac_beacon_cmd_v7 beacon_cmd; |
1042 | } u = {}; | 1042 | } u = {}; |
1043 | struct iwl_mac_beacon_cmd beacon_cmd; | 1043 | struct iwl_mac_beacon_cmd beacon_cmd = {}; |
1044 | struct ieee80211_tx_info *info; | 1044 | struct ieee80211_tx_info *info; |
1045 | u32 beacon_skb_len; | 1045 | u32 beacon_skb_len; |
1046 | u32 rate, tx_flags; | 1046 | u32 rate, tx_flags; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 4e74a6b90e70..52f8d7a6a7dc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
@@ -1730,8 +1730,11 @@ int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, u8 minq, u8 maxq); | |||
1730 | */ | 1730 | */ |
1731 | static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) | 1731 | static inline u32 iwl_mvm_flushable_queues(struct iwl_mvm *mvm) |
1732 | { | 1732 | { |
1733 | u32 cmd_queue = iwl_mvm_is_dqa_supported(mvm) ? IWL_MVM_DQA_CMD_QUEUE : | ||
1734 | IWL_MVM_CMD_QUEUE; | ||
1735 | |||
1733 | return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & | 1736 | return ((BIT(mvm->cfg->base_params->num_of_queues) - 1) & |
1734 | ~BIT(IWL_MVM_CMD_QUEUE)); | 1737 | ~BIT(cmd_queue)); |
1735 | } | 1738 | } |
1736 | 1739 | ||
1737 | static inline | 1740 | static inline |
@@ -1753,6 +1756,7 @@ static inline void iwl_mvm_stop_device(struct iwl_mvm *mvm) | |||
1753 | if (!iwl_mvm_has_new_tx_api(mvm)) | 1756 | if (!iwl_mvm_has_new_tx_api(mvm)) |
1754 | iwl_free_fw_paging(mvm); | 1757 | iwl_free_fw_paging(mvm); |
1755 | mvm->ucode_loaded = false; | 1758 | mvm->ucode_loaded = false; |
1759 | mvm->fw_dbg_conf = FW_DBG_INVALID; | ||
1756 | iwl_trans_stop_device(mvm->trans); | 1760 | iwl_trans_stop_device(mvm->trans); |
1757 | } | 1761 | } |
1758 | 1762 | ||
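The iwl_mvm_flushable_queues() change above selects the command queue number according to whether DQA is in use and then masks that queue out of the flushable set. The bit arithmetic is simply BIT(n) - 1 with one bit cleared; a standalone sketch with invented queue numbers (the real IWL_MVM_DQA_CMD_QUEUE / IWL_MVM_CMD_QUEUE values are not part of this hunk):

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1U << (n))

static uint32_t flushable_queues(unsigned int num_of_queues, unsigned int cmd_queue)
{
    /* every queue below num_of_queues, except the command queue */
    return (BIT(num_of_queues) - 1) & ~BIT(cmd_queue);
}

int main(void)
{
    /* e.g. 20 hardware queues with the command queue at index 9 (illustrative only) */
    printf("flushable mask = 0x%05x\n", flushable_queues(20, 9));
    return 0;   /* prints 0xffdff: bits 0-19 set, bit 9 cleared */
}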
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 9ffff6ed8133..3da5ec40aaea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
@@ -1149,21 +1149,37 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work) | |||
1149 | 1149 | ||
1150 | mutex_lock(&mvm->mutex); | 1150 | mutex_lock(&mvm->mutex); |
1151 | 1151 | ||
1152 | /* stop recording */ | ||
1153 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { | 1152 | if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) { |
1153 | /* stop recording */ | ||
1154 | iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); | 1154 | iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100); |
1155 | |||
1156 | iwl_mvm_fw_error_dump(mvm); | ||
1157 | |||
1158 | /* start recording again if the firmware is not crashed */ | ||
1159 | if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && | ||
1160 | mvm->fw->dbg_dest_tlv) | ||
1161 | iwl_clear_bits_prph(mvm->trans, | ||
1162 | MON_BUFF_SAMPLE_CTL, 0x100); | ||
1155 | } else { | 1163 | } else { |
1164 | u32 in_sample = iwl_read_prph(mvm->trans, DBGC_IN_SAMPLE); | ||
1165 | u32 out_ctrl = iwl_read_prph(mvm->trans, DBGC_OUT_CTRL); | ||
1166 | |||
1167 | /* stop recording */ | ||
1156 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); | 1168 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0); |
1157 | /* wait before we collect the data till the DBGC stop */ | ||
1158 | udelay(100); | 1169 | udelay(100); |
1159 | } | 1170 | iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, 0); |
1171 | /* wait until the DBGC stops before we collect the data */ | ||
1172 | udelay(500); | ||
1160 | 1173 | ||
1161 | iwl_mvm_fw_error_dump(mvm); | 1174 | iwl_mvm_fw_error_dump(mvm); |
1162 | 1175 | ||
1163 | /* start recording again if the firmware is not crashed */ | 1176 | /* start recording again if the firmware is not crashed */ |
1164 | WARN_ON_ONCE((!test_bit(STATUS_FW_ERROR, &mvm->trans->status)) && | 1177 | if (!test_bit(STATUS_FW_ERROR, &mvm->trans->status) && |
1165 | mvm->fw->dbg_dest_tlv && | 1178 | mvm->fw->dbg_dest_tlv) { |
1166 | iwl_mvm_start_fw_dbg_conf(mvm, mvm->fw_dbg_conf)); | 1179 | iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, in_sample); |
1180 | iwl_write_prph(mvm->trans, DBGC_OUT_CTRL, out_ctrl); | ||
1181 | } | ||
1182 | } | ||
1167 | 1183 | ||
1168 | mutex_unlock(&mvm->mutex); | 1184 | mutex_unlock(&mvm->mutex); |
1169 | 1185 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 7788eefcd2bd..aa785cf3cf68 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * | 2 | * |
3 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. | 3 | * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. |
4 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 4 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH |
5 | * Copyright(c) 2016 Intel Deutschland GmbH | 5 | * Copyright(c) 2016 - 2017 Intel Deutschland GmbH |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms of version 2 of the GNU General Public License as | 8 | * under the terms of version 2 of the GNU General Public License as |
@@ -1083,34 +1083,6 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta, | |||
1083 | rs_get_lower_rate_in_column(lq_sta, rate); | 1083 | rs_get_lower_rate_in_column(lq_sta, rate); |
1084 | } | 1084 | } |
1085 | 1085 | ||
1086 | /* Check if both rates are identical | ||
1087 | * allow_ant_mismatch enables matching a SISO rate on ANT_A or ANT_B | ||
1088 | * with a rate indicating STBC/BFER and ANT_AB. | ||
1089 | */ | ||
1090 | static inline bool rs_rate_equal(struct rs_rate *a, | ||
1091 | struct rs_rate *b, | ||
1092 | bool allow_ant_mismatch) | ||
1093 | |||
1094 | { | ||
1095 | bool ant_match = (a->ant == b->ant) && (a->stbc == b->stbc) && | ||
1096 | (a->bfer == b->bfer); | ||
1097 | |||
1098 | if (allow_ant_mismatch) { | ||
1099 | if (a->stbc || a->bfer) { | ||
1100 | WARN_ONCE(a->ant != ANT_AB, "stbc %d bfer %d ant %d", | ||
1101 | a->stbc, a->bfer, a->ant); | ||
1102 | ant_match |= (b->ant == ANT_A || b->ant == ANT_B); | ||
1103 | } else if (b->stbc || b->bfer) { | ||
1104 | WARN_ONCE(b->ant != ANT_AB, "stbc %d bfer %d ant %d", | ||
1105 | b->stbc, b->bfer, b->ant); | ||
1106 | ant_match |= (a->ant == ANT_A || a->ant == ANT_B); | ||
1107 | } | ||
1108 | } | ||
1109 | |||
1110 | return (a->type == b->type) && (a->bw == b->bw) && (a->sgi == b->sgi) && | ||
1111 | (a->ldpc == b->ldpc) && (a->index == b->index) && ant_match; | ||
1112 | } | ||
1113 | |||
1114 | /* Check if both rates share the same column */ | 1086 | /* Check if both rates share the same column */ |
1115 | static inline bool rs_rate_column_match(struct rs_rate *a, | 1087 | static inline bool rs_rate_column_match(struct rs_rate *a, |
1116 | struct rs_rate *b) | 1088 | struct rs_rate *b) |
@@ -1182,12 +1154,12 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
1182 | u32 lq_hwrate; | 1154 | u32 lq_hwrate; |
1183 | struct rs_rate lq_rate, tx_resp_rate; | 1155 | struct rs_rate lq_rate, tx_resp_rate; |
1184 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; | 1156 | struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl; |
1185 | u8 reduced_txp = (uintptr_t)info->status.status_driver_data[0]; | 1157 | u32 tlc_info = (uintptr_t)info->status.status_driver_data[0]; |
1158 | u8 reduced_txp = tlc_info & RS_DRV_DATA_TXP_MSK; | ||
1159 | u8 lq_color = RS_DRV_DATA_LQ_COLOR_GET(tlc_info); | ||
1186 | u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; | 1160 | u32 tx_resp_hwrate = (uintptr_t)info->status.status_driver_data[1]; |
1187 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); | 1161 | struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); |
1188 | struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; | 1162 | struct iwl_lq_sta *lq_sta = &mvmsta->lq_sta; |
1189 | bool allow_ant_mismatch = fw_has_api(&mvm->fw->ucode_capa, | ||
1190 | IWL_UCODE_TLV_API_LQ_SS_PARAMS); | ||
1191 | 1163 | ||
1192 | /* Treat uninitialized rate scaling data same as non-existing. */ | 1164 | /* Treat uninitialized rate scaling data same as non-existing. */ |
1193 | if (!lq_sta) { | 1165 | if (!lq_sta) { |
@@ -1262,10 +1234,10 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | |||
1262 | rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); | 1234 | rs_rate_from_ucode_rate(lq_hwrate, info->band, &lq_rate); |
1263 | 1235 | ||
1264 | /* Here we actually compare this rate to the latest LQ command */ | 1236 | /* Here we actually compare this rate to the latest LQ command */ |
1265 | if (!rs_rate_equal(&tx_resp_rate, &lq_rate, allow_ant_mismatch)) { | 1237 | if (lq_color != LQ_FLAG_COLOR_GET(table->flags)) { |
1266 | IWL_DEBUG_RATE(mvm, | 1238 | IWL_DEBUG_RATE(mvm, |
1267 | "initial tx resp rate 0x%x does not match 0x%x\n", | 1239 | "tx resp color 0x%x does not match 0x%x\n", |
1268 | tx_resp_hwrate, lq_hwrate); | 1240 | lq_color, LQ_FLAG_COLOR_GET(table->flags)); |
1269 | 1241 | ||
1270 | /* | 1242 | /* |
1271 | * Since rates mis-match, the last LQ command may have failed. | 1243 | * Since rates mis-match, the last LQ command may have failed. |
@@ -3326,6 +3298,7 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, | |||
3326 | u8 valid_tx_ant = 0; | 3298 | u8 valid_tx_ant = 0; |
3327 | struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; | 3299 | struct iwl_lq_cmd *lq_cmd = &lq_sta->lq; |
3328 | bool toggle_ant = false; | 3300 | bool toggle_ant = false; |
3301 | u32 color; | ||
3329 | 3302 | ||
3330 | memcpy(&rate, initial_rate, sizeof(rate)); | 3303 | memcpy(&rate, initial_rate, sizeof(rate)); |
3331 | 3304 | ||
@@ -3380,6 +3353,9 @@ static void rs_build_rates_table(struct iwl_mvm *mvm, | |||
3380 | num_rates, num_retries, valid_tx_ant, | 3353 | num_rates, num_retries, valid_tx_ant, |
3381 | toggle_ant); | 3354 | toggle_ant); |
3382 | 3355 | ||
3356 | /* update the color of the LQ command (as a counter at bits 1-3) */ | ||
3357 | color = LQ_FLAGS_COLOR_INC(LQ_FLAG_COLOR_GET(lq_cmd->flags)); | ||
3358 | lq_cmd->flags = LQ_FLAG_COLOR_SET(lq_cmd->flags, color); | ||
3383 | } | 3359 | } |
3384 | 3360 | ||
3385 | struct rs_bfer_active_iter_data { | 3361 | struct rs_bfer_active_iter_data { |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index ee207f2c0a90..3abde1cb0303 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h | |||
@@ -2,6 +2,7 @@ | |||
2 | * | 2 | * |
3 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. | 3 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. |
4 | * Copyright(c) 2015 Intel Mobile Communications GmbH | 4 | * Copyright(c) 2015 Intel Mobile Communications GmbH |
5 | * Copyright(c) 2017 Intel Deutschland GmbH | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of version 2 of the GNU General Public License as | 8 | * under the terms of version 2 of the GNU General Public License as |
@@ -357,6 +358,20 @@ struct iwl_lq_sta { | |||
357 | } pers; | 358 | } pers; |
358 | }; | 359 | }; |
359 | 360 | ||
361 | /* ieee80211_tx_info's status_driver_data[0] is packed with lq color and txp | ||
362 | * Note: this is the iwlmvm <-> mac80211 interface. | ||
363 | * bits 0-7: reduced tx power | ||
364 | * bits 8-10: LQ command's color | ||
365 | */ | ||
366 | #define RS_DRV_DATA_TXP_MSK 0xff | ||
367 | #define RS_DRV_DATA_LQ_COLOR_POS 8 | ||
368 | #define RS_DRV_DATA_LQ_COLOR_MSK (7 << RS_DRV_DATA_LQ_COLOR_POS) | ||
369 | #define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >>\ | ||
370 | RS_DRV_DATA_LQ_COLOR_POS) | ||
371 | #define RS_DRV_DATA_PACK(_c, _p) ((void *)(uintptr_t)\ | ||
372 | (((uintptr_t)_p) |\ | ||
373 | ((_c) << RS_DRV_DATA_LQ_COLOR_POS))) | ||
374 | |||
360 | /* Initialize station's rate scaling information after adding station */ | 375 | /* Initialize station's rate scaling information after adding station */ |
361 | void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, | 376 | void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta, |
362 | enum nl80211_band band, bool init); | 377 | enum nl80211_band band, bool init); |
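The RS_DRV_DATA_* macros added above pack the reduced tx power (bits 0-7) and the LQ command color (bits 8-10) into the single pointer-sized status_driver_data[0] slot, so the tx-status path can later recover both values. A standalone round-trip sketch; the macro bodies follow the hunk, the sample values are invented:

#include <stdio.h>
#include <stdint.h>

#define RS_DRV_DATA_TXP_MSK          0xff
#define RS_DRV_DATA_LQ_COLOR_POS     8
#define RS_DRV_DATA_LQ_COLOR_MSK     (7 << RS_DRV_DATA_LQ_COLOR_POS)
#define RS_DRV_DATA_LQ_COLOR_GET(_f) (((_f) & RS_DRV_DATA_LQ_COLOR_MSK) >> \
                                      RS_DRV_DATA_LQ_COLOR_POS)
#define RS_DRV_DATA_PACK(_c, _p)     ((void *)(uintptr_t) \
                                      (((uintptr_t)_p) | \
                                       ((_c) << RS_DRV_DATA_LQ_COLOR_POS)))

int main(void)
{
    unsigned int lq_color = 5;      /* 3-bit LQ command color */
    unsigned int reduced_tpc = 42;  /* reduced tx power, fits in 8 bits */
    void *driver_data = RS_DRV_DATA_PACK(lq_color, reduced_tpc);
    uintptr_t v = (uintptr_t)driver_data;

    printf("txp = %lu, color = %lu\n",
           (unsigned long)(v & RS_DRV_DATA_TXP_MSK),
           (unsigned long)RS_DRV_DATA_LQ_COLOR_GET(v));
    return 0;   /* prints txp = 42, color = 5 */
}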
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index f5c786ddc526..614d67810d05 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c | |||
@@ -2120,7 +2120,8 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
2120 | if (!iwl_mvm_is_dqa_supported(mvm)) | 2120 | if (!iwl_mvm_is_dqa_supported(mvm)) |
2121 | return 0; | 2121 | return 0; |
2122 | 2122 | ||
2123 | if (WARN_ON(vif->type != NL80211_IFTYPE_AP)) | 2123 | if (WARN_ON(vif->type != NL80211_IFTYPE_AP && |
2124 | vif->type != NL80211_IFTYPE_ADHOC)) | ||
2124 | return -ENOTSUPP; | 2125 | return -ENOTSUPP; |
2125 | 2126 | ||
2126 | /* | 2127 | /* |
@@ -2155,6 +2156,16 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) | |||
2155 | mvmvif->cab_queue = queue; | 2156 | mvmvif->cab_queue = queue; |
2156 | } else if (!fw_has_api(&mvm->fw->ucode_capa, | 2157 | } else if (!fw_has_api(&mvm->fw->ucode_capa, |
2157 | IWL_UCODE_TLV_API_STA_TYPE)) { | 2158 | IWL_UCODE_TLV_API_STA_TYPE)) { |
2159 | /* | ||
2160 | * In IBSS, ieee80211_check_queues() sets the cab_queue to be | ||
2161 | * invalid, so make sure we use the queue we want. | ||
2162 | * Note that this is done here as we want to avoid making DQA | ||
2163 | * changes in the mac80211 layer. | ||
2164 | */ | ||
2165 | if (vif->type == NL80211_IFTYPE_ADHOC) { | ||
2166 | vif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE; | ||
2167 | mvmvif->cab_queue = vif->cab_queue; | ||
2168 | } | ||
2158 | iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, | 2169 | iwl_mvm_enable_txq(mvm, vif->cab_queue, vif->cab_queue, 0, |
2159 | &cfg, timeout); | 2170 | &cfg, timeout); |
2160 | } | 2171 | } |
@@ -3321,18 +3332,15 @@ int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, | |||
3321 | 3332 | ||
3322 | /* Get the station from the mvm local station table */ | 3333 | /* Get the station from the mvm local station table */ |
3323 | mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); | 3334 | mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); |
3324 | if (!mvm_sta) { | 3335 | if (mvm_sta) |
3325 | IWL_ERR(mvm, "Failed to find station\n"); | 3336 | sta_id = mvm_sta->sta_id; |
3326 | return -EINVAL; | ||
3327 | } | ||
3328 | sta_id = mvm_sta->sta_id; | ||
3329 | 3337 | ||
3330 | IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", | 3338 | IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", |
3331 | keyconf->keyidx, sta_id); | 3339 | keyconf->keyidx, sta_id); |
3332 | 3340 | ||
3333 | if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || | 3341 | if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || |
3334 | keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || | 3342 | keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || |
3335 | keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) | 3343 | keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) |
3336 | return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); | 3344 | return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); |
3337 | 3345 | ||
3338 | if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { | 3346 | if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 2716cb5483bf..ad62b67dceb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h | |||
@@ -313,6 +313,7 @@ enum iwl_mvm_agg_state { | |||
313 | * This is basically (last acked packet++). | 313 | * This is basically (last acked packet++). |
314 | * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the | 314 | * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the |
315 | * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). | 315 | * Tx response (TX_CMD), and the block ack notification (COMPRESSED_BA). |
316 | * @lq_color: the color of the LQ command as it appears in tx response. | ||
316 | * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. | 317 | * @amsdu_in_ampdu_allowed: true if A-MSDU in A-MPDU is allowed. |
317 | * @state: state of the BA agreement establishment / tear down. | 318 | * @state: state of the BA agreement establishment / tear down. |
318 | * @txq_id: Tx queue used by the BA session / DQA | 319 | * @txq_id: Tx queue used by the BA session / DQA |
@@ -331,6 +332,7 @@ struct iwl_mvm_tid_data { | |||
331 | u16 next_reclaimed; | 332 | u16 next_reclaimed; |
332 | /* The rest is Tx AGG related */ | 333 | /* The rest is Tx AGG related */ |
333 | u32 rate_n_flags; | 334 | u32 rate_n_flags; |
335 | u8 lq_color; | ||
334 | bool amsdu_in_ampdu_allowed; | 336 | bool amsdu_in_ampdu_allowed; |
335 | enum iwl_mvm_agg_state state; | 337 | enum iwl_mvm_agg_state state; |
336 | u16 txq_id; | 338 | u16 txq_id; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index f9cbd197246f..506d58104e1c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c | |||
@@ -790,11 +790,13 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, | |||
790 | struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); | 790 | struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); |
791 | int ret; | 791 | int ret; |
792 | 792 | ||
793 | if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) | ||
794 | return -EIO; | ||
795 | |||
796 | mutex_lock(&mvm->mutex); | 793 | mutex_lock(&mvm->mutex); |
797 | 794 | ||
795 | if (!mvm->ucode_loaded || !(mvm->cur_ucode == IWL_UCODE_REGULAR)) { | ||
796 | ret = -EIO; | ||
797 | goto unlock; | ||
798 | } | ||
799 | |||
798 | if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { | 800 | if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { |
799 | ret = -EINVAL; | 801 | ret = -EINVAL; |
800 | goto unlock; | 802 | goto unlock; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index bcaceb64a6e8..f21901cd4a4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c | |||
@@ -1323,6 +1323,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |||
1323 | struct iwl_mvm_sta *mvmsta; | 1323 | struct iwl_mvm_sta *mvmsta; |
1324 | struct sk_buff_head skbs; | 1324 | struct sk_buff_head skbs; |
1325 | u8 skb_freed = 0; | 1325 | u8 skb_freed = 0; |
1326 | u8 lq_color; | ||
1326 | u16 next_reclaimed, seq_ctl; | 1327 | u16 next_reclaimed, seq_ctl; |
1327 | bool is_ndp = false; | 1328 | bool is_ndp = false; |
1328 | 1329 | ||
@@ -1405,8 +1406,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, | |||
1405 | info->status.tx_time = | 1406 | info->status.tx_time = |
1406 | le16_to_cpu(tx_resp->wireless_media_time); | 1407 | le16_to_cpu(tx_resp->wireless_media_time); |
1407 | BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); | 1408 | BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); |
1409 | lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info); | ||
1408 | info->status.status_driver_data[0] = | 1410 | info->status.status_driver_data[0] = |
1409 | (void *)(uintptr_t)tx_resp->reduced_tpc; | 1411 | RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc); |
1410 | 1412 | ||
1411 | ieee80211_tx_status(mvm->hw, skb); | 1413 | ieee80211_tx_status(mvm->hw, skb); |
1412 | } | 1414 | } |
@@ -1638,6 +1640,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm, | |||
1638 | le32_to_cpu(tx_resp->initial_rate); | 1640 | le32_to_cpu(tx_resp->initial_rate); |
1639 | mvmsta->tid_data[tid].tx_time = | 1641 | mvmsta->tid_data[tid].tx_time = |
1640 | le16_to_cpu(tx_resp->wireless_media_time); | 1642 | le16_to_cpu(tx_resp->wireless_media_time); |
1643 | mvmsta->tid_data[tid].lq_color = | ||
1644 | (tx_resp->tlc_info & TX_RES_RATE_TABLE_COLOR_MSK) >> | ||
1645 | TX_RES_RATE_TABLE_COLOR_POS; | ||
1641 | } | 1646 | } |
1642 | 1647 | ||
1643 | rcu_read_unlock(); | 1648 | rcu_read_unlock(); |
@@ -1707,6 +1712,11 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, | |||
1707 | iwl_mvm_check_ratid_empty(mvm, sta, tid); | 1712 | iwl_mvm_check_ratid_empty(mvm, sta, tid); |
1708 | 1713 | ||
1709 | freed = 0; | 1714 | freed = 0; |
1715 | |||
1716 | /* pack lq color from tid_data along with the reduced txp */ | ||
1717 | ba_info->status.status_driver_data[0] = | ||
1718 | RS_DRV_DATA_PACK(tid_data->lq_color, | ||
1719 | ba_info->status.status_driver_data[0]); | ||
1710 | ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; | 1720 | ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate; |
1711 | 1721 | ||
1712 | skb_queue_walk(&reclaimed_skbs, skb) { | 1722 | skb_queue_walk(&reclaimed_skbs, skb) { |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 70acf850a9f1..93cbc7a69bcd 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c | |||
@@ -2803,7 +2803,8 @@ static struct iwl_trans_dump_data | |||
2803 | #ifdef CONFIG_PM_SLEEP | 2803 | #ifdef CONFIG_PM_SLEEP |
2804 | static int iwl_trans_pcie_suspend(struct iwl_trans *trans) | 2804 | static int iwl_trans_pcie_suspend(struct iwl_trans *trans) |
2805 | { | 2805 | { |
2806 | if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) | 2806 | if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && |
2807 | (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) | ||
2807 | return iwl_pci_fw_enter_d0i3(trans); | 2808 | return iwl_pci_fw_enter_d0i3(trans); |
2808 | 2809 | ||
2809 | return 0; | 2810 | return 0; |
@@ -2811,7 +2812,8 @@ static int iwl_trans_pcie_suspend(struct iwl_trans *trans) | |||
2811 | 2812 | ||
2812 | static void iwl_trans_pcie_resume(struct iwl_trans *trans) | 2813 | static void iwl_trans_pcie_resume(struct iwl_trans *trans) |
2813 | { | 2814 | { |
2814 | if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3) | 2815 | if (trans->runtime_pm_mode == IWL_PLAT_PM_MODE_D0I3 && |
2816 | (trans->system_pm_mode == IWL_PLAT_PM_MODE_D0I3)) | ||
2815 | iwl_pci_fw_exit_d0i3(trans); | 2817 | iwl_pci_fw_exit_d0i3(trans); |
2816 | } | 2818 | } |
2817 | #endif /* CONFIG_PM_SLEEP */ | 2819 | #endif /* CONFIG_PM_SLEEP */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 9fb46a6f47cf..9c9bfbbabdf1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | |||
@@ -906,7 +906,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, | |||
906 | 906 | ||
907 | if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) { | 907 | if (WARN_ON(iwl_rx_packet_payload_len(hcmd.resp_pkt) != sizeof(*rsp))) { |
908 | ret = -EINVAL; | 908 | ret = -EINVAL; |
909 | goto error; | 909 | goto error_free_resp; |
910 | } | 910 | } |
911 | 911 | ||
912 | rsp = (void *)hcmd.resp_pkt->data; | 912 | rsp = (void *)hcmd.resp_pkt->data; |
@@ -915,13 +915,13 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, | |||
915 | if (qid > ARRAY_SIZE(trans_pcie->txq)) { | 915 | if (qid > ARRAY_SIZE(trans_pcie->txq)) { |
916 | WARN_ONCE(1, "queue index %d unsupported", qid); | 916 | WARN_ONCE(1, "queue index %d unsupported", qid); |
917 | ret = -EIO; | 917 | ret = -EIO; |
918 | goto error; | 918 | goto error_free_resp; |
919 | } | 919 | } |
920 | 920 | ||
921 | if (test_and_set_bit(qid, trans_pcie->queue_used)) { | 921 | if (test_and_set_bit(qid, trans_pcie->queue_used)) { |
922 | WARN_ONCE(1, "queue %d already used", qid); | 922 | WARN_ONCE(1, "queue %d already used", qid); |
923 | ret = -EIO; | 923 | ret = -EIO; |
924 | goto error; | 924 | goto error_free_resp; |
925 | } | 925 | } |
926 | 926 | ||
927 | txq->id = qid; | 927 | txq->id = qid; |
@@ -934,8 +934,11 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, | |||
934 | (txq->write_ptr) | (qid << 16)); | 934 | (txq->write_ptr) | (qid << 16)); |
935 | IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); | 935 | IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); |
936 | 936 | ||
937 | iwl_free_resp(&hcmd); | ||
937 | return qid; | 938 | return qid; |
938 | 939 | ||
940 | error_free_resp: | ||
941 | iwl_free_resp(&hcmd); | ||
939 | error: | 942 | error: |
940 | iwl_pcie_gen2_txq_free_memory(trans, txq); | 943 | iwl_pcie_gen2_txq_free_memory(trans, txq); |
941 | return ret; | 944 | return ret; |
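The new error_free_resp label above ensures the host-command response buffer is released on the late failure paths as well as on success, while the pre-existing error label still frees the queue memory. The layered-unwind idiom in standalone form; the resources are plain heap allocations and every name here is invented for illustration:

#include <stdio.h>
#include <stdlib.h>

static int txq_alloc_demo(int fail_late)
{
    int ret = 0;
    char *txq, *resp;

    txq = malloc(64);          /* the queue being set up */
    if (!txq)
        return -1;

    resp = malloc(32);         /* response buffer of the setup command */
    if (!resp) {
        ret = -1;
        goto error;            /* only the queue exists so far */
    }

    if (fail_late) {
        ret = -1;
        goto error_free_resp;  /* unwind in reverse order of acquisition */
    }

    free(resp);                /* the success path also releases the response */
    printf("queue ready\n");   /* txq is intentionally kept: it is the object created */
    return 0;

error_free_resp:
    free(resp);
error:
    free(txq);
    return ret;
}

int main(void)
{
    return txq_alloc_demo(0);
}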
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c index 34c862f213c7..0a9b78705ee8 100644 --- a/drivers/perf/arm_pmu_acpi.c +++ b/drivers/perf/arm_pmu_acpi.c | |||
@@ -29,6 +29,17 @@ static int arm_pmu_acpi_register_irq(int cpu) | |||
29 | return -EINVAL; | 29 | return -EINVAL; |
30 | 30 | ||
31 | gsi = gicc->performance_interrupt; | 31 | gsi = gicc->performance_interrupt; |
32 | |||
33 | /* | ||
34 | * Per the ACPI spec, the MADT cannot describe a PMU that doesn't | ||
35 | * have an interrupt. QEMU advertises this by using a GSI of zero, | ||
36 | * which is not known to be valid on any hardware despite being | ||
37 | * valid per the spec. Take the pragmatic approach and reject a | ||
38 | * GSI of zero for now. | ||
39 | */ | ||
40 | if (!gsi) | ||
41 | return 0; | ||
42 | |||
32 | if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) | 43 | if (gicc->flags & ACPI_MADT_PERFORMANCE_IRQ_MODE) |
33 | trigger = ACPI_EDGE_SENSITIVE; | 44 | trigger = ACPI_EDGE_SENSITIVE; |
34 | else | 45 | else |
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 1653cbda6a82..bd459a93b0e7 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c | |||
@@ -680,30 +680,16 @@ EXPORT_SYMBOL_GPL(pinctrl_generic_remove_group); | |||
680 | * pinctrl_generic_free_groups() - removes all pin groups | 680 | * pinctrl_generic_free_groups() - removes all pin groups |
681 | * @pctldev: pin controller device | 681 | * @pctldev: pin controller device |
682 | * | 682 | * |
683 | * Note that the caller must take care of locking. | 683 | * Note that the caller must take care of locking. The pinctrl groups |
684 | * are allocated with devm_kzalloc() so no need to free them here. | ||
684 | */ | 685 | */ |
685 | static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev) | 686 | static void pinctrl_generic_free_groups(struct pinctrl_dev *pctldev) |
686 | { | 687 | { |
687 | struct radix_tree_iter iter; | 688 | struct radix_tree_iter iter; |
688 | struct group_desc *group; | ||
689 | unsigned long *indices; | ||
690 | void **slot; | 689 | void **slot; |
691 | int i = 0; | ||
692 | |||
693 | indices = devm_kzalloc(pctldev->dev, sizeof(*indices) * | ||
694 | pctldev->num_groups, GFP_KERNEL); | ||
695 | if (!indices) | ||
696 | return; | ||
697 | 690 | ||
698 | radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0) | 691 | radix_tree_for_each_slot(slot, &pctldev->pin_group_tree, &iter, 0) |
699 | indices[i++] = iter.index; | 692 | radix_tree_delete(&pctldev->pin_group_tree, iter.index); |
700 | |||
701 | for (i = 0; i < pctldev->num_groups; i++) { | ||
702 | group = radix_tree_lookup(&pctldev->pin_group_tree, | ||
703 | indices[i]); | ||
704 | radix_tree_delete(&pctldev->pin_group_tree, indices[i]); | ||
705 | devm_kfree(pctldev->dev, group); | ||
706 | } | ||
707 | 693 | ||
708 | pctldev->num_groups = 0; | 694 | pctldev->num_groups = 0; |
709 | } | 695 | } |
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c index 41b5b07d5a2b..6852010a6d70 100644 --- a/drivers/pinctrl/freescale/pinctrl-mxs.c +++ b/drivers/pinctrl/freescale/pinctrl-mxs.c | |||
@@ -194,6 +194,16 @@ static int mxs_pinctrl_get_func_groups(struct pinctrl_dev *pctldev, | |||
194 | return 0; | 194 | return 0; |
195 | } | 195 | } |
196 | 196 | ||
197 | static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg) | ||
198 | { | ||
199 | u32 tmp; | ||
200 | |||
201 | tmp = readl(reg); | ||
202 | tmp &= ~(mask << shift); | ||
203 | tmp |= value << shift; | ||
204 | writel(tmp, reg); | ||
205 | } | ||
206 | |||
197 | static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, | 207 | static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, |
198 | unsigned group) | 208 | unsigned group) |
199 | { | 209 | { |
@@ -211,8 +221,7 @@ static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector, | |||
211 | reg += bank * 0x20 + pin / 16 * 0x10; | 221 | reg += bank * 0x20 + pin / 16 * 0x10; |
212 | shift = pin % 16 * 2; | 222 | shift = pin % 16 * 2; |
213 | 223 | ||
214 | writel(0x3 << shift, reg + CLR); | 224 | mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg); |
215 | writel(g->muxsel[i] << shift, reg + SET); | ||
216 | } | 225 | } |
217 | 226 | ||
218 | return 0; | 227 | return 0; |
@@ -279,8 +288,7 @@ static int mxs_pinconf_group_set(struct pinctrl_dev *pctldev, | |||
279 | /* mA */ | 288 | /* mA */ |
280 | if (config & MA_PRESENT) { | 289 | if (config & MA_PRESENT) { |
281 | shift = pin % 8 * 4; | 290 | shift = pin % 8 * 4; |
282 | writel(0x3 << shift, reg + CLR); | 291 | mxs_pinctrl_rmwl(ma, 0x3, shift, reg); |
283 | writel(ma << shift, reg + SET); | ||
284 | } | 292 | } |
285 | 293 | ||
286 | /* vol */ | 294 | /* vol */ |
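mxs_pinctrl_rmwl() above replaces the old clear-then-set pair of register writes with a single read-modify-write, so the selected field is updated without being left momentarily cleared between the two writes. The helper in standalone form, operating on an ordinary variable rather than an __iomem register:

#include <stdio.h>
#include <stdint.h>

static void rmwl(uint32_t value, uint32_t mask, uint8_t shift, uint32_t *reg)
{
    uint32_t tmp = *reg;

    tmp &= ~(mask << shift);   /* clear the target field */
    tmp |= value << shift;     /* install the new field value */
    *reg = tmp;
}

int main(void)
{
    uint32_t reg = 0xffffffff;      /* pretend mux register with all bits set */

    rmwl(0x2, 0x3, 4, &reg);        /* set the 2-bit field at bits 5:4 to 2 */
    printf("reg = 0x%08x\n", reg);  /* 0xffffffef: neighbouring bits untouched */
    return 0;
}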
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 2debba62fac9..20f1b4493994 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
@@ -1539,15 +1539,29 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) | |||
1539 | * is not listed below. | 1539 | * is not listed below. |
1540 | */ | 1540 | */ |
1541 | static const struct dmi_system_id chv_no_valid_mask[] = { | 1541 | static const struct dmi_system_id chv_no_valid_mask[] = { |
1542 | /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */ | ||
1542 | { | 1543 | { |
1543 | /* See https://bugzilla.kernel.org/show_bug.cgi?id=194945 */ | 1544 | .ident = "Intel_Strago based Chromebooks (All models)", |
1544 | .ident = "Acer Chromebook (CYAN)", | ||
1545 | .matches = { | 1545 | .matches = { |
1546 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | 1546 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), |
1547 | DMI_MATCH(DMI_PRODUCT_NAME, "Edgar"), | 1547 | DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), |
1548 | DMI_MATCH(DMI_BIOS_DATE, "05/21/2016"), | ||
1549 | }, | 1548 | }, |
1550 | } | 1549 | }, |
1550 | { | ||
1551 | .ident = "Acer Chromebook R11 (Cyan)", | ||
1552 | .matches = { | ||
1553 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | ||
1554 | DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), | ||
1555 | }, | ||
1556 | }, | ||
1557 | { | ||
1558 | .ident = "Samsung Chromebook 3 (Celes)", | ||
1559 | .matches = { | ||
1560 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | ||
1561 | DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), | ||
1562 | }, | ||
1563 | }, | ||
1564 | {} | ||
1551 | }; | 1565 | }; |
1552 | 1566 | ||
1553 | static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | 1567 | static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) |
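The Cherryview hunk above rewrites the DMI quirk list as separate per-board entries and, importantly, adds the empty { } terminator that the previous single-entry table lacked; the DMI matching code walks the array until it reaches that all-zero sentinel. A standalone sketch of the same sentinel-terminated match-table pattern, with invented vendor and product strings:

#include <stdio.h>
#include <string.h>

struct sys_match {
    const char *ident;
    const char *vendor;
    const char *product;
};

static const struct sys_match quirk_table[] = {
    { "Demo board A", "ACME", "Alpha" },
    { "Demo board B", "ACME", "Beta" },
    { }    /* all-zero terminator, like the { } closing the table above */
};

static const struct sys_match *match_system(const char *vendor, const char *product)
{
    const struct sys_match *m;

    for (m = quirk_table; m->ident; m++)    /* stop at the sentinel */
        if (!strcmp(m->vendor, vendor) && !strcmp(m->product, product))
            return m;
    return NULL;
}

int main(void)
{
    const struct sys_match *m = match_system("ACME", "Beta");

    printf("%s\n", m ? m->ident : "no quirk needed");
    return 0;
}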
diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c index 0d6b7f4b82af..720a19fd38d2 100644 --- a/drivers/pinctrl/pinconf-generic.c +++ b/drivers/pinctrl/pinconf-generic.c | |||
@@ -35,7 +35,6 @@ static const struct pin_config_item conf_items[] = { | |||
35 | PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, | 35 | PCONFDUMP(PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, |
36 | "input bias pull to pin specific state", NULL, false), | 36 | "input bias pull to pin specific state", NULL, false), |
37 | PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false), | 37 | PCONFDUMP(PIN_CONFIG_BIAS_PULL_UP, "input bias pull up", NULL, false), |
38 | PCONFDUMP(PIN_CONFIG_BIDIRECTIONAL, "bi-directional pin operations", NULL, false), | ||
39 | PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), | 38 | PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_DRAIN, "output drive open drain", NULL, false), |
40 | PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), | 39 | PCONFDUMP(PIN_CONFIG_DRIVE_OPEN_SOURCE, "output drive open source", NULL, false), |
41 | PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), | 40 | PCONFDUMP(PIN_CONFIG_DRIVE_PUSH_PULL, "output drive push pull", NULL, false), |
@@ -161,7 +160,6 @@ static const struct pinconf_generic_params dt_params[] = { | |||
161 | { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 }, | 160 | { "bias-pull-up", PIN_CONFIG_BIAS_PULL_UP, 1 }, |
162 | { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 }, | 161 | { "bias-pull-pin-default", PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, 1 }, |
163 | { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 }, | 162 | { "bias-pull-down", PIN_CONFIG_BIAS_PULL_DOWN, 1 }, |
164 | { "bi-directional", PIN_CONFIG_BIDIRECTIONAL, 1 }, | ||
165 | { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 }, | 163 | { "drive-open-drain", PIN_CONFIG_DRIVE_OPEN_DRAIN, 0 }, |
166 | { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 }, | 164 | { "drive-open-source", PIN_CONFIG_DRIVE_OPEN_SOURCE, 0 }, |
167 | { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 }, | 165 | { "drive-push-pull", PIN_CONFIG_DRIVE_PUSH_PULL, 0 }, |
@@ -174,7 +172,6 @@ static const struct pinconf_generic_params dt_params[] = { | |||
174 | { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, | 172 | { "input-schmitt-enable", PIN_CONFIG_INPUT_SCHMITT_ENABLE, 1 }, |
175 | { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 }, | 173 | { "low-power-disable", PIN_CONFIG_LOW_POWER_MODE, 0 }, |
176 | { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 }, | 174 | { "low-power-enable", PIN_CONFIG_LOW_POWER_MODE, 1 }, |
177 | { "output-enable", PIN_CONFIG_OUTPUT, 1, }, | ||
178 | { "output-high", PIN_CONFIG_OUTPUT, 1, }, | 175 | { "output-high", PIN_CONFIG_OUTPUT, 1, }, |
179 | { "output-low", PIN_CONFIG_OUTPUT, 0, }, | 176 | { "output-low", PIN_CONFIG_OUTPUT, 0, }, |
180 | { "power-source", PIN_CONFIG_POWER_SOURCE, 0 }, | 177 | { "power-source", PIN_CONFIG_POWER_SOURCE, 0 }, |
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c index 9fd6d9087dc5..16b3ae5e4f44 100644 --- a/drivers/pinctrl/pinmux.c +++ b/drivers/pinctrl/pinmux.c | |||
@@ -826,30 +826,17 @@ EXPORT_SYMBOL_GPL(pinmux_generic_remove_function); | |||
826 | * pinmux_generic_free_functions() - removes all functions | 826 | * pinmux_generic_free_functions() - removes all functions |
827 | * @pctldev: pin controller device | 827 | * @pctldev: pin controller device |
828 | * | 828 | * |
829 | * Note that the caller must take care of locking. | 829 | * Note that the caller must take care of locking. The pinctrl |
830 | * functions are allocated with devm_kzalloc() so no need to free | ||
831 | * them here. | ||
830 | */ | 832 | */ |
831 | void pinmux_generic_free_functions(struct pinctrl_dev *pctldev) | 833 | void pinmux_generic_free_functions(struct pinctrl_dev *pctldev) |
832 | { | 834 | { |
833 | struct radix_tree_iter iter; | 835 | struct radix_tree_iter iter; |
834 | struct function_desc *function; | ||
835 | unsigned long *indices; | ||
836 | void **slot; | 836 | void **slot; |
837 | int i = 0; | ||
838 | |||
839 | indices = devm_kzalloc(pctldev->dev, sizeof(*indices) * | ||
840 | pctldev->num_functions, GFP_KERNEL); | ||
841 | if (!indices) | ||
842 | return; | ||
843 | 837 | ||
844 | radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0) | 838 | radix_tree_for_each_slot(slot, &pctldev->pin_function_tree, &iter, 0) |
845 | indices[i++] = iter.index; | 839 | radix_tree_delete(&pctldev->pin_function_tree, iter.index); |
846 | |||
847 | for (i = 0; i < pctldev->num_functions; i++) { | ||
848 | function = radix_tree_lookup(&pctldev->pin_function_tree, | ||
849 | indices[i]); | ||
850 | radix_tree_delete(&pctldev->pin_function_tree, indices[i]); | ||
851 | devm_kfree(pctldev->dev, function); | ||
852 | } | ||
853 | 840 | ||
854 | pctldev->num_functions = 0; | 841 | pctldev->num_functions = 0; |
855 | } | 842 | } |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c index 9aec1d2232dd..6624499eae72 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c | |||
@@ -394,7 +394,7 @@ static const struct sunxi_desc_pin sun8i_a83t_pins[] = { | |||
394 | SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18), | 394 | SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18), |
395 | SUNXI_FUNCTION(0x0, "gpio_in"), | 395 | SUNXI_FUNCTION(0x0, "gpio_in"), |
396 | SUNXI_FUNCTION(0x1, "gpio_out"), | 396 | SUNXI_FUNCTION(0x1, "gpio_out"), |
397 | SUNXI_FUNCTION(0x3, "owa")), /* DOUT */ | 397 | SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */ |
398 | SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19), | 398 | SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19), |
399 | SUNXI_FUNCTION(0x0, "gpio_in"), | 399 | SUNXI_FUNCTION(0x0, "gpio_in"), |
400 | SUNXI_FUNCTION(0x1, "gpio_out")), | 400 | SUNXI_FUNCTION(0x1, "gpio_out")), |
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index bd7d39ecbd24..fb06974c88c1 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c | |||
@@ -1873,6 +1873,11 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) | |||
1873 | tcp_task->dd_data = tdata; | 1873 | tcp_task->dd_data = tdata; |
1874 | task->hdr = NULL; | 1874 | task->hdr = NULL; |
1875 | 1875 | ||
1876 | if (tdata->skb) { | ||
1877 | kfree_skb(tdata->skb); | ||
1878 | tdata->skb = NULL; | ||
1879 | } | ||
1880 | |||
1876 | if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && | 1881 | if (SKB_MAX_HEAD(cdev->skb_tx_rsvd) > (512 * MAX_SKB_FRAGS) && |
1877 | (opcode == ISCSI_OP_SCSI_DATA_OUT || | 1882 | (opcode == ISCSI_OP_SCSI_DATA_OUT || |
1878 | (opcode == ISCSI_OP_SCSI_CMD && | 1883 | (opcode == ISCSI_OP_SCSI_CMD && |
@@ -1890,6 +1895,7 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode) | |||
1890 | return -ENOMEM; | 1895 | return -ENOMEM; |
1891 | } | 1896 | } |
1892 | 1897 | ||
1898 | skb_get(tdata->skb); | ||
1893 | skb_reserve(tdata->skb, cdev->skb_tx_rsvd); | 1899 | skb_reserve(tdata->skb, cdev->skb_tx_rsvd); |
1894 | task->hdr = (struct iscsi_hdr *)tdata->skb->data; | 1900 | task->hdr = (struct iscsi_hdr *)tdata->skb->data; |
1895 | task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ | 1901 | task->hdr_max = SKB_TX_ISCSI_PDU_HEADER_MAX; /* BHS + AHS */ |
@@ -2035,9 +2041,9 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | |||
2035 | unsigned int datalen; | 2041 | unsigned int datalen; |
2036 | int err; | 2042 | int err; |
2037 | 2043 | ||
2038 | if (!skb) { | 2044 | if (!skb || cxgbi_skcb_test_flag(skb, SKCBF_TX_DONE)) { |
2039 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, | 2045 | log_debug(1 << CXGBI_DBG_ISCSI | 1 << CXGBI_DBG_PDU_TX, |
2040 | "task 0x%p, skb NULL.\n", task); | 2046 | "task 0x%p, skb 0x%p\n", task, skb); |
2041 | return 0; | 2047 | return 0; |
2042 | } | 2048 | } |
2043 | 2049 | ||
@@ -2050,7 +2056,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | |||
2050 | } | 2056 | } |
2051 | 2057 | ||
2052 | datalen = skb->data_len; | 2058 | datalen = skb->data_len; |
2053 | tdata->skb = NULL; | ||
2054 | 2059 | ||
2055 | /* write ppod first if using ofldq to write ppod */ | 2060 | /* write ppod first if using ofldq to write ppod */ |
2056 | if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { | 2061 | if (ttinfo->flags & CXGBI_PPOD_INFO_FLAG_VALID) { |
@@ -2078,6 +2083,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | |||
2078 | pdulen += ISCSI_DIGEST_SIZE; | 2083 | pdulen += ISCSI_DIGEST_SIZE; |
2079 | 2084 | ||
2080 | task->conn->txdata_octets += pdulen; | 2085 | task->conn->txdata_octets += pdulen; |
2086 | cxgbi_skcb_set_flag(skb, SKCBF_TX_DONE); | ||
2081 | return 0; | 2087 | return 0; |
2082 | } | 2088 | } |
2083 | 2089 | ||
@@ -2086,7 +2092,6 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | |||
2086 | "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", | 2092 | "task 0x%p, skb 0x%p, len %u/%u, %d EAGAIN.\n", |
2087 | task, skb, skb->len, skb->data_len, err); | 2093 | task, skb, skb->len, skb->data_len, err); |
2088 | /* reset skb to send when we are called again */ | 2094 | /* reset skb to send when we are called again */ |
2089 | tdata->skb = skb; | ||
2090 | return err; | 2095 | return err; |
2091 | } | 2096 | } |
2092 | 2097 | ||
@@ -2094,7 +2099,8 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *task) | |||
2094 | "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", | 2099 | "itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n", |
2095 | task->itt, skb, skb->len, skb->data_len, err); | 2100 | task->itt, skb, skb->len, skb->data_len, err); |
2096 | 2101 | ||
2097 | kfree_skb(skb); | 2102 | __kfree_skb(tdata->skb); |
2103 | tdata->skb = NULL; | ||
2098 | 2104 | ||
2099 | iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); | 2105 | iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err); |
2100 | iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); | 2106 | iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED); |
@@ -2113,8 +2119,10 @@ void cxgbi_cleanup_task(struct iscsi_task *task) | |||
2113 | 2119 | ||
2114 | tcp_task->dd_data = NULL; | 2120 | tcp_task->dd_data = NULL; |
2115 | /* never reached the xmit task callout */ | 2121 | /* never reached the xmit task callout */ |
2116 | if (tdata->skb) | 2122 | if (tdata->skb) { |
2117 | __kfree_skb(tdata->skb); | 2123 | kfree_skb(tdata->skb); |
2124 | tdata->skb = NULL; | ||
2125 | } | ||
2118 | 2126 | ||
2119 | task_release_itt(task, task->hdr_itt); | 2127 | task_release_itt(task, task->hdr_itt); |
2120 | memset(tdata, 0, sizeof(*tdata)); | 2128 | memset(tdata, 0, sizeof(*tdata)); |
@@ -2714,6 +2722,9 @@ EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); | |||
2714 | static int __init libcxgbi_init_module(void) | 2722 | static int __init libcxgbi_init_module(void) |
2715 | { | 2723 | { |
2716 | pr_info("%s", version); | 2724 | pr_info("%s", version); |
2725 | |||
2726 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < | ||
2727 | sizeof(struct cxgbi_skb_cb)); | ||
2717 | return 0; | 2728 | return 0; |
2718 | } | 2729 | } |
2719 | 2730 | ||
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 18e0ea83d361..239462a75760 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h | |||
@@ -195,7 +195,8 @@ struct cxgbi_skb_rx_cb { | |||
195 | }; | 195 | }; |
196 | 196 | ||
197 | struct cxgbi_skb_tx_cb { | 197 | struct cxgbi_skb_tx_cb { |
198 | void *l2t; | 198 | void *handle; |
199 | void *arp_err_handler; | ||
199 | struct sk_buff *wr_next; | 200 | struct sk_buff *wr_next; |
200 | }; | 201 | }; |
201 | 202 | ||
@@ -203,6 +204,7 @@ enum cxgbi_skcb_flags { | |||
203 | SKCBF_TX_NEED_HDR, /* packet needs a header */ | 204 | SKCBF_TX_NEED_HDR, /* packet needs a header */ |
204 | SKCBF_TX_MEM_WRITE, /* memory write */ | 205 | SKCBF_TX_MEM_WRITE, /* memory write */ |
205 | SKCBF_TX_FLAG_COMPL, /* wr completion flag */ | 206 | SKCBF_TX_FLAG_COMPL, /* wr completion flag */ |
207 | SKCBF_TX_DONE, /* skb tx done */ | ||
206 | SKCBF_RX_COALESCED, /* received whole pdu */ | 208 | SKCBF_RX_COALESCED, /* received whole pdu */ |
207 | SKCBF_RX_HDR, /* received pdu header */ | 209 | SKCBF_RX_HDR, /* received pdu header */ |
208 | SKCBF_RX_DATA, /* received pdu payload */ | 210 | SKCBF_RX_DATA, /* received pdu payload */ |
@@ -215,13 +217,13 @@ enum cxgbi_skcb_flags { | |||
215 | }; | 217 | }; |
216 | 218 | ||
217 | struct cxgbi_skb_cb { | 219 | struct cxgbi_skb_cb { |
218 | unsigned char ulp_mode; | ||
219 | unsigned long flags; | ||
220 | unsigned int seq; | ||
221 | union { | 220 | union { |
222 | struct cxgbi_skb_rx_cb rx; | 221 | struct cxgbi_skb_rx_cb rx; |
223 | struct cxgbi_skb_tx_cb tx; | 222 | struct cxgbi_skb_tx_cb tx; |
224 | }; | 223 | }; |
224 | unsigned char ulp_mode; | ||
225 | unsigned long flags; | ||
226 | unsigned int seq; | ||
225 | }; | 227 | }; |
226 | 228 | ||
227 | #define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) | 229 | #define CXGBI_SKB_CB(skb) ((struct cxgbi_skb_cb *)&((skb)->cb[0])) |
@@ -374,11 +376,9 @@ static inline void cxgbi_sock_enqueue_wr(struct cxgbi_sock *csk, | |||
374 | cxgbi_skcb_tx_wr_next(skb) = NULL; | 376 | cxgbi_skcb_tx_wr_next(skb) = NULL; |
375 | /* | 377 | /* |
376 | * We want to take an extra reference since both we and the driver | 378 | * We want to take an extra reference since both we and the driver |
377 | * need to free the packet before it's really freed. We know there's | 379 | * need to free the packet before it's really freed. |
378 | * just one user currently so we use atomic_set rather than skb_get | ||
379 | * to avoid the atomic op. | ||
380 | */ | 380 | */ |
381 | atomic_set(&skb->users, 2); | 381 | skb_get(skb); |
382 | 382 | ||
383 | if (!csk->wr_pending_head) | 383 | if (!csk->wr_pending_head) |
384 | csk->wr_pending_head = skb; | 384 | csk->wr_pending_head = skb; |
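libcxgbi now keeps extra tx state in the skb control block, and the module init adds a BUILD_BUG_ON so the build fails if struct cxgbi_skb_cb ever outgrows sk_buff->cb. The same compile-time guard expressed with C11 _Static_assert in a standalone sketch; the 48-byte cb size and the field layout below are stand-ins, not the exact kernel definitions:

#include <stdio.h>

#define SKB_CB_SIZE 48              /* stand-in for sizeof(((struct sk_buff *)0)->cb) */

struct demo_skb {
    char cb[SKB_CB_SIZE];           /* per-layer control block area */
};

struct demo_skb_cb {                /* stand-in for struct cxgbi_skb_cb */
    union {
        struct { void *handle, *arp_err_handler, *wr_next; } tx;
        struct { unsigned int ddigest, pdulen; } rx;
    };
    unsigned char ulp_mode;
    unsigned long flags;
    unsigned int seq;
};

/* Fails the build if the control block ever outgrows the cb[] area. */
_Static_assert(sizeof(struct demo_skb_cb) <= sizeof(((struct demo_skb *)0)->cb),
               "control block does not fit in skb->cb");

int main(void)
{
    printf("cb area %zu bytes, control block %zu bytes\n",
           sizeof(((struct demo_skb *)0)->cb), sizeof(struct demo_skb_cb));
    return 0;
}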
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 3cbab8710e58..2ceff585f189 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c | |||
@@ -265,18 +265,16 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr, | |||
265 | struct list_head *list, | 265 | struct list_head *list, |
266 | unsigned char *cdb) | 266 | unsigned char *cdb) |
267 | { | 267 | { |
268 | struct scsi_device *sdev = ctlr->ms_sdev; | ||
269 | struct rdac_dh_data *h = sdev->handler_data; | ||
270 | struct rdac_mode_common *common; | 268 | struct rdac_mode_common *common; |
271 | unsigned data_size; | 269 | unsigned data_size; |
272 | struct rdac_queue_data *qdata; | 270 | struct rdac_queue_data *qdata; |
273 | u8 *lun_table; | 271 | u8 *lun_table; |
274 | 272 | ||
275 | if (h->ctlr->use_ms10) { | 273 | if (ctlr->use_ms10) { |
276 | struct rdac_pg_expanded *rdac_pg; | 274 | struct rdac_pg_expanded *rdac_pg; |
277 | 275 | ||
278 | data_size = sizeof(struct rdac_pg_expanded); | 276 | data_size = sizeof(struct rdac_pg_expanded); |
279 | rdac_pg = &h->ctlr->mode_select.expanded; | 277 | rdac_pg = &ctlr->mode_select.expanded; |
280 | memset(rdac_pg, 0, data_size); | 278 | memset(rdac_pg, 0, data_size); |
281 | common = &rdac_pg->common; | 279 | common = &rdac_pg->common; |
282 | rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; | 280 | rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER + 0x40; |
@@ -288,7 +286,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr, | |||
288 | struct rdac_pg_legacy *rdac_pg; | 286 | struct rdac_pg_legacy *rdac_pg; |
289 | 287 | ||
290 | data_size = sizeof(struct rdac_pg_legacy); | 288 | data_size = sizeof(struct rdac_pg_legacy); |
291 | rdac_pg = &h->ctlr->mode_select.legacy; | 289 | rdac_pg = &ctlr->mode_select.legacy; |
292 | memset(rdac_pg, 0, data_size); | 290 | memset(rdac_pg, 0, data_size); |
293 | common = &rdac_pg->common; | 291 | common = &rdac_pg->common; |
294 | rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; | 292 | rdac_pg->page_code = RDAC_PAGE_CODE_REDUNDANT_CONTROLLER; |
@@ -304,7 +302,7 @@ static unsigned int rdac_failover_get(struct rdac_controller *ctlr, | |||
304 | } | 302 | } |
305 | 303 | ||
306 | /* Prepare the command. */ | 304 | /* Prepare the command. */ |
307 | if (h->ctlr->use_ms10) { | 305 | if (ctlr->use_ms10) { |
308 | cdb[0] = MODE_SELECT_10; | 306 | cdb[0] = MODE_SELECT_10; |
309 | cdb[7] = data_size >> 8; | 307 | cdb[7] = data_size >> 8; |
310 | cdb[8] = data_size & 0xff; | 308 | cdb[8] = data_size & 0xff; |
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index d390325c99ec..abf6026645dd 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | |||
@@ -1170,6 +1170,8 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi) | |||
1170 | cmd = list_first_entry_or_null(&vscsi->free_cmd, | 1170 | cmd = list_first_entry_or_null(&vscsi->free_cmd, |
1171 | struct ibmvscsis_cmd, list); | 1171 | struct ibmvscsis_cmd, list); |
1172 | if (cmd) { | 1172 | if (cmd) { |
1173 | if (cmd->abort_cmd) | ||
1174 | cmd->abort_cmd = NULL; | ||
1173 | cmd->flags &= ~(DELAY_SEND); | 1175 | cmd->flags &= ~(DELAY_SEND); |
1174 | list_del(&cmd->list); | 1176 | list_del(&cmd->list); |
1175 | cmd->iue = iue; | 1177 | cmd->iue = iue; |
@@ -1774,6 +1776,7 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) | |||
1774 | if (cmd->abort_cmd) { | 1776 | if (cmd->abort_cmd) { |
1775 | retry = true; | 1777 | retry = true; |
1776 | cmd->abort_cmd->flags &= ~(DELAY_SEND); | 1778 | cmd->abort_cmd->flags &= ~(DELAY_SEND); |
1779 | cmd->abort_cmd = NULL; | ||
1777 | } | 1780 | } |
1778 | 1781 | ||
1779 | /* | 1782 | /* |
@@ -1788,6 +1791,25 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) | |||
1788 | list_del(&cmd->list); | 1791 | list_del(&cmd->list); |
1789 | ibmvscsis_free_cmd_resources(vscsi, | 1792 | ibmvscsis_free_cmd_resources(vscsi, |
1790 | cmd); | 1793 | cmd); |
1794 | /* | ||
1795 | * With a successfully aborted op | ||
1796 | * through LIO we want to increment the | ||
1797 | * vscsi credit so that when we don't | ||
1798 | * send a rsp to the original scsi abort | ||
1799 | * op (h_send_crq), but the tm rsp to | ||
1800 | * the abort is sent, the credit is | ||
1801 | * correctly sent with the abort tm rsp. | ||
1802 | * We would need 1 for the abort tm rsp | ||
1803 | * and 1 credit for the aborted scsi op. | ||
1804 | * Thus we need to increment here. | ||
1805 | * Also we want to increment the credit | ||
1806 | * here because we want to make sure | ||
1807 | * cmd is actually released first | ||
1808 | * otherwise the client will think it | ||
1809 | * can send a new cmd, and we could | ||
1810 | * find ourselves short of cmd elements. | ||
1811 | */ | ||
1812 | vscsi->credit += 1; | ||
1791 | } else { | 1813 | } else { |
1792 | iue = cmd->iue; | 1814 | iue = cmd->iue; |
1793 | 1815 | ||
@@ -2962,10 +2984,7 @@ static long srp_build_response(struct scsi_info *vscsi, | |||
2962 | 2984 | ||
2963 | rsp->opcode = SRP_RSP; | 2985 | rsp->opcode = SRP_RSP; |
2964 | 2986 | ||
2965 | if (vscsi->credit > 0 && vscsi->state == SRP_PROCESSING) | 2987 | rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); |
2966 | rsp->req_lim_delta = cpu_to_be32(vscsi->credit); | ||
2967 | else | ||
2968 | rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit); | ||
2969 | rsp->tag = cmd->rsp.tag; | 2988 | rsp->tag = cmd->rsp.tag; |
2970 | rsp->flags = 0; | 2989 | rsp->flags = 0; |
2971 | 2990 | ||
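
The two ibmvscsis hunks work together: when an op is successfully aborted through LIO, the driver sends only the task-management response and not a response for the aborted SCSI command, so it banks that command's request-limit credit in vscsi->credit; srp_build_response() then always advertises 1 + vscsi->credit instead of special-casing the SRP_PROCESSING state. A condensed sketch of the accounting (field names from the driver, helper name hypothetical):

	/* one credit for the response being built, plus any credits banked
	 * for commands that were aborted and never answered directly
	 */
	static void sketch_fill_req_lim(struct scsi_info *vscsi, struct srp_rsp *rsp)
	{
		rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
	}
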
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h index 5ca3e8c28a3f..32632c9b2276 100644 --- a/drivers/scsi/qedi/qedi.h +++ b/drivers/scsi/qedi/qedi.h | |||
@@ -38,7 +38,7 @@ struct qedi_endpoint; | |||
38 | #define QEDI_MAX_ISCSI_TASK 4096 | 38 | #define QEDI_MAX_ISCSI_TASK 4096 |
39 | #define QEDI_MAX_TASK_NUM 0x0FFF | 39 | #define QEDI_MAX_TASK_NUM 0x0FFF |
40 | #define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 | 40 | #define QEDI_MAX_ISCSI_CONNS_PER_HBA 1024 |
41 | #define QEDI_ISCSI_MAX_BDS_PER_CMD 256 /* Firmware max BDs is 256 */ | 41 | #define QEDI_ISCSI_MAX_BDS_PER_CMD 255 /* Firmware max BDs is 255 */ |
42 | #define MAX_OUSTANDING_TASKS_PER_CON 1024 | 42 | #define MAX_OUSTANDING_TASKS_PER_CON 1024 |
43 | 43 | ||
44 | #define QEDI_MAX_BD_LEN 0xffff | 44 | #define QEDI_MAX_BD_LEN 0xffff |
@@ -63,6 +63,7 @@ struct qedi_endpoint; | |||
63 | #define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) | 63 | #define QEDI_PAGE_MASK (~((QEDI_PAGE_SIZE) - 1)) |
64 | 64 | ||
65 | #define QEDI_PAGE_SIZE 4096 | 65 | #define QEDI_PAGE_SIZE 4096 |
66 | #define QEDI_HW_DMA_BOUNDARY 0xfff | ||
66 | #define QEDI_PATH_HANDLE 0xFE0000000UL | 67 | #define QEDI_PATH_HANDLE 0xFE0000000UL |
67 | 68 | ||
68 | struct qedi_uio_ctrl { | 69 | struct qedi_uio_ctrl { |
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 7658138f2283..2ee92aa90fe9 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c | |||
@@ -1494,6 +1494,8 @@ static int qedi_send_iscsi_tmf(struct qedi_conn *qedi_conn, | |||
1494 | tmf_hdr = (struct iscsi_tm *)mtask->hdr; | 1494 | tmf_hdr = (struct iscsi_tm *)mtask->hdr; |
1495 | qedi_cmd = (struct qedi_cmd *)mtask->dd_data; | 1495 | qedi_cmd = (struct qedi_cmd *)mtask->dd_data; |
1496 | ep = qedi_conn->ep; | 1496 | ep = qedi_conn->ep; |
1497 | if (!ep) | ||
1498 | return -ENODEV; | ||
1497 | 1499 | ||
1498 | tid = qedi_get_task_idx(qedi); | 1500 | tid = qedi_get_task_idx(qedi); |
1499 | if (tid == -1) | 1501 | if (tid == -1) |
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c index 0c8ccffa4c38..80edd28b635f 100644 --- a/drivers/scsi/qedi/qedi_iscsi.c +++ b/drivers/scsi/qedi/qedi_iscsi.c | |||
@@ -59,6 +59,7 @@ struct scsi_host_template qedi_host_template = { | |||
59 | .this_id = -1, | 59 | .this_id = -1, |
60 | .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, | 60 | .sg_tablesize = QEDI_ISCSI_MAX_BDS_PER_CMD, |
61 | .max_sectors = 0xffff, | 61 | .max_sectors = 0xffff, |
62 | .dma_boundary = QEDI_HW_DMA_BOUNDARY, | ||
62 | .cmd_per_lun = 128, | 63 | .cmd_per_lun = 128, |
63 | .use_clustering = ENABLE_CLUSTERING, | 64 | .use_clustering = ENABLE_CLUSTERING, |
64 | .shost_attrs = qedi_shost_attrs, | 65 | .shost_attrs = qedi_shost_attrs, |
@@ -1223,8 +1224,12 @@ static int qedi_set_path(struct Scsi_Host *shost, struct iscsi_path *path_data) | |||
1223 | 1224 | ||
1224 | iscsi_cid = (u32)path_data->handle; | 1225 | iscsi_cid = (u32)path_data->handle; |
1225 | qedi_ep = qedi->ep_tbl[iscsi_cid]; | 1226 | qedi_ep = qedi->ep_tbl[iscsi_cid]; |
1226 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN, | 1227 | QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, |
1227 | "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); | 1228 | "iscsi_cid=0x%x, qedi_ep=%p\n", iscsi_cid, qedi_ep); |
1229 | if (!qedi_ep) { | ||
1230 | ret = -EINVAL; | ||
1231 | goto set_path_exit; | ||
1232 | } | ||
1228 | 1233 | ||
1229 | if (!is_valid_ether_addr(&path_data->mac_addr[0])) { | 1234 | if (!is_valid_ether_addr(&path_data->mac_addr[0])) { |
1230 | QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); | 1235 | QEDI_NOTICE(&qedi->dbg_ctx, "dst mac NOT VALID\n"); |
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 073b3051bb8f..f46880315ba8 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c | |||
@@ -151,6 +151,11 @@ static int qedi_uio_close(struct uio_info *uinfo, struct inode *inode) | |||
151 | 151 | ||
152 | static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) | 152 | static void __qedi_free_uio_rings(struct qedi_uio_dev *udev) |
153 | { | 153 | { |
154 | if (udev->uctrl) { | ||
155 | free_page((unsigned long)udev->uctrl); | ||
156 | udev->uctrl = NULL; | ||
157 | } | ||
158 | |||
154 | if (udev->ll2_ring) { | 159 | if (udev->ll2_ring) { |
155 | free_page((unsigned long)udev->ll2_ring); | 160 | free_page((unsigned long)udev->ll2_ring); |
156 | udev->ll2_ring = NULL; | 161 | udev->ll2_ring = NULL; |
@@ -169,7 +174,6 @@ static void __qedi_free_uio(struct qedi_uio_dev *udev) | |||
169 | __qedi_free_uio_rings(udev); | 174 | __qedi_free_uio_rings(udev); |
170 | 175 | ||
171 | pci_dev_put(udev->pdev); | 176 | pci_dev_put(udev->pdev); |
172 | kfree(udev->uctrl); | ||
173 | kfree(udev); | 177 | kfree(udev); |
174 | } | 178 | } |
175 | 179 | ||
@@ -208,6 +212,11 @@ static int __qedi_alloc_uio_rings(struct qedi_uio_dev *udev) | |||
208 | if (udev->ll2_ring || udev->ll2_buf) | 212 | if (udev->ll2_ring || udev->ll2_buf) |
209 | return rc; | 213 | return rc; |
210 | 214 | ||
215 | /* Memory for control area. */ | ||
216 | udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL); | ||
217 | if (!udev->uctrl) | ||
218 | return -ENOMEM; | ||
219 | |||
211 | /* Allocating memory for LL2 ring */ | 220 | /* Allocating memory for LL2 ring */ |
212 | udev->ll2_ring_size = QEDI_PAGE_SIZE; | 221 | udev->ll2_ring_size = QEDI_PAGE_SIZE; |
213 | udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); | 222 | udev->ll2_ring = (void *)get_zeroed_page(GFP_KERNEL | __GFP_COMP); |
@@ -237,7 +246,6 @@ exit_alloc_ring: | |||
237 | static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) | 246 | static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) |
238 | { | 247 | { |
239 | struct qedi_uio_dev *udev = NULL; | 248 | struct qedi_uio_dev *udev = NULL; |
240 | struct qedi_uio_ctrl *uctrl = NULL; | ||
241 | int rc = 0; | 249 | int rc = 0; |
242 | 250 | ||
243 | list_for_each_entry(udev, &qedi_udev_list, list) { | 251 | list_for_each_entry(udev, &qedi_udev_list, list) { |
@@ -258,21 +266,14 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) | |||
258 | goto err_udev; | 266 | goto err_udev; |
259 | } | 267 | } |
260 | 268 | ||
261 | uctrl = kzalloc(sizeof(*uctrl), GFP_KERNEL); | ||
262 | if (!uctrl) { | ||
263 | rc = -ENOMEM; | ||
264 | goto err_uctrl; | ||
265 | } | ||
266 | |||
267 | udev->uio_dev = -1; | 269 | udev->uio_dev = -1; |
268 | 270 | ||
269 | udev->qedi = qedi; | 271 | udev->qedi = qedi; |
270 | udev->pdev = qedi->pdev; | 272 | udev->pdev = qedi->pdev; |
271 | udev->uctrl = uctrl; | ||
272 | 273 | ||
273 | rc = __qedi_alloc_uio_rings(udev); | 274 | rc = __qedi_alloc_uio_rings(udev); |
274 | if (rc) | 275 | if (rc) |
275 | goto err_uio_rings; | 276 | goto err_uctrl; |
276 | 277 | ||
277 | list_add(&udev->list, &qedi_udev_list); | 278 | list_add(&udev->list, &qedi_udev_list); |
278 | 279 | ||
@@ -283,8 +284,6 @@ static int qedi_alloc_uio_rings(struct qedi_ctx *qedi) | |||
283 | udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; | 284 | udev->rx_pkt = udev->ll2_buf + LL2_SINGLE_BUF_SIZE; |
284 | return 0; | 285 | return 0; |
285 | 286 | ||
286 | err_uio_rings: | ||
287 | kfree(uctrl); | ||
288 | err_uctrl: | 287 | err_uctrl: |
289 | kfree(udev); | 288 | kfree(udev); |
290 | err_udev: | 289 | err_udev: |
@@ -828,6 +827,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi) | |||
828 | qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; | 827 | qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages; |
829 | qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; | 828 | qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues; |
830 | qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; | 829 | qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug; |
830 | qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000; | ||
831 | qedi->pf_params.iscsi_pf_params.max_fin_rt = 2; | ||
831 | 832 | ||
832 | for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { | 833 | for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) { |
833 | if ((1 << log_page_size) == PAGE_SIZE) | 834 | if ((1 << log_page_size) == PAGE_SIZE) |
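
The qedi UIO change moves the control area from kzalloc() to a zeroed page and pairs it with free_page() in __qedi_free_uio_rings(), keeping allocation and release symmetric with the LL2 ring buffers; a page-backed buffer is presumably what the UIO mapping of the control area expects. The new pairing, reduced to its two halves:

	/* allocation, in __qedi_alloc_uio_rings() */
	udev->uctrl = (void *)get_zeroed_page(GFP_KERNEL);
	if (!udev->uctrl)
		return -ENOMEM;

	/* release, in __qedi_free_uio_rings() */
	if (udev->uctrl) {
		free_page((unsigned long)udev->uctrl);
		udev->uctrl = NULL;
	}
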
diff --git a/drivers/staging/media/atomisp/i2c/Makefile b/drivers/staging/media/atomisp/i2c/Makefile index 8ea01904c0ea..466517c7c8e6 100644 --- a/drivers/staging/media/atomisp/i2c/Makefile +++ b/drivers/staging/media/atomisp/i2c/Makefile | |||
@@ -19,5 +19,3 @@ obj-$(CONFIG_VIDEO_AP1302) += ap1302.o | |||
19 | 19 | ||
20 | obj-$(CONFIG_VIDEO_LM3554) += lm3554.o | 20 | obj-$(CONFIG_VIDEO_LM3554) += lm3554.o |
21 | 21 | ||
22 | ccflags-y += -Werror | ||
23 | |||
diff --git a/drivers/staging/media/atomisp/i2c/imx/Makefile b/drivers/staging/media/atomisp/i2c/imx/Makefile index 1d7f7ab94cac..6b13a3a66e49 100644 --- a/drivers/staging/media/atomisp/i2c/imx/Makefile +++ b/drivers/staging/media/atomisp/i2c/imx/Makefile | |||
@@ -4,5 +4,3 @@ imx1x5-objs := imx.o drv201.o ad5816g.o dw9714.o dw9719.o dw9718.o vcm.o otp.o o | |||
4 | 4 | ||
5 | ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o | 5 | ov8858_driver-objs := ../ov8858.o dw9718.o vcm.o |
6 | obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o | 6 | obj-$(CONFIG_VIDEO_OV8858) += ov8858_driver.o |
7 | |||
8 | ccflags-y += -Werror | ||
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/Makefile b/drivers/staging/media/atomisp/i2c/ov5693/Makefile index fceb9e9b881b..c9c0e1245858 100644 --- a/drivers/staging/media/atomisp/i2c/ov5693/Makefile +++ b/drivers/staging/media/atomisp/i2c/ov5693/Makefile | |||
@@ -1,3 +1 @@ | |||
1 | obj-$(CONFIG_VIDEO_OV5693) += ov5693.o | obj-$(CONFIG_VIDEO_OV5693) += ov5693.o | |
2 | |||
3 | ccflags-y += -Werror | ||
diff --git a/drivers/staging/media/atomisp/pci/atomisp2/Makefile b/drivers/staging/media/atomisp/pci/atomisp2/Makefile index 3fa7c1c1479f..f126a89a08e9 100644 --- a/drivers/staging/media/atomisp/pci/atomisp2/Makefile +++ b/drivers/staging/media/atomisp/pci/atomisp2/Makefile | |||
@@ -351,5 +351,5 @@ DEFINES := -DHRT_HW -DHRT_ISP_CSS_CUSTOM_HOST -DHRT_USE_VIR_ADDRS -D__HOST__ | |||
351 | DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0 | 351 | DEFINES += -DATOMISP_POSTFIX=\"css2400b0_v21\" -DISP2400B0 |
352 | DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400 | 352 | DEFINES += -DSYSTEM_hive_isp_css_2400_system -DISP2400 |
353 | 353 | ||
354 | ccflags-y += $(INCLUDES) $(DEFINES) -fno-common -Werror | 354 | ccflags-y += $(INCLUDES) $(DEFINES) -fno-common |
355 | 355 | ||
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 26a9bcd5ee6a..0d8f81591bed 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -3790,6 +3790,8 @@ int iscsi_target_tx_thread(void *arg) | |||
3790 | { | 3790 | { |
3791 | int ret = 0; | 3791 | int ret = 0; |
3792 | struct iscsi_conn *conn = arg; | 3792 | struct iscsi_conn *conn = arg; |
3793 | bool conn_freed = false; | ||
3794 | |||
3793 | /* | 3795 | /* |
3794 | * Allow ourselves to be interrupted by SIGINT so that a | 3796 | * Allow ourselves to be interrupted by SIGINT so that a |
3795 | * connection recovery / failure event can be triggered externally. | 3797 | * connection recovery / failure event can be triggered externally. |
@@ -3815,12 +3817,14 @@ get_immediate: | |||
3815 | goto transport_err; | 3817 | goto transport_err; |
3816 | 3818 | ||
3817 | ret = iscsit_handle_response_queue(conn); | 3819 | ret = iscsit_handle_response_queue(conn); |
3818 | if (ret == 1) | 3820 | if (ret == 1) { |
3819 | goto get_immediate; | 3821 | goto get_immediate; |
3820 | else if (ret == -ECONNRESET) | 3822 | } else if (ret == -ECONNRESET) { |
3823 | conn_freed = true; | ||
3821 | goto out; | 3824 | goto out; |
3822 | else if (ret < 0) | 3825 | } else if (ret < 0) { |
3823 | goto transport_err; | 3826 | goto transport_err; |
3827 | } | ||
3824 | } | 3828 | } |
3825 | 3829 | ||
3826 | transport_err: | 3830 | transport_err: |
@@ -3830,8 +3834,13 @@ transport_err: | |||
3830 | * responsible for cleaning up the early connection failure. | 3834 | * responsible for cleaning up the early connection failure. |
3831 | */ | 3835 | */ |
3832 | if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) | 3836 | if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN) |
3833 | iscsit_take_action_for_connection_exit(conn); | 3837 | iscsit_take_action_for_connection_exit(conn, &conn_freed); |
3834 | out: | 3838 | out: |
3839 | if (!conn_freed) { | ||
3840 | while (!kthread_should_stop()) { | ||
3841 | msleep(100); | ||
3842 | } | ||
3843 | } | ||
3835 | return 0; | 3844 | return 0; |
3836 | } | 3845 | } |
3837 | 3846 | ||
@@ -4004,6 +4013,7 @@ int iscsi_target_rx_thread(void *arg) | |||
4004 | { | 4013 | { |
4005 | int rc; | 4014 | int rc; |
4006 | struct iscsi_conn *conn = arg; | 4015 | struct iscsi_conn *conn = arg; |
4016 | bool conn_freed = false; | ||
4007 | 4017 | ||
4008 | /* | 4018 | /* |
4009 | * Allow ourselves to be interrupted by SIGINT so that a | 4019 | * Allow ourselves to be interrupted by SIGINT so that a |
@@ -4016,7 +4026,7 @@ int iscsi_target_rx_thread(void *arg) | |||
4016 | */ | 4026 | */ |
4017 | rc = wait_for_completion_interruptible(&conn->rx_login_comp); | 4027 | rc = wait_for_completion_interruptible(&conn->rx_login_comp); |
4018 | if (rc < 0 || iscsi_target_check_conn_state(conn)) | 4028 | if (rc < 0 || iscsi_target_check_conn_state(conn)) |
4019 | return 0; | 4029 | goto out; |
4020 | 4030 | ||
4021 | if (!conn->conn_transport->iscsit_get_rx_pdu) | 4031 | if (!conn->conn_transport->iscsit_get_rx_pdu) |
4022 | return 0; | 4032 | return 0; |
@@ -4025,7 +4035,15 @@ int iscsi_target_rx_thread(void *arg) | |||
4025 | 4035 | ||
4026 | if (!signal_pending(current)) | 4036 | if (!signal_pending(current)) |
4027 | atomic_set(&conn->transport_failed, 1); | 4037 | atomic_set(&conn->transport_failed, 1); |
4028 | iscsit_take_action_for_connection_exit(conn); | 4038 | iscsit_take_action_for_connection_exit(conn, &conn_freed); |
4039 | |||
4040 | out: | ||
4041 | if (!conn_freed) { | ||
4042 | while (!kthread_should_stop()) { | ||
4043 | msleep(100); | ||
4044 | } | ||
4045 | } | ||
4046 | |||
4029 | return 0; | 4047 | return 0; |
4030 | } | 4048 | } |
4031 | 4049 | ||
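
Both iscsi-target connection threads now track, via conn_freed, whether iscsit_take_action_for_connection_exit() already released the connection; if it has not, the thread parks in an msleep() loop until kthread_should_stop(), which avoids the race between a kthread exiting on its own and a later kthread_stop() from connection teardown. The skeleton of the pattern, roughly:

	#include <linux/kthread.h>
	#include <linux/delay.h>

	static int sketch_conn_thread(void *arg)
	{
		bool conn_freed = false;

		/* ... normal rx/tx processing; the exit helpers set conn_freed
		 * once the connection has actually been torn down ...
		 */

		if (!conn_freed) {
			while (!kthread_should_stop())
				msleep(100);
		}
		return 0;
	}
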
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 9a96e17bf7cd..7fe2aa73cff6 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c | |||
@@ -930,8 +930,10 @@ static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn) | |||
930 | } | 930 | } |
931 | } | 931 | } |
932 | 932 | ||
933 | void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) | 933 | void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed) |
934 | { | 934 | { |
935 | *conn_freed = false; | ||
936 | |||
935 | spin_lock_bh(&conn->state_lock); | 937 | spin_lock_bh(&conn->state_lock); |
936 | if (atomic_read(&conn->connection_exit)) { | 938 | if (atomic_read(&conn->connection_exit)) { |
937 | spin_unlock_bh(&conn->state_lock); | 939 | spin_unlock_bh(&conn->state_lock); |
@@ -942,6 +944,7 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) | |||
942 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { | 944 | if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { |
943 | spin_unlock_bh(&conn->state_lock); | 945 | spin_unlock_bh(&conn->state_lock); |
944 | iscsit_close_connection(conn); | 946 | iscsit_close_connection(conn); |
947 | *conn_freed = true; | ||
945 | return; | 948 | return; |
946 | } | 949 | } |
947 | 950 | ||
@@ -955,4 +958,5 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) | |||
955 | spin_unlock_bh(&conn->state_lock); | 958 | spin_unlock_bh(&conn->state_lock); |
956 | 959 | ||
957 | iscsit_handle_connection_cleanup(conn); | 960 | iscsit_handle_connection_cleanup(conn); |
961 | *conn_freed = true; | ||
958 | } | 962 | } |
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h index 60e69e2af6ed..3822d9cd1230 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.h +++ b/drivers/target/iscsi/iscsi_target_erl0.h | |||
@@ -15,6 +15,6 @@ extern int iscsit_stop_time2retain_timer(struct iscsi_session *); | |||
15 | extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); | 15 | extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); |
16 | extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); | 16 | extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); |
17 | extern void iscsit_fall_back_to_erl0(struct iscsi_session *); | 17 | extern void iscsit_fall_back_to_erl0(struct iscsi_session *); |
18 | extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *); | 18 | extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *); |
19 | 19 | ||
20 | #endif /*** ISCSI_TARGET_ERL0_H ***/ | 20 | #endif /*** ISCSI_TARGET_ERL0_H ***/ |
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 66238477137b..92b96b51d506 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c | |||
@@ -1464,5 +1464,9 @@ int iscsi_target_login_thread(void *arg) | |||
1464 | break; | 1464 | break; |
1465 | } | 1465 | } |
1466 | 1466 | ||
1467 | while (!kthread_should_stop()) { | ||
1468 | msleep(100); | ||
1469 | } | ||
1470 | |||
1467 | return 0; | 1471 | return 0; |
1468 | } | 1472 | } |
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 7ccc9c1cbfd1..6f88b31242b0 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c | |||
@@ -493,14 +493,60 @@ static void iscsi_target_restore_sock_callbacks(struct iscsi_conn *conn) | |||
493 | 493 | ||
494 | static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); | 494 | static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *); |
495 | 495 | ||
496 | static bool iscsi_target_sk_state_check(struct sock *sk) | 496 | static bool __iscsi_target_sk_check_close(struct sock *sk) |
497 | { | 497 | { |
498 | if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { | 498 | if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) { |
499 | pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE," | 499 | pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE," |
500 | "returning FALSE\n"); | 500 | "returning FALSE\n"); |
501 | return false; | 501 | return true; |
502 | } | 502 | } |
503 | return true; | 503 | return false; |
504 | } | ||
505 | |||
506 | static bool iscsi_target_sk_check_close(struct iscsi_conn *conn) | ||
507 | { | ||
508 | bool state = false; | ||
509 | |||
510 | if (conn->sock) { | ||
511 | struct sock *sk = conn->sock->sk; | ||
512 | |||
513 | read_lock_bh(&sk->sk_callback_lock); | ||
514 | state = (__iscsi_target_sk_check_close(sk) || | ||
515 | test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); | ||
516 | read_unlock_bh(&sk->sk_callback_lock); | ||
517 | } | ||
518 | return state; | ||
519 | } | ||
520 | |||
521 | static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag) | ||
522 | { | ||
523 | bool state = false; | ||
524 | |||
525 | if (conn->sock) { | ||
526 | struct sock *sk = conn->sock->sk; | ||
527 | |||
528 | read_lock_bh(&sk->sk_callback_lock); | ||
529 | state = test_bit(flag, &conn->login_flags); | ||
530 | read_unlock_bh(&sk->sk_callback_lock); | ||
531 | } | ||
532 | return state; | ||
533 | } | ||
534 | |||
535 | static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag) | ||
536 | { | ||
537 | bool state = false; | ||
538 | |||
539 | if (conn->sock) { | ||
540 | struct sock *sk = conn->sock->sk; | ||
541 | |||
542 | write_lock_bh(&sk->sk_callback_lock); | ||
543 | state = (__iscsi_target_sk_check_close(sk) || | ||
544 | test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)); | ||
545 | if (!state) | ||
546 | clear_bit(flag, &conn->login_flags); | ||
547 | write_unlock_bh(&sk->sk_callback_lock); | ||
548 | } | ||
549 | return state; | ||
504 | } | 550 | } |
505 | 551 | ||
506 | static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) | 552 | static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login) |
@@ -540,6 +586,20 @@ static void iscsi_target_do_login_rx(struct work_struct *work) | |||
540 | 586 | ||
541 | pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", | 587 | pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n", |
542 | conn, current->comm, current->pid); | 588 | conn, current->comm, current->pid); |
589 | /* | ||
590 | * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready() | ||
591 | * before initial PDU processing in iscsi_target_start_negotiation() | ||
592 | * has completed, go ahead and retry until it's cleared. | ||
593 | * | ||
594 | * Otherwise if the TCP connection drops while this is occurring, | ||
595 | * iscsi_target_start_negotiation() will detect the failure, call | ||
596 | * cancel_delayed_work_sync(&conn->login_work), and cleanup the | ||
597 | * remaining iscsi connection resources from iscsi_np process context. | ||
598 | */ | ||
599 | if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) { | ||
600 | schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10)); | ||
601 | return; | ||
602 | } | ||
543 | 603 | ||
544 | spin_lock(&tpg->tpg_state_lock); | 604 | spin_lock(&tpg->tpg_state_lock); |
545 | state = (tpg->tpg_state == TPG_STATE_ACTIVE); | 605 | state = (tpg->tpg_state == TPG_STATE_ACTIVE); |
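
The new guard at the top of iscsi_target_do_login_rx() defers work that raced with initial PDU processing: while LOGIN_FLAGS_INITIAL_PDU is still set, meaning iscsi_target_start_negotiation() is still running in iscsi_np context, the function simply re-queues itself instead of touching the login state. In isolation the retry is just:

	if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
		/* initial PDU still being handled in iscsi_np context;
		 * try again in ~10ms rather than racing with it
		 */
		schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
		return;
	}
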
@@ -547,26 +607,12 @@ static void iscsi_target_do_login_rx(struct work_struct *work) | |||
547 | 607 | ||
548 | if (!state) { | 608 | if (!state) { |
549 | pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); | 609 | pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n"); |
550 | iscsi_target_restore_sock_callbacks(conn); | 610 | goto err; |
551 | iscsi_target_login_drop(conn, login); | ||
552 | iscsit_deaccess_np(np, tpg, tpg_np); | ||
553 | return; | ||
554 | } | 611 | } |
555 | 612 | ||
556 | if (conn->sock) { | 613 | if (iscsi_target_sk_check_close(conn)) { |
557 | struct sock *sk = conn->sock->sk; | 614 | pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); |
558 | 615 | goto err; | |
559 | read_lock_bh(&sk->sk_callback_lock); | ||
560 | state = iscsi_target_sk_state_check(sk); | ||
561 | read_unlock_bh(&sk->sk_callback_lock); | ||
562 | |||
563 | if (!state) { | ||
564 | pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n"); | ||
565 | iscsi_target_restore_sock_callbacks(conn); | ||
566 | iscsi_target_login_drop(conn, login); | ||
567 | iscsit_deaccess_np(np, tpg, tpg_np); | ||
568 | return; | ||
569 | } | ||
570 | } | 616 | } |
571 | 617 | ||
572 | conn->login_kworker = current; | 618 | conn->login_kworker = current; |
@@ -584,34 +630,29 @@ static void iscsi_target_do_login_rx(struct work_struct *work) | |||
584 | flush_signals(current); | 630 | flush_signals(current); |
585 | conn->login_kworker = NULL; | 631 | conn->login_kworker = NULL; |
586 | 632 | ||
587 | if (rc < 0) { | 633 | if (rc < 0) |
588 | iscsi_target_restore_sock_callbacks(conn); | 634 | goto err; |
589 | iscsi_target_login_drop(conn, login); | ||
590 | iscsit_deaccess_np(np, tpg, tpg_np); | ||
591 | return; | ||
592 | } | ||
593 | 635 | ||
594 | pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", | 636 | pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n", |
595 | conn, current->comm, current->pid); | 637 | conn, current->comm, current->pid); |
596 | 638 | ||
597 | rc = iscsi_target_do_login(conn, login); | 639 | rc = iscsi_target_do_login(conn, login); |
598 | if (rc < 0) { | 640 | if (rc < 0) { |
599 | iscsi_target_restore_sock_callbacks(conn); | 641 | goto err; |
600 | iscsi_target_login_drop(conn, login); | ||
601 | iscsit_deaccess_np(np, tpg, tpg_np); | ||
602 | } else if (!rc) { | 642 | } else if (!rc) { |
603 | if (conn->sock) { | 643 | if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE)) |
604 | struct sock *sk = conn->sock->sk; | 644 | goto err; |
605 | |||
606 | write_lock_bh(&sk->sk_callback_lock); | ||
607 | clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags); | ||
608 | write_unlock_bh(&sk->sk_callback_lock); | ||
609 | } | ||
610 | } else if (rc == 1) { | 645 | } else if (rc == 1) { |
611 | iscsi_target_nego_release(conn); | 646 | iscsi_target_nego_release(conn); |
612 | iscsi_post_login_handler(np, conn, zero_tsih); | 647 | iscsi_post_login_handler(np, conn, zero_tsih); |
613 | iscsit_deaccess_np(np, tpg, tpg_np); | 648 | iscsit_deaccess_np(np, tpg, tpg_np); |
614 | } | 649 | } |
650 | return; | ||
651 | |||
652 | err: | ||
653 | iscsi_target_restore_sock_callbacks(conn); | ||
654 | iscsi_target_login_drop(conn, login); | ||
655 | iscsit_deaccess_np(np, tpg, tpg_np); | ||
615 | } | 656 | } |
616 | 657 | ||
617 | static void iscsi_target_do_cleanup(struct work_struct *work) | 658 | static void iscsi_target_do_cleanup(struct work_struct *work) |
@@ -659,31 +700,54 @@ static void iscsi_target_sk_state_change(struct sock *sk) | |||
659 | orig_state_change(sk); | 700 | orig_state_change(sk); |
660 | return; | 701 | return; |
661 | } | 702 | } |
703 | state = __iscsi_target_sk_check_close(sk); | ||
704 | pr_debug("__iscsi_target_sk_close_change: state: %d\n", state); | ||
705 | |||
662 | if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { | 706 | if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) { |
663 | pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" | 707 | pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change" |
664 | " conn: %p\n", conn); | 708 | " conn: %p\n", conn); |
709 | if (state) | ||
710 | set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); | ||
665 | write_unlock_bh(&sk->sk_callback_lock); | 711 | write_unlock_bh(&sk->sk_callback_lock); |
666 | orig_state_change(sk); | 712 | orig_state_change(sk); |
667 | return; | 713 | return; |
668 | } | 714 | } |
669 | if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { | 715 | if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) { |
670 | pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", | 716 | pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n", |
671 | conn); | 717 | conn); |
672 | write_unlock_bh(&sk->sk_callback_lock); | 718 | write_unlock_bh(&sk->sk_callback_lock); |
673 | orig_state_change(sk); | 719 | orig_state_change(sk); |
674 | return; | 720 | return; |
675 | } | 721 | } |
722 | /* | ||
723 | * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED, | ||
724 | * but only queue conn->login_work -> iscsi_target_do_login_rx() | ||
725 | * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared. | ||
726 | * | ||
727 | * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close() | ||
728 | * will detect the dropped TCP connection from delayed workqueue context. | ||
729 | * | ||
730 | * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial | ||
731 | * iscsi_target_start_negotiation() is running, iscsi_target_do_login() | ||
732 | * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation() | ||
733 | * via iscsi_target_sk_check_and_clear() is responsible for detecting the | ||
734 | * dropped TCP connection in iscsi_np process context, and cleaning up | ||
735 | * the remaining iscsi connection resources. | ||
736 | */ | ||
737 | if (state) { | ||
738 | pr_debug("iscsi_target_sk_state_change got failed state\n"); | ||
739 | set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags); | ||
740 | state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); | ||
741 | write_unlock_bh(&sk->sk_callback_lock); | ||
676 | 742 | ||
677 | state = iscsi_target_sk_state_check(sk); | 743 | orig_state_change(sk); |
678 | write_unlock_bh(&sk->sk_callback_lock); | ||
679 | |||
680 | pr_debug("iscsi_target_sk_state_change: state: %d\n", state); | ||
681 | 744 | ||
682 | if (!state) { | 745 | if (!state) |
683 | pr_debug("iscsi_target_sk_state_change got failed state\n"); | 746 | schedule_delayed_work(&conn->login_work, 0); |
684 | schedule_delayed_work(&conn->login_cleanup_work, 0); | ||
685 | return; | 747 | return; |
686 | } | 748 | } |
749 | write_unlock_bh(&sk->sk_callback_lock); | ||
750 | |||
687 | orig_state_change(sk); | 751 | orig_state_change(sk); |
688 | } | 752 | } |
689 | 753 | ||
@@ -946,6 +1010,15 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo | |||
946 | if (iscsi_target_handle_csg_one(conn, login) < 0) | 1010 | if (iscsi_target_handle_csg_one(conn, login) < 0) |
947 | return -1; | 1011 | return -1; |
948 | if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { | 1012 | if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) { |
1013 | /* | ||
1014 | * Check to make sure the TCP connection has not | ||
1015 | * dropped asynchronously while session reinstatement | ||
1016 | * was occurring in this kthread context, before | ||
1017 | * transitioning to full feature phase operation. | ||
1018 | */ | ||
1019 | if (iscsi_target_sk_check_close(conn)) | ||
1020 | return -1; | ||
1021 | |||
949 | login->tsih = conn->sess->tsih; | 1022 | login->tsih = conn->sess->tsih; |
950 | login->login_complete = 1; | 1023 | login->login_complete = 1; |
951 | iscsi_target_restore_sock_callbacks(conn); | 1024 | iscsi_target_restore_sock_callbacks(conn); |
@@ -972,21 +1045,6 @@ static int iscsi_target_do_login(struct iscsi_conn *conn, struct iscsi_login *lo | |||
972 | break; | 1045 | break; |
973 | } | 1046 | } |
974 | 1047 | ||
975 | if (conn->sock) { | ||
976 | struct sock *sk = conn->sock->sk; | ||
977 | bool state; | ||
978 | |||
979 | read_lock_bh(&sk->sk_callback_lock); | ||
980 | state = iscsi_target_sk_state_check(sk); | ||
981 | read_unlock_bh(&sk->sk_callback_lock); | ||
982 | |||
983 | if (!state) { | ||
984 | pr_debug("iscsi_target_do_login() failed state for" | ||
985 | " conn: %p\n", conn); | ||
986 | return -1; | ||
987 | } | ||
988 | } | ||
989 | |||
990 | return 0; | 1048 | return 0; |
991 | } | 1049 | } |
992 | 1050 | ||
@@ -1255,10 +1313,22 @@ int iscsi_target_start_negotiation( | |||
1255 | 1313 | ||
1256 | write_lock_bh(&sk->sk_callback_lock); | 1314 | write_lock_bh(&sk->sk_callback_lock); |
1257 | set_bit(LOGIN_FLAGS_READY, &conn->login_flags); | 1315 | set_bit(LOGIN_FLAGS_READY, &conn->login_flags); |
1316 | set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags); | ||
1258 | write_unlock_bh(&sk->sk_callback_lock); | 1317 | write_unlock_bh(&sk->sk_callback_lock); |
1259 | } | 1318 | } |
1260 | 1319 | /* | |
1320 | * If iscsi_target_do_login returns zero to signal more PDU | ||
1321 | * exchanges are required to complete the login, go ahead and | ||
1322 | * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection | ||
1323 | * is still active. | ||
1324 | * | ||
1325 | * Otherwise if TCP connection dropped asynchronously, go ahead | ||
1326 | * and perform connection cleanup now. | ||
1327 | */ | ||
1261 | ret = iscsi_target_do_login(conn, login); | 1328 | ret = iscsi_target_do_login(conn, login); |
1329 | if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU)) | ||
1330 | ret = -1; | ||
1331 | |||
1262 | if (ret < 0) { | 1332 | if (ret < 0) { |
1263 | cancel_delayed_work_sync(&conn->login_work); | 1333 | cancel_delayed_work_sync(&conn->login_work); |
1264 | cancel_delayed_work_sync(&conn->login_cleanup_work); | 1334 | cancel_delayed_work_sync(&conn->login_cleanup_work); |
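
Taken together, the nego changes give LOGIN_FLAGS_INITIAL_PDU a small, well-defined lifecycle: it is set under sk_callback_lock before the first login PDU is processed, sk_state_change() on a dropped connection only queues login_work once the flag has been cleared, and iscsi_target_sk_check_and_clear() clears it after iscsi_target_do_login() asks for more PDUs, turning a concurrently closed socket into an error handled in np context. A compressed view of the set/clear sequence (sketch, not the driver's exact code):

	/* start of negotiation: mark the initial PDU as in flight */
	write_lock_bh(&sk->sk_callback_lock);
	set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
	set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
	write_unlock_bh(&sk->sk_callback_lock);

	ret = iscsi_target_do_login(conn, login);

	/* more PDUs needed: clear INITIAL_PDU, but treat a socket that
	 * closed in the meantime as a failure so cleanup runs here
	 */
	if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
		ret = -1;
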
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 37f57357d4a0..6025935036c9 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -1160,15 +1160,28 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size) | |||
1160 | if (cmd->unknown_data_length) { | 1160 | if (cmd->unknown_data_length) { |
1161 | cmd->data_length = size; | 1161 | cmd->data_length = size; |
1162 | } else if (size != cmd->data_length) { | 1162 | } else if (size != cmd->data_length) { |
1163 | pr_warn("TARGET_CORE[%s]: Expected Transfer Length:" | 1163 | pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" |
1164 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" | 1164 | " %u does not match SCSI CDB Length: %u for SAM Opcode:" |
1165 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), | 1165 | " 0x%02x\n", cmd->se_tfo->get_fabric_name(), |
1166 | cmd->data_length, size, cmd->t_task_cdb[0]); | 1166 | cmd->data_length, size, cmd->t_task_cdb[0]); |
1167 | 1167 | ||
1168 | if (cmd->data_direction == DMA_TO_DEVICE && | 1168 | if (cmd->data_direction == DMA_TO_DEVICE) { |
1169 | cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { | 1169 | if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { |
1170 | pr_err("Rejecting underflow/overflow WRITE data\n"); | 1170 | pr_err_ratelimited("Rejecting underflow/overflow" |
1171 | return TCM_INVALID_CDB_FIELD; | 1171 | " for WRITE data CDB\n"); |
1172 | return TCM_INVALID_CDB_FIELD; | ||
1173 | } | ||
1174 | /* | ||
1175 | * Some fabric drivers like iscsi-target still expect to | ||
1176 | * always reject overflow writes. Reject this case until | ||
1177 | * full fabric driver level support for overflow writes | ||
1178 | * is introduced tree-wide. | ||
1179 | */ | ||
1180 | if (size > cmd->data_length) { | ||
1181 | pr_err_ratelimited("Rejecting overflow for" | ||
1182 | " WRITE control CDB\n"); | ||
1183 | return TCM_INVALID_CDB_FIELD; | ||
1184 | } | ||
1172 | } | 1185 | } |
1173 | /* | 1186 | /* |
1174 | * Reject READ_* or WRITE_* with overflow/underflow for | 1187 | * Reject READ_* or WRITE_* with overflow/underflow for |
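
After this change a transfer-length mismatch on a WRITE is handled in two tiers: data-phase CDBs are still rejected outright, while control CDBs are refused only in the overflow direction (the CDB describes more data than the fabric will transfer), since fabric drivers such as iscsi-target cannot yet handle overflow writes; underflow on control CDBs is allowed through. The decision, restated with comments:

	if (cmd->data_direction == DMA_TO_DEVICE) {
		if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
			/* data-phase WRITE: any length mismatch is rejected */
			return TCM_INVALID_CDB_FIELD;
		}
		if (size > cmd->data_length) {
			/* control CDB: reject overflow, tolerate underflow */
			return TCM_INVALID_CDB_FIELD;
		}
	}
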
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 9045837f748b..beb5f098f32d 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c | |||
@@ -97,7 +97,7 @@ struct tcmu_hba { | |||
97 | 97 | ||
98 | struct tcmu_dev { | 98 | struct tcmu_dev { |
99 | struct list_head node; | 99 | struct list_head node; |
100 | 100 | struct kref kref; | |
101 | struct se_device se_dev; | 101 | struct se_device se_dev; |
102 | 102 | ||
103 | char *name; | 103 | char *name; |
@@ -969,6 +969,7 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) | |||
969 | udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); | 969 | udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL); |
970 | if (!udev) | 970 | if (!udev) |
971 | return NULL; | 971 | return NULL; |
972 | kref_init(&udev->kref); | ||
972 | 973 | ||
973 | udev->name = kstrdup(name, GFP_KERNEL); | 974 | udev->name = kstrdup(name, GFP_KERNEL); |
974 | if (!udev->name) { | 975 | if (!udev->name) { |
@@ -1145,6 +1146,24 @@ static int tcmu_open(struct uio_info *info, struct inode *inode) | |||
1145 | return 0; | 1146 | return 0; |
1146 | } | 1147 | } |
1147 | 1148 | ||
1149 | static void tcmu_dev_call_rcu(struct rcu_head *p) | ||
1150 | { | ||
1151 | struct se_device *dev = container_of(p, struct se_device, rcu_head); | ||
1152 | struct tcmu_dev *udev = TCMU_DEV(dev); | ||
1153 | |||
1154 | kfree(udev->uio_info.name); | ||
1155 | kfree(udev->name); | ||
1156 | kfree(udev); | ||
1157 | } | ||
1158 | |||
1159 | static void tcmu_dev_kref_release(struct kref *kref) | ||
1160 | { | ||
1161 | struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref); | ||
1162 | struct se_device *dev = &udev->se_dev; | ||
1163 | |||
1164 | call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); | ||
1165 | } | ||
1166 | |||
1148 | static int tcmu_release(struct uio_info *info, struct inode *inode) | 1167 | static int tcmu_release(struct uio_info *info, struct inode *inode) |
1149 | { | 1168 | { |
1150 | struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); | 1169 | struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); |
@@ -1152,7 +1171,8 @@ static int tcmu_release(struct uio_info *info, struct inode *inode) | |||
1152 | clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); | 1171 | clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); |
1153 | 1172 | ||
1154 | pr_debug("close\n"); | 1173 | pr_debug("close\n"); |
1155 | 1174 | /* release ref from configure */ | |
1175 | kref_put(&udev->kref, tcmu_dev_kref_release); | ||
1156 | return 0; | 1176 | return 0; |
1157 | } | 1177 | } |
1158 | 1178 | ||
@@ -1272,6 +1292,12 @@ static int tcmu_configure_device(struct se_device *dev) | |||
1272 | dev->dev_attrib.hw_max_sectors = 128; | 1292 | dev->dev_attrib.hw_max_sectors = 128; |
1273 | dev->dev_attrib.hw_queue_depth = 128; | 1293 | dev->dev_attrib.hw_queue_depth = 128; |
1274 | 1294 | ||
1295 | /* | ||
1296 | * Get a ref in case userspace does a close on the uio device before | ||
1297 | * LIO has initiated tcmu_free_device. | ||
1298 | */ | ||
1299 | kref_get(&udev->kref); | ||
1300 | |||
1275 | ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, | 1301 | ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, |
1276 | udev->uio_info.uio_dev->minor); | 1302 | udev->uio_info.uio_dev->minor); |
1277 | if (ret) | 1303 | if (ret) |
@@ -1284,11 +1310,13 @@ static int tcmu_configure_device(struct se_device *dev) | |||
1284 | return 0; | 1310 | return 0; |
1285 | 1311 | ||
1286 | err_netlink: | 1312 | err_netlink: |
1313 | kref_put(&udev->kref, tcmu_dev_kref_release); | ||
1287 | uio_unregister_device(&udev->uio_info); | 1314 | uio_unregister_device(&udev->uio_info); |
1288 | err_register: | 1315 | err_register: |
1289 | vfree(udev->mb_addr); | 1316 | vfree(udev->mb_addr); |
1290 | err_vzalloc: | 1317 | err_vzalloc: |
1291 | kfree(info->name); | 1318 | kfree(info->name); |
1319 | info->name = NULL; | ||
1292 | 1320 | ||
1293 | return ret; | 1321 | return ret; |
1294 | } | 1322 | } |
@@ -1302,14 +1330,6 @@ static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd) | |||
1302 | return -EINVAL; | 1330 | return -EINVAL; |
1303 | } | 1331 | } |
1304 | 1332 | ||
1305 | static void tcmu_dev_call_rcu(struct rcu_head *p) | ||
1306 | { | ||
1307 | struct se_device *dev = container_of(p, struct se_device, rcu_head); | ||
1308 | struct tcmu_dev *udev = TCMU_DEV(dev); | ||
1309 | |||
1310 | kfree(udev); | ||
1311 | } | ||
1312 | |||
1313 | static bool tcmu_dev_configured(struct tcmu_dev *udev) | 1333 | static bool tcmu_dev_configured(struct tcmu_dev *udev) |
1314 | { | 1334 | { |
1315 | return udev->uio_info.uio_dev ? true : false; | 1335 | return udev->uio_info.uio_dev ? true : false; |
@@ -1364,10 +1384,10 @@ static void tcmu_free_device(struct se_device *dev) | |||
1364 | udev->uio_info.uio_dev->minor); | 1384 | udev->uio_info.uio_dev->minor); |
1365 | 1385 | ||
1366 | uio_unregister_device(&udev->uio_info); | 1386 | uio_unregister_device(&udev->uio_info); |
1367 | kfree(udev->uio_info.name); | ||
1368 | kfree(udev->name); | ||
1369 | } | 1387 | } |
1370 | call_rcu(&dev->rcu_head, tcmu_dev_call_rcu); | 1388 | |
1389 | /* release ref from init */ | ||
1390 | kref_put(&udev->kref, tcmu_dev_kref_release); | ||
1371 | } | 1391 | } |
1372 | 1392 | ||
1373 | enum { | 1393 | enum { |
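
The tcmu changes put struct tcmu_dev behind a kref with two owners so that the RCU-deferred free (which now also releases uio_info.name and udev->name) cannot run while either LIO or the userspace UIO client still holds the device. Reduced to the reference pairs:

	kref_init(&udev->kref);				/* tcmu_alloc_device(): LIO reference     */
	kref_get(&udev->kref);				/* tcmu_configure_device(): UIO reference */
	kref_put(&udev->kref, tcmu_dev_kref_release);	/* tcmu_release(): userspace closed uio   */
	kref_put(&udev->kref, tcmu_dev_kref_release);	/* tcmu_free_device(): LIO done with dev  */
	/* last put -> call_rcu(&dev->rcu_head, tcmu_dev_call_rcu) frees the names and udev */
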
diff --git a/drivers/thermal/broadcom/Kconfig b/drivers/thermal/broadcom/Kconfig index ab08af4654ef..42c098e86f84 100644 --- a/drivers/thermal/broadcom/Kconfig +++ b/drivers/thermal/broadcom/Kconfig | |||
@@ -9,8 +9,9 @@ config BCM2835_THERMAL | |||
9 | config BCM_NS_THERMAL | 9 | config BCM_NS_THERMAL |
10 | tristate "Northstar thermal driver" | 10 | tristate "Northstar thermal driver" |
11 | depends on ARCH_BCM_IPROC || COMPILE_TEST | 11 | depends on ARCH_BCM_IPROC || COMPILE_TEST |
12 | default y if ARCH_BCM_IPROC | ||
12 | help | 13 | help |
13 | Northstar is a family of SoCs that includes e.g. BCM4708, BCM47081, | 14 | Support for the Northstar and Northstar Plus family of SoCs (e.g. |
14 | BCM4709 and BCM47094. It contains DMU (Device Management Unit) block | 15 | BCM4708, BCM4709, BCM5301x, BCM95852X, etc). It contains DMU (Device |
15 | with a thermal sensor that allows checking CPU temperature. This | 16 | Management Unit) block with a thermal sensor that allows checking CPU |
16 | driver provides support for it. | 17 | temperature. |
diff --git a/drivers/thermal/qoriq_thermal.c b/drivers/thermal/qoriq_thermal.c index 644ba526d9ea..4362a69ac88d 100644 --- a/drivers/thermal/qoriq_thermal.c +++ b/drivers/thermal/qoriq_thermal.c | |||
@@ -195,7 +195,6 @@ static struct thermal_zone_of_device_ops tmu_tz_ops = { | |||
195 | static int qoriq_tmu_probe(struct platform_device *pdev) | 195 | static int qoriq_tmu_probe(struct platform_device *pdev) |
196 | { | 196 | { |
197 | int ret; | 197 | int ret; |
198 | const struct thermal_trip *trip; | ||
199 | struct qoriq_tmu_data *data; | 198 | struct qoriq_tmu_data *data; |
200 | struct device_node *np = pdev->dev.of_node; | 199 | struct device_node *np = pdev->dev.of_node; |
201 | u32 site = 0; | 200 | u32 site = 0; |
@@ -243,8 +242,6 @@ static int qoriq_tmu_probe(struct platform_device *pdev) | |||
243 | goto err_tmu; | 242 | goto err_tmu; |
244 | } | 243 | } |
245 | 244 | ||
246 | trip = of_thermal_get_trip_points(data->tz); | ||
247 | |||
248 | /* Enable monitoring */ | 245 | /* Enable monitoring */ |
249 | site |= 0x1 << (15 - data->sensor_id); | 246 | site |= 0x1 << (15 - data->sensor_id); |
250 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); | 247 | tmu_write(data, site | TMR_ME | TMR_ALPF, &data->regs->tmr); |
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index b21b9cc2c8d6..5a51c740e372 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c | |||
@@ -359,7 +359,7 @@ static DECLARE_DELAYED_WORK(thermal_emergency_poweroff_work, | |||
359 | * This may be called from any critical situation to trigger a system shutdown | 359 | * This may be called from any critical situation to trigger a system shutdown |
360 | * after a known period of time. By default this is not scheduled. | 360 | * after a known period of time. By default this is not scheduled. |
361 | */ | 361 | */ |
362 | void thermal_emergency_poweroff(void) | 362 | static void thermal_emergency_poweroff(void) |
363 | { | 363 | { |
364 | int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS; | 364 | int poweroff_delay_ms = CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS; |
365 | /* | 365 | /* |
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index ba9c302454fb..696ab3046b87 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c | |||
@@ -1010,7 +1010,7 @@ ti_bandgap_force_single_read(struct ti_bandgap *bgp, int id) | |||
1010 | } | 1010 | } |
1011 | 1011 | ||
1012 | /** | 1012 | /** |
1013 | * ti_bandgap_set_continous_mode() - One time enabling of continuous mode | 1013 | * ti_bandgap_set_continuous_mode() - One time enabling of continuous mode |
1014 | * @bgp: pointer to struct ti_bandgap | 1014 | * @bgp: pointer to struct ti_bandgap |
1015 | * | 1015 | * |
1016 | * Call this function only if HAS(MODE_CONFIG) is set. As this driver may | 1016 | * Call this function only if HAS(MODE_CONFIG) is set. As this driver may |
@@ -1214,22 +1214,18 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev) | |||
1214 | } | 1214 | } |
1215 | 1215 | ||
1216 | bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL); | 1216 | bgp = devm_kzalloc(&pdev->dev, sizeof(*bgp), GFP_KERNEL); |
1217 | if (!bgp) { | 1217 | if (!bgp) |
1218 | dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n"); | ||
1219 | return ERR_PTR(-ENOMEM); | 1218 | return ERR_PTR(-ENOMEM); |
1220 | } | ||
1221 | 1219 | ||
1222 | of_id = of_match_device(of_ti_bandgap_match, &pdev->dev); | 1220 | of_id = of_match_device(of_ti_bandgap_match, &pdev->dev); |
1223 | if (of_id) | 1221 | if (of_id) |
1224 | bgp->conf = of_id->data; | 1222 | bgp->conf = of_id->data; |
1225 | 1223 | ||
1226 | /* register shadow for context save and restore */ | 1224 | /* register shadow for context save and restore */ |
1227 | bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) * | 1225 | bgp->regval = devm_kcalloc(&pdev->dev, bgp->conf->sensor_count, |
1228 | bgp->conf->sensor_count, GFP_KERNEL); | 1226 | sizeof(*bgp->regval), GFP_KERNEL); |
1229 | if (!bgp->regval) { | 1227 | if (!bgp->regval) |
1230 | dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n"); | ||
1231 | return ERR_PTR(-ENOMEM); | 1228 | return ERR_PTR(-ENOMEM); |
1232 | } | ||
1233 | 1229 | ||
1234 | i = 0; | 1230 | i = 0; |
1235 | do { | 1231 | do { |
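
Besides dropping the dev_err() messages on allocation failure (the allocator already logs those), the ti-bandgap hunk switches the register-shadow array to devm_kcalloc(), which checks the count * size multiplication for overflow instead of open-coding it in the devm_kzalloc() size argument. The resulting idiom:

	/* overflow-checked, zeroed array allocation tied to the device */
	bgp->regval = devm_kcalloc(&pdev->dev, bgp->conf->sensor_count,
				   sizeof(*bgp->regval), GFP_KERNEL);
	if (!bgp->regval)
		return ERR_PTR(-ENOMEM);
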
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c index 7ac9bcdf1e61..61fe8d6fd24e 100644 --- a/drivers/tty/ehv_bytechan.c +++ b/drivers/tty/ehv_bytechan.c | |||
@@ -764,7 +764,7 @@ static int __init ehv_bc_init(void) | |||
764 | ehv_bc_driver = alloc_tty_driver(count); | 764 | ehv_bc_driver = alloc_tty_driver(count); |
765 | if (!ehv_bc_driver) { | 765 | if (!ehv_bc_driver) { |
766 | ret = -ENOMEM; | 766 | ret = -ENOMEM; |
767 | goto error; | 767 | goto err_free_bcs; |
768 | } | 768 | } |
769 | 769 | ||
770 | ehv_bc_driver->driver_name = "ehv-bc"; | 770 | ehv_bc_driver->driver_name = "ehv-bc"; |
@@ -778,24 +778,23 @@ static int __init ehv_bc_init(void) | |||
778 | ret = tty_register_driver(ehv_bc_driver); | 778 | ret = tty_register_driver(ehv_bc_driver); |
779 | if (ret) { | 779 | if (ret) { |
780 | pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret); | 780 | pr_err("ehv-bc: could not register tty driver (ret=%i)\n", ret); |
781 | goto error; | 781 | goto err_put_tty_driver; |
782 | } | 782 | } |
783 | 783 | ||
784 | ret = platform_driver_register(&ehv_bc_tty_driver); | 784 | ret = platform_driver_register(&ehv_bc_tty_driver); |
785 | if (ret) { | 785 | if (ret) { |
786 | pr_err("ehv-bc: could not register platform driver (ret=%i)\n", | 786 | pr_err("ehv-bc: could not register platform driver (ret=%i)\n", |
787 | ret); | 787 | ret); |
788 | goto error; | 788 | goto err_deregister_tty_driver; |
789 | } | 789 | } |
790 | 790 | ||
791 | return 0; | 791 | return 0; |
792 | 792 | ||
793 | error: | 793 | err_deregister_tty_driver: |
794 | if (ehv_bc_driver) { | 794 | tty_unregister_driver(ehv_bc_driver); |
795 | tty_unregister_driver(ehv_bc_driver); | 795 | err_put_tty_driver: |
796 | put_tty_driver(ehv_bc_driver); | 796 | put_tty_driver(ehv_bc_driver); |
797 | } | 797 | err_free_bcs: |
798 | |||
799 | kfree(bcs); | 798 | kfree(bcs); |
800 | 799 | ||
801 | return ret; | 800 | return ret; |
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 433de5ea9b02..f71b47334149 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c | |||
@@ -122,6 +122,18 @@ void serdev_device_write_wakeup(struct serdev_device *serdev) | |||
122 | } | 122 | } |
123 | EXPORT_SYMBOL_GPL(serdev_device_write_wakeup); | 123 | EXPORT_SYMBOL_GPL(serdev_device_write_wakeup); |
124 | 124 | ||
125 | int serdev_device_write_buf(struct serdev_device *serdev, | ||
126 | const unsigned char *buf, size_t count) | ||
127 | { | ||
128 | struct serdev_controller *ctrl = serdev->ctrl; | ||
129 | |||
130 | if (!ctrl || !ctrl->ops->write_buf) | ||
131 | return -EINVAL; | ||
132 | |||
133 | return ctrl->ops->write_buf(ctrl, buf, count); | ||
134 | } | ||
135 | EXPORT_SYMBOL_GPL(serdev_device_write_buf); | ||
136 | |||
125 | int serdev_device_write(struct serdev_device *serdev, | 137 | int serdev_device_write(struct serdev_device *serdev, |
126 | const unsigned char *buf, size_t count, | 138 | const unsigned char *buf, size_t count, |
127 | unsigned long timeout) | 139 | unsigned long timeout) |
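
serdev_device_write_buf() is exported here as the fire-and-forget counterpart of serdev_device_write(): it passes the buffer straight to the controller's write_buf op and returns whatever that op returns (typically the number of bytes queued), or -EINVAL when no controller or write_buf op is present, without waiting for a write-wakeup. A client driver might use it roughly like this (device and command bytes are made up for illustration):

	/* hypothetical client: queue a short command without blocking */
	static const unsigned char cmd[] = { 0x01, 0x42 };
	int ret;

	ret = serdev_device_write_buf(serdev, cmd, sizeof(cmd));
	if (ret < 0)
		dev_err(&serdev->dev, "write_buf failed: %d\n", ret);
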
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index 487c88f6aa0e..d0a021c93986 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c | |||
@@ -102,9 +102,6 @@ static int ttyport_open(struct serdev_controller *ctrl) | |||
102 | return PTR_ERR(tty); | 102 | return PTR_ERR(tty); |
103 | serport->tty = tty; | 103 | serport->tty = tty; |
104 | 104 | ||
105 | serport->port->client_ops = &client_ops; | ||
106 | serport->port->client_data = ctrl; | ||
107 | |||
108 | if (tty->ops->open) | 105 | if (tty->ops->open) |
109 | tty->ops->open(serport->tty, NULL); | 106 | tty->ops->open(serport->tty, NULL); |
110 | else | 107 | else |
@@ -215,6 +212,7 @@ struct device *serdev_tty_port_register(struct tty_port *port, | |||
215 | struct device *parent, | 212 | struct device *parent, |
216 | struct tty_driver *drv, int idx) | 213 | struct tty_driver *drv, int idx) |
217 | { | 214 | { |
215 | const struct tty_port_client_operations *old_ops; | ||
218 | struct serdev_controller *ctrl; | 216 | struct serdev_controller *ctrl; |
219 | struct serport *serport; | 217 | struct serport *serport; |
220 | int ret; | 218 | int ret; |
@@ -233,28 +231,37 @@ struct device *serdev_tty_port_register(struct tty_port *port, | |||
233 | 231 | ||
234 | ctrl->ops = &ctrl_ops; | 232 | ctrl->ops = &ctrl_ops; |
235 | 233 | ||
234 | old_ops = port->client_ops; | ||
235 | port->client_ops = &client_ops; | ||
236 | port->client_data = ctrl; | ||
237 | |||
236 | ret = serdev_controller_add(ctrl); | 238 | ret = serdev_controller_add(ctrl); |
237 | if (ret) | 239 | if (ret) |
238 | goto err_controller_put; | 240 | goto err_reset_data; |
239 | 241 | ||
240 | dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx); | 242 | dev_info(&ctrl->dev, "tty port %s%d registered\n", drv->name, idx); |
241 | return &ctrl->dev; | 243 | return &ctrl->dev; |
242 | 244 | ||
243 | err_controller_put: | 245 | err_reset_data: |
246 | port->client_data = NULL; | ||
247 | port->client_ops = old_ops; | ||
244 | serdev_controller_put(ctrl); | 248 | serdev_controller_put(ctrl); |
249 | |||
245 | return ERR_PTR(ret); | 250 | return ERR_PTR(ret); |
246 | } | 251 | } |
247 | 252 | ||
248 | void serdev_tty_port_unregister(struct tty_port *port) | 253 | int serdev_tty_port_unregister(struct tty_port *port) |
249 | { | 254 | { |
250 | struct serdev_controller *ctrl = port->client_data; | 255 | struct serdev_controller *ctrl = port->client_data; |
251 | struct serport *serport = serdev_controller_get_drvdata(ctrl); | 256 | struct serport *serport = serdev_controller_get_drvdata(ctrl); |
252 | 257 | ||
253 | if (!serport) | 258 | if (!serport) |
254 | return; | 259 | return -ENODEV; |
255 | 260 | ||
256 | serdev_controller_remove(ctrl); | 261 | serdev_controller_remove(ctrl); |
257 | port->client_ops = NULL; | 262 | port->client_ops = NULL; |
258 | port->client_data = NULL; | 263 | port->client_data = NULL; |
259 | serdev_controller_put(ctrl); | 264 | serdev_controller_put(ctrl); |
265 | |||
266 | return 0; | ||
260 | } | 267 | } |
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 09a65a3ec7f7..68fd045a7025 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c | |||
@@ -47,6 +47,7 @@ | |||
47 | /* | 47 | /* |
48 | * These are definitions for the Exar XR17V35X and XR17(C|D)15X | 48 | * These are definitions for the Exar XR17V35X and XR17(C|D)15X |
49 | */ | 49 | */ |
50 | #define UART_EXAR_INT0 0x80 | ||
50 | #define UART_EXAR_SLEEP 0x8b /* Sleep mode */ | 51 | #define UART_EXAR_SLEEP 0x8b /* Sleep mode */ |
51 | #define UART_EXAR_DVID 0x8d /* Device identification */ | 52 | #define UART_EXAR_DVID 0x8d /* Device identification */ |
52 | 53 | ||
@@ -1337,7 +1338,7 @@ out_lock: | |||
1337 | /* | 1338 | /* |
1338 | * Check if the device is a Fintek F81216A | 1339 | * Check if the device is a Fintek F81216A |
1339 | */ | 1340 | */ |
1340 | if (port->type == PORT_16550A) | 1341 | if (port->type == PORT_16550A && port->iotype == UPIO_PORT) |
1341 | fintek_8250_probe(up); | 1342 | fintek_8250_probe(up); |
1342 | 1343 | ||
1343 | if (up->capabilities != old_capabilities) { | 1344 | if (up->capabilities != old_capabilities) { |
@@ -1869,17 +1870,13 @@ static int serial8250_default_handle_irq(struct uart_port *port) | |||
1869 | static int exar_handle_irq(struct uart_port *port) | 1870 | static int exar_handle_irq(struct uart_port *port) |
1870 | { | 1871 | { |
1871 | unsigned int iir = serial_port_in(port, UART_IIR); | 1872 | unsigned int iir = serial_port_in(port, UART_IIR); |
1872 | int ret; | 1873 | int ret = 0; |
1873 | 1874 | ||
1874 | ret = serial8250_handle_irq(port, iir); | 1875 | if (((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) && |
1876 | serial_port_in(port, UART_EXAR_INT0) != 0) | ||
1877 | ret = 1; | ||
1875 | 1878 | ||
1876 | if ((port->type == PORT_XR17V35X) || | 1879 | ret |= serial8250_handle_irq(port, iir); |
1877 | (port->type == PORT_XR17D15X)) { | ||
1878 | serial_port_in(port, 0x80); | ||
1879 | serial_port_in(port, 0x81); | ||
1880 | serial_port_in(port, 0x82); | ||
1881 | serial_port_in(port, 0x83); | ||
1882 | } | ||
1883 | 1880 | ||
1884 | return ret; | 1881 | return ret; |
1885 | } | 1882 | } |
@@ -2177,6 +2174,8 @@ int serial8250_do_startup(struct uart_port *port) | |||
2177 | serial_port_in(port, UART_RX); | 2174 | serial_port_in(port, UART_RX); |
2178 | serial_port_in(port, UART_IIR); | 2175 | serial_port_in(port, UART_IIR); |
2179 | serial_port_in(port, UART_MSR); | 2176 | serial_port_in(port, UART_MSR); |
2177 | if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) | ||
2178 | serial_port_in(port, UART_EXAR_INT0); | ||
2180 | 2179 | ||
2181 | /* | 2180 | /* |
2182 | * At this point, there's no way the LSR could still be 0xff; | 2181 | * At this point, there's no way the LSR could still be 0xff; |
@@ -2335,6 +2334,8 @@ dont_test_tx_en: | |||
2335 | serial_port_in(port, UART_RX); | 2334 | serial_port_in(port, UART_RX); |
2336 | serial_port_in(port, UART_IIR); | 2335 | serial_port_in(port, UART_IIR); |
2337 | serial_port_in(port, UART_MSR); | 2336 | serial_port_in(port, UART_MSR); |
2337 | if ((port->type == PORT_XR17V35X) || (port->type == PORT_XR17D15X)) | ||
2338 | serial_port_in(port, UART_EXAR_INT0); | ||
2338 | up->lsr_saved_flags = 0; | 2339 | up->lsr_saved_flags = 0; |
2339 | up->msr_saved_flags = 0; | 2340 | up->msr_saved_flags = 0; |
2340 | 2341 | ||
diff --git a/drivers/tty/serial/altera_jtaguart.c b/drivers/tty/serial/altera_jtaguart.c index 18e3f8342b85..0475f5d261ce 100644 --- a/drivers/tty/serial/altera_jtaguart.c +++ b/drivers/tty/serial/altera_jtaguart.c | |||
@@ -478,6 +478,7 @@ static int altera_jtaguart_remove(struct platform_device *pdev) | |||
478 | 478 | ||
479 | port = &altera_jtaguart_ports[i].port; | 479 | port = &altera_jtaguart_ports[i].port; |
480 | uart_remove_one_port(&altera_jtaguart_driver, port); | 480 | uart_remove_one_port(&altera_jtaguart_driver, port); |
481 | iounmap(port->membase); | ||
481 | 482 | ||
482 | return 0; | 483 | return 0; |
483 | } | 484 | } |
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c index 46d3438a0d27..3e4b717670d7 100644 --- a/drivers/tty/serial/altera_uart.c +++ b/drivers/tty/serial/altera_uart.c | |||
@@ -615,6 +615,7 @@ static int altera_uart_remove(struct platform_device *pdev) | |||
615 | if (port) { | 615 | if (port) { |
616 | uart_remove_one_port(&altera_uart_driver, port); | 616 | uart_remove_one_port(&altera_uart_driver, port); |
617 | port->mapbase = 0; | 617 | port->mapbase = 0; |
618 | iounmap(port->membase); | ||
618 | } | 619 | } |
619 | 620 | ||
620 | return 0; | 621 | return 0; |
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c index ebd8569f9ad5..9fff25be87f9 100644 --- a/drivers/tty/serial/efm32-uart.c +++ b/drivers/tty/serial/efm32-uart.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #define UARTn_FRAME 0x04 | 27 | #define UARTn_FRAME 0x04 |
28 | #define UARTn_FRAME_DATABITS__MASK 0x000f | 28 | #define UARTn_FRAME_DATABITS__MASK 0x000f |
29 | #define UARTn_FRAME_DATABITS(n) ((n) - 3) | 29 | #define UARTn_FRAME_DATABITS(n) ((n) - 3) |
30 | #define UARTn_FRAME_PARITY__MASK 0x0300 | ||
30 | #define UARTn_FRAME_PARITY_NONE 0x0000 | 31 | #define UARTn_FRAME_PARITY_NONE 0x0000 |
31 | #define UARTn_FRAME_PARITY_EVEN 0x0200 | 32 | #define UARTn_FRAME_PARITY_EVEN 0x0200 |
32 | #define UARTn_FRAME_PARITY_ODD 0x0300 | 33 | #define UARTn_FRAME_PARITY_ODD 0x0300 |
@@ -572,12 +573,16 @@ static void efm32_uart_console_get_options(struct efm32_uart_port *efm_port, | |||
572 | 16 * (4 + (clkdiv >> 6))); | 573 | 16 * (4 + (clkdiv >> 6))); |
573 | 574 | ||
574 | frame = efm32_uart_read32(efm_port, UARTn_FRAME); | 575 | frame = efm32_uart_read32(efm_port, UARTn_FRAME); |
575 | if (frame & UARTn_FRAME_PARITY_ODD) | 576 | switch (frame & UARTn_FRAME_PARITY__MASK) { |
577 | case UARTn_FRAME_PARITY_ODD: | ||
576 | *parity = 'o'; | 578 | *parity = 'o'; |
577 | else if (frame & UARTn_FRAME_PARITY_EVEN) | 579 | break; |
580 | case UARTn_FRAME_PARITY_EVEN: | ||
578 | *parity = 'e'; | 581 | *parity = 'e'; |
579 | else | 582 | break; |
583 | default: | ||
580 | *parity = 'n'; | 584 | *parity = 'n'; |
585 | } | ||
581 | 586 | ||
582 | *bits = (frame & UARTn_FRAME_DATABITS__MASK) - | 587 | *bits = (frame & UARTn_FRAME_DATABITS__MASK) - |
583 | UARTn_FRAME_DATABITS(4) + 4; | 588 | UARTn_FRAME_DATABITS(4) + 4; |
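Note: the efm32 change works around the fact that UARTn_FRAME_PARITY_EVEN (0x0200) is a bit-subset of UARTn_FRAME_PARITY_ODD (0x0300), so testing "frame & UARTn_FRAME_PARITY_ODD" also fired for even parity; masking with UARTn_FRAME_PARITY__MASK and switching on the result fixes the console option decoding. A small self-contained demo of the corrected decode, using the constants quoted above:

/* Minimal decoding demo, assuming the register layout quoted in the hunk. */
#include <stdio.h>

#define FRAME_PARITY_MASK 0x0300
#define FRAME_PARITY_NONE 0x0000
#define FRAME_PARITY_EVEN 0x0200
#define FRAME_PARITY_ODD  0x0300

static char decode_parity(unsigned int frame)
{
        switch (frame & FRAME_PARITY_MASK) {
        case FRAME_PARITY_ODD:
                return 'o';
        case FRAME_PARITY_EVEN:
                return 'e';
        default:
                return 'n';
        }
}

int main(void)
{
        /* with the old "frame & FRAME_PARITY_ODD" test, even parity (0x0200)
         * was misreported as odd because its bits are a subset of 0x0300 */
        printf("%c %c %c\n", decode_parity(0x0000),
               decode_parity(0x0200), decode_parity(0x0300));
        return 0;
}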
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c index 157883653256..f190a84a0246 100644 --- a/drivers/tty/serial/ifx6x60.c +++ b/drivers/tty/serial/ifx6x60.c | |||
@@ -1382,9 +1382,9 @@ static struct spi_driver ifx_spi_driver = { | |||
1382 | static void __exit ifx_spi_exit(void) | 1382 | static void __exit ifx_spi_exit(void) |
1383 | { | 1383 | { |
1384 | /* unregister */ | 1384 | /* unregister */ |
1385 | spi_unregister_driver(&ifx_spi_driver); | ||
1385 | tty_unregister_driver(tty_drv); | 1386 | tty_unregister_driver(tty_drv); |
1386 | put_tty_driver(tty_drv); | 1387 | put_tty_driver(tty_drv); |
1387 | spi_unregister_driver(&ifx_spi_driver); | ||
1388 | unregister_reboot_notifier(&ifx_modem_reboot_notifier_block); | 1388 | unregister_reboot_notifier(&ifx_modem_reboot_notifier_block); |
1389 | } | 1389 | } |
1390 | 1390 | ||
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 33509b4beaec..bbefddd92bfe 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -2184,7 +2184,9 @@ static int serial_imx_probe(struct platform_device *pdev) | |||
2184 | * and DCD (when they are outputs) or enables the respective | 2184 | * and DCD (when they are outputs) or enables the respective |
2185 | * irqs. So set this bit early, i.e. before requesting irqs. | 2185 | * irqs. So set this bit early, i.e. before requesting irqs. |
2186 | */ | 2186 | */ |
2187 | writel(UFCR_DCEDTE, sport->port.membase + UFCR); | 2187 | reg = readl(sport->port.membase + UFCR); |
2188 | if (!(reg & UFCR_DCEDTE)) | ||
2189 | writel(reg | UFCR_DCEDTE, sport->port.membase + UFCR); | ||
2188 | 2190 | ||
2189 | /* | 2191 | /* |
2190 | * Disable UCR3_RI and UCR3_DCD irqs. They are also not | 2192 | * Disable UCR3_RI and UCR3_DCD irqs. They are also not |
@@ -2195,7 +2197,15 @@ static int serial_imx_probe(struct platform_device *pdev) | |||
2195 | sport->port.membase + UCR3); | 2197 | sport->port.membase + UCR3); |
2196 | 2198 | ||
2197 | } else { | 2199 | } else { |
2198 | writel(0, sport->port.membase + UFCR); | 2200 | unsigned long ucr3 = UCR3_DSR; |
2201 | |||
2202 | reg = readl(sport->port.membase + UFCR); | ||
2203 | if (reg & UFCR_DCEDTE) | ||
2204 | writel(reg & ~UFCR_DCEDTE, sport->port.membase + UFCR); | ||
2205 | |||
2206 | if (!is_imx1_uart(sport)) | ||
2207 | ucr3 |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP; | ||
2208 | writel(ucr3, sport->port.membase + UCR3); | ||
2199 | } | 2209 | } |
2200 | 2210 | ||
2201 | clk_disable_unprepare(sport->clk_ipg); | 2211 | clk_disable_unprepare(sport->clk_ipg); |
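Note: the imx probe now does a read-modify-write of UFCR instead of writing UFCR_DCEDTE (or zero) outright, so bits already programmed elsewhere survive and the write is skipped when the bit is already in the desired state. A tiny sketch of that pattern, with an illustrative bit value and stub accessors standing in for the real MMIO helpers:

/* Sketch of the read-modify-write pattern; reg_read()/reg_write() are
 * stand-ins for the real MMIO accessors, and the bit value is illustrative. */
#include <stdio.h>

#define UFCR_DCEDTE 0x40   /* illustrative bit value, not the real layout */

static unsigned int ufcr;                  /* pretend device register */
static unsigned int reg_read(void)         { return ufcr; }
static void reg_write(unsigned int v)      { ufcr = v; }

int main(void)
{
        ufcr = 0x0806;     /* other configuration bits already programmed */

        /* set DCEDTE without clobbering the rest of the register, and
         * skip the write entirely when the bit is already set */
        unsigned int reg = reg_read();
        if (!(reg & UFCR_DCEDTE))
                reg_write(reg | UFCR_DCEDTE);

        printf("UFCR = 0x%04x\n", reg_read());
        return 0;
}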
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 0f45b7884a2c..13bfd5dcffce 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c | |||
@@ -2083,7 +2083,7 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) | |||
2083 | mutex_lock(&port->mutex); | 2083 | mutex_lock(&port->mutex); |
2084 | 2084 | ||
2085 | tty_dev = device_find_child(uport->dev, &match, serial_match_port); | 2085 | tty_dev = device_find_child(uport->dev, &match, serial_match_port); |
2086 | if (device_may_wakeup(tty_dev)) { | 2086 | if (tty_dev && device_may_wakeup(tty_dev)) { |
2087 | if (!enable_irq_wake(uport->irq)) | 2087 | if (!enable_irq_wake(uport->irq)) |
2088 | uport->irq_wake = 1; | 2088 | uport->irq_wake = 1; |
2089 | put_device(tty_dev); | 2089 | put_device(tty_dev); |
@@ -2782,7 +2782,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport) | |||
2782 | * Register the port whether it's detected or not. This allows | 2782 | * Register the port whether it's detected or not. This allows |
2783 | * setserial to be used to alter this port's parameters. | 2783 | * setserial to be used to alter this port's parameters. |
2784 | */ | 2784 | */ |
2785 | tty_dev = tty_port_register_device_attr(port, drv->tty_driver, | 2785 | tty_dev = tty_port_register_device_attr_serdev(port, drv->tty_driver, |
2786 | uport->line, uport->dev, port, uport->tty_groups); | 2786 | uport->line, uport->dev, port, uport->tty_groups); |
2787 | if (likely(!IS_ERR(tty_dev))) { | 2787 | if (likely(!IS_ERR(tty_dev))) { |
2788 | device_set_wakeup_capable(tty_dev, 1); | 2788 | device_set_wakeup_capable(tty_dev, 1); |
@@ -2845,7 +2845,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport) | |||
2845 | /* | 2845 | /* |
2846 | * Remove the devices from the tty layer | 2846 | * Remove the devices from the tty layer |
2847 | */ | 2847 | */ |
2848 | tty_unregister_device(drv->tty_driver, uport->line); | 2848 | tty_port_unregister_device(port, drv->tty_driver, uport->line); |
2849 | 2849 | ||
2850 | tty = tty_port_tty_get(port); | 2850 | tty = tty_port_tty_get(port); |
2851 | if (tty) { | 2851 | if (tty) { |
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c index 1d21a9c1d33e..6b137194069f 100644 --- a/drivers/tty/tty_port.c +++ b/drivers/tty/tty_port.c | |||
@@ -129,19 +129,85 @@ struct device *tty_port_register_device_attr(struct tty_port *port, | |||
129 | struct device *device, void *drvdata, | 129 | struct device *device, void *drvdata, |
130 | const struct attribute_group **attr_grp) | 130 | const struct attribute_group **attr_grp) |
131 | { | 131 | { |
132 | tty_port_link_device(port, driver, index); | ||
133 | return tty_register_device_attr(driver, index, device, drvdata, | ||
134 | attr_grp); | ||
135 | } | ||
136 | EXPORT_SYMBOL_GPL(tty_port_register_device_attr); | ||
137 | |||
138 | /** | ||
139 | * tty_port_register_device_attr_serdev - register tty or serdev device | ||
140 | * @port: tty_port of the device | ||
141 | * @driver: tty_driver for this device | ||
142 | * @index: index of the tty | ||
143 | * @device: parent if exists, otherwise NULL | ||
144 | * @drvdata: driver data for the device | ||
145 | * @attr_grp: attribute group for the device | ||
146 | * | ||
147 | * Register a serdev or tty device depending on if the parent device has any | ||
148 | * defined serdev clients or not. | ||
149 | */ | ||
150 | struct device *tty_port_register_device_attr_serdev(struct tty_port *port, | ||
151 | struct tty_driver *driver, unsigned index, | ||
152 | struct device *device, void *drvdata, | ||
153 | const struct attribute_group **attr_grp) | ||
154 | { | ||
132 | struct device *dev; | 155 | struct device *dev; |
133 | 156 | ||
134 | tty_port_link_device(port, driver, index); | 157 | tty_port_link_device(port, driver, index); |
135 | 158 | ||
136 | dev = serdev_tty_port_register(port, device, driver, index); | 159 | dev = serdev_tty_port_register(port, device, driver, index); |
137 | if (PTR_ERR(dev) != -ENODEV) | 160 | if (PTR_ERR(dev) != -ENODEV) { |
138 | /* Skip creating cdev if we registered a serdev device */ | 161 | /* Skip creating cdev if we registered a serdev device */ |
139 | return dev; | 162 | return dev; |
163 | } | ||
140 | 164 | ||
141 | return tty_register_device_attr(driver, index, device, drvdata, | 165 | return tty_register_device_attr(driver, index, device, drvdata, |
142 | attr_grp); | 166 | attr_grp); |
143 | } | 167 | } |
144 | EXPORT_SYMBOL_GPL(tty_port_register_device_attr); | 168 | EXPORT_SYMBOL_GPL(tty_port_register_device_attr_serdev); |
169 | |||
170 | /** | ||
171 | * tty_port_register_device_serdev - register tty or serdev device | ||
172 | * @port: tty_port of the device | ||
173 | * @driver: tty_driver for this device | ||
174 | * @index: index of the tty | ||
175 | * @device: parent if exists, otherwise NULL | ||
176 | * | ||
177 | * Register a serdev or tty device depending on if the parent device has any | ||
178 | * defined serdev clients or not. | ||
179 | */ | ||
180 | struct device *tty_port_register_device_serdev(struct tty_port *port, | ||
181 | struct tty_driver *driver, unsigned index, | ||
182 | struct device *device) | ||
183 | { | ||
184 | return tty_port_register_device_attr_serdev(port, driver, index, | ||
185 | device, NULL, NULL); | ||
186 | } | ||
187 | EXPORT_SYMBOL_GPL(tty_port_register_device_serdev); | ||
188 | |||
189 | /** | ||
190 | * tty_port_unregister_device - deregister a tty or serdev device | ||
191 | * @port: tty_port of the device | ||
192 | * @driver: tty_driver for this device | ||
193 | * @index: index of the tty | ||
194 | * | ||
195 | * If a tty or serdev device is registered with a call to | ||
196 | * tty_port_register_device_serdev() then this function must be called when | ||
197 | * the device is gone. | ||
198 | */ | ||
199 | void tty_port_unregister_device(struct tty_port *port, | ||
200 | struct tty_driver *driver, unsigned index) | ||
201 | { | ||
202 | int ret; | ||
203 | |||
204 | ret = serdev_tty_port_unregister(port); | ||
205 | if (ret == 0) | ||
206 | return; | ||
207 | |||
208 | tty_unregister_device(driver, index); | ||
209 | } | ||
210 | EXPORT_SYMBOL_GPL(tty_port_unregister_device); | ||
145 | 211 | ||
146 | int tty_port_alloc_xmit_buf(struct tty_port *port) | 212 | int tty_port_alloc_xmit_buf(struct tty_port *port) |
147 | { | 213 | { |
@@ -189,9 +255,6 @@ static void tty_port_destructor(struct kref *kref) | |||
189 | /* check if last port ref was dropped before tty release */ | 255 | /* check if last port ref was dropped before tty release */ |
190 | if (WARN_ON(port->itty)) | 256 | if (WARN_ON(port->itty)) |
191 | return; | 257 | return; |
192 | |||
193 | serdev_tty_port_unregister(port); | ||
194 | |||
195 | if (port->xmit_buf) | 258 | if (port->xmit_buf) |
196 | free_page((unsigned long)port->xmit_buf); | 259 | free_page((unsigned long)port->xmit_buf); |
197 | tty_port_destroy(port); | 260 | tty_port_destroy(port); |
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
@@ -1155,6 +1155,17 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, | |||
1155 | } | 1155 | } |
1156 | 1156 | ||
1157 | /* | 1157 | /* |
1158 | * It is possible, particularly with mixed reads & writes to private | ||
1159 | * mappings, that we have raced with a PMD fault that overlaps with | ||
1160 | * the PTE we need to set up. If so just return and the fault will be | ||
1161 | * retried. | ||
1162 | */ | ||
1163 | if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) { | ||
1164 | vmf_ret = VM_FAULT_NOPAGE; | ||
1165 | goto unlock_entry; | ||
1166 | } | ||
1167 | |||
1168 | /* | ||
1158 | * Note that we don't bother to use iomap_apply here: DAX required | 1169 | * Note that we don't bother to use iomap_apply here: DAX required |
1159 | * the file system block size to be equal the page size, which means | 1170 | * the file system block size to be equal the page size, which means |
1160 | * that we never have to deal with more than a single extent here. | 1171 | * that we never have to deal with more than a single extent here. |
@@ -1398,6 +1409,18 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, | |||
1398 | goto fallback; | 1409 | goto fallback; |
1399 | 1410 | ||
1400 | /* | 1411 | /* |
1412 | * It is possible, particularly with mixed reads & writes to private | ||
1413 | * mappings, that we have raced with a PTE fault that overlaps with | ||
1414 | * the PMD we need to set up. If so just return and the fault will be | ||
1415 | * retried. | ||
1416 | */ | ||
1417 | if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) && | ||
1418 | !pmd_devmap(*vmf->pmd)) { | ||
1419 | result = 0; | ||
1420 | goto unlock_entry; | ||
1421 | } | ||
1422 | |||
1423 | /* | ||
1401 | * Note that we don't use iomap_apply here. We aren't doing I/O, only | 1424 | * Note that we don't use iomap_apply here. We aren't doing I/O, only |
1402 | * setting up a mapping, so really we're using iomap_begin() as a way | 1425 | * setting up a mapping, so really we're using iomap_begin() as a way |
1403 | * to look up our filesystem block. | 1426 | * to look up our filesystem block. |
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index f865b96374df..d2955daf17a4 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -659,7 +659,7 @@ static void log_write_header(struct gfs2_sbd *sdp, u32 flags) | |||
659 | struct gfs2_log_header *lh; | 659 | struct gfs2_log_header *lh; |
660 | unsigned int tail; | 660 | unsigned int tail; |
661 | u32 hash; | 661 | u32 hash; |
662 | int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META; | 662 | int op_flags = REQ_PREFLUSH | REQ_FUA | REQ_META | REQ_SYNC; |
663 | struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); | 663 | struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); |
664 | enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); | 664 | enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state); |
665 | lh = page_address(page); | 665 | lh = page_address(page); |
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index f5714ee01000..23542dc44a25 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c | |||
@@ -454,6 +454,7 @@ ff_layout_alloc_lseg(struct pnfs_layout_hdr *lh, | |||
454 | goto out_err_free; | 454 | goto out_err_free; |
455 | 455 | ||
456 | /* fh */ | 456 | /* fh */ |
457 | rc = -EIO; | ||
457 | p = xdr_inline_decode(&stream, 4); | 458 | p = xdr_inline_decode(&stream, 4); |
458 | if (!p) | 459 | if (!p) |
459 | goto out_err_free; | 460 | goto out_err_free; |
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index e9b4c3320e37..3e24392f2caa 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h | |||
@@ -398,7 +398,6 @@ extern struct file_system_type nfs4_referral_fs_type; | |||
398 | bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t); | 398 | bool nfs_auth_info_match(const struct nfs_auth_info *, rpc_authflavor_t); |
399 | struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *, | 399 | struct dentry *nfs_try_mount(int, const char *, struct nfs_mount_info *, |
400 | struct nfs_subversion *); | 400 | struct nfs_subversion *); |
401 | void nfs_initialise_sb(struct super_block *); | ||
402 | int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *); | 401 | int nfs_set_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *); |
403 | int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *); | 402 | int nfs_clone_sb_security(struct super_block *, struct dentry *, struct nfs_mount_info *); |
404 | struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *, | 403 | struct dentry *nfs_fs_mount_common(struct nfs_server *, int, const char *, |
@@ -458,7 +457,6 @@ extern void nfs_read_prepare(struct rpc_task *task, void *calldata); | |||
458 | extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); | 457 | extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); |
459 | 458 | ||
460 | /* super.c */ | 459 | /* super.c */ |
461 | void nfs_clone_super(struct super_block *, struct nfs_mount_info *); | ||
462 | void nfs_umount_begin(struct super_block *); | 460 | void nfs_umount_begin(struct super_block *); |
463 | int nfs_statfs(struct dentry *, struct kstatfs *); | 461 | int nfs_statfs(struct dentry *, struct kstatfs *); |
464 | int nfs_show_options(struct seq_file *, struct dentry *); | 462 | int nfs_show_options(struct seq_file *, struct dentry *); |
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 1a224a33a6c2..e5686be67be8 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c | |||
@@ -246,7 +246,7 @@ struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh, | |||
246 | 246 | ||
247 | devname = nfs_devname(dentry, page, PAGE_SIZE); | 247 | devname = nfs_devname(dentry, page, PAGE_SIZE); |
248 | if (IS_ERR(devname)) | 248 | if (IS_ERR(devname)) |
249 | mnt = (struct vfsmount *)devname; | 249 | mnt = ERR_CAST(devname); |
250 | else | 250 | else |
251 | mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata); | 251 | mnt = nfs_do_clone_mount(NFS_SB(dentry->d_sb), devname, &mountdata); |
252 | 252 | ||
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 929d09a5310a..319a47db218d 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c | |||
@@ -177,7 +177,7 @@ static ssize_t _nfs42_proc_copy(struct file *src, | |||
177 | if (status) | 177 | if (status) |
178 | goto out; | 178 | goto out; |
179 | 179 | ||
180 | if (!nfs_write_verifier_cmp(&res->write_res.verifier.verifier, | 180 | if (nfs_write_verifier_cmp(&res->write_res.verifier.verifier, |
181 | &res->commit_res.verf->verifier)) { | 181 | &res->commit_res.verf->verifier)) { |
182 | status = -EAGAIN; | 182 | status = -EAGAIN; |
183 | goto out; | 183 | goto out; |
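Note: the nfs42proc.c fix drops the negation: nfs_write_verifier_cmp() follows memcmp() conventions and returns non-zero when the verifiers differ, so the copy should be retried on a non-zero result, not on zero. A hedged standalone illustration of those semantics (the struct layout and helper name below are invented for the demo):

/* Sketch of the memcmp-style comparison semantics assumed above. */
#include <stdio.h>
#include <string.h>

struct verifier { unsigned char data[8]; };

static int verifier_cmp(const struct verifier *a, const struct verifier *b)
{
        return memcmp(a->data, b->data, sizeof(a->data));  /* 0 == match */
}

int main(void)
{
        struct verifier v1 = { { 1, 2, 3, 4, 5, 6, 7, 8 } };
        struct verifier v2 = { { 1, 2, 3, 4, 5, 6, 7, 9 } };

        /* retry (-EAGAIN) only when the write and commit verifiers differ,
         * i.e. when the comparison is non-zero -- not when it is zero */
        if (verifier_cmp(&v1, &v2))
                printf("verifiers differ: retry the copy\n");
        else
                printf("verifiers match: commit is stable\n");
        return 0;
}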
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 692a7a8bfc7a..66776f022111 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
@@ -582,7 +582,6 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
582 | */ | 582 | */ |
583 | nfs4_schedule_path_down_recovery(pos); | 583 | nfs4_schedule_path_down_recovery(pos); |
584 | default: | 584 | default: |
585 | spin_lock(&nn->nfs_client_lock); | ||
586 | goto out; | 585 | goto out; |
587 | } | 586 | } |
588 | 587 | ||
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index adc6ec28d4b5..c383d0913b54 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c | |||
@@ -2094,12 +2094,26 @@ pnfs_generic_pg_check_layout(struct nfs_pageio_descriptor *pgio) | |||
2094 | } | 2094 | } |
2095 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout); | 2095 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_check_layout); |
2096 | 2096 | ||
2097 | /* | ||
2098 | * Check for any intersection between the request and the pgio->pg_lseg, | ||
2099 | * and if none, put this pgio->pg_lseg away. | ||
2100 | */ | ||
2101 | static void | ||
2102 | pnfs_generic_pg_check_range(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) | ||
2103 | { | ||
2104 | if (pgio->pg_lseg && !pnfs_lseg_request_intersecting(pgio->pg_lseg, req)) { | ||
2105 | pnfs_put_lseg(pgio->pg_lseg); | ||
2106 | pgio->pg_lseg = NULL; | ||
2107 | } | ||
2108 | } | ||
2109 | |||
2097 | void | 2110 | void |
2098 | pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) | 2111 | pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) |
2099 | { | 2112 | { |
2100 | u64 rd_size = req->wb_bytes; | 2113 | u64 rd_size = req->wb_bytes; |
2101 | 2114 | ||
2102 | pnfs_generic_pg_check_layout(pgio); | 2115 | pnfs_generic_pg_check_layout(pgio); |
2116 | pnfs_generic_pg_check_range(pgio, req); | ||
2103 | if (pgio->pg_lseg == NULL) { | 2117 | if (pgio->pg_lseg == NULL) { |
2104 | if (pgio->pg_dreq == NULL) | 2118 | if (pgio->pg_dreq == NULL) |
2105 | rd_size = i_size_read(pgio->pg_inode) - req_offset(req); | 2119 | rd_size = i_size_read(pgio->pg_inode) - req_offset(req); |
@@ -2131,6 +2145,7 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, | |||
2131 | struct nfs_page *req, u64 wb_size) | 2145 | struct nfs_page *req, u64 wb_size) |
2132 | { | 2146 | { |
2133 | pnfs_generic_pg_check_layout(pgio); | 2147 | pnfs_generic_pg_check_layout(pgio); |
2148 | pnfs_generic_pg_check_range(pgio, req); | ||
2134 | if (pgio->pg_lseg == NULL) { | 2149 | if (pgio->pg_lseg == NULL) { |
2135 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, | 2150 | pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, |
2136 | req->wb_context, | 2151 | req->wb_context, |
@@ -2191,16 +2206,10 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, | |||
2191 | seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset, | 2206 | seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset, |
2192 | pgio->pg_lseg->pls_range.length); | 2207 | pgio->pg_lseg->pls_range.length); |
2193 | req_start = req_offset(req); | 2208 | req_start = req_offset(req); |
2194 | WARN_ON_ONCE(req_start >= seg_end); | 2209 | |
2195 | /* start of request is past the last byte of this segment */ | 2210 | /* start of request is past the last byte of this segment */ |
2196 | if (req_start >= seg_end) { | 2211 | if (req_start >= seg_end) |
2197 | /* reference the new lseg */ | ||
2198 | if (pgio->pg_ops->pg_cleanup) | ||
2199 | pgio->pg_ops->pg_cleanup(pgio); | ||
2200 | if (pgio->pg_ops->pg_init) | ||
2201 | pgio->pg_ops->pg_init(pgio, req); | ||
2202 | return 0; | 2212 | return 0; |
2203 | } | ||
2204 | 2213 | ||
2205 | /* adjust 'size' iff there are fewer bytes left in the | 2214 | /* adjust 'size' iff there are fewer bytes left in the |
2206 | * segment than what nfs_generic_pg_test returned */ | 2215 | * segment than what nfs_generic_pg_test returned */ |
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 2d05b756a8d6..99731e3e332f 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h | |||
@@ -593,6 +593,16 @@ pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1, | |||
593 | return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2); | 593 | return pnfs_is_range_intersecting(l1->offset, end1, l2->offset, end2); |
594 | } | 594 | } |
595 | 595 | ||
596 | static inline bool | ||
597 | pnfs_lseg_request_intersecting(struct pnfs_layout_segment *lseg, struct nfs_page *req) | ||
598 | { | ||
599 | u64 seg_last = pnfs_end_offset(lseg->pls_range.offset, lseg->pls_range.length); | ||
600 | u64 req_last = req_offset(req) + req->wb_bytes; | ||
601 | |||
602 | return pnfs_is_range_intersecting(lseg->pls_range.offset, seg_last, | ||
603 | req_offset(req), req_last); | ||
604 | } | ||
605 | |||
596 | extern unsigned int layoutstats_timer; | 606 | extern unsigned int layoutstats_timer; |
597 | 607 | ||
598 | #ifdef NFS_DEBUG | 608 | #ifdef NFS_DEBUG |
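Note: pnfs_lseg_request_intersecting() reduces to a range-overlap test between the layout segment and the request; when they do not intersect, the new pnfs_generic_pg_check_range() drops the cached lseg so a fresh one is looked up. A toy model of the overlap arithmetic, assuming a half-open [offset, end) convention (the kernel helper's exact boundary handling may differ):

/* Toy model of the segment/request intersection test added above. */
#include <stdio.h>
#include <stdint.h>

static int ranges_intersect(uint64_t off1, uint64_t end1,
                            uint64_t off2, uint64_t end2)
{
        /* half-open convention: the ranges overlap unless one ends
         * at or before the other begins */
        return end1 > off2 && end2 > off1;
}

int main(void)
{
        /* layout segment covering [0, 4096), request at [8192, 8704) */
        printf("%d\n", ranges_intersect(0, 4096, 8192, 8192 + 512));   /* 0 */
        /* request starting inside the segment */
        printf("%d\n", ranges_intersect(0, 4096, 1024, 1024 + 512));   /* 1 */
        return 0;
}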
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 2f3822a4a7d5..eceb4eabb064 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -2301,7 +2301,7 @@ EXPORT_SYMBOL_GPL(nfs_remount); | |||
2301 | /* | 2301 | /* |
2302 | * Initialise the common bits of the superblock | 2302 | * Initialise the common bits of the superblock |
2303 | */ | 2303 | */ |
2304 | inline void nfs_initialise_sb(struct super_block *sb) | 2304 | static void nfs_initialise_sb(struct super_block *sb) |
2305 | { | 2305 | { |
2306 | struct nfs_server *server = NFS_SB(sb); | 2306 | struct nfs_server *server = NFS_SB(sb); |
2307 | 2307 | ||
@@ -2348,7 +2348,8 @@ EXPORT_SYMBOL_GPL(nfs_fill_super); | |||
2348 | /* | 2348 | /* |
2349 | * Finish setting up a cloned NFS2/3/4 superblock | 2349 | * Finish setting up a cloned NFS2/3/4 superblock |
2350 | */ | 2350 | */ |
2351 | void nfs_clone_super(struct super_block *sb, struct nfs_mount_info *mount_info) | 2351 | static void nfs_clone_super(struct super_block *sb, |
2352 | struct nfs_mount_info *mount_info) | ||
2352 | { | 2353 | { |
2353 | const struct super_block *old_sb = mount_info->cloned->sb; | 2354 | const struct super_block *old_sb = mount_info->cloned->sb; |
2354 | struct nfs_server *server = NFS_SB(sb); | 2355 | struct nfs_server *server = NFS_SB(sb); |
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 12feac6ee2fd..452334694a5d 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c | |||
@@ -334,11 +334,8 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, | |||
334 | if (!p) | 334 | if (!p) |
335 | return 0; | 335 | return 0; |
336 | p = xdr_decode_hyper(p, &args->offset); | 336 | p = xdr_decode_hyper(p, &args->offset); |
337 | args->count = ntohl(*p++); | ||
338 | |||
339 | if (!xdr_argsize_check(rqstp, p)) | ||
340 | return 0; | ||
341 | 337 | ||
338 | args->count = ntohl(*p++); | ||
342 | len = min(args->count, max_blocksize); | 339 | len = min(args->count, max_blocksize); |
343 | 340 | ||
344 | /* set up the kvec */ | 341 | /* set up the kvec */ |
@@ -352,7 +349,7 @@ nfs3svc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, | |||
352 | v++; | 349 | v++; |
353 | } | 350 | } |
354 | args->vlen = v; | 351 | args->vlen = v; |
355 | return 1; | 352 | return xdr_argsize_check(rqstp, p); |
356 | } | 353 | } |
357 | 354 | ||
358 | int | 355 | int |
@@ -544,11 +541,9 @@ nfs3svc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, | |||
544 | p = decode_fh(p, &args->fh); | 541 | p = decode_fh(p, &args->fh); |
545 | if (!p) | 542 | if (!p) |
546 | return 0; | 543 | return 0; |
547 | if (!xdr_argsize_check(rqstp, p)) | ||
548 | return 0; | ||
549 | args->buffer = page_address(*(rqstp->rq_next_page++)); | 544 | args->buffer = page_address(*(rqstp->rq_next_page++)); |
550 | 545 | ||
551 | return 1; | 546 | return xdr_argsize_check(rqstp, p); |
552 | } | 547 | } |
553 | 548 | ||
554 | int | 549 | int |
@@ -574,14 +569,10 @@ nfs3svc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, | |||
574 | args->verf = p; p += 2; | 569 | args->verf = p; p += 2; |
575 | args->dircount = ~0; | 570 | args->dircount = ~0; |
576 | args->count = ntohl(*p++); | 571 | args->count = ntohl(*p++); |
577 | |||
578 | if (!xdr_argsize_check(rqstp, p)) | ||
579 | return 0; | ||
580 | |||
581 | args->count = min_t(u32, args->count, PAGE_SIZE); | 572 | args->count = min_t(u32, args->count, PAGE_SIZE); |
582 | args->buffer = page_address(*(rqstp->rq_next_page++)); | 573 | args->buffer = page_address(*(rqstp->rq_next_page++)); |
583 | 574 | ||
584 | return 1; | 575 | return xdr_argsize_check(rqstp, p); |
585 | } | 576 | } |
586 | 577 | ||
587 | int | 578 | int |
@@ -599,9 +590,6 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, | |||
599 | args->dircount = ntohl(*p++); | 590 | args->dircount = ntohl(*p++); |
600 | args->count = ntohl(*p++); | 591 | args->count = ntohl(*p++); |
601 | 592 | ||
602 | if (!xdr_argsize_check(rqstp, p)) | ||
603 | return 0; | ||
604 | |||
605 | len = args->count = min(args->count, max_blocksize); | 593 | len = args->count = min(args->count, max_blocksize); |
606 | while (len > 0) { | 594 | while (len > 0) { |
607 | struct page *p = *(rqstp->rq_next_page++); | 595 | struct page *p = *(rqstp->rq_next_page++); |
@@ -609,7 +597,8 @@ nfs3svc_decode_readdirplusargs(struct svc_rqst *rqstp, __be32 *p, | |||
609 | args->buffer = page_address(p); | 597 | args->buffer = page_address(p); |
610 | len -= PAGE_SIZE; | 598 | len -= PAGE_SIZE; |
611 | } | 599 | } |
612 | return 1; | 600 | |
601 | return xdr_argsize_check(rqstp, p); | ||
613 | } | 602 | } |
614 | 603 | ||
615 | int | 604 | int |
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index c453a1998e00..dadb3bf305b2 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c | |||
@@ -1769,6 +1769,12 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, | |||
1769 | opdesc->op_get_currentstateid(cstate, &op->u); | 1769 | opdesc->op_get_currentstateid(cstate, &op->u); |
1770 | op->status = opdesc->op_func(rqstp, cstate, &op->u); | 1770 | op->status = opdesc->op_func(rqstp, cstate, &op->u); |
1771 | 1771 | ||
1772 | /* Only from SEQUENCE */ | ||
1773 | if (cstate->status == nfserr_replay_cache) { | ||
1774 | dprintk("%s NFS4.1 replay from cache\n", __func__); | ||
1775 | status = op->status; | ||
1776 | goto out; | ||
1777 | } | ||
1772 | if (!op->status) { | 1778 | if (!op->status) { |
1773 | if (opdesc->op_set_currentstateid) | 1779 | if (opdesc->op_set_currentstateid) |
1774 | opdesc->op_set_currentstateid(cstate, &op->u); | 1780 | opdesc->op_set_currentstateid(cstate, &op->u); |
@@ -1779,14 +1785,7 @@ nfsd4_proc_compound(struct svc_rqst *rqstp, | |||
1779 | if (need_wrongsec_check(rqstp)) | 1785 | if (need_wrongsec_check(rqstp)) |
1780 | op->status = check_nfsd_access(current_fh->fh_export, rqstp); | 1786 | op->status = check_nfsd_access(current_fh->fh_export, rqstp); |
1781 | } | 1787 | } |
1782 | |||
1783 | encode_op: | 1788 | encode_op: |
1784 | /* Only from SEQUENCE */ | ||
1785 | if (cstate->status == nfserr_replay_cache) { | ||
1786 | dprintk("%s NFS4.1 replay from cache\n", __func__); | ||
1787 | status = op->status; | ||
1788 | goto out; | ||
1789 | } | ||
1790 | if (op->status == nfserr_replay_me) { | 1789 | if (op->status == nfserr_replay_me) { |
1791 | op->replay = &cstate->replay_owner->so_replay; | 1790 | op->replay = &cstate->replay_owner->so_replay; |
1792 | nfsd4_encode_replay(&resp->xdr, op); | 1791 | nfsd4_encode_replay(&resp->xdr, op); |
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c index 6a4947a3f4fa..de07ff625777 100644 --- a/fs/nfsd/nfsxdr.c +++ b/fs/nfsd/nfsxdr.c | |||
@@ -257,9 +257,6 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, | |||
257 | len = args->count = ntohl(*p++); | 257 | len = args->count = ntohl(*p++); |
258 | p++; /* totalcount - unused */ | 258 | p++; /* totalcount - unused */ |
259 | 259 | ||
260 | if (!xdr_argsize_check(rqstp, p)) | ||
261 | return 0; | ||
262 | |||
263 | len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2); | 260 | len = min_t(unsigned int, len, NFSSVC_MAXBLKSIZE_V2); |
264 | 261 | ||
265 | /* set up somewhere to store response. | 262 | /* set up somewhere to store response. |
@@ -275,7 +272,7 @@ nfssvc_decode_readargs(struct svc_rqst *rqstp, __be32 *p, | |||
275 | v++; | 272 | v++; |
276 | } | 273 | } |
277 | args->vlen = v; | 274 | args->vlen = v; |
278 | return 1; | 275 | return xdr_argsize_check(rqstp, p); |
279 | } | 276 | } |
280 | 277 | ||
281 | int | 278 | int |
@@ -365,11 +362,9 @@ nfssvc_decode_readlinkargs(struct svc_rqst *rqstp, __be32 *p, struct nfsd_readli | |||
365 | p = decode_fh(p, &args->fh); | 362 | p = decode_fh(p, &args->fh); |
366 | if (!p) | 363 | if (!p) |
367 | return 0; | 364 | return 0; |
368 | if (!xdr_argsize_check(rqstp, p)) | ||
369 | return 0; | ||
370 | args->buffer = page_address(*(rqstp->rq_next_page++)); | 365 | args->buffer = page_address(*(rqstp->rq_next_page++)); |
371 | 366 | ||
372 | return 1; | 367 | return xdr_argsize_check(rqstp, p); |
373 | } | 368 | } |
374 | 369 | ||
375 | int | 370 | int |
@@ -407,11 +402,9 @@ nfssvc_decode_readdirargs(struct svc_rqst *rqstp, __be32 *p, | |||
407 | args->cookie = ntohl(*p++); | 402 | args->cookie = ntohl(*p++); |
408 | args->count = ntohl(*p++); | 403 | args->count = ntohl(*p++); |
409 | args->count = min_t(u32, args->count, PAGE_SIZE); | 404 | args->count = min_t(u32, args->count, PAGE_SIZE); |
410 | if (!xdr_argsize_check(rqstp, p)) | ||
411 | return 0; | ||
412 | args->buffer = page_address(*(rqstp->rq_next_page++)); | 405 | args->buffer = page_address(*(rqstp->rq_next_page++)); |
413 | 406 | ||
414 | return 1; | 407 | return xdr_argsize_check(rqstp, p); |
415 | } | 408 | } |
416 | 409 | ||
417 | /* | 410 | /* |
diff --git a/fs/ntfs/namei.c b/fs/ntfs/namei.c index 358258364616..4690cd75d8d7 100644 --- a/fs/ntfs/namei.c +++ b/fs/ntfs/namei.c | |||
@@ -159,7 +159,7 @@ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent, | |||
159 | PTR_ERR(dent_inode)); | 159 | PTR_ERR(dent_inode)); |
160 | kfree(name); | 160 | kfree(name); |
161 | /* Return the error code. */ | 161 | /* Return the error code. */ |
162 | return (struct dentry *)dent_inode; | 162 | return ERR_CAST(dent_inode); |
163 | } | 163 | } |
164 | /* It is guaranteed that @name is no longer allocated at this point. */ | 164 | /* It is guaranteed that @name is no longer allocated at this point. */ |
165 | if (MREF_ERR(mref) == -ENOENT) { | 165 | if (MREF_ERR(mref) == -ENOENT) { |
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c index 827fc9809bc2..9f88188060db 100644 --- a/fs/ocfs2/export.c +++ b/fs/ocfs2/export.c | |||
@@ -119,7 +119,7 @@ check_err: | |||
119 | 119 | ||
120 | if (IS_ERR(inode)) { | 120 | if (IS_ERR(inode)) { |
121 | mlog_errno(PTR_ERR(inode)); | 121 | mlog_errno(PTR_ERR(inode)); |
122 | result = (void *)inode; | 122 | result = ERR_CAST(inode); |
123 | goto bail; | 123 | goto bail; |
124 | } | 124 | } |
125 | 125 | ||
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig index 0daac5112f7a..c0c9683934b7 100644 --- a/fs/overlayfs/Kconfig +++ b/fs/overlayfs/Kconfig | |||
@@ -1,5 +1,6 @@ | |||
1 | config OVERLAY_FS | 1 | config OVERLAY_FS |
2 | tristate "Overlay filesystem support" | 2 | tristate "Overlay filesystem support" |
3 | select EXPORTFS | ||
3 | help | 4 | help |
4 | An overlay filesystem combines two filesystems - an 'upper' filesystem | 5 | An overlay filesystem combines two filesystems - an 'upper' filesystem |
5 | and a 'lower' filesystem. When a name exists in both filesystems, the | 6 | and a 'lower' filesystem. When a name exists in both filesystems, the |
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 9008ab9fbd2e..7a44533f4bbf 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c | |||
@@ -300,7 +300,11 @@ static int ovl_set_origin(struct dentry *dentry, struct dentry *lower, | |||
300 | return PTR_ERR(fh); | 300 | return PTR_ERR(fh); |
301 | } | 301 | } |
302 | 302 | ||
303 | err = ovl_do_setxattr(upper, OVL_XATTR_ORIGIN, fh, fh ? fh->len : 0, 0); | 303 | /* |
304 | * Do not fail when upper doesn't support xattrs. | ||
305 | */ | ||
306 | err = ovl_check_setxattr(dentry, upper, OVL_XATTR_ORIGIN, fh, | ||
307 | fh ? fh->len : 0, 0); | ||
304 | kfree(fh); | 308 | kfree(fh); |
305 | 309 | ||
306 | return err; | 310 | return err; |
@@ -342,13 +346,14 @@ static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, | |||
342 | if (tmpfile) | 346 | if (tmpfile) |
343 | temp = ovl_do_tmpfile(upperdir, stat->mode); | 347 | temp = ovl_do_tmpfile(upperdir, stat->mode); |
344 | else | 348 | else |
345 | temp = ovl_lookup_temp(workdir, dentry); | 349 | temp = ovl_lookup_temp(workdir); |
346 | err = PTR_ERR(temp); | ||
347 | if (IS_ERR(temp)) | ||
348 | goto out1; | ||
349 | |||
350 | err = 0; | 350 | err = 0; |
351 | if (!tmpfile) | 351 | if (IS_ERR(temp)) { |
352 | err = PTR_ERR(temp); | ||
353 | temp = NULL; | ||
354 | } | ||
355 | |||
356 | if (!err && !tmpfile) | ||
352 | err = ovl_create_real(wdir, temp, &cattr, NULL, true); | 357 | err = ovl_create_real(wdir, temp, &cattr, NULL, true); |
353 | 358 | ||
354 | if (new_creds) { | 359 | if (new_creds) { |
@@ -454,6 +459,11 @@ static int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry, | |||
454 | ovl_path_upper(parent, &parentpath); | 459 | ovl_path_upper(parent, &parentpath); |
455 | upperdir = parentpath.dentry; | 460 | upperdir = parentpath.dentry; |
456 | 461 | ||
462 | /* Mark parent "impure" because it may now contain non-pure upper */ | ||
463 | err = ovl_set_impure(parent, upperdir); | ||
464 | if (err) | ||
465 | return err; | ||
466 | |||
457 | err = vfs_getattr(&parentpath, &pstat, | 467 | err = vfs_getattr(&parentpath, &pstat, |
458 | STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT); | 468 | STATX_ATIME | STATX_MTIME, AT_STATX_SYNC_AS_STAT); |
459 | if (err) | 469 | if (err) |
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 723b98b90698..a63a71656e9b 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c | |||
@@ -41,7 +41,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry) | |||
41 | } | 41 | } |
42 | } | 42 | } |
43 | 43 | ||
44 | struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry) | 44 | struct dentry *ovl_lookup_temp(struct dentry *workdir) |
45 | { | 45 | { |
46 | struct dentry *temp; | 46 | struct dentry *temp; |
47 | char name[20]; | 47 | char name[20]; |
@@ -68,7 +68,7 @@ static struct dentry *ovl_whiteout(struct dentry *workdir, | |||
68 | struct dentry *whiteout; | 68 | struct dentry *whiteout; |
69 | struct inode *wdir = workdir->d_inode; | 69 | struct inode *wdir = workdir->d_inode; |
70 | 70 | ||
71 | whiteout = ovl_lookup_temp(workdir, dentry); | 71 | whiteout = ovl_lookup_temp(workdir); |
72 | if (IS_ERR(whiteout)) | 72 | if (IS_ERR(whiteout)) |
73 | return whiteout; | 73 | return whiteout; |
74 | 74 | ||
@@ -127,17 +127,28 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry, | |||
127 | return err; | 127 | return err; |
128 | } | 128 | } |
129 | 129 | ||
130 | static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) | 130 | static int ovl_set_opaque_xerr(struct dentry *dentry, struct dentry *upper, |
131 | int xerr) | ||
131 | { | 132 | { |
132 | int err; | 133 | int err; |
133 | 134 | ||
134 | err = ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0); | 135 | err = ovl_check_setxattr(dentry, upper, OVL_XATTR_OPAQUE, "y", 1, xerr); |
135 | if (!err) | 136 | if (!err) |
136 | ovl_dentry_set_opaque(dentry); | 137 | ovl_dentry_set_opaque(dentry); |
137 | 138 | ||
138 | return err; | 139 | return err; |
139 | } | 140 | } |
140 | 141 | ||
142 | static int ovl_set_opaque(struct dentry *dentry, struct dentry *upperdentry) | ||
143 | { | ||
144 | /* | ||
145 | * Fail with -EIO when trying to create opaque dir and upper doesn't | ||
146 | * support xattrs. ovl_rename() calls ovl_set_opaque_xerr(-EXDEV) to | ||
147 | * return a specific error for noxattr case. | ||
148 | */ | ||
149 | return ovl_set_opaque_xerr(dentry, upperdentry, -EIO); | ||
150 | } | ||
151 | |||
141 | /* Common operations required to be done after creation of file on upper */ | 152 | /* Common operations required to be done after creation of file on upper */ |
142 | static void ovl_instantiate(struct dentry *dentry, struct inode *inode, | 153 | static void ovl_instantiate(struct dentry *dentry, struct inode *inode, |
143 | struct dentry *newdentry, bool hardlink) | 154 | struct dentry *newdentry, bool hardlink) |
@@ -162,6 +173,11 @@ static bool ovl_type_merge(struct dentry *dentry) | |||
162 | return OVL_TYPE_MERGE(ovl_path_type(dentry)); | 173 | return OVL_TYPE_MERGE(ovl_path_type(dentry)); |
163 | } | 174 | } |
164 | 175 | ||
176 | static bool ovl_type_origin(struct dentry *dentry) | ||
177 | { | ||
178 | return OVL_TYPE_ORIGIN(ovl_path_type(dentry)); | ||
179 | } | ||
180 | |||
165 | static int ovl_create_upper(struct dentry *dentry, struct inode *inode, | 181 | static int ovl_create_upper(struct dentry *dentry, struct inode *inode, |
166 | struct cattr *attr, struct dentry *hardlink) | 182 | struct cattr *attr, struct dentry *hardlink) |
167 | { | 183 | { |
@@ -250,7 +266,7 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry, | |||
250 | if (upper->d_parent->d_inode != udir) | 266 | if (upper->d_parent->d_inode != udir) |
251 | goto out_unlock; | 267 | goto out_unlock; |
252 | 268 | ||
253 | opaquedir = ovl_lookup_temp(workdir, dentry); | 269 | opaquedir = ovl_lookup_temp(workdir); |
254 | err = PTR_ERR(opaquedir); | 270 | err = PTR_ERR(opaquedir); |
255 | if (IS_ERR(opaquedir)) | 271 | if (IS_ERR(opaquedir)) |
256 | goto out_unlock; | 272 | goto out_unlock; |
@@ -382,7 +398,7 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode, | |||
382 | if (err) | 398 | if (err) |
383 | goto out; | 399 | goto out; |
384 | 400 | ||
385 | newdentry = ovl_lookup_temp(workdir, dentry); | 401 | newdentry = ovl_lookup_temp(workdir); |
386 | err = PTR_ERR(newdentry); | 402 | err = PTR_ERR(newdentry); |
387 | if (IS_ERR(newdentry)) | 403 | if (IS_ERR(newdentry)) |
388 | goto out_unlock; | 404 | goto out_unlock; |
@@ -846,18 +862,16 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir) | |||
846 | if (IS_ERR(redirect)) | 862 | if (IS_ERR(redirect)) |
847 | return PTR_ERR(redirect); | 863 | return PTR_ERR(redirect); |
848 | 864 | ||
849 | err = ovl_do_setxattr(ovl_dentry_upper(dentry), OVL_XATTR_REDIRECT, | 865 | err = ovl_check_setxattr(dentry, ovl_dentry_upper(dentry), |
850 | redirect, strlen(redirect), 0); | 866 | OVL_XATTR_REDIRECT, |
867 | redirect, strlen(redirect), -EXDEV); | ||
851 | if (!err) { | 868 | if (!err) { |
852 | spin_lock(&dentry->d_lock); | 869 | spin_lock(&dentry->d_lock); |
853 | ovl_dentry_set_redirect(dentry, redirect); | 870 | ovl_dentry_set_redirect(dentry, redirect); |
854 | spin_unlock(&dentry->d_lock); | 871 | spin_unlock(&dentry->d_lock); |
855 | } else { | 872 | } else { |
856 | kfree(redirect); | 873 | kfree(redirect); |
857 | if (err == -EOPNOTSUPP) | 874 | pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err); |
858 | ovl_clear_redirect_dir(dentry->d_sb); | ||
859 | else | ||
860 | pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err); | ||
861 | /* Fall back to userspace copy-up */ | 875 | /* Fall back to userspace copy-up */ |
862 | err = -EXDEV; | 876 | err = -EXDEV; |
863 | } | 877 | } |
@@ -943,6 +957,25 @@ static int ovl_rename(struct inode *olddir, struct dentry *old, | |||
943 | old_upperdir = ovl_dentry_upper(old->d_parent); | 957 | old_upperdir = ovl_dentry_upper(old->d_parent); |
944 | new_upperdir = ovl_dentry_upper(new->d_parent); | 958 | new_upperdir = ovl_dentry_upper(new->d_parent); |
945 | 959 | ||
960 | if (!samedir) { | ||
961 | /* | ||
962 | * When moving a merge dir or non-dir with copy up origin into | ||
963 | * a new parent, we are marking the new parent dir "impure". | ||
964 | * When ovl_iterate() iterates an "impure" upper dir, it will | ||
965 | * lookup the origin inodes of the entries to fill d_ino. | ||
966 | */ | ||
967 | if (ovl_type_origin(old)) { | ||
968 | err = ovl_set_impure(new->d_parent, new_upperdir); | ||
969 | if (err) | ||
970 | goto out_revert_creds; | ||
971 | } | ||
972 | if (!overwrite && ovl_type_origin(new)) { | ||
973 | err = ovl_set_impure(old->d_parent, old_upperdir); | ||
974 | if (err) | ||
975 | goto out_revert_creds; | ||
976 | } | ||
977 | } | ||
978 | |||
946 | trap = lock_rename(new_upperdir, old_upperdir); | 979 | trap = lock_rename(new_upperdir, old_upperdir); |
947 | 980 | ||
948 | olddentry = lookup_one_len(old->d_name.name, old_upperdir, | 981 | olddentry = lookup_one_len(old->d_name.name, old_upperdir, |
@@ -992,7 +1025,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old, | |||
992 | if (ovl_type_merge_or_lower(old)) | 1025 | if (ovl_type_merge_or_lower(old)) |
993 | err = ovl_set_redirect(old, samedir); | 1026 | err = ovl_set_redirect(old, samedir); |
994 | else if (!old_opaque && ovl_type_merge(new->d_parent)) | 1027 | else if (!old_opaque && ovl_type_merge(new->d_parent)) |
995 | err = ovl_set_opaque(old, olddentry); | 1028 | err = ovl_set_opaque_xerr(old, olddentry, -EXDEV); |
996 | if (err) | 1029 | if (err) |
997 | goto out_dput; | 1030 | goto out_dput; |
998 | } | 1031 | } |
@@ -1000,7 +1033,7 @@ static int ovl_rename(struct inode *olddir, struct dentry *old, | |||
1000 | if (ovl_type_merge_or_lower(new)) | 1033 | if (ovl_type_merge_or_lower(new)) |
1001 | err = ovl_set_redirect(new, samedir); | 1034 | err = ovl_set_redirect(new, samedir); |
1002 | else if (!new_opaque && ovl_type_merge(old->d_parent)) | 1035 | else if (!new_opaque && ovl_type_merge(old->d_parent)) |
1003 | err = ovl_set_opaque(new, newdentry); | 1036 | err = ovl_set_opaque_xerr(new, newdentry, -EXDEV); |
1004 | if (err) | 1037 | if (err) |
1005 | goto out_dput; | 1038 | goto out_dput; |
1006 | } | 1039 | } |
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index ad9547f82da5..d613e2c41242 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c | |||
@@ -240,6 +240,16 @@ int ovl_xattr_get(struct dentry *dentry, const char *name, | |||
240 | return res; | 240 | return res; |
241 | } | 241 | } |
242 | 242 | ||
243 | static bool ovl_can_list(const char *s) | ||
244 | { | ||
245 | /* List all non-trusted xattrs */ | ||
246 | if (strncmp(s, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) != 0) | ||
247 | return true; | ||
248 | |||
249 | /* Never list trusted.overlay, list other trusted for superuser only */ | ||
250 | return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN); | ||
251 | } | ||
252 | |||
243 | ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) | 253 | ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) |
244 | { | 254 | { |
245 | struct dentry *realdentry = ovl_dentry_real(dentry); | 255 | struct dentry *realdentry = ovl_dentry_real(dentry); |
@@ -263,7 +273,7 @@ ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size) | |||
263 | return -EIO; | 273 | return -EIO; |
264 | 274 | ||
265 | len -= slen; | 275 | len -= slen; |
266 | if (ovl_is_private_xattr(s)) { | 276 | if (!ovl_can_list(s)) { |
267 | res -= slen; | 277 | res -= slen; |
268 | memmove(s, s + slen, len); | 278 | memmove(s, s + slen, len); |
269 | } else { | 279 | } else { |
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index bad0f665a635..f3136c31e72a 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c | |||
@@ -169,17 +169,7 @@ invalid: | |||
169 | 169 | ||
170 | static bool ovl_is_opaquedir(struct dentry *dentry) | 170 | static bool ovl_is_opaquedir(struct dentry *dentry) |
171 | { | 171 | { |
172 | int res; | 172 | return ovl_check_dir_xattr(dentry, OVL_XATTR_OPAQUE); |
173 | char val; | ||
174 | |||
175 | if (!d_is_dir(dentry)) | ||
176 | return false; | ||
177 | |||
178 | res = vfs_getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1); | ||
179 | if (res == 1 && val == 'y') | ||
180 | return true; | ||
181 | |||
182 | return false; | ||
183 | } | 173 | } |
184 | 174 | ||
185 | static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, | 175 | static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d, |
@@ -351,6 +341,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, | |||
351 | unsigned int ctr = 0; | 341 | unsigned int ctr = 0; |
352 | struct inode *inode = NULL; | 342 | struct inode *inode = NULL; |
353 | bool upperopaque = false; | 343 | bool upperopaque = false; |
344 | bool upperimpure = false; | ||
354 | char *upperredirect = NULL; | 345 | char *upperredirect = NULL; |
355 | struct dentry *this; | 346 | struct dentry *this; |
356 | unsigned int i; | 347 | unsigned int i; |
@@ -395,6 +386,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, | |||
395 | poe = roe; | 386 | poe = roe; |
396 | } | 387 | } |
397 | upperopaque = d.opaque; | 388 | upperopaque = d.opaque; |
389 | if (upperdentry && d.is_dir) | ||
390 | upperimpure = ovl_is_impuredir(upperdentry); | ||
398 | } | 391 | } |
399 | 392 | ||
400 | if (!d.stop && poe->numlower) { | 393 | if (!d.stop && poe->numlower) { |
@@ -463,6 +456,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, | |||
463 | 456 | ||
464 | revert_creds(old_cred); | 457 | revert_creds(old_cred); |
465 | oe->opaque = upperopaque; | 458 | oe->opaque = upperopaque; |
459 | oe->impure = upperimpure; | ||
466 | oe->redirect = upperredirect; | 460 | oe->redirect = upperredirect; |
467 | oe->__upperdentry = upperdentry; | 461 | oe->__upperdentry = upperdentry; |
468 | memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr); | 462 | memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr); |
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index caa36cb9c46d..0623cebeefff 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h | |||
@@ -24,6 +24,7 @@ enum ovl_path_type { | |||
24 | #define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque" | 24 | #define OVL_XATTR_OPAQUE OVL_XATTR_PREFIX "opaque" |
25 | #define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect" | 25 | #define OVL_XATTR_REDIRECT OVL_XATTR_PREFIX "redirect" |
26 | #define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin" | 26 | #define OVL_XATTR_ORIGIN OVL_XATTR_PREFIX "origin" |
27 | #define OVL_XATTR_IMPURE OVL_XATTR_PREFIX "impure" | ||
27 | 28 | ||
28 | /* | 29 | /* |
29 | * The tuple (fh,uuid) is a universal unique identifier for a copy up origin, | 30 | * The tuple (fh,uuid) is a universal unique identifier for a copy up origin, |
@@ -203,10 +204,10 @@ struct dentry *ovl_dentry_real(struct dentry *dentry); | |||
203 | struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry); | 204 | struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry); |
204 | void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache); | 205 | void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache); |
205 | bool ovl_dentry_is_opaque(struct dentry *dentry); | 206 | bool ovl_dentry_is_opaque(struct dentry *dentry); |
207 | bool ovl_dentry_is_impure(struct dentry *dentry); | ||
206 | bool ovl_dentry_is_whiteout(struct dentry *dentry); | 208 | bool ovl_dentry_is_whiteout(struct dentry *dentry); |
207 | void ovl_dentry_set_opaque(struct dentry *dentry); | 209 | void ovl_dentry_set_opaque(struct dentry *dentry); |
208 | bool ovl_redirect_dir(struct super_block *sb); | 210 | bool ovl_redirect_dir(struct super_block *sb); |
209 | void ovl_clear_redirect_dir(struct super_block *sb); | ||
210 | const char *ovl_dentry_get_redirect(struct dentry *dentry); | 211 | const char *ovl_dentry_get_redirect(struct dentry *dentry); |
211 | void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect); | 212 | void ovl_dentry_set_redirect(struct dentry *dentry, const char *redirect); |
212 | void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry); | 213 | void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry); |
@@ -219,6 +220,17 @@ bool ovl_is_whiteout(struct dentry *dentry); | |||
219 | struct file *ovl_path_open(struct path *path, int flags); | 220 | struct file *ovl_path_open(struct path *path, int flags); |
220 | int ovl_copy_up_start(struct dentry *dentry); | 221 | int ovl_copy_up_start(struct dentry *dentry); |
221 | void ovl_copy_up_end(struct dentry *dentry); | 222 | void ovl_copy_up_end(struct dentry *dentry); |
223 | bool ovl_check_dir_xattr(struct dentry *dentry, const char *name); | ||
224 | int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry, | ||
225 | const char *name, const void *value, size_t size, | ||
226 | int xerr); | ||
227 | int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry); | ||
228 | |||
229 | static inline bool ovl_is_impuredir(struct dentry *dentry) | ||
230 | { | ||
231 | return ovl_check_dir_xattr(dentry, OVL_XATTR_IMPURE); | ||
232 | } | ||
233 | |||
222 | 234 | ||
223 | /* namei.c */ | 235 | /* namei.c */ |
224 | int ovl_path_next(int idx, struct dentry *dentry, struct path *path); | 236 | int ovl_path_next(int idx, struct dentry *dentry, struct path *path); |
@@ -263,7 +275,7 @@ static inline void ovl_copyattr(struct inode *from, struct inode *to) | |||
263 | 275 | ||
264 | /* dir.c */ | 276 | /* dir.c */ |
265 | extern const struct inode_operations ovl_dir_inode_operations; | 277 | extern const struct inode_operations ovl_dir_inode_operations; |
266 | struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry); | 278 | struct dentry *ovl_lookup_temp(struct dentry *workdir); |
267 | struct cattr { | 279 | struct cattr { |
268 | dev_t rdev; | 280 | dev_t rdev; |
269 | umode_t mode; | 281 | umode_t mode; |
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index b2023ddb8532..34bc4a9f5c61 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h | |||
@@ -28,6 +28,7 @@ struct ovl_fs { | |||
28 | /* creds of process who forced instantiation of super block */ | 28 | /* creds of process who forced instantiation of super block */ |
29 | const struct cred *creator_cred; | 29 | const struct cred *creator_cred; |
30 | bool tmpfile; | 30 | bool tmpfile; |
31 | bool noxattr; | ||
31 | wait_queue_head_t copyup_wq; | 32 | wait_queue_head_t copyup_wq; |
32 | /* sb common to all layers */ | 33 | /* sb common to all layers */ |
33 | struct super_block *same_sb; | 34 | struct super_block *same_sb; |
@@ -42,6 +43,7 @@ struct ovl_entry { | |||
42 | u64 version; | 43 | u64 version; |
43 | const char *redirect; | 44 | const char *redirect; |
44 | bool opaque; | 45 | bool opaque; |
46 | bool impure; | ||
45 | bool copying; | 47 | bool copying; |
46 | }; | 48 | }; |
47 | struct rcu_head rcu; | 49 | struct rcu_head rcu; |
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 9828b7de8999..4882ffb37bae 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
@@ -891,6 +891,19 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
891 | dput(temp); | 891 | dput(temp); |
892 | else | 892 | else |
893 | pr_warn("overlayfs: upper fs does not support tmpfile.\n"); | 893 | pr_warn("overlayfs: upper fs does not support tmpfile.\n"); |
894 | |||
895 | /* | ||
896 | * Check if upper/work fs supports trusted.overlay.* | ||
897 | * xattr | ||
898 | */ | ||
899 | err = ovl_do_setxattr(ufs->workdir, OVL_XATTR_OPAQUE, | ||
900 | "0", 1, 0); | ||
901 | if (err) { | ||
902 | ufs->noxattr = true; | ||
903 | pr_warn("overlayfs: upper fs does not support xattr.\n"); | ||
904 | } else { | ||
905 | vfs_removexattr(ufs->workdir, OVL_XATTR_OPAQUE); | ||
906 | } | ||
894 | } | 907 | } |
895 | } | 908 | } |
896 | 909 | ||
@@ -961,7 +974,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
961 | path_put(&workpath); | 974 | path_put(&workpath); |
962 | kfree(lowertmp); | 975 | kfree(lowertmp); |
963 | 976 | ||
964 | oe->__upperdentry = upperpath.dentry; | 977 | if (upperpath.dentry) { |
978 | oe->__upperdentry = upperpath.dentry; | ||
979 | oe->impure = ovl_is_impuredir(upperpath.dentry); | ||
980 | } | ||
965 | for (i = 0; i < numlower; i++) { | 981 | for (i = 0; i < numlower; i++) { |
966 | oe->lowerstack[i].dentry = stack[i].dentry; | 982 | oe->lowerstack[i].dentry = stack[i].dentry; |
967 | oe->lowerstack[i].mnt = ufs->lower_mnt[i]; | 983 | oe->lowerstack[i].mnt = ufs->lower_mnt[i]; |
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c index cfdea47313a1..809048913889 100644 --- a/fs/overlayfs/util.c +++ b/fs/overlayfs/util.c | |||
@@ -175,6 +175,13 @@ bool ovl_dentry_is_opaque(struct dentry *dentry) | |||
175 | return oe->opaque; | 175 | return oe->opaque; |
176 | } | 176 | } |
177 | 177 | ||
178 | bool ovl_dentry_is_impure(struct dentry *dentry) | ||
179 | { | ||
180 | struct ovl_entry *oe = dentry->d_fsdata; | ||
181 | |||
182 | return oe->impure; | ||
183 | } | ||
184 | |||
178 | bool ovl_dentry_is_whiteout(struct dentry *dentry) | 185 | bool ovl_dentry_is_whiteout(struct dentry *dentry) |
179 | { | 186 | { |
180 | return !dentry->d_inode && ovl_dentry_is_opaque(dentry); | 187 | return !dentry->d_inode && ovl_dentry_is_opaque(dentry); |
@@ -191,14 +198,7 @@ bool ovl_redirect_dir(struct super_block *sb) | |||
191 | { | 198 | { |
192 | struct ovl_fs *ofs = sb->s_fs_info; | 199 | struct ovl_fs *ofs = sb->s_fs_info; |
193 | 200 | ||
194 | return ofs->config.redirect_dir; | 201 | return ofs->config.redirect_dir && !ofs->noxattr; |
195 | } | ||
196 | |||
197 | void ovl_clear_redirect_dir(struct super_block *sb) | ||
198 | { | ||
199 | struct ovl_fs *ofs = sb->s_fs_info; | ||
200 | |||
201 | ofs->config.redirect_dir = false; | ||
202 | } | 202 | } |
203 | 203 | ||
204 | const char *ovl_dentry_get_redirect(struct dentry *dentry) | 204 | const char *ovl_dentry_get_redirect(struct dentry *dentry) |
@@ -303,3 +303,59 @@ void ovl_copy_up_end(struct dentry *dentry) | |||
303 | wake_up_locked(&ofs->copyup_wq); | 303 | wake_up_locked(&ofs->copyup_wq); |
304 | spin_unlock(&ofs->copyup_wq.lock); | 304 | spin_unlock(&ofs->copyup_wq.lock); |
305 | } | 305 | } |
306 | |||
307 | bool ovl_check_dir_xattr(struct dentry *dentry, const char *name) | ||
308 | { | ||
309 | int res; | ||
310 | char val; | ||
311 | |||
312 | if (!d_is_dir(dentry)) | ||
313 | return false; | ||
314 | |||
315 | res = vfs_getxattr(dentry, name, &val, 1); | ||
316 | if (res == 1 && val == 'y') | ||
317 | return true; | ||
318 | |||
319 | return false; | ||
320 | } | ||
321 | |||
322 | int ovl_check_setxattr(struct dentry *dentry, struct dentry *upperdentry, | ||
323 | const char *name, const void *value, size_t size, | ||
324 | int xerr) | ||
325 | { | ||
326 | int err; | ||
327 | struct ovl_fs *ofs = dentry->d_sb->s_fs_info; | ||
328 | |||
329 | if (ofs->noxattr) | ||
330 | return xerr; | ||
331 | |||
332 | err = ovl_do_setxattr(upperdentry, name, value, size, 0); | ||
333 | |||
334 | if (err == -EOPNOTSUPP) { | ||
335 | pr_warn("overlayfs: cannot set %s xattr on upper\n", name); | ||
336 | ofs->noxattr = true; | ||
337 | return xerr; | ||
338 | } | ||
339 | |||
340 | return err; | ||
341 | } | ||
342 | |||
343 | int ovl_set_impure(struct dentry *dentry, struct dentry *upperdentry) | ||
344 | { | ||
345 | int err; | ||
346 | struct ovl_entry *oe = dentry->d_fsdata; | ||
347 | |||
348 | if (oe->impure) | ||
349 | return 0; | ||
350 | |||
351 | /* | ||
352 | * Do not fail when upper doesn't support xattrs. | ||
353 | * Upper inodes won't have origin nor redirect xattr anyway. | ||
354 | */ | ||
355 | err = ovl_check_setxattr(dentry, upperdentry, OVL_XATTR_IMPURE, | ||
356 | "y", 1, 0); | ||
357 | if (!err) | ||
358 | oe->impure = true; | ||
359 | |||
360 | return err; | ||
361 | } | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index 45f6bf68fff3..f1e1927ccd48 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -821,7 +821,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf, | |||
821 | if (!mmget_not_zero(mm)) | 821 | if (!mmget_not_zero(mm)) |
822 | goto free; | 822 | goto free; |
823 | 823 | ||
824 | flags = write ? FOLL_WRITE : 0; | 824 | flags = FOLL_FORCE | (write ? FOLL_WRITE : 0); |
825 | 825 | ||
826 | while (count > 0) { | 826 | while (count > 0) { |
827 | int this_len = min_t(int, count, PAGE_SIZE); | 827 | int this_len = min_t(int, count, PAGE_SIZE); |
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index da01f497180a..39bb1e838d8d 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -1112,7 +1112,7 @@ static int flush_commit_list(struct super_block *s, | |||
1112 | depth = reiserfs_write_unlock_nested(s); | 1112 | depth = reiserfs_write_unlock_nested(s); |
1113 | if (reiserfs_barrier_flush(s)) | 1113 | if (reiserfs_barrier_flush(s)) |
1114 | __sync_dirty_buffer(jl->j_commit_bh, | 1114 | __sync_dirty_buffer(jl->j_commit_bh, |
1115 | REQ_PREFLUSH | REQ_FUA); | 1115 | REQ_SYNC | REQ_PREFLUSH | REQ_FUA); |
1116 | else | 1116 | else |
1117 | sync_dirty_buffer(jl->j_commit_bh); | 1117 | sync_dirty_buffer(jl->j_commit_bh); |
1118 | reiserfs_write_lock_nested(s, depth); | 1118 | reiserfs_write_lock_nested(s, depth); |
@@ -1271,7 +1271,7 @@ static int _update_journal_header_block(struct super_block *sb, | |||
1271 | 1271 | ||
1272 | if (reiserfs_barrier_flush(sb)) | 1272 | if (reiserfs_barrier_flush(sb)) |
1273 | __sync_dirty_buffer(journal->j_header_bh, | 1273 | __sync_dirty_buffer(journal->j_header_bh, |
1274 | REQ_PREFLUSH | REQ_FUA); | 1274 | REQ_SYNC | REQ_PREFLUSH | REQ_FUA); |
1275 | else | 1275 | else |
1276 | sync_dirty_buffer(journal->j_header_bh); | 1276 | sync_dirty_buffer(journal->j_header_bh); |
1277 | 1277 | ||
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 131b2b77c818..29ecaf739449 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
@@ -812,9 +812,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) | |||
812 | uspi->s_dirblksize = UFS_SECTOR_SIZE; | 812 | uspi->s_dirblksize = UFS_SECTOR_SIZE; |
813 | super_block_offset=UFS_SBLOCK; | 813 | super_block_offset=UFS_SBLOCK; |
814 | 814 | ||
815 | /* Keep 2Gig file limit. Some UFS variants need to override | 815 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
816 | this but as I don't know which I'll let those in the know loosen | 816 | |
817 | the rules */ | ||
818 | switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) { | 817 | switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) { |
819 | case UFS_MOUNT_UFSTYPE_44BSD: | 818 | case UFS_MOUNT_UFSTYPE_44BSD: |
820 | UFSD("ufstype=44bsd\n"); | 819 | UFSD("ufstype=44bsd\n"); |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 62fa39276a24..07b77b73b024 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -97,12 +97,16 @@ static inline void | |||
97 | xfs_buf_ioacct_inc( | 97 | xfs_buf_ioacct_inc( |
98 | struct xfs_buf *bp) | 98 | struct xfs_buf *bp) |
99 | { | 99 | { |
100 | if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT)) | 100 | if (bp->b_flags & XBF_NO_IOACCT) |
101 | return; | 101 | return; |
102 | 102 | ||
103 | ASSERT(bp->b_flags & XBF_ASYNC); | 103 | ASSERT(bp->b_flags & XBF_ASYNC); |
104 | bp->b_flags |= _XBF_IN_FLIGHT; | 104 | spin_lock(&bp->b_lock); |
105 | percpu_counter_inc(&bp->b_target->bt_io_count); | 105 | if (!(bp->b_state & XFS_BSTATE_IN_FLIGHT)) { |
106 | bp->b_state |= XFS_BSTATE_IN_FLIGHT; | ||
107 | percpu_counter_inc(&bp->b_target->bt_io_count); | ||
108 | } | ||
109 | spin_unlock(&bp->b_lock); | ||
106 | } | 110 | } |
107 | 111 | ||
108 | /* | 112 | /* |
@@ -110,14 +114,24 @@ xfs_buf_ioacct_inc( | |||
110 | * freed and unaccount from the buftarg. | 114 | * freed and unaccount from the buftarg. |
111 | */ | 115 | */ |
112 | static inline void | 116 | static inline void |
113 | xfs_buf_ioacct_dec( | 117 | __xfs_buf_ioacct_dec( |
114 | struct xfs_buf *bp) | 118 | struct xfs_buf *bp) |
115 | { | 119 | { |
116 | if (!(bp->b_flags & _XBF_IN_FLIGHT)) | 120 | ASSERT(spin_is_locked(&bp->b_lock)); |
117 | return; | ||
118 | 121 | ||
119 | bp->b_flags &= ~_XBF_IN_FLIGHT; | 122 | if (bp->b_state & XFS_BSTATE_IN_FLIGHT) { |
120 | percpu_counter_dec(&bp->b_target->bt_io_count); | 123 | bp->b_state &= ~XFS_BSTATE_IN_FLIGHT; |
124 | percpu_counter_dec(&bp->b_target->bt_io_count); | ||
125 | } | ||
126 | } | ||
127 | |||
128 | static inline void | ||
129 | xfs_buf_ioacct_dec( | ||
130 | struct xfs_buf *bp) | ||
131 | { | ||
132 | spin_lock(&bp->b_lock); | ||
133 | __xfs_buf_ioacct_dec(bp); | ||
134 | spin_unlock(&bp->b_lock); | ||
121 | } | 135 | } |
122 | 136 | ||
123 | /* | 137 | /* |
@@ -149,9 +163,9 @@ xfs_buf_stale( | |||
149 | * unaccounted (released to LRU) before that occurs. Drop in-flight | 163 | * unaccounted (released to LRU) before that occurs. Drop in-flight |
150 | * status now to preserve accounting consistency. | 164 | * status now to preserve accounting consistency. |
151 | */ | 165 | */ |
152 | xfs_buf_ioacct_dec(bp); | ||
153 | |||
154 | spin_lock(&bp->b_lock); | 166 | spin_lock(&bp->b_lock); |
167 | __xfs_buf_ioacct_dec(bp); | ||
168 | |||
155 | atomic_set(&bp->b_lru_ref, 0); | 169 | atomic_set(&bp->b_lru_ref, 0); |
156 | if (!(bp->b_state & XFS_BSTATE_DISPOSE) && | 170 | if (!(bp->b_state & XFS_BSTATE_DISPOSE) && |
157 | (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) | 171 | (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru))) |
@@ -979,12 +993,12 @@ xfs_buf_rele( | |||
979 | * ensures the decrement occurs only once per-buf. | 993 | * ensures the decrement occurs only once per-buf. |
980 | */ | 994 | */ |
981 | if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) | 995 | if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru)) |
982 | xfs_buf_ioacct_dec(bp); | 996 | __xfs_buf_ioacct_dec(bp); |
983 | goto out_unlock; | 997 | goto out_unlock; |
984 | } | 998 | } |
985 | 999 | ||
986 | /* the last reference has been dropped ... */ | 1000 | /* the last reference has been dropped ... */ |
987 | xfs_buf_ioacct_dec(bp); | 1001 | __xfs_buf_ioacct_dec(bp); |
988 | if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { | 1002 | if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) { |
989 | /* | 1003 | /* |
990 | * If the buffer is added to the LRU take a new reference to the | 1004 | * If the buffer is added to the LRU take a new reference to the |
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 8d1d44f87ce9..1508121f29f2 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h | |||
@@ -63,7 +63,6 @@ typedef enum { | |||
63 | #define _XBF_KMEM (1 << 21)/* backed by heap memory */ | 63 | #define _XBF_KMEM (1 << 21)/* backed by heap memory */ |
64 | #define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */ | 64 | #define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */ |
65 | #define _XBF_COMPOUND (1 << 23)/* compound buffer */ | 65 | #define _XBF_COMPOUND (1 << 23)/* compound buffer */ |
66 | #define _XBF_IN_FLIGHT (1 << 25) /* I/O in flight, for accounting purposes */ | ||
67 | 66 | ||
68 | typedef unsigned int xfs_buf_flags_t; | 67 | typedef unsigned int xfs_buf_flags_t; |
69 | 68 | ||
@@ -84,14 +83,14 @@ typedef unsigned int xfs_buf_flags_t; | |||
84 | { _XBF_PAGES, "PAGES" }, \ | 83 | { _XBF_PAGES, "PAGES" }, \ |
85 | { _XBF_KMEM, "KMEM" }, \ | 84 | { _XBF_KMEM, "KMEM" }, \ |
86 | { _XBF_DELWRI_Q, "DELWRI_Q" }, \ | 85 | { _XBF_DELWRI_Q, "DELWRI_Q" }, \ |
87 | { _XBF_COMPOUND, "COMPOUND" }, \ | 86 | { _XBF_COMPOUND, "COMPOUND" } |
88 | { _XBF_IN_FLIGHT, "IN_FLIGHT" } | ||
89 | 87 | ||
90 | 88 | ||
91 | /* | 89 | /* |
92 | * Internal state flags. | 90 | * Internal state flags. |
93 | */ | 91 | */ |
94 | #define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */ | 92 | #define XFS_BSTATE_DISPOSE (1 << 0) /* buffer being discarded */ |
93 | #define XFS_BSTATE_IN_FLIGHT (1 << 1) /* I/O in flight */ | ||
95 | 94 | ||
96 | /* | 95 | /* |
97 | * The xfs_buftarg contains 2 notions of "sector size" - | 96 | * The xfs_buftarg contains 2 notions of "sector size" - |
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index c0bd0d7651a9..bb837310c07e 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -913,4 +913,55 @@ void drm_dp_aux_unregister(struct drm_dp_aux *aux); | |||
913 | int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); | 913 | int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc); |
914 | int drm_dp_stop_crc(struct drm_dp_aux *aux); | 914 | int drm_dp_stop_crc(struct drm_dp_aux *aux); |
915 | 915 | ||
916 | struct drm_dp_dpcd_ident { | ||
917 | u8 oui[3]; | ||
918 | u8 device_id[6]; | ||
919 | u8 hw_rev; | ||
920 | u8 sw_major_rev; | ||
921 | u8 sw_minor_rev; | ||
922 | } __packed; | ||
923 | |||
924 | /** | ||
925 | * struct drm_dp_desc - DP branch/sink device descriptor | ||
926 | * @ident: DP device identification from DPCD 0x400 (sink) or 0x500 (branch). | ||
927 | * @quirks: Quirks; use drm_dp_has_quirk() to query for the quirks. | ||
928 | */ | ||
929 | struct drm_dp_desc { | ||
930 | struct drm_dp_dpcd_ident ident; | ||
931 | u32 quirks; | ||
932 | }; | ||
933 | |||
934 | int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc, | ||
935 | bool is_branch); | ||
936 | |||
937 | /** | ||
938 | * enum drm_dp_quirk - Display Port sink/branch device specific quirks | ||
939 | * | ||
940 | * Display Port sink and branch devices in the wild have a variety of bugs, try | ||
941 | * to collect them here. The quirks are shared, but it's up to the drivers to | ||
942 | * implement workarounds for them. | ||
943 | */ | ||
944 | enum drm_dp_quirk { | ||
945 | /** | ||
946 | * @DP_DPCD_QUIRK_LIMITED_M_N: | ||
947 | * | ||
948 | * The device requires main link attributes Mvid and Nvid to be limited | ||
949 | * to 16 bits. | ||
950 | */ | ||
951 | DP_DPCD_QUIRK_LIMITED_M_N, | ||
952 | }; | ||
953 | |||
954 | /** | ||
955 | * drm_dp_has_quirk() - does the DP device have a specific quirk | ||
956 | * @desc: Device descriptor filled by drm_dp_read_desc() | ||
957 | * @quirk: Quirk to query for | ||
958 | * | ||
959 | * Return true if DP device identified by @desc has @quirk. | ||
960 | */ | ||
961 | static inline bool | ||
962 | drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk) | ||
963 | { | ||
964 | return desc->quirks & BIT(quirk); | ||
965 | } | ||
966 | |||
916 | #endif /* _DRM_DP_HELPER_H_ */ | 967 | #endif /* _DRM_DP_HELPER_H_ */ |
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 21745946cae1..ec47101cb1bf 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
@@ -48,6 +48,7 @@ enum { | |||
48 | CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ | 48 | CSS_ONLINE = (1 << 1), /* between ->css_online() and ->css_offline() */ |
49 | CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ | 49 | CSS_RELEASED = (1 << 2), /* refcnt reached zero, released */ |
50 | CSS_VISIBLE = (1 << 3), /* css is visible to userland */ | 50 | CSS_VISIBLE = (1 << 3), /* css is visible to userland */ |
51 | CSS_DYING = (1 << 4), /* css is dying */ | ||
51 | }; | 52 | }; |
52 | 53 | ||
53 | /* bits in struct cgroup flags field */ | 54 | /* bits in struct cgroup flags field */ |
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ed2573e149fa..710a005c6b7a 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -344,6 +344,26 @@ static inline bool css_tryget_online(struct cgroup_subsys_state *css) | |||
344 | } | 344 | } |
345 | 345 | ||
346 | /** | 346 | /** |
347 | * css_is_dying - test whether the specified css is dying | ||
348 | * @css: target css | ||
349 | * | ||
350 | * Test whether @css is in the process of offlining or already offline. In | ||
351 | * most cases, ->css_online() and ->css_offline() callbacks should be | ||
352 | * enough; however, the actual offline operations are RCU delayed and this | ||
353 | * test returns %true also when @css is scheduled to be offlined. | ||
354 | * | ||
355 | * This is useful, for example, when the use case requires synchronous | ||
356 | * behavior with respect to cgroup removal. cgroup removal schedules css | ||
357 | * offlining but the css can seem alive while the operation is being | ||
358 | * delayed. If the delay affects user visible semantics, this test can be | ||
359 | * used to resolve the situation. | ||
360 | */ | ||
361 | static inline bool css_is_dying(struct cgroup_subsys_state *css) | ||
362 | { | ||
363 | return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt); | ||
364 | } | ||
365 | |||
366 | /** | ||
347 | * css_put - put a css reference | 367 | * css_put - put a css reference |
348 | * @css: target css | 368 | * @css: target css |
349 | * | 369 | * |
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index de179993e039..ea9126006a69 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
@@ -15,3 +15,10 @@ | |||
15 | * with any version that can compile the kernel | 15 | * with any version that can compile the kernel |
16 | */ | 16 | */ |
17 | #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) | 17 | #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) |
18 | |||
19 | /* | ||
20 | * GCC does not warn about unused static inline functions for | ||
21 | * -Wunused-function. This turns out to avoid the need for complex #ifdef | ||
22 | * directives. Suppress the warning in clang as well. | ||
23 | */ | ||
24 | #define inline inline __attribute__((unused)) | ||
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 2b1a44f5bdb6..a89d37e8b387 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -41,7 +41,7 @@ struct vm_area_struct; | |||
41 | #define ___GFP_WRITE 0x800000u | 41 | #define ___GFP_WRITE 0x800000u |
42 | #define ___GFP_KSWAPD_RECLAIM 0x1000000u | 42 | #define ___GFP_KSWAPD_RECLAIM 0x1000000u |
43 | #ifdef CONFIG_LOCKDEP | 43 | #ifdef CONFIG_LOCKDEP |
44 | #define ___GFP_NOLOCKDEP 0x4000000u | 44 | #define ___GFP_NOLOCKDEP 0x2000000u |
45 | #else | 45 | #else |
46 | #define ___GFP_NOLOCKDEP 0 | 46 | #define ___GFP_NOLOCKDEP 0 |
47 | #endif | 47 | #endif |
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index c0d712d22b07..f738d50cc17d 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h | |||
@@ -56,7 +56,14 @@ struct gpiod_lookup_table { | |||
56 | .flags = _flags, \ | 56 | .flags = _flags, \ |
57 | } | 57 | } |
58 | 58 | ||
59 | #ifdef CONFIG_GPIOLIB | ||
59 | void gpiod_add_lookup_table(struct gpiod_lookup_table *table); | 60 | void gpiod_add_lookup_table(struct gpiod_lookup_table *table); |
60 | void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); | 61 | void gpiod_remove_lookup_table(struct gpiod_lookup_table *table); |
62 | #else | ||
63 | static inline | ||
64 | void gpiod_add_lookup_table(struct gpiod_lookup_table *table) {} | ||
65 | static inline | ||
66 | void gpiod_remove_lookup_table(struct gpiod_lookup_table *table) {} | ||
67 | #endif | ||
61 | 68 | ||
62 | #endif /* __LINUX_GPIO_MACHINE_H */ | 69 | #endif /* __LINUX_GPIO_MACHINE_H */ |
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index 36872fbb815d..734377ad42e9 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h | |||
@@ -64,13 +64,17 @@ extern int register_refined_jiffies(long clock_tick_rate); | |||
64 | /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ | 64 | /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ |
65 | #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) | 65 | #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) |
66 | 66 | ||
67 | #ifndef __jiffy_arch_data | ||
68 | #define __jiffy_arch_data | ||
69 | #endif | ||
70 | |||
67 | /* | 71 | /* |
68 | * The 64-bit value is not atomic - you MUST NOT read it | 72 | * The 64-bit value is not atomic - you MUST NOT read it |
69 | * without sampling the sequence number in jiffies_lock. | 73 | * without sampling the sequence number in jiffies_lock. |
70 | * get_jiffies_64() will do this for you as appropriate. | 74 | * get_jiffies_64() will do this for you as appropriate. |
71 | */ | 75 | */ |
72 | extern u64 __cacheline_aligned_in_smp jiffies_64; | 76 | extern u64 __cacheline_aligned_in_smp jiffies_64; |
73 | extern unsigned long volatile __cacheline_aligned_in_smp jiffies; | 77 | extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies; |
74 | 78 | ||
75 | #if (BITS_PER_LONG < 64) | 79 | #if (BITS_PER_LONG < 64) |
76 | u64 get_jiffies_64(void); | 80 | u64 get_jiffies_64(void); |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 4ce24a376262..8098695e5d8d 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -425,12 +425,20 @@ static inline void early_memtest(phys_addr_t start, phys_addr_t end) | |||
425 | } | 425 | } |
426 | #endif | 426 | #endif |
427 | 427 | ||
428 | extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, | ||
429 | phys_addr_t end_addr); | ||
428 | #else | 430 | #else |
429 | static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) | 431 | static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align) |
430 | { | 432 | { |
431 | return 0; | 433 | return 0; |
432 | } | 434 | } |
433 | 435 | ||
436 | static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr, | ||
437 | phys_addr_t end_addr) | ||
438 | { | ||
439 | return 0; | ||
440 | } | ||
441 | |||
434 | #endif /* CONFIG_HAVE_MEMBLOCK */ | 442 | #endif /* CONFIG_HAVE_MEMBLOCK */ |
435 | 443 | ||
436 | #endif /* __KERNEL__ */ | 444 | #endif /* __KERNEL__ */ |
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index b4ee8f62ce8d..8e2828d48d7f 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h | |||
@@ -470,6 +470,7 @@ struct mlx4_update_qp_params { | |||
470 | u16 rate_val; | 470 | u16 rate_val; |
471 | }; | 471 | }; |
472 | 472 | ||
473 | struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn); | ||
473 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, | 474 | int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, |
474 | enum mlx4_update_qp_attr attr, | 475 | enum mlx4_update_qp_attr attr, |
475 | struct mlx4_update_qp_params *params); | 476 | struct mlx4_update_qp_params *params); |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 6fa1eb6766af..56e96f6a0a45 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
@@ -769,6 +769,12 @@ enum { | |||
769 | MLX5_CAP_PORT_TYPE_ETH = 0x1, | 769 | MLX5_CAP_PORT_TYPE_ETH = 0x1, |
770 | }; | 770 | }; |
771 | 771 | ||
772 | enum { | ||
773 | MLX5_CAP_UMR_FENCE_STRONG = 0x0, | ||
774 | MLX5_CAP_UMR_FENCE_SMALL = 0x1, | ||
775 | MLX5_CAP_UMR_FENCE_NONE = 0x2, | ||
776 | }; | ||
777 | |||
772 | struct mlx5_ifc_cmd_hca_cap_bits { | 778 | struct mlx5_ifc_cmd_hca_cap_bits { |
773 | u8 reserved_at_0[0x80]; | 779 | u8 reserved_at_0[0x80]; |
774 | 780 | ||
@@ -879,7 +885,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { | |||
879 | u8 reserved_at_202[0x1]; | 885 | u8 reserved_at_202[0x1]; |
880 | u8 ipoib_enhanced_offloads[0x1]; | 886 | u8 ipoib_enhanced_offloads[0x1]; |
881 | u8 ipoib_basic_offloads[0x1]; | 887 | u8 ipoib_basic_offloads[0x1]; |
882 | u8 reserved_at_205[0xa]; | 888 | u8 reserved_at_205[0x5]; |
889 | u8 umr_fence[0x2]; | ||
890 | u8 reserved_at_20c[0x3]; | ||
883 | u8 drain_sigerr[0x1]; | 891 | u8 drain_sigerr[0x1]; |
884 | u8 cmdif_checksum[0x2]; | 892 | u8 cmdif_checksum[0x2]; |
885 | u8 sigerr_cqe[0x1]; | 893 | u8 sigerr_cqe[0x1]; |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 7cb17c6b97de..b892e95d4929 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -2327,6 +2327,17 @@ static inline struct page *follow_page(struct vm_area_struct *vma, | |||
2327 | #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ | 2327 | #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ |
2328 | #define FOLL_COW 0x4000 /* internal GUP flag */ | 2328 | #define FOLL_COW 0x4000 /* internal GUP flag */ |
2329 | 2329 | ||
2330 | static inline int vm_fault_to_errno(int vm_fault, int foll_flags) | ||
2331 | { | ||
2332 | if (vm_fault & VM_FAULT_OOM) | ||
2333 | return -ENOMEM; | ||
2334 | if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) | ||
2335 | return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT; | ||
2336 | if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) | ||
2337 | return -EFAULT; | ||
2338 | return 0; | ||
2339 | } | ||
2340 | |||
2330 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, | 2341 | typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, |
2331 | void *data); | 2342 | void *data); |
2332 | extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, | 2343 | extern int apply_to_page_range(struct mm_struct *mm, unsigned long address, |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ebaccd4e7d8c..ef6a13b7bd3e 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -678,6 +678,7 @@ typedef struct pglist_data { | |||
678 | * is the first PFN that needs to be initialised. | 678 | * is the first PFN that needs to be initialised. |
679 | */ | 679 | */ |
680 | unsigned long first_deferred_pfn; | 680 | unsigned long first_deferred_pfn; |
681 | unsigned long static_init_size; | ||
681 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ | 682 | #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ |
682 | 683 | ||
683 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 684 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 566fda587fcf..3f74ef2281e8 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
@@ -467,6 +467,7 @@ enum dmi_field { | |||
467 | DMI_PRODUCT_VERSION, | 467 | DMI_PRODUCT_VERSION, |
468 | DMI_PRODUCT_SERIAL, | 468 | DMI_PRODUCT_SERIAL, |
469 | DMI_PRODUCT_UUID, | 469 | DMI_PRODUCT_UUID, |
470 | DMI_PRODUCT_FAMILY, | ||
470 | DMI_BOARD_VENDOR, | 471 | DMI_BOARD_VENDOR, |
471 | DMI_BOARD_NAME, | 472 | DMI_BOARD_NAME, |
472 | DMI_BOARD_VERSION, | 473 | DMI_BOARD_VERSION, |
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index 279e3c5326e3..7620eb127cff 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h | |||
@@ -42,8 +42,6 @@ | |||
42 | * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high | 42 | * @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high |
43 | * impedance to VDD). If the argument is != 0 pull-up is enabled, | 43 | * impedance to VDD). If the argument is != 0 pull-up is enabled, |
44 | * if it is 0, pull-up is total, i.e. the pin is connected to VDD. | 44 | * if it is 0, pull-up is total, i.e. the pin is connected to VDD. |
45 | * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous | ||
46 | * input and output operations. | ||
47 | * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open | 45 | * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open |
48 | * collector) which means it is usually wired with other output ports | 46 | * collector) which means it is usually wired with other output ports |
49 | * which are then pulled up with an external resistor. Setting this | 47 | * which are then pulled up with an external resistor. Setting this |
@@ -98,7 +96,6 @@ enum pin_config_param { | |||
98 | PIN_CONFIG_BIAS_PULL_DOWN, | 96 | PIN_CONFIG_BIAS_PULL_DOWN, |
99 | PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, | 97 | PIN_CONFIG_BIAS_PULL_PIN_DEFAULT, |
100 | PIN_CONFIG_BIAS_PULL_UP, | 98 | PIN_CONFIG_BIAS_PULL_UP, |
101 | PIN_CONFIG_BIDIRECTIONAL, | ||
102 | PIN_CONFIG_DRIVE_OPEN_DRAIN, | 99 | PIN_CONFIG_DRIVE_OPEN_DRAIN, |
103 | PIN_CONFIG_DRIVE_OPEN_SOURCE, | 100 | PIN_CONFIG_DRIVE_OPEN_SOURCE, |
104 | PIN_CONFIG_DRIVE_PUSH_PULL, | 101 | PIN_CONFIG_DRIVE_PUSH_PULL, |
diff --git a/include/linux/serdev.h b/include/linux/serdev.h index cda76c6506ca..e69402d4a8ae 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h | |||
@@ -195,6 +195,7 @@ int serdev_device_open(struct serdev_device *); | |||
195 | void serdev_device_close(struct serdev_device *); | 195 | void serdev_device_close(struct serdev_device *); |
196 | unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); | 196 | unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); |
197 | void serdev_device_set_flow_control(struct serdev_device *, bool); | 197 | void serdev_device_set_flow_control(struct serdev_device *, bool); |
198 | int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); | ||
198 | void serdev_device_wait_until_sent(struct serdev_device *, long); | 199 | void serdev_device_wait_until_sent(struct serdev_device *, long); |
199 | int serdev_device_get_tiocm(struct serdev_device *); | 200 | int serdev_device_get_tiocm(struct serdev_device *); |
200 | int serdev_device_set_tiocm(struct serdev_device *, int, int); | 201 | int serdev_device_set_tiocm(struct serdev_device *, int, int); |
@@ -236,6 +237,12 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev | |||
236 | return 0; | 237 | return 0; |
237 | } | 238 | } |
238 | static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} | 239 | static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {} |
240 | static inline int serdev_device_write_buf(struct serdev_device *serdev, | ||
241 | const unsigned char *buf, | ||
242 | size_t count) | ||
243 | { | ||
244 | return -ENODEV; | ||
245 | } | ||
239 | static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} | 246 | static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} |
240 | static inline int serdev_device_get_tiocm(struct serdev_device *serdev) | 247 | static inline int serdev_device_get_tiocm(struct serdev_device *serdev) |
241 | { | 248 | { |
@@ -301,7 +308,7 @@ struct tty_driver; | |||
301 | struct device *serdev_tty_port_register(struct tty_port *port, | 308 | struct device *serdev_tty_port_register(struct tty_port *port, |
302 | struct device *parent, | 309 | struct device *parent, |
303 | struct tty_driver *drv, int idx); | 310 | struct tty_driver *drv, int idx); |
304 | void serdev_tty_port_unregister(struct tty_port *port); | 311 | int serdev_tty_port_unregister(struct tty_port *port); |
305 | #else | 312 | #else |
306 | static inline struct device *serdev_tty_port_register(struct tty_port *port, | 313 | static inline struct device *serdev_tty_port_register(struct tty_port *port, |
307 | struct device *parent, | 314 | struct device *parent, |
@@ -309,14 +316,10 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port, | |||
309 | { | 316 | { |
310 | return ERR_PTR(-ENODEV); | 317 | return ERR_PTR(-ENODEV); |
311 | } | 318 | } |
312 | static inline void serdev_tty_port_unregister(struct tty_port *port) {} | 319 | static inline int serdev_tty_port_unregister(struct tty_port *port) |
313 | #endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ | ||
314 | |||
315 | static inline int serdev_device_write_buf(struct serdev_device *serdev, | ||
316 | const unsigned char *data, | ||
317 | size_t count) | ||
318 | { | 320 | { |
319 | return serdev_device_write(serdev, data, count, 0); | 321 | return -ENODEV; |
320 | } | 322 | } |
323 | #endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */ | ||
321 | 324 | ||
322 | #endif /*_LINUX_SERDEV_H */ | 325 | #endif /*_LINUX_SERDEV_H */ |
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index 94631026f79c..11cef5a7bc87 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h | |||
@@ -336,7 +336,8 @@ xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p) | |||
336 | { | 336 | { |
337 | char *cp = (char *)p; | 337 | char *cp = (char *)p; |
338 | struct kvec *vec = &rqstp->rq_arg.head[0]; | 338 | struct kvec *vec = &rqstp->rq_arg.head[0]; |
339 | return cp == (char *)vec->iov_base + vec->iov_len; | 339 | return cp >= (char*)vec->iov_base |
340 | && cp <= (char*)vec->iov_base + vec->iov_len; | ||
340 | } | 341 | } |
341 | 342 | ||
342 | static inline int | 343 | static inline int |
diff --git a/include/linux/tty.h b/include/linux/tty.h index d07cd2105a6c..eccb4ec30a8a 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -558,6 +558,15 @@ extern struct device *tty_port_register_device_attr(struct tty_port *port, | |||
558 | struct tty_driver *driver, unsigned index, | 558 | struct tty_driver *driver, unsigned index, |
559 | struct device *device, void *drvdata, | 559 | struct device *device, void *drvdata, |
560 | const struct attribute_group **attr_grp); | 560 | const struct attribute_group **attr_grp); |
561 | extern struct device *tty_port_register_device_serdev(struct tty_port *port, | ||
562 | struct tty_driver *driver, unsigned index, | ||
563 | struct device *device); | ||
564 | extern struct device *tty_port_register_device_attr_serdev(struct tty_port *port, | ||
565 | struct tty_driver *driver, unsigned index, | ||
566 | struct device *device, void *drvdata, | ||
567 | const struct attribute_group **attr_grp); | ||
568 | extern void tty_port_unregister_device(struct tty_port *port, | ||
569 | struct tty_driver *driver, unsigned index); | ||
561 | extern int tty_port_alloc_xmit_buf(struct tty_port *port); | 570 | extern int tty_port_alloc_xmit_buf(struct tty_port *port); |
562 | extern void tty_port_free_xmit_buf(struct tty_port *port); | 571 | extern void tty_port_free_xmit_buf(struct tty_port *port); |
563 | extern void tty_port_destroy(struct tty_port *port); | 572 | extern void tty_port_destroy(struct tty_port *port); |
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h index eb50ce54b759..413335c8cb52 100644 --- a/include/media/cec-notifier.h +++ b/include/media/cec-notifier.h | |||
@@ -29,7 +29,7 @@ struct edid; | |||
29 | struct cec_adapter; | 29 | struct cec_adapter; |
30 | struct cec_notifier; | 30 | struct cec_notifier; |
31 | 31 | ||
32 | #ifdef CONFIG_MEDIA_CEC_NOTIFIER | 32 | #if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER) |
33 | 33 | ||
34 | /** | 34 | /** |
35 | * cec_notifier_get - find or create a new cec_notifier for the given device. | 35 | * cec_notifier_get - find or create a new cec_notifier for the given device. |
diff --git a/include/media/cec.h b/include/media/cec.h index b8eb895731d5..bfa88d4d67e1 100644 --- a/include/media/cec.h +++ b/include/media/cec.h | |||
@@ -173,7 +173,7 @@ struct cec_adapter { | |||
173 | bool passthrough; | 173 | bool passthrough; |
174 | struct cec_log_addrs log_addrs; | 174 | struct cec_log_addrs log_addrs; |
175 | 175 | ||
176 | #ifdef CONFIG_MEDIA_CEC_NOTIFIER | 176 | #ifdef CONFIG_CEC_NOTIFIER |
177 | struct cec_notifier *notifier; | 177 | struct cec_notifier *notifier; |
178 | #endif | 178 | #endif |
179 | 179 | ||
@@ -300,7 +300,7 @@ u16 cec_phys_addr_for_input(u16 phys_addr, u8 input); | |||
300 | */ | 300 | */ |
301 | int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); | 301 | int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port); |
302 | 302 | ||
303 | #ifdef CONFIG_MEDIA_CEC_NOTIFIER | 303 | #ifdef CONFIG_CEC_NOTIFIER |
304 | void cec_register_cec_notifier(struct cec_adapter *adap, | 304 | void cec_register_cec_notifier(struct cec_adapter *adap, |
305 | struct cec_notifier *notifier); | 305 | struct cec_notifier *notifier); |
306 | #endif | 306 | #endif |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index dbf0abba33b8..3e505bbff8ca 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -1007,6 +1007,7 @@ int inet6_hash_connect(struct inet_timewait_death_row *death_row, | |||
1007 | */ | 1007 | */ |
1008 | extern const struct proto_ops inet6_stream_ops; | 1008 | extern const struct proto_ops inet6_stream_ops; |
1009 | extern const struct proto_ops inet6_dgram_ops; | 1009 | extern const struct proto_ops inet6_dgram_ops; |
1010 | extern const struct proto_ops inet6_sockraw_ops; | ||
1010 | 1011 | ||
1011 | struct group_source_req; | 1012 | struct group_source_req; |
1012 | struct group_filter; | 1013 | struct group_filter; |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 82462db97183..28b577a35786 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -969,7 +969,7 @@ struct tcp_congestion_ops { | |||
969 | void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); | 969 | void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev); |
970 | /* call when ack arrives (optional) */ | 970 | /* call when ack arrives (optional) */ |
971 | void (*in_ack_event)(struct sock *sk, u32 flags); | 971 | void (*in_ack_event)(struct sock *sk, u32 flags); |
972 | /* new value of cwnd after loss (optional) */ | 972 | /* new value of cwnd after loss (required) */ |
973 | u32 (*undo_cwnd)(struct sock *sk); | 973 | u32 (*undo_cwnd)(struct sock *sk); |
974 | /* hook for packet ack accounting (optional) */ | 974 | /* hook for packet ack accounting (optional) */ |
975 | void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); | 975 | void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample); |
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h index f5f70e345318..355b81f4242d 100644 --- a/include/rdma/ib_sa.h +++ b/include/rdma/ib_sa.h | |||
@@ -158,7 +158,6 @@ enum sa_path_rec_type { | |||
158 | }; | 158 | }; |
159 | 159 | ||
160 | struct sa_path_rec_ib { | 160 | struct sa_path_rec_ib { |
161 | __be64 service_id; | ||
162 | __be16 dlid; | 161 | __be16 dlid; |
163 | __be16 slid; | 162 | __be16 slid; |
164 | u8 raw_traffic; | 163 | u8 raw_traffic; |
@@ -174,7 +173,6 @@ struct sa_path_rec_roce { | |||
174 | }; | 173 | }; |
175 | 174 | ||
176 | struct sa_path_rec_opa { | 175 | struct sa_path_rec_opa { |
177 | __be64 service_id; | ||
178 | __be32 dlid; | 176 | __be32 dlid; |
179 | __be32 slid; | 177 | __be32 slid; |
180 | u8 raw_traffic; | 178 | u8 raw_traffic; |
@@ -189,6 +187,7 @@ struct sa_path_rec_opa { | |||
189 | struct sa_path_rec { | 187 | struct sa_path_rec { |
190 | union ib_gid dgid; | 188 | union ib_gid dgid; |
191 | union ib_gid sgid; | 189 | union ib_gid sgid; |
190 | __be64 service_id; | ||
192 | /* reserved */ | 191 | /* reserved */ |
193 | __be32 flow_label; | 192 | __be32 flow_label; |
194 | u8 hop_limit; | 193 | u8 hop_limit; |
@@ -262,7 +261,7 @@ static inline void path_conv_opa_to_ib(struct sa_path_rec *ib, | |||
262 | ib->ib.dlid = htons(ntohl(opa->opa.dlid)); | 261 | ib->ib.dlid = htons(ntohl(opa->opa.dlid)); |
263 | ib->ib.slid = htons(ntohl(opa->opa.slid)); | 262 | ib->ib.slid = htons(ntohl(opa->opa.slid)); |
264 | } | 263 | } |
265 | ib->ib.service_id = opa->opa.service_id; | 264 | ib->service_id = opa->service_id; |
266 | ib->ib.raw_traffic = opa->opa.raw_traffic; | 265 | ib->ib.raw_traffic = opa->opa.raw_traffic; |
267 | } | 266 | } |
268 | 267 | ||
@@ -281,7 +280,7 @@ static inline void path_conv_ib_to_opa(struct sa_path_rec *opa, | |||
281 | } | 280 | } |
282 | opa->opa.slid = slid; | 281 | opa->opa.slid = slid; |
283 | opa->opa.dlid = dlid; | 282 | opa->opa.dlid = dlid; |
284 | opa->opa.service_id = ib->ib.service_id; | 283 | opa->service_id = ib->service_id; |
285 | opa->opa.raw_traffic = ib->ib.raw_traffic; | 284 | opa->opa.raw_traffic = ib->ib.raw_traffic; |
286 | } | 285 | } |
287 | 286 | ||
@@ -591,15 +590,6 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec) | |||
591 | (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2)); | 590 | (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2)); |
592 | } | 591 | } |
593 | 592 | ||
594 | static inline void sa_path_set_service_id(struct sa_path_rec *rec, | ||
595 | __be64 service_id) | ||
596 | { | ||
597 | if (rec->rec_type == SA_PATH_REC_TYPE_IB) | ||
598 | rec->ib.service_id = service_id; | ||
599 | else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) | ||
600 | rec->opa.service_id = service_id; | ||
601 | } | ||
602 | |||
603 | static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid) | 593 | static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid) |
604 | { | 594 | { |
605 | if (rec->rec_type == SA_PATH_REC_TYPE_IB) | 595 | if (rec->rec_type == SA_PATH_REC_TYPE_IB) |
@@ -625,15 +615,6 @@ static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec, | |||
625 | rec->opa.raw_traffic = raw_traffic; | 615 | rec->opa.raw_traffic = raw_traffic; |
626 | } | 616 | } |
627 | 617 | ||
628 | static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec) | ||
629 | { | ||
630 | if (rec->rec_type == SA_PATH_REC_TYPE_IB) | ||
631 | return rec->ib.service_id; | ||
632 | else if (rec->rec_type == SA_PATH_REC_TYPE_OPA) | ||
633 | return rec->opa.service_id; | ||
634 | return 0; | ||
635 | } | ||
636 | |||
637 | static inline __be32 sa_path_get_slid(struct sa_path_rec *rec) | 618 | static inline __be32 sa_path_get_slid(struct sa_path_rec *rec) |
638 | { | 619 | { |
639 | if (rec->rec_type == SA_PATH_REC_TYPE_IB) | 620 | if (rec->rec_type == SA_PATH_REC_TYPE_IB) |
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h index 585266144329..348c102cb5f6 100644 --- a/include/rdma/rdma_netlink.h +++ b/include/rdma/rdma_netlink.h | |||
@@ -10,9 +10,6 @@ struct ibnl_client_cbs { | |||
10 | struct module *module; | 10 | struct module *module; |
11 | }; | 11 | }; |
12 | 12 | ||
13 | int ibnl_init(void); | ||
14 | void ibnl_cleanup(void); | ||
15 | |||
16 | /** | 13 | /** |
17 | * Add a client to the list of IB netlink exporters. | 14 | * Add a client to the list of IB netlink exporters. |
18 | * @index: Index of the added client | 15 | * @index: Index of the added client |
@@ -77,11 +74,4 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
77 | int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh, | 74 | int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh, |
78 | unsigned int group, gfp_t flags); | 75 | unsigned int group, gfp_t flags); |
79 | 76 | ||
80 | /** | ||
81 | * Check if there are any listeners to the netlink group | ||
82 | * @group: the netlink group ID | ||
83 | * Returns 0 on success or a negative for no listeners. | ||
84 | */ | ||
85 | int ibnl_chk_listeners(unsigned int group); | ||
86 | |||
87 | #endif /* _RDMA_NETLINK_H */ | 77 | #endif /* _RDMA_NETLINK_H */ |
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 275581d483dd..5f17fb770477 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h | |||
@@ -557,6 +557,7 @@ struct iscsi_conn { | |||
557 | #define LOGIN_FLAGS_READ_ACTIVE 1 | 557 | #define LOGIN_FLAGS_READ_ACTIVE 1 |
558 | #define LOGIN_FLAGS_CLOSED 2 | 558 | #define LOGIN_FLAGS_CLOSED 2 |
559 | #define LOGIN_FLAGS_READY 4 | 559 | #define LOGIN_FLAGS_READY 4 |
560 | #define LOGIN_FLAGS_INITIAL_PDU 8 | ||
560 | unsigned long login_flags; | 561 | unsigned long login_flags; |
561 | struct delayed_work login_work; | 562 | struct delayed_work login_work; |
562 | struct delayed_work login_cleanup_work; | 563 | struct delayed_work login_cleanup_work; |
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index c3c9a0e1b3c9..8d4e85eae42c 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c | |||
@@ -4265,6 +4265,11 @@ static void kill_css(struct cgroup_subsys_state *css) | |||
4265 | { | 4265 | { |
4266 | lockdep_assert_held(&cgroup_mutex); | 4266 | lockdep_assert_held(&cgroup_mutex); |
4267 | 4267 | ||
4268 | if (css->flags & CSS_DYING) | ||
4269 | return; | ||
4270 | |||
4271 | css->flags |= CSS_DYING; | ||
4272 | |||
4268 | /* | 4273 | /* |
4269 | * This must happen before css is disassociated with its cgroup. | 4274 | * This must happen before css is disassociated with its cgroup. |
4270 | * See seq_css() for details. | 4275 | * See seq_css() for details. |
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index f6501f4f6040..ae643412948a 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c | |||
@@ -176,9 +176,9 @@ typedef enum { | |||
176 | } cpuset_flagbits_t; | 176 | } cpuset_flagbits_t; |
177 | 177 | ||
178 | /* convenient tests for these bits */ | 178 | /* convenient tests for these bits */ |
179 | static inline bool is_cpuset_online(const struct cpuset *cs) | 179 | static inline bool is_cpuset_online(struct cpuset *cs) |
180 | { | 180 | { |
181 | return test_bit(CS_ONLINE, &cs->flags); | 181 | return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css); |
182 | } | 182 | } |
183 | 183 | ||
184 | static inline int is_cpu_exclusive(const struct cpuset *cs) | 184 | static inline int is_cpu_exclusive(const struct cpuset *cs) |
diff --git a/kernel/fork.c b/kernel/fork.c index aa1076c5e4a9..e53770d2bf95 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1577,6 +1577,18 @@ static __latent_entropy struct task_struct *copy_process( | |||
1577 | if (!p) | 1577 | if (!p) |
1578 | goto fork_out; | 1578 | goto fork_out; |
1579 | 1579 | ||
1580 | /* | ||
1581 | * This _must_ happen before we call free_task(), i.e. before we jump | ||
1582 | * to any of the bad_fork_* labels. This is to avoid freeing | ||
1583 | * p->set_child_tid which is (ab)used as a kthread's data pointer for | ||
1584 | * kernel threads (PF_KTHREAD). | ||
1585 | */ | ||
1586 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | ||
1587 | /* | ||
1588 | * Clear TID on mm_release()? | ||
1589 | */ | ||
1590 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; | ||
1591 | |||
1580 | ftrace_graph_init_task(p); | 1592 | ftrace_graph_init_task(p); |
1581 | 1593 | ||
1582 | rt_mutex_init_task(p); | 1594 | rt_mutex_init_task(p); |
@@ -1743,11 +1755,6 @@ static __latent_entropy struct task_struct *copy_process( | |||
1743 | } | 1755 | } |
1744 | } | 1756 | } |
1745 | 1757 | ||
1746 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | ||
1747 | /* | ||
1748 | * Clear TID on mm_release()? | ||
1749 | */ | ||
1750 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; | ||
1751 | #ifdef CONFIG_BLOCK | 1758 | #ifdef CONFIG_BLOCK |
1752 | p->plug = NULL; | 1759 | p->plug = NULL; |
1753 | #endif | 1760 | #endif |
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 2d2d3a568e4e..adfe3b4cfe05 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -122,7 +122,7 @@ static void *alloc_insn_page(void) | |||
122 | return module_alloc(PAGE_SIZE); | 122 | return module_alloc(PAGE_SIZE); |
123 | } | 123 | } |
124 | 124 | ||
125 | static void free_insn_page(void *page) | 125 | void __weak free_insn_page(void *page) |
126 | { | 126 | { |
127 | module_memfree(page); | 127 | module_memfree(page); |
128 | } | 128 | } |
diff --git a/kernel/livepatch/Kconfig b/kernel/livepatch/Kconfig index 045022557936..ec4565122e65 100644 --- a/kernel/livepatch/Kconfig +++ b/kernel/livepatch/Kconfig | |||
@@ -10,6 +10,7 @@ config LIVEPATCH | |||
10 | depends on SYSFS | 10 | depends on SYSFS |
11 | depends on KALLSYMS_ALL | 11 | depends on KALLSYMS_ALL |
12 | depends on HAVE_LIVEPATCH | 12 | depends on HAVE_LIVEPATCH |
13 | depends on !TRIM_UNUSED_KSYMS | ||
13 | help | 14 | help |
14 | Say Y here if you want to support kernel live patching. | 15 | Say Y here if you want to support kernel live patching. |
15 | This option has no runtime impact until a kernel "patch" | 16 | This option has no runtime impact until a kernel "patch" |
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index b95509416909..28cd09e635ed 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
@@ -1785,12 +1785,14 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, | |||
1785 | int ret; | 1785 | int ret; |
1786 | 1786 | ||
1787 | raw_spin_lock_irq(&lock->wait_lock); | 1787 | raw_spin_lock_irq(&lock->wait_lock); |
1788 | |||
1789 | set_current_state(TASK_INTERRUPTIBLE); | ||
1790 | |||
1791 | /* sleep on the mutex */ | 1788 | /* sleep on the mutex */ |
1789 | set_current_state(TASK_INTERRUPTIBLE); | ||
1792 | ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); | 1790 | ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); |
1793 | 1791 | /* | |
1792 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | ||
1793 | * have to fix that up. | ||
1794 | */ | ||
1795 | fixup_rt_mutex_waiters(lock); | ||
1794 | raw_spin_unlock_irq(&lock->wait_lock); | 1796 | raw_spin_unlock_irq(&lock->wait_lock); |
1795 | 1797 | ||
1796 | return ret; | 1798 | return ret; |
@@ -1822,15 +1824,25 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, | |||
1822 | 1824 | ||
1823 | raw_spin_lock_irq(&lock->wait_lock); | 1825 | raw_spin_lock_irq(&lock->wait_lock); |
1824 | /* | 1826 | /* |
1827 | * Do an unconditional try-lock, this deals with the lock stealing | ||
1828 | * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter() | ||
1829 | * sets a NULL owner. | ||
1830 | * | ||
1831 | * We're not interested in the return value, because the subsequent | ||
1832 | * test on rt_mutex_owner() will infer that. If the trylock succeeded, | ||
1833 | * we will own the lock and it will have removed the waiter. If we | ||
1834 | * failed the trylock, we're still not owner and we need to remove | ||
1835 | * ourselves. | ||
1836 | */ | ||
1837 | try_to_take_rt_mutex(lock, current, waiter); | ||
1838 | /* | ||
1825 | * Unless we're the owner; we're still enqueued on the wait_list. | 1839 | * Unless we're the owner; we're still enqueued on the wait_list. |
1826 | * So check if we became owner, if not, take us off the wait_list. | 1840 | * So check if we became owner, if not, take us off the wait_list. |
1827 | */ | 1841 | */ |
1828 | if (rt_mutex_owner(lock) != current) { | 1842 | if (rt_mutex_owner(lock) != current) { |
1829 | remove_waiter(lock, waiter); | 1843 | remove_waiter(lock, waiter); |
1830 | fixup_rt_mutex_waiters(lock); | ||
1831 | cleanup = true; | 1844 | cleanup = true; |
1832 | } | 1845 | } |
1833 | |||
1834 | /* | 1846 | /* |
1835 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might | 1847 | * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might |
1836 | * have to fix that up. | 1848 | * have to fix that up. |
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 1370f067fb51..d2a1e6dd0291 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c | |||
@@ -825,8 +825,10 @@ static void check_thread_timers(struct task_struct *tsk, | |||
825 | * At the hard limit, we just die. | 825 | * At the hard limit, we just die. |
826 | * No need to calculate anything else now. | 826 | * No need to calculate anything else now. |
827 | */ | 827 | */ |
828 | pr_info("CPU Watchdog Timeout (hard): %s[%d]\n", | 828 | if (print_fatal_signals) { |
829 | tsk->comm, task_pid_nr(tsk)); | 829 | pr_info("CPU Watchdog Timeout (hard): %s[%d]\n", |
830 | tsk->comm, task_pid_nr(tsk)); | ||
831 | } | ||
830 | __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); | 832 | __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); |
831 | return; | 833 | return; |
832 | } | 834 | } |
@@ -838,8 +840,10 @@ static void check_thread_timers(struct task_struct *tsk, | |||
838 | soft += USEC_PER_SEC; | 840 | soft += USEC_PER_SEC; |
839 | sig->rlim[RLIMIT_RTTIME].rlim_cur = soft; | 841 | sig->rlim[RLIMIT_RTTIME].rlim_cur = soft; |
840 | } | 842 | } |
841 | pr_info("RT Watchdog Timeout (soft): %s[%d]\n", | 843 | if (print_fatal_signals) { |
842 | tsk->comm, task_pid_nr(tsk)); | 844 | pr_info("RT Watchdog Timeout (soft): %s[%d]\n", |
845 | tsk->comm, task_pid_nr(tsk)); | ||
846 | } | ||
843 | __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); | 847 | __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); |
844 | } | 848 | } |
845 | } | 849 | } |
@@ -936,8 +940,10 @@ static void check_process_timers(struct task_struct *tsk, | |||
936 | * At the hard limit, we just die. | 940 | * At the hard limit, we just die. |
937 | * No need to calculate anything else now. | 941 | * No need to calculate anything else now. |
938 | */ | 942 | */ |
939 | pr_info("RT Watchdog Timeout (hard): %s[%d]\n", | 943 | if (print_fatal_signals) { |
940 | tsk->comm, task_pid_nr(tsk)); | 944 | pr_info("RT Watchdog Timeout (hard): %s[%d]\n", |
945 | tsk->comm, task_pid_nr(tsk)); | ||
946 | } | ||
941 | __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); | 947 | __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); |
942 | return; | 948 | return; |
943 | } | 949 | } |
@@ -945,8 +951,10 @@ static void check_process_timers(struct task_struct *tsk, | |||
945 | /* | 951 | /* |
946 | * At the soft limit, send a SIGXCPU every second. | 952 | * At the soft limit, send a SIGXCPU every second. |
947 | */ | 953 | */ |
948 | pr_info("CPU Watchdog Timeout (soft): %s[%d]\n", | 954 | if (print_fatal_signals) { |
949 | tsk->comm, task_pid_nr(tsk)); | 955 | pr_info("CPU Watchdog Timeout (soft): %s[%d]\n", |
956 | tsk->comm, task_pid_nr(tsk)); | ||
957 | } | ||
950 | __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); | 958 | __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); |
951 | if (soft < hard) { | 959 | if (soft < hard) { |
952 | soft++; | 960 | soft++; |
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 74fdfe9ed3db..9e5841dc14b5 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -5063,7 +5063,7 @@ ftrace_graph_release(struct inode *inode, struct file *file) | |||
5063 | } | 5063 | } |
5064 | 5064 | ||
5065 | out: | 5065 | out: |
5066 | kfree(fgd->new_hash); | 5066 | free_ftrace_hash(fgd->new_hash); |
5067 | kfree(fgd); | 5067 | kfree(fgd); |
5068 | 5068 | ||
5069 | return ret; | 5069 | return ret; |
diff --git a/mm/gup.c b/mm/gup.c --- a/mm/gup.c +++ b/mm/gup.c | |||
@@ -407,12 +407,10 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, | |||
407 | 407 | ||
408 | ret = handle_mm_fault(vma, address, fault_flags); | 408 | ret = handle_mm_fault(vma, address, fault_flags); |
409 | if (ret & VM_FAULT_ERROR) { | 409 | if (ret & VM_FAULT_ERROR) { |
410 | if (ret & VM_FAULT_OOM) | 410 | int err = vm_fault_to_errno(ret, *flags); |
411 | return -ENOMEM; | 411 | |
412 | if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) | 412 | if (err) |
413 | return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT; | 413 | return err; |
414 | if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) | ||
415 | return -EFAULT; | ||
416 | BUG(); | 414 | BUG(); |
417 | } | 415 | } |
418 | 416 | ||
@@ -723,12 +721,10 @@ retry: | |||
723 | ret = handle_mm_fault(vma, address, fault_flags); | 721 | ret = handle_mm_fault(vma, address, fault_flags); |
724 | major |= ret & VM_FAULT_MAJOR; | 722 | major |= ret & VM_FAULT_MAJOR; |
725 | if (ret & VM_FAULT_ERROR) { | 723 | if (ret & VM_FAULT_ERROR) { |
726 | if (ret & VM_FAULT_OOM) | 724 | int err = vm_fault_to_errno(ret, 0); |
727 | return -ENOMEM; | 725 | |
728 | if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) | 726 | if (err) |
729 | return -EHWPOISON; | 727 | return err; |
730 | if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) | ||
731 | return -EFAULT; | ||
732 | BUG(); | 728 | BUG(); |
733 | } | 729 | } |
734 | 730 | ||
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e5828875f7bb..3eedb187e549 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -4170,6 +4170,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
4170 | } | 4170 | } |
4171 | ret = hugetlb_fault(mm, vma, vaddr, fault_flags); | 4171 | ret = hugetlb_fault(mm, vma, vaddr, fault_flags); |
4172 | if (ret & VM_FAULT_ERROR) { | 4172 | if (ret & VM_FAULT_ERROR) { |
4173 | int err = vm_fault_to_errno(ret, flags); | ||
4174 | |||
4175 | if (err) | ||
4176 | return err; | ||
4177 | |||
4173 | remainder = 0; | 4178 | remainder = 0; |
4174 | break; | 4179 | break; |
4175 | } | 4180 | } |
diff --git a/mm/ksm.c b/mm/ksm.c --- a/mm/ksm.c +++ b/mm/ksm.c | |||
@@ -1028,8 +1028,7 @@ static int try_to_merge_one_page(struct vm_area_struct *vma, | |||
1028 | goto out; | 1028 | goto out; |
1029 | 1029 | ||
1030 | if (PageTransCompound(page)) { | 1030 | if (PageTransCompound(page)) { |
1031 | err = split_huge_page(page); | 1031 | if (split_huge_page(page)) |
1032 | if (err) | ||
1033 | goto out_unlock; | 1032 | goto out_unlock; |
1034 | } | 1033 | } |
1035 | 1034 | ||
diff --git a/mm/memblock.c b/mm/memblock.c index b049c9b2dba8..7b8a5db76a2f 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -1739,6 +1739,29 @@ static void __init_memblock memblock_dump(struct memblock_type *type) | |||
1739 | } | 1739 | } |
1740 | } | 1740 | } |
1741 | 1741 | ||
1742 | extern unsigned long __init_memblock | ||
1743 | memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr) | ||
1744 | { | ||
1745 | struct memblock_region *rgn; | ||
1746 | unsigned long size = 0; | ||
1747 | int idx; | ||
1748 | |||
1749 | for_each_memblock_type((&memblock.reserved), rgn) { | ||
1750 | phys_addr_t start, end; | ||
1751 | |||
1752 | if (rgn->base + rgn->size < start_addr) | ||
1753 | continue; | ||
1754 | if (rgn->base > end_addr) | ||
1755 | continue; | ||
1756 | |||
1757 | start = rgn->base; | ||
1758 | end = start + rgn->size; | ||
1759 | size += end - start; | ||
1760 | } | ||
1761 | |||
1762 | return size; | ||
1763 | } | ||
1764 | |||
1742 | void __init_memblock __memblock_dump_all(void) | 1765 | void __init_memblock __memblock_dump_all(void) |
1743 | { | 1766 | { |
1744 | pr_info("MEMBLOCK configuration:\n"); | 1767 | pr_info("MEMBLOCK configuration:\n"); |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 2527dfeddb00..342fac9ba89b 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -1595,12 +1595,8 @@ static int soft_offline_huge_page(struct page *page, int flags) | |||
1595 | if (ret) { | 1595 | if (ret) { |
1596 | pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n", | 1596 | pr_info("soft offline: %#lx: migration failed %d, type %lx (%pGp)\n", |
1597 | pfn, ret, page->flags, &page->flags); | 1597 | pfn, ret, page->flags, &page->flags); |
1598 | /* | 1598 | if (!list_empty(&pagelist)) |
1599 | * We know that soft_offline_huge_page() tries to migrate | 1599 | putback_movable_pages(&pagelist); |
1600 | * only one hugepage pointed to by hpage, so we need not | ||
1601 | * run through the pagelist here. | ||
1602 | */ | ||
1603 | putback_active_hugepage(hpage); | ||
1604 | if (ret > 0) | 1600 | if (ret > 0) |
1605 | ret = -EIO; | 1601 | ret = -EIO; |
1606 | } else { | 1602 | } else { |
diff --git a/mm/memory.c b/mm/memory.c index 6ff5d729ded0..2e65df1831d9 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -3029,6 +3029,17 @@ static int __do_fault(struct vm_fault *vmf) | |||
3029 | return ret; | 3029 | return ret; |
3030 | } | 3030 | } |
3031 | 3031 | ||
3032 | /* | ||
3033 | * The ordering of these checks is important for pmds with _PAGE_DEVMAP set. | ||
3034 | * If we check pmd_trans_unstable() first we will trip the bad_pmd() check | ||
3035 | * inside of pmd_none_or_trans_huge_or_clear_bad(). This will end up correctly | ||
3036 | * returning 1 but not before it spams dmesg with the pmd_clear_bad() output. | ||
3037 | */ | ||
3038 | static int pmd_devmap_trans_unstable(pmd_t *pmd) | ||
3039 | { | ||
3040 | return pmd_devmap(*pmd) || pmd_trans_unstable(pmd); | ||
3041 | } | ||
3042 | |||
3032 | static int pte_alloc_one_map(struct vm_fault *vmf) | 3043 | static int pte_alloc_one_map(struct vm_fault *vmf) |
3033 | { | 3044 | { |
3034 | struct vm_area_struct *vma = vmf->vma; | 3045 | struct vm_area_struct *vma = vmf->vma; |
@@ -3052,18 +3063,27 @@ static int pte_alloc_one_map(struct vm_fault *vmf) | |||
3052 | map_pte: | 3063 | map_pte: |
3053 | /* | 3064 | /* |
3054 | * If a huge pmd materialized under us just retry later. Use | 3065 | * If a huge pmd materialized under us just retry later. Use |
3055 | * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd | 3066 | * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead of |
3056 | * didn't become pmd_trans_huge under us and then back to pmd_none, as | 3067 | * pmd_trans_huge() to ensure the pmd didn't become pmd_trans_huge |
3057 | * a result of MADV_DONTNEED running immediately after a huge pmd fault | 3068 | * under us and then back to pmd_none, as a result of MADV_DONTNEED |
3058 | * in a different thread of this mm, in turn leading to a misleading | 3069 | * running immediately after a huge pmd fault in a different thread of |
3059 | * pmd_trans_huge() retval. All we have to ensure is that it is a | 3070 | * this mm, in turn leading to a misleading pmd_trans_huge() retval. |
3060 | * regular pmd that we can walk with pte_offset_map() and we can do that | 3071 | * All we have to ensure is that it is a regular pmd that we can walk |
3061 | * through an atomic read in C, which is what pmd_trans_unstable() | 3072 | * with pte_offset_map() and we can do that through an atomic read in |
3062 | * provides. | 3073 | * C, which is what pmd_trans_unstable() provides. |
3063 | */ | 3074 | */ |
3064 | if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd)) | 3075 | if (pmd_devmap_trans_unstable(vmf->pmd)) |
3065 | return VM_FAULT_NOPAGE; | 3076 | return VM_FAULT_NOPAGE; |
3066 | 3077 | ||
3078 | /* | ||
3079 | * At this point we know that our vmf->pmd points to a page of ptes | ||
3080 | * and it cannot become pmd_none(), pmd_devmap() or pmd_trans_huge() | ||
3081 | * for the duration of the fault. If a racing MADV_DONTNEED runs and | ||
3082 | * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still | ||
3083 | * be valid and we will re-check to make sure the vmf->pte isn't | ||
3084 | * pte_none() under vmf->ptl protection when we return to | ||
3085 | * alloc_set_pte(). | ||
3086 | */ | ||
3067 | vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, | 3087 | vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, |
3068 | &vmf->ptl); | 3088 | &vmf->ptl); |
3069 | return 0; | 3089 | return 0; |
@@ -3690,7 +3710,7 @@ static int handle_pte_fault(struct vm_fault *vmf) | |||
3690 | vmf->pte = NULL; | 3710 | vmf->pte = NULL; |
3691 | } else { | 3711 | } else { |
3692 | /* See comment in pte_alloc_one_map() */ | 3712 | /* See comment in pte_alloc_one_map() */ |
3693 | if (pmd_trans_unstable(vmf->pmd) || pmd_devmap(*vmf->pmd)) | 3713 | if (pmd_devmap_trans_unstable(vmf->pmd)) |
3694 | return 0; | 3714 | return 0; |
3695 | /* | 3715 | /* |
3696 | * A regular pmd is established and it can't morph into a huge | 3716 | * A regular pmd is established and it can't morph into a huge |
diff --git a/mm/mlock.c b/mm/mlock.c index c483c5c20b4b..b562b5523a65 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -284,7 +284,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) | |||
284 | { | 284 | { |
285 | int i; | 285 | int i; |
286 | int nr = pagevec_count(pvec); | 286 | int nr = pagevec_count(pvec); |
287 | int delta_munlocked; | 287 | int delta_munlocked = -nr; |
288 | struct pagevec pvec_putback; | 288 | struct pagevec pvec_putback; |
289 | int pgrescued = 0; | 289 | int pgrescued = 0; |
290 | 290 | ||
@@ -304,6 +304,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) | |||
304 | continue; | 304 | continue; |
305 | else | 305 | else |
306 | __munlock_isolation_failed(page); | 306 | __munlock_isolation_failed(page); |
307 | } else { | ||
308 | delta_munlocked++; | ||
307 | } | 309 | } |
308 | 310 | ||
309 | /* | 311 | /* |
@@ -315,7 +317,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone) | |||
315 | pagevec_add(&pvec_putback, pvec->pages[i]); | 317 | pagevec_add(&pvec_putback, pvec->pages[i]); |
316 | pvec->pages[i] = NULL; | 318 | pvec->pages[i] = NULL; |
317 | } | 319 | } |
318 | delta_munlocked = -nr + pagevec_count(&pvec_putback); | ||
319 | __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); | 320 | __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked); |
320 | spin_unlock_irq(zone_lru_lock(zone)); | 321 | spin_unlock_irq(zone_lru_lock(zone)); |
321 | 322 | ||
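delta_munlocked is now accumulated during the scan itself: it starts at -nr and gets one added back for every page that turns out not to be PageMlocked, so the NR_MLOCK adjustment no longer has to be reconstructed from pvec_putback after the fact. A small worked example of the new accounting:

	/* hypothetical pagevec: nr = 14 pages, 3 of them were not actually mlocked */
	delta_munlocked = -14 + 3;	/* NR_MLOCK drops by 11, one per page whose
					 * PG_mlocked bit was cleared in this pass */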
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f9e450c6b6e4..2302f250d6b1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -292,6 +292,26 @@ int page_group_by_mobility_disabled __read_mostly; | |||
292 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT | 292 | #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT |
293 | static inline void reset_deferred_meminit(pg_data_t *pgdat) | 293 | static inline void reset_deferred_meminit(pg_data_t *pgdat) |
294 | { | 294 | { |
295 | unsigned long max_initialise; | ||
296 | unsigned long reserved_lowmem; | ||
297 | |||
298 | /* | ||
299 | * Initialise at least 2G of a node but also take into account that | ||
300 | * two large system hashes that can take up 1GB for 0.25TB/node. | ||
301 | */ | ||
302 | max_initialise = max(2UL << (30 - PAGE_SHIFT), | ||
303 | (pgdat->node_spanned_pages >> 8)); | ||
304 | |||
305 | /* | ||
306 | * Compensate for all the memblock reservations (e.g. crash kernel) | ||
307 | * from the initial estimation to make sure we will initialize enough | ||
308 | * memory to boot. | ||
309 | */ | ||
310 | reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn, | ||
311 | pgdat->node_start_pfn + max_initialise); | ||
312 | max_initialise += reserved_lowmem; | ||
313 | |||
314 | pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages); | ||
295 | pgdat->first_deferred_pfn = ULONG_MAX; | 315 | pgdat->first_deferred_pfn = ULONG_MAX; |
296 | } | 316 | } |
297 | 317 | ||
@@ -314,20 +334,11 @@ static inline bool update_defer_init(pg_data_t *pgdat, | |||
314 | unsigned long pfn, unsigned long zone_end, | 334 | unsigned long pfn, unsigned long zone_end, |
315 | unsigned long *nr_initialised) | 335 | unsigned long *nr_initialised) |
316 | { | 336 | { |
317 | unsigned long max_initialise; | ||
318 | |||
319 | /* Always populate low zones for address-contrained allocations */ | 337 | /* Always populate low zones for address-contrained allocations */ |
320 | if (zone_end < pgdat_end_pfn(pgdat)) | 338 | if (zone_end < pgdat_end_pfn(pgdat)) |
321 | return true; | 339 | return true; |
322 | /* | ||
323 | * Initialise at least 2G of a node but also take into account that | ||
324 | * two large system hashes that can take up 1GB for 0.25TB/node. | ||
325 | */ | ||
326 | max_initialise = max(2UL << (30 - PAGE_SHIFT), | ||
327 | (pgdat->node_spanned_pages >> 8)); | ||
328 | |||
329 | (*nr_initialised)++; | 340 | (*nr_initialised)++; |
330 | if ((*nr_initialised > max_initialise) && | 341 | if ((*nr_initialised > pgdat->static_init_size) && |
331 | (pfn & (PAGES_PER_SECTION - 1)) == 0) { | 342 | (pfn & (PAGES_PER_SECTION - 1)) == 0) { |
332 | pgdat->first_deferred_pfn = pfn; | 343 | pgdat->first_deferred_pfn = pfn; |
333 | return false; | 344 | return false; |
@@ -3870,7 +3881,9 @@ retry: | |||
3870 | goto got_pg; | 3881 | goto got_pg; |
3871 | 3882 | ||
3872 | /* Avoid allocations with no watermarks from looping endlessly */ | 3883 | /* Avoid allocations with no watermarks from looping endlessly */ |
3873 | if (test_thread_flag(TIF_MEMDIE)) | 3884 | if (test_thread_flag(TIF_MEMDIE) && |
3885 | (alloc_flags == ALLOC_NO_WATERMARKS || | ||
3886 | (gfp_mask & __GFP_NOMEMALLOC))) | ||
3874 | goto nopage; | 3887 | goto nopage; |
3875 | 3888 | ||
3876 | /* Retry as long as the OOM killer is making progress */ | 3889 | /* Retry as long as the OOM killer is making progress */ |
@@ -6136,7 +6149,6 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, | |||
6136 | /* pg_data_t should be reset to zero when it's allocated */ | 6149 | /* pg_data_t should be reset to zero when it's allocated */ |
6137 | WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx); | 6150 | WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx); |
6138 | 6151 | ||
6139 | reset_deferred_meminit(pgdat); | ||
6140 | pgdat->node_id = nid; | 6152 | pgdat->node_id = nid; |
6141 | pgdat->node_start_pfn = node_start_pfn; | 6153 | pgdat->node_start_pfn = node_start_pfn; |
6142 | pgdat->per_cpu_nodestats = NULL; | 6154 | pgdat->per_cpu_nodestats = NULL; |
@@ -6158,6 +6170,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size, | |||
6158 | (unsigned long)pgdat->node_mem_map); | 6170 | (unsigned long)pgdat->node_mem_map); |
6159 | #endif | 6171 | #endif |
6160 | 6172 | ||
6173 | reset_deferred_meminit(pgdat); | ||
6161 | free_area_init_core(pgdat); | 6174 | free_area_init_core(pgdat); |
6162 | } | 6175 | } |
6163 | 6176 | ||
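The 2 GiB-or-node_spanned_pages>>8 estimate moves out of the per-page update_defer_init() path into reset_deferred_meminit(), which now also adds any memblock reservations (crash kernel, large system hashes, and so on) that sit inside that window and caches the result in pgdat->static_init_size. The call site is also moved below the point where the node's spanned page count has been filled in, apparently so the estimate is computed from real numbers. Rough figures for a hypothetical 1 TiB node with 4 KiB pages and no reservations:

	/* PAGE_SHIFT = 12, node_spanned_pages = 1 TiB / 4 KiB = 1UL << 28 */
	max_initialise = max(2UL << (30 - 12),		/* 2 GiB ->  524288 pages */
			     (1UL << 28) >> 8);		/* 4 GiB -> 1048576 pages */
	/* static_init_size = min(1048576, 1UL << 28) = 1048576 pages, i.e. 4 GiB */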
@@ -5512,6 +5512,7 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) | |||
5512 | char mbuf[64]; | 5512 | char mbuf[64]; |
5513 | char *buf; | 5513 | char *buf; |
5514 | struct slab_attribute *attr = to_slab_attr(slab_attrs[i]); | 5514 | struct slab_attribute *attr = to_slab_attr(slab_attrs[i]); |
5515 | ssize_t len; | ||
5515 | 5516 | ||
5516 | if (!attr || !attr->store || !attr->show) | 5517 | if (!attr || !attr->store || !attr->show) |
5517 | continue; | 5518 | continue; |
@@ -5536,8 +5537,9 @@ static void memcg_propagate_slab_attrs(struct kmem_cache *s) | |||
5536 | buf = buffer; | 5537 | buf = buffer; |
5537 | } | 5538 | } |
5538 | 5539 | ||
5539 | attr->show(root_cache, buf); | 5540 | len = attr->show(root_cache, buf); |
5540 | attr->store(s, buf, strlen(buf)); | 5541 | if (len > 0) |
5542 | attr->store(s, buf, len); | ||
5541 | } | 5543 | } |
5542 | 5544 | ||
5543 | if (buffer) | 5545 | if (buffer) |
@@ -357,8 +357,11 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node) | |||
357 | WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL); | 357 | WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL); |
358 | 358 | ||
359 | /* | 359 | /* |
360 | * Make sure that larger requests are not too disruptive - no OOM | 360 | * We want to attempt a large physically contiguous block first because |
361 | * killer and no allocation failure warnings as we have a fallback | 361 | * it is less likely to fragment multiple larger blocks and therefore |
362 | * contribute to a long term fragmentation less than vmalloc fallback. | ||
363 | * However make sure that larger requests are not too disruptive - no | ||
364 | * OOM killer and no allocation failure warnings as we have a fallback. | ||
362 | */ | 365 | */ |
363 | if (size > PAGE_SIZE) { | 366 | if (size > PAGE_SIZE) { |
364 | kmalloc_flags |= __GFP_NOWARN; | 367 | kmalloc_flags |= __GFP_NOWARN; |
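The reworded comment spells out the ordering: kvmalloc_node() first attempts a physically contiguous kmalloc (quietly, with no OOM killer and no failure warning for large sizes) and only then falls back to vmalloc, because the contiguous attempt fragments memory less over time than always routing large requests through vmalloc. A minimal caller sketch (the struct name and element count are placeholders):

	struct entry *tbl;

	tbl = kvmalloc(nr * sizeof(*tbl), GFP_KERNEL);	/* kmalloc first, vmalloc fallback */
	if (!tbl)
		return -ENOMEM;
	/* ... use tbl ... */
	kvfree(tbl);					/* frees either kind of allocation */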
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 1e63ec466d7c..3bcda556971e 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br, | |||
595 | err = 0; | 595 | err = 0; |
596 | switch (nla_type(attr)) { | 596 | switch (nla_type(attr)) { |
597 | case IFLA_BRIDGE_VLAN_TUNNEL_INFO: | 597 | case IFLA_BRIDGE_VLAN_TUNNEL_INFO: |
598 | if (!(p->flags & BR_VLAN_TUNNEL)) | 598 | if (!p || !(p->flags & BR_VLAN_TUNNEL)) |
599 | return -EINVAL; | 599 | return -EINVAL; |
600 | err = br_parse_vlan_tunnel_info(attr, &tinfo_curr); | 600 | err = br_parse_vlan_tunnel_info(attr, &tinfo_curr); |
601 | if (err) | 601 | if (err) |
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 4efd5d54498a..89110319ef0f 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c | |||
@@ -173,7 +173,8 @@ static void br_stp_start(struct net_bridge *br) | |||
173 | br_debug(br, "using kernel STP\n"); | 173 | br_debug(br, "using kernel STP\n"); |
174 | 174 | ||
175 | /* To start timers on any ports left in blocking */ | 175 | /* To start timers on any ports left in blocking */ |
176 | mod_timer(&br->hello_timer, jiffies + br->hello_time); | 176 | if (br->dev->flags & IFF_UP) |
177 | mod_timer(&br->hello_timer, jiffies + br->hello_time); | ||
177 | br_port_state_selection(br); | 178 | br_port_state_selection(br); |
178 | } | 179 | } |
179 | 180 | ||
diff --git a/net/core/devlink.c b/net/core/devlink.c index b0b87a292e7c..a0adfc31a3fe 100644 --- a/net/core/devlink.c +++ b/net/core/devlink.c | |||
@@ -1680,8 +1680,10 @@ start_again: | |||
1680 | 1680 | ||
1681 | hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, | 1681 | hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, |
1682 | &devlink_nl_family, NLM_F_MULTI, cmd); | 1682 | &devlink_nl_family, NLM_F_MULTI, cmd); |
1683 | if (!hdr) | 1683 | if (!hdr) { |
1684 | nlmsg_free(skb); | ||
1684 | return -EMSGSIZE; | 1685 | return -EMSGSIZE; |
1686 | } | ||
1685 | 1687 | ||
1686 | if (devlink_nl_put_handle(skb, devlink)) | 1688 | if (devlink_nl_put_handle(skb, devlink)) |
1687 | goto nla_put_failure; | 1689 | goto nla_put_failure; |
@@ -2098,8 +2100,10 @@ start_again: | |||
2098 | 2100 | ||
2099 | hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, | 2101 | hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, |
2100 | &devlink_nl_family, NLM_F_MULTI, cmd); | 2102 | &devlink_nl_family, NLM_F_MULTI, cmd); |
2101 | if (!hdr) | 2103 | if (!hdr) { |
2104 | nlmsg_free(skb); | ||
2102 | return -EMSGSIZE; | 2105 | return -EMSGSIZE; |
2106 | } | ||
2103 | 2107 | ||
2104 | if (devlink_nl_put_handle(skb, devlink)) | 2108 | if (devlink_nl_put_handle(skb, devlink)) |
2105 | goto nla_put_failure; | 2109 | goto nla_put_failure; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index bba33cf4f7cd..82cfc9c7a090 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -3799,8 +3799,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk) | |||
3799 | 3799 | ||
3800 | spin_lock_irqsave(&q->lock, flags); | 3800 | spin_lock_irqsave(&q->lock, flags); |
3801 | skb = __skb_dequeue(q); | 3801 | skb = __skb_dequeue(q); |
3802 | if (skb && (skb_next = skb_peek(q))) | 3802 | if (skb && (skb_next = skb_peek(q))) { |
3803 | icmp_next = is_icmp_err_skb(skb_next); | 3803 | icmp_next = is_icmp_err_skb(skb_next); |
3804 | if (icmp_next) | ||
3805 | sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin; | ||
3806 | } | ||
3804 | spin_unlock_irqrestore(&q->lock, flags); | 3807 | spin_unlock_irqrestore(&q->lock, flags); |
3805 | 3808 | ||
3806 | if (is_icmp_err_skb(skb) && !icmp_next) | 3809 | if (is_icmp_err_skb(skb) && !icmp_next) |
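Dequeuing one ICMP error skb could previously leave sk->sk_err stale even though another ICMP error was still queued; the fix peeks at the next entry under the same queue lock and, if it is also an ICMP error, refreshes sk->sk_err from it, so the socket keeps reporting an error for as long as errors remain queued. For reference, user space drains this queue with MSG_ERRQUEUE; a hedged sketch of the consuming side (UDP socket with IP_RECVERR already enabled, buffer names are illustrative):

	char cbuf[512], dbuf[256];
	struct iovec iov = { .iov_base = dbuf, .iov_len = sizeof(dbuf) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf, .msg_controllen = sizeof(cbuf) };

	if (recvmsg(fd, &msg, MSG_ERRQUEUE) >= 0) {
		/* walk the CMSG headers for IP_RECVERR / struct sock_extended_err */
	}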
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index fdc448b30e56..517215391514 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c | |||
@@ -228,6 +228,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev, | |||
228 | return 0; | 228 | return 0; |
229 | } | 229 | } |
230 | 230 | ||
231 | #ifdef CONFIG_PM_SLEEP | ||
232 | int dsa_switch_suspend(struct dsa_switch *ds) | ||
233 | { | ||
234 | int i, ret = 0; | ||
235 | |||
236 | /* Suspend slave network devices */ | ||
237 | for (i = 0; i < ds->num_ports; i++) { | ||
238 | if (!dsa_is_port_initialized(ds, i)) | ||
239 | continue; | ||
240 | |||
241 | ret = dsa_slave_suspend(ds->ports[i].netdev); | ||
242 | if (ret) | ||
243 | return ret; | ||
244 | } | ||
245 | |||
246 | if (ds->ops->suspend) | ||
247 | ret = ds->ops->suspend(ds); | ||
248 | |||
249 | return ret; | ||
250 | } | ||
251 | EXPORT_SYMBOL_GPL(dsa_switch_suspend); | ||
252 | |||
253 | int dsa_switch_resume(struct dsa_switch *ds) | ||
254 | { | ||
255 | int i, ret = 0; | ||
256 | |||
257 | if (ds->ops->resume) | ||
258 | ret = ds->ops->resume(ds); | ||
259 | |||
260 | if (ret) | ||
261 | return ret; | ||
262 | |||
263 | /* Resume slave network devices */ | ||
264 | for (i = 0; i < ds->num_ports; i++) { | ||
265 | if (!dsa_is_port_initialized(ds, i)) | ||
266 | continue; | ||
267 | |||
268 | ret = dsa_slave_resume(ds->ports[i].netdev); | ||
269 | if (ret) | ||
270 | return ret; | ||
271 | } | ||
272 | |||
273 | return 0; | ||
274 | } | ||
275 | EXPORT_SYMBOL_GPL(dsa_switch_resume); | ||
276 | #endif | ||
277 | |||
231 | static struct packet_type dsa_pack_type __read_mostly = { | 278 | static struct packet_type dsa_pack_type __read_mostly = { |
232 | .type = cpu_to_be16(ETH_P_XDSA), | 279 | .type = cpu_to_be16(ETH_P_XDSA), |
233 | .func = dsa_switch_rcv, | 280 | .func = dsa_switch_rcv, |
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index cd13bb54a30c..f88e1dddb74a 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c | |||
@@ -474,8 +474,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst) | |||
474 | dsa_ds_unapply(dst, ds); | 474 | dsa_ds_unapply(dst, ds); |
475 | } | 475 | } |
476 | 476 | ||
477 | if (dst->cpu_dp) | 477 | if (dst->cpu_dp) { |
478 | dsa_cpu_port_ethtool_restore(dst->cpu_dp); | 478 | dsa_cpu_port_ethtool_restore(dst->cpu_dp); |
479 | dst->cpu_dp = NULL; | ||
480 | } | ||
479 | 481 | ||
480 | pr_info("DSA: tree %d unapplied\n", dst->tree); | 482 | pr_info("DSA: tree %d unapplied\n", dst->tree); |
481 | dst->applied = false; | 483 | dst->applied = false; |
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c index d534d8f4b9cf..3a56de8f51a8 100644 --- a/net/dsa/legacy.c +++ b/net/dsa/legacy.c | |||
@@ -288,53 +288,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds) | |||
288 | dsa_switch_unregister_notifier(ds); | 288 | dsa_switch_unregister_notifier(ds); |
289 | } | 289 | } |
290 | 290 | ||
291 | #ifdef CONFIG_PM_SLEEP | ||
292 | int dsa_switch_suspend(struct dsa_switch *ds) | ||
293 | { | ||
294 | int i, ret = 0; | ||
295 | |||
296 | /* Suspend slave network devices */ | ||
297 | for (i = 0; i < ds->num_ports; i++) { | ||
298 | if (!dsa_is_port_initialized(ds, i)) | ||
299 | continue; | ||
300 | |||
301 | ret = dsa_slave_suspend(ds->ports[i].netdev); | ||
302 | if (ret) | ||
303 | return ret; | ||
304 | } | ||
305 | |||
306 | if (ds->ops->suspend) | ||
307 | ret = ds->ops->suspend(ds); | ||
308 | |||
309 | return ret; | ||
310 | } | ||
311 | EXPORT_SYMBOL_GPL(dsa_switch_suspend); | ||
312 | |||
313 | int dsa_switch_resume(struct dsa_switch *ds) | ||
314 | { | ||
315 | int i, ret = 0; | ||
316 | |||
317 | if (ds->ops->resume) | ||
318 | ret = ds->ops->resume(ds); | ||
319 | |||
320 | if (ret) | ||
321 | return ret; | ||
322 | |||
323 | /* Resume slave network devices */ | ||
324 | for (i = 0; i < ds->num_ports; i++) { | ||
325 | if (!dsa_is_port_initialized(ds, i)) | ||
326 | continue; | ||
327 | |||
328 | ret = dsa_slave_resume(ds->ports[i].netdev); | ||
329 | if (ret) | ||
330 | return ret; | ||
331 | } | ||
332 | |||
333 | return 0; | ||
334 | } | ||
335 | EXPORT_SYMBOL_GPL(dsa_switch_resume); | ||
336 | #endif | ||
337 | |||
338 | /* platform driver init and cleanup *****************************************/ | 291 | /* platform driver init and cleanup *****************************************/ |
339 | static int dev_is_class(struct device *dev, void *class) | 292 | static int dev_is_class(struct device *dev, void *class) |
340 | { | 293 | { |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index f3dad1661343..58925b6597de 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] = | |||
1043 | .type = SOCK_DGRAM, | 1043 | .type = SOCK_DGRAM, |
1044 | .protocol = IPPROTO_ICMP, | 1044 | .protocol = IPPROTO_ICMP, |
1045 | .prot = &ping_prot, | 1045 | .prot = &ping_prot, |
1046 | .ops = &inet_dgram_ops, | 1046 | .ops = &inet_sockraw_ops, |
1047 | .flags = INET_PROTOSW_REUSE, | 1047 | .flags = INET_PROTOSW_REUSE, |
1048 | }, | 1048 | }, |
1049 | 1049 | ||
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index f7be94fc8431..87981fcdfcf2 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2381,9 +2381,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l | |||
2381 | return 0; | 2381 | return 0; |
2382 | } | 2382 | } |
2383 | 2383 | ||
2384 | static int tcp_repair_options_est(struct tcp_sock *tp, | 2384 | static int tcp_repair_options_est(struct sock *sk, |
2385 | struct tcp_repair_opt __user *optbuf, unsigned int len) | 2385 | struct tcp_repair_opt __user *optbuf, unsigned int len) |
2386 | { | 2386 | { |
2387 | struct tcp_sock *tp = tcp_sk(sk); | ||
2387 | struct tcp_repair_opt opt; | 2388 | struct tcp_repair_opt opt; |
2388 | 2389 | ||
2389 | while (len >= sizeof(opt)) { | 2390 | while (len >= sizeof(opt)) { |
@@ -2396,6 +2397,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp, | |||
2396 | switch (opt.opt_code) { | 2397 | switch (opt.opt_code) { |
2397 | case TCPOPT_MSS: | 2398 | case TCPOPT_MSS: |
2398 | tp->rx_opt.mss_clamp = opt.opt_val; | 2399 | tp->rx_opt.mss_clamp = opt.opt_val; |
2400 | tcp_mtup_init(sk); | ||
2399 | break; | 2401 | break; |
2400 | case TCPOPT_WINDOW: | 2402 | case TCPOPT_WINDOW: |
2401 | { | 2403 | { |
@@ -2556,7 +2558,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, | |||
2556 | if (!tp->repair) | 2558 | if (!tp->repair) |
2557 | err = -EINVAL; | 2559 | err = -EINVAL; |
2558 | else if (sk->sk_state == TCP_ESTABLISHED) | 2560 | else if (sk->sk_state == TCP_ESTABLISHED) |
2559 | err = tcp_repair_options_est(tp, | 2561 | err = tcp_repair_options_est(sk, |
2560 | (struct tcp_repair_opt __user *)optval, | 2562 | (struct tcp_repair_opt __user *)optval, |
2561 | optlen); | 2563 | optlen); |
2562 | else | 2564 | else |
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 6e3c512054a6..324c9bcc5456 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk) | |||
180 | { | 180 | { |
181 | const struct inet_connection_sock *icsk = inet_csk(sk); | 181 | const struct inet_connection_sock *icsk = inet_csk(sk); |
182 | 182 | ||
183 | tcp_sk(sk)->prior_ssthresh = 0; | ||
183 | if (icsk->icsk_ca_ops->init) | 184 | if (icsk->icsk_ca_ops->init) |
184 | icsk->icsk_ca_ops->init(sk); | 185 | icsk->icsk_ca_ops->init(sk); |
185 | if (tcp_ca_needs_ecn(sk)) | 186 | if (tcp_ca_needs_ecn(sk)) |
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c index 37ac9de713c6..8d772fea1dde 100644 --- a/net/ipv6/calipso.c +++ b/net/ipv6/calipso.c | |||
@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb, | |||
1319 | struct ipv6hdr *ip6_hdr; | 1319 | struct ipv6hdr *ip6_hdr; |
1320 | struct ipv6_opt_hdr *hop; | 1320 | struct ipv6_opt_hdr *hop; |
1321 | unsigned char buf[CALIPSO_MAX_BUFFER]; | 1321 | unsigned char buf[CALIPSO_MAX_BUFFER]; |
1322 | int len_delta, new_end, pad; | 1322 | int len_delta, new_end, pad, payload; |
1323 | unsigned int start, end; | 1323 | unsigned int start, end; |
1324 | 1324 | ||
1325 | ip6_hdr = ipv6_hdr(skb); | 1325 | ip6_hdr = ipv6_hdr(skb); |
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb, | |||
1346 | if (ret_val < 0) | 1346 | if (ret_val < 0) |
1347 | return ret_val; | 1347 | return ret_val; |
1348 | 1348 | ||
1349 | ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */ | ||
1350 | |||
1349 | if (len_delta) { | 1351 | if (len_delta) { |
1350 | if (len_delta > 0) | 1352 | if (len_delta > 0) |
1351 | skb_push(skb, len_delta); | 1353 | skb_push(skb, len_delta); |
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb, | |||
1355 | sizeof(*ip6_hdr) + start); | 1357 | sizeof(*ip6_hdr) + start); |
1356 | skb_reset_network_header(skb); | 1358 | skb_reset_network_header(skb); |
1357 | ip6_hdr = ipv6_hdr(skb); | 1359 | ip6_hdr = ipv6_hdr(skb); |
1360 | payload = ntohs(ip6_hdr->payload_len); | ||
1361 | ip6_hdr->payload_len = htons(payload + len_delta); | ||
1358 | } | 1362 | } |
1359 | 1363 | ||
1360 | hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); | 1364 | hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); |
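Two separate issues are fixed in this hunk: skb_cow() earlier in the function may reallocate the skb head, so the cached IPv6 header pointer has to be re-read before it is dereferenced again, and once the option area grows or shrinks, the IPv6 payload length must be adjusted by the same delta. The general pattern when editing headers in place looks roughly like this (illustrative only, not the exact CALIPSO code; needed_room is a placeholder):

	if (skb_cow(skb, needed_room))
		return -ENOMEM;
	iph = ipv6_hdr(skb);			/* re-derive: skb->data may have moved */

	skb_push(skb, len_delta);		/* or skb_pull() when shrinking */
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);
	iph->payload_len = htons(ntohs(iph->payload_len) + len_delta);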
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index 280268f1dd7b..cdb3728faca7 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
@@ -116,8 +116,10 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
116 | 116 | ||
117 | if (udpfrag) { | 117 | if (udpfrag) { |
118 | int err = ip6_find_1stfragopt(skb, &prevhdr); | 118 | int err = ip6_find_1stfragopt(skb, &prevhdr); |
119 | if (err < 0) | 119 | if (err < 0) { |
120 | kfree_skb_list(segs); | ||
120 | return ERR_PTR(err); | 121 | return ERR_PTR(err); |
122 | } | ||
121 | fptr = (struct frag_hdr *)((u8 *)ipv6h + err); | 123 | fptr = (struct frag_hdr *)((u8 *)ipv6h + err); |
122 | fptr->frag_off = htons(offset); | 124 | fptr->frag_off = htons(offset); |
123 | if (skb->next) | 125 | if (skb->next) |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 7ae6c503f1ca..9b37f9747fc6 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1095,6 +1095,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, | |||
1095 | 1095 | ||
1096 | if (!dst) { | 1096 | if (!dst) { |
1097 | route_lookup: | 1097 | route_lookup: |
1098 | /* add dsfield to flowlabel for route lookup */ | ||
1099 | fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel); | ||
1100 | |||
1098 | dst = ip6_route_output(net, NULL, fl6); | 1101 | dst = ip6_route_output(net, NULL, fl6); |
1099 | 1102 | ||
1100 | if (dst->error) | 1103 | if (dst->error) |
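The tunnel transmit path now folds the DS field into fl6->flowlabel before the route lookup, so ip6_route_output() and any policy routing rules keyed on the traffic class see the value that will actually go on the wire. ip6_make_flowinfo() simply packs the two pieces into the flow-info word; roughly as follows (the real helper lives in include/net/ipv6.h, treat this as a sketch):

	static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
	{
		return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;	/* shift is 20 */
	}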
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c index 9b522fa90e6d..ac826dd338ff 100644 --- a/net/ipv6/ping.c +++ b/net/ipv6/ping.c | |||
@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = { | |||
192 | .type = SOCK_DGRAM, | 192 | .type = SOCK_DGRAM, |
193 | .protocol = IPPROTO_ICMPV6, | 193 | .protocol = IPPROTO_ICMPV6, |
194 | .prot = &pingv6_prot, | 194 | .prot = &pingv6_prot, |
195 | .ops = &inet6_dgram_ops, | 195 | .ops = &inet6_sockraw_ops, |
196 | .flags = INET_PROTOSW_REUSE, | 196 | .flags = INET_PROTOSW_REUSE, |
197 | }; | 197 | }; |
198 | 198 | ||
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 1f992d9e261d..60be012fe708 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c | |||
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void) | |||
1338 | #endif /* CONFIG_PROC_FS */ | 1338 | #endif /* CONFIG_PROC_FS */ |
1339 | 1339 | ||
1340 | /* Same as inet6_dgram_ops, sans udp_poll. */ | 1340 | /* Same as inet6_dgram_ops, sans udp_poll. */ |
1341 | static const struct proto_ops inet6_sockraw_ops = { | 1341 | const struct proto_ops inet6_sockraw_ops = { |
1342 | .family = PF_INET6, | 1342 | .family = PF_INET6, |
1343 | .owner = THIS_MODULE, | 1343 | .owner = THIS_MODULE, |
1344 | .release = inet6_release, | 1344 | .release = inet6_release, |
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c index 0e015906f9ca..07d36573f50b 100644 --- a/net/ipv6/xfrm6_mode_ro.c +++ b/net/ipv6/xfrm6_mode_ro.c | |||
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb) | |||
47 | iph = ipv6_hdr(skb); | 47 | iph = ipv6_hdr(skb); |
48 | 48 | ||
49 | hdr_len = x->type->hdr_offset(x, skb, &prevhdr); | 49 | hdr_len = x->type->hdr_offset(x, skb, &prevhdr); |
50 | if (hdr_len < 0) | ||
51 | return hdr_len; | ||
50 | skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); | 52 | skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); |
51 | skb_set_network_header(skb, -x->props.header_len); | 53 | skb_set_network_header(skb, -x->props.header_len); |
52 | skb->transport_header = skb->network_header + hdr_len; | 54 | skb->transport_header = skb->network_header + hdr_len; |
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c index 7a92c0f31912..9ad07a91708e 100644 --- a/net/ipv6/xfrm6_mode_transport.c +++ b/net/ipv6/xfrm6_mode_transport.c | |||
@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb) | |||
30 | skb_set_inner_transport_header(skb, skb_transport_offset(skb)); | 30 | skb_set_inner_transport_header(skb, skb_transport_offset(skb)); |
31 | 31 | ||
32 | hdr_len = x->type->hdr_offset(x, skb, &prevhdr); | 32 | hdr_len = x->type->hdr_offset(x, skb, &prevhdr); |
33 | if (hdr_len < 0) | ||
34 | return hdr_len; | ||
33 | skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); | 35 | skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); |
34 | skb_set_network_header(skb, -x->props.header_len); | 36 | skb_set_network_header(skb, -x->props.header_len); |
35 | skb->transport_header = skb->network_header + hdr_len; | 37 | skb->transport_header = skb->network_header + hdr_len; |
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 60e2a62f7bef..cf2392b2ac71 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2010, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * Copyright(c) 2015 Intel Deutschland GmbH | 10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
13 | * it under the terms of the GNU General Public License version 2 as | 13 | * it under the terms of the GNU General Public License version 2 as |
@@ -741,46 +741,43 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local, | |||
741 | ieee80211_agg_start_txq(sta, tid, true); | 741 | ieee80211_agg_start_txq(sta, tid, true); |
742 | } | 742 | } |
743 | 743 | ||
744 | void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) | 744 | void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid, |
745 | struct tid_ampdu_tx *tid_tx) | ||
745 | { | 746 | { |
746 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 747 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
747 | struct ieee80211_local *local = sdata->local; | 748 | struct ieee80211_local *local = sdata->local; |
748 | struct sta_info *sta; | ||
749 | struct tid_ampdu_tx *tid_tx; | ||
750 | 749 | ||
751 | trace_api_start_tx_ba_cb(sdata, ra, tid); | 750 | if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) |
751 | return; | ||
752 | |||
753 | if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) | ||
754 | ieee80211_agg_tx_operational(local, sta, tid); | ||
755 | } | ||
756 | |||
757 | static struct tid_ampdu_tx * | ||
758 | ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata, | ||
759 | const u8 *ra, u16 tid, struct sta_info **sta) | ||
760 | { | ||
761 | struct tid_ampdu_tx *tid_tx; | ||
752 | 762 | ||
753 | if (tid >= IEEE80211_NUM_TIDS) { | 763 | if (tid >= IEEE80211_NUM_TIDS) { |
754 | ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", | 764 | ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", |
755 | tid, IEEE80211_NUM_TIDS); | 765 | tid, IEEE80211_NUM_TIDS); |
756 | return; | 766 | return NULL; |
757 | } | 767 | } |
758 | 768 | ||
759 | mutex_lock(&local->sta_mtx); | 769 | *sta = sta_info_get_bss(sdata, ra); |
760 | sta = sta_info_get_bss(sdata, ra); | 770 | if (!*sta) { |
761 | if (!sta) { | ||
762 | mutex_unlock(&local->sta_mtx); | ||
763 | ht_dbg(sdata, "Could not find station: %pM\n", ra); | 771 | ht_dbg(sdata, "Could not find station: %pM\n", ra); |
764 | return; | 772 | return NULL; |
765 | } | 773 | } |
766 | 774 | ||
767 | mutex_lock(&sta->ampdu_mlme.mtx); | 775 | tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]); |
768 | tid_tx = rcu_dereference_protected_tid_tx(sta, tid); | ||
769 | 776 | ||
770 | if (WARN_ON(!tid_tx)) { | 777 | if (WARN_ON(!tid_tx)) |
771 | ht_dbg(sdata, "addBA was not requested!\n"); | 778 | ht_dbg(sdata, "addBA was not requested!\n"); |
772 | goto unlock; | ||
773 | } | ||
774 | 779 | ||
775 | if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) | 780 | return tid_tx; |
776 | goto unlock; | ||
777 | |||
778 | if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) | ||
779 | ieee80211_agg_tx_operational(local, sta, tid); | ||
780 | |||
781 | unlock: | ||
782 | mutex_unlock(&sta->ampdu_mlme.mtx); | ||
783 | mutex_unlock(&local->sta_mtx); | ||
784 | } | 781 | } |
785 | 782 | ||
786 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | 783 | void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, |
@@ -788,19 +785,20 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | |||
788 | { | 785 | { |
789 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 786 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
790 | struct ieee80211_local *local = sdata->local; | 787 | struct ieee80211_local *local = sdata->local; |
791 | struct ieee80211_ra_tid *ra_tid; | 788 | struct sta_info *sta; |
792 | struct sk_buff *skb = dev_alloc_skb(0); | 789 | struct tid_ampdu_tx *tid_tx; |
793 | 790 | ||
794 | if (unlikely(!skb)) | 791 | trace_api_start_tx_ba_cb(sdata, ra, tid); |
795 | return; | ||
796 | 792 | ||
797 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 793 | rcu_read_lock(); |
798 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | 794 | tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta); |
799 | ra_tid->tid = tid; | 795 | if (!tid_tx) |
796 | goto out; | ||
800 | 797 | ||
801 | skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START; | 798 | set_bit(HT_AGG_STATE_START_CB, &tid_tx->state); |
802 | skb_queue_tail(&sdata->skb_queue, skb); | 799 | ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); |
803 | ieee80211_queue_work(&local->hw, &sdata->work); | 800 | out: |
801 | rcu_read_unlock(); | ||
804 | } | 802 | } |
805 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); | 803 | EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); |
806 | 804 | ||
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid) | |||
860 | } | 858 | } |
861 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); | 859 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); |
862 | 860 | ||
863 | void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) | 861 | void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid, |
862 | struct tid_ampdu_tx *tid_tx) | ||
864 | { | 863 | { |
865 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 864 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
866 | struct ieee80211_local *local = sdata->local; | ||
867 | struct sta_info *sta; | ||
868 | struct tid_ampdu_tx *tid_tx; | ||
869 | bool send_delba = false; | 865 | bool send_delba = false; |
870 | 866 | ||
871 | trace_api_stop_tx_ba_cb(sdata, ra, tid); | 867 | ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", |
872 | 868 | sta->sta.addr, tid); | |
873 | if (tid >= IEEE80211_NUM_TIDS) { | ||
874 | ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", | ||
875 | tid, IEEE80211_NUM_TIDS); | ||
876 | return; | ||
877 | } | ||
878 | |||
879 | ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid); | ||
880 | |||
881 | mutex_lock(&local->sta_mtx); | ||
882 | |||
883 | sta = sta_info_get_bss(sdata, ra); | ||
884 | if (!sta) { | ||
885 | ht_dbg(sdata, "Could not find station: %pM\n", ra); | ||
886 | goto unlock; | ||
887 | } | ||
888 | 869 | ||
889 | mutex_lock(&sta->ampdu_mlme.mtx); | ||
890 | spin_lock_bh(&sta->lock); | 870 | spin_lock_bh(&sta->lock); |
891 | tid_tx = rcu_dereference_protected_tid_tx(sta, tid); | ||
892 | 871 | ||
893 | if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { | 872 | if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { |
894 | ht_dbg(sdata, | 873 | ht_dbg(sdata, |
895 | "unexpected callback to A-MPDU stop for %pM tid %d\n", | 874 | "unexpected callback to A-MPDU stop for %pM tid %d\n", |
896 | sta->sta.addr, tid); | 875 | sta->sta.addr, tid); |
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) | |||
906 | spin_unlock_bh(&sta->lock); | 885 | spin_unlock_bh(&sta->lock); |
907 | 886 | ||
908 | if (send_delba) | 887 | if (send_delba) |
909 | ieee80211_send_delba(sdata, ra, tid, | 888 | ieee80211_send_delba(sdata, sta->sta.addr, tid, |
910 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); | 889 | WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); |
911 | |||
912 | mutex_unlock(&sta->ampdu_mlme.mtx); | ||
913 | unlock: | ||
914 | mutex_unlock(&local->sta_mtx); | ||
915 | } | 890 | } |
916 | 891 | ||
917 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | 892 | void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, |
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, | |||
919 | { | 894 | { |
920 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); | 895 | struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); |
921 | struct ieee80211_local *local = sdata->local; | 896 | struct ieee80211_local *local = sdata->local; |
922 | struct ieee80211_ra_tid *ra_tid; | 897 | struct sta_info *sta; |
923 | struct sk_buff *skb = dev_alloc_skb(0); | 898 | struct tid_ampdu_tx *tid_tx; |
924 | 899 | ||
925 | if (unlikely(!skb)) | 900 | trace_api_stop_tx_ba_cb(sdata, ra, tid); |
926 | return; | ||
927 | 901 | ||
928 | ra_tid = (struct ieee80211_ra_tid *) &skb->cb; | 902 | rcu_read_lock(); |
929 | memcpy(&ra_tid->ra, ra, ETH_ALEN); | 903 | tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta); |
930 | ra_tid->tid = tid; | 904 | if (!tid_tx) |
905 | goto out; | ||
931 | 906 | ||
932 | skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP; | 907 | set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state); |
933 | skb_queue_tail(&sdata->skb_queue, skb); | 908 | ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work); |
934 | ieee80211_queue_work(&local->hw, &sdata->work); | 909 | out: |
910 | rcu_read_unlock(); | ||
935 | } | 911 | } |
936 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); | 912 | EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); |
937 | 913 | ||
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index f4a528773563..6ca5442b1e03 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 7 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2010, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * Copyright 2017 Intel Deutschland GmbH | ||
10 | * | 11 | * |
11 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License version 2 as | 13 | * it under the terms of the GNU General Public License version 2 as |
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, | |||
289 | { | 290 | { |
290 | int i; | 291 | int i; |
291 | 292 | ||
292 | cancel_work_sync(&sta->ampdu_mlme.work); | ||
293 | |||
294 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { | 293 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { |
295 | __ieee80211_stop_tx_ba_session(sta, i, reason); | 294 | __ieee80211_stop_tx_ba_session(sta, i, reason); |
296 | __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, | 295 | __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, |
@@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, | |||
298 | reason != AGG_STOP_DESTROY_STA && | 297 | reason != AGG_STOP_DESTROY_STA && |
299 | reason != AGG_STOP_PEER_REQUEST); | 298 | reason != AGG_STOP_PEER_REQUEST); |
300 | } | 299 | } |
300 | |||
301 | /* stopping might queue the work again - so cancel only afterwards */ | ||
302 | cancel_work_sync(&sta->ampdu_mlme.work); | ||
301 | } | 303 | } |
302 | 304 | ||
303 | void ieee80211_ba_session_work(struct work_struct *work) | 305 | void ieee80211_ba_session_work(struct work_struct *work) |
@@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work) | |||
352 | spin_unlock_bh(&sta->lock); | 354 | spin_unlock_bh(&sta->lock); |
353 | 355 | ||
354 | tid_tx = rcu_dereference_protected_tid_tx(sta, tid); | 356 | tid_tx = rcu_dereference_protected_tid_tx(sta, tid); |
355 | if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP, | 357 | if (!tid_tx) |
356 | &tid_tx->state)) | 358 | continue; |
359 | |||
360 | if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state)) | ||
361 | ieee80211_start_tx_ba_cb(sta, tid, tid_tx); | ||
362 | if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state)) | ||
357 | ___ieee80211_stop_tx_ba_session(sta, tid, | 363 | ___ieee80211_stop_tx_ba_session(sta, tid, |
358 | AGG_STOP_LOCAL_REQUEST); | 364 | AGG_STOP_LOCAL_REQUEST); |
365 | if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state)) | ||
366 | ieee80211_stop_tx_ba_cb(sta, tid, tid_tx); | ||
359 | } | 367 | } |
360 | mutex_unlock(&sta->ampdu_mlme.mtx); | 368 | mutex_unlock(&sta->ampdu_mlme.mtx); |
361 | } | 369 | } |
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index f8f6c148f554..665501ac358f 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h | |||
@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg { | |||
1036 | 1036 | ||
1037 | enum sdata_queue_type { | 1037 | enum sdata_queue_type { |
1038 | IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, | 1038 | IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, |
1039 | IEEE80211_SDATA_QUEUE_AGG_START = 1, | ||
1040 | IEEE80211_SDATA_QUEUE_AGG_STOP = 2, | ||
1041 | IEEE80211_SDATA_QUEUE_RX_AGG_START = 3, | 1039 | IEEE80211_SDATA_QUEUE_RX_AGG_START = 3, |
1042 | IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4, | 1040 | IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4, |
1043 | }; | 1041 | }; |
@@ -1427,12 +1425,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata) | |||
1427 | return local->hw.wiphy->bands[band]; | 1425 | return local->hw.wiphy->bands[band]; |
1428 | } | 1426 | } |
1429 | 1427 | ||
1430 | /* this struct represents 802.11n's RA/TID combination */ | ||
1431 | struct ieee80211_ra_tid { | ||
1432 | u8 ra[ETH_ALEN]; | ||
1433 | u16 tid; | ||
1434 | }; | ||
1435 | |||
1436 | /* this struct holds the value parsing from channel switch IE */ | 1428 | /* this struct holds the value parsing from channel switch IE */ |
1437 | struct ieee80211_csa_ie { | 1429 | struct ieee80211_csa_ie { |
1438 | struct cfg80211_chan_def chandef; | 1430 | struct cfg80211_chan_def chandef; |
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
1794 | enum ieee80211_agg_stop_reason reason); | 1786 | enum ieee80211_agg_stop_reason reason); |
1795 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | 1787 | int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, |
1796 | enum ieee80211_agg_stop_reason reason); | 1788 | enum ieee80211_agg_stop_reason reason); |
1797 | void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); | 1789 | void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid, |
1798 | void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); | 1790 | struct tid_ampdu_tx *tid_tx); |
1791 | void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid, | ||
1792 | struct tid_ampdu_tx *tid_tx); | ||
1799 | void ieee80211_ba_session_work(struct work_struct *work); | 1793 | void ieee80211_ba_session_work(struct work_struct *work); |
1800 | void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); | 1794 | void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); |
1801 | void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid); | 1795 | void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 3bd5b81f5d81..8fae1a72e6a7 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work) | |||
1237 | struct ieee80211_local *local = sdata->local; | 1237 | struct ieee80211_local *local = sdata->local; |
1238 | struct sk_buff *skb; | 1238 | struct sk_buff *skb; |
1239 | struct sta_info *sta; | 1239 | struct sta_info *sta; |
1240 | struct ieee80211_ra_tid *ra_tid; | ||
1241 | struct ieee80211_rx_agg *rx_agg; | 1240 | struct ieee80211_rx_agg *rx_agg; |
1242 | 1241 | ||
1243 | if (!ieee80211_sdata_running(sdata)) | 1242 | if (!ieee80211_sdata_running(sdata)) |
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work) | |||
1253 | while ((skb = skb_dequeue(&sdata->skb_queue))) { | 1252 | while ((skb = skb_dequeue(&sdata->skb_queue))) { |
1254 | struct ieee80211_mgmt *mgmt = (void *)skb->data; | 1253 | struct ieee80211_mgmt *mgmt = (void *)skb->data; |
1255 | 1254 | ||
1256 | if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { | 1255 | if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) { |
1257 | ra_tid = (void *)&skb->cb; | ||
1258 | ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra, | ||
1259 | ra_tid->tid); | ||
1260 | } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) { | ||
1261 | ra_tid = (void *)&skb->cb; | ||
1262 | ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra, | ||
1263 | ra_tid->tid); | ||
1264 | } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) { | ||
1265 | rx_agg = (void *)&skb->cb; | 1256 | rx_agg = (void *)&skb->cb; |
1266 | mutex_lock(&local->sta_mtx); | 1257 | mutex_lock(&local->sta_mtx); |
1267 | sta = sta_info_get_bss(sdata, rx_agg->addr); | 1258 | sta = sta_info_get_bss(sdata, rx_agg->addr); |
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 7cdf7a835bb0..403e3cc58b57 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo) | |||
2155 | struct ieee80211_sta_rx_stats *cpurxs; | 2155 | struct ieee80211_sta_rx_stats *cpurxs; |
2156 | 2156 | ||
2157 | cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); | 2157 | cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); |
2158 | sinfo->rx_packets += cpurxs->dropped; | 2158 | sinfo->rx_dropped_misc += cpurxs->dropped; |
2159 | } | 2159 | } |
2160 | } | 2160 | } |
2161 | 2161 | ||
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index 5609cacb20d5..ea0747d6a6da 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h | |||
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags { | |||
116 | #define HT_AGG_STATE_STOPPING 3 | 116 | #define HT_AGG_STATE_STOPPING 3 |
117 | #define HT_AGG_STATE_WANT_START 4 | 117 | #define HT_AGG_STATE_WANT_START 4 |
118 | #define HT_AGG_STATE_WANT_STOP 5 | 118 | #define HT_AGG_STATE_WANT_STOP 5 |
119 | #define HT_AGG_STATE_START_CB 6 | ||
120 | #define HT_AGG_STATE_STOP_CB 7 | ||
119 | 121 | ||
120 | enum ieee80211_agg_stop_reason { | 122 | enum ieee80211_agg_stop_reason { |
121 | AGG_STOP_DECLINED, | 123 | AGG_STOP_DECLINED, |
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c index 94b3317232a6..b51582d92740 100644 --- a/net/mpls/af_mpls.c +++ b/net/mpls/af_mpls.c | |||
@@ -1483,7 +1483,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags) | |||
1483 | continue; | 1483 | continue; |
1484 | alive++; | 1484 | alive++; |
1485 | nh_flags &= ~flags; | 1485 | nh_flags &= ~flags; |
1486 | WRITE_ONCE(nh->nh_flags, flags); | 1486 | WRITE_ONCE(nh->nh_flags, nh_flags); |
1487 | } endfor_nexthops(rt); | 1487 | } endfor_nexthops(rt); |
1488 | 1488 | ||
1489 | WRITE_ONCE(rt->rt_nhn_alive, alive); | 1489 | WRITE_ONCE(rt->rt_nhn_alive, alive); |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 9799a50bc604..a8be9b72e6cd 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -890,8 +890,13 @@ restart: | |||
890 | } | 890 | } |
891 | out: | 891 | out: |
892 | local_bh_enable(); | 892 | local_bh_enable(); |
893 | if (last) | 893 | if (last) { |
894 | /* nf ct hash resize happened, now clear the leftover. */ | ||
895 | if ((struct nf_conn *)cb->args[1] == last) | ||
896 | cb->args[1] = 0; | ||
897 | |||
894 | nf_ct_put(last); | 898 | nf_ct_put(last); |
899 | } | ||
895 | 900 | ||
896 | while (i) { | 901 | while (i) { |
897 | i--; | 902 | i--; |
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 13875d599a85..1c5b14a6cab3 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c | |||
@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb, | |||
512 | u8 pf, unsigned int hooknum) | 512 | u8 pf, unsigned int hooknum) |
513 | { | 513 | { |
514 | const struct sctphdr *sh; | 514 | const struct sctphdr *sh; |
515 | struct sctphdr _sctph; | ||
516 | const char *logmsg; | 515 | const char *logmsg; |
517 | 516 | ||
518 | sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); | 517 | if (skb->len < dataoff + sizeof(struct sctphdr)) { |
519 | if (!sh) { | ||
520 | logmsg = "nf_ct_sctp: short packet "; | 518 | logmsg = "nf_ct_sctp: short packet "; |
521 | goto out_invalid; | 519 | goto out_invalid; |
522 | } | 520 | } |
523 | if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && | 521 | if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && |
524 | skb->ip_summed == CHECKSUM_NONE) { | 522 | skb->ip_summed == CHECKSUM_NONE) { |
523 | if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) { | ||
524 | logmsg = "nf_ct_sctp: failed to read header "; | ||
525 | goto out_invalid; | ||
526 | } | ||
527 | sh = (const struct sctphdr *)(skb->data + dataoff); | ||
525 | if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { | 528 | if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { |
526 | logmsg = "nf_ct_sctp: bad CRC "; | 529 | logmsg = "nf_ct_sctp: bad CRC "; |
527 | goto out_invalid; | 530 | goto out_invalid; |
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index ef0be325a0c6..6c72922d20ca 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c | |||
@@ -566,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data) | |||
566 | * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack() | 566 | * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack() |
567 | * will delete entry from already-freed table. | 567 | * will delete entry from already-freed table. |
568 | */ | 568 | */ |
569 | ct->status &= ~IPS_NAT_DONE_MASK; | 569 | clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status); |
570 | rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, | 570 | rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, |
571 | nf_nat_bysource_params); | 571 | nf_nat_bysource_params); |
572 | 572 | ||
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index e97e2fb53f0a..fbdbaa00dd5f 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c | |||
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, | |||
116 | else if (d > 0) | 116 | else if (d > 0) |
117 | p = &parent->rb_right; | 117 | p = &parent->rb_right; |
118 | else { | 118 | else { |
119 | if (nft_set_elem_active(&rbe->ext, genmask)) { | 119 | if (nft_rbtree_interval_end(rbe) && |
120 | if (nft_rbtree_interval_end(rbe) && | 120 | !nft_rbtree_interval_end(new)) { |
121 | !nft_rbtree_interval_end(new)) | 121 | p = &parent->rb_left; |
122 | p = &parent->rb_left; | 122 | } else if (!nft_rbtree_interval_end(rbe) && |
123 | else if (!nft_rbtree_interval_end(rbe) && | 123 | nft_rbtree_interval_end(new)) { |
124 | nft_rbtree_interval_end(new)) | 124 | p = &parent->rb_right; |
125 | p = &parent->rb_right; | 125 | } else if (nft_set_elem_active(&rbe->ext, genmask)) { |
126 | else { | 126 | *ext = &rbe->ext; |
127 | *ext = &rbe->ext; | 127 | return -EEXIST; |
128 | return -EEXIST; | 128 | } else { |
129 | } | 129 | p = &parent->rb_left; |
130 | } | 130 | } |
131 | } | 131 | } |
132 | } | 132 | } |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index ee841f00a6ec..7586d446d7dc 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -62,6 +62,7 @@ | |||
62 | #include <asm/cacheflush.h> | 62 | #include <asm/cacheflush.h> |
63 | #include <linux/hash.h> | 63 | #include <linux/hash.h> |
64 | #include <linux/genetlink.h> | 64 | #include <linux/genetlink.h> |
65 | #include <linux/net_namespace.h> | ||
65 | 66 | ||
66 | #include <net/net_namespace.h> | 67 | #include <net/net_namespace.h> |
67 | #include <net/sock.h> | 68 | #include <net/sock.h> |
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk, | |||
1415 | goto out; | 1416 | goto out; |
1416 | } | 1417 | } |
1417 | NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net); | 1418 | NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net); |
1418 | NETLINK_CB(p->skb2).nsid_is_set = true; | 1419 | if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED) |
1420 | NETLINK_CB(p->skb2).nsid_is_set = true; | ||
1419 | val = netlink_broadcast_deliver(sk, p->skb2); | 1421 | val = netlink_broadcast_deliver(sk, p->skb2); |
1420 | if (val < 0) { | 1422 | if (val < 0) { |
1421 | netlink_overrun(sk); | 1423 | netlink_overrun(sk); |
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c index 24fedd4b117e..03f6b5840764 100644 --- a/net/sunrpc/xprtrdma/backchannel.c +++ b/net/sunrpc/xprtrdma/backchannel.c | |||
@@ -119,11 +119,9 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs) | |||
119 | 119 | ||
120 | for (i = 0; i < (reqs << 1); i++) { | 120 | for (i = 0; i < (reqs << 1); i++) { |
121 | rqst = kzalloc(sizeof(*rqst), GFP_KERNEL); | 121 | rqst = kzalloc(sizeof(*rqst), GFP_KERNEL); |
122 | if (!rqst) { | 122 | if (!rqst) |
123 | pr_err("RPC: %s: Failed to create bc rpc_rqst\n", | ||
124 | __func__); | ||
125 | goto out_free; | 123 | goto out_free; |
126 | } | 124 | |
127 | dprintk("RPC: %s: new rqst %p\n", __func__, rqst); | 125 | dprintk("RPC: %s: new rqst %p\n", __func__, rqst); |
128 | 126 | ||
129 | rqst->rq_xprt = &r_xprt->rx_xprt; | 127 | rqst->rq_xprt = &r_xprt->rx_xprt; |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 16aff8ddc16f..d5b54c020dec 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -2432,7 +2432,12 @@ static void xs_tcp_setup_socket(struct work_struct *work) | |||
2432 | case -ENETUNREACH: | 2432 | case -ENETUNREACH: |
2433 | case -EADDRINUSE: | 2433 | case -EADDRINUSE: |
2434 | case -ENOBUFS: | 2434 | case -ENOBUFS: |
2435 | /* retry with existing socket, after a delay */ | 2435 | /* |
2436 | * xs_tcp_force_close() wakes tasks with -EIO. | ||
2437 | * We need to wake them first to ensure the | ||
2438 | * correct error code. | ||
2439 | */ | ||
2440 | xprt_wake_pending_tasks(xprt, status); | ||
2436 | xs_tcp_force_close(xprt); | 2441 | xs_tcp_force_close(xprt); |
2437 | goto out; | 2442 | goto out; |
2438 | } | 2443 | } |
diff --git a/scripts/gdb/linux/dmesg.py b/scripts/gdb/linux/dmesg.py index f9b92ece7834..5afd1098e33a 100644 --- a/scripts/gdb/linux/dmesg.py +++ b/scripts/gdb/linux/dmesg.py | |||
@@ -23,10 +23,11 @@ class LxDmesg(gdb.Command): | |||
23 | super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA) | 23 | super(LxDmesg, self).__init__("lx-dmesg", gdb.COMMAND_DATA) |
24 | 24 | ||
25 | def invoke(self, arg, from_tty): | 25 | def invoke(self, arg, from_tty): |
26 | log_buf_addr = int(str(gdb.parse_and_eval("log_buf")).split()[0], 16) | 26 | log_buf_addr = int(str(gdb.parse_and_eval( |
27 | log_first_idx = int(gdb.parse_and_eval("log_first_idx")) | 27 | "'printk.c'::log_buf")).split()[0], 16) |
28 | log_next_idx = int(gdb.parse_and_eval("log_next_idx")) | 28 | log_first_idx = int(gdb.parse_and_eval("'printk.c'::log_first_idx")) |
29 | log_buf_len = int(gdb.parse_and_eval("log_buf_len")) | 29 | log_next_idx = int(gdb.parse_and_eval("'printk.c'::log_next_idx")) |
30 | log_buf_len = int(gdb.parse_and_eval("'printk.c'::log_buf_len")) | ||
30 | 31 | ||
31 | inf = gdb.inferiors()[0] | 32 | inf = gdb.inferiors()[0] |
32 | start = log_buf_addr + log_first_idx | 33 | start = log_buf_addr + log_first_idx |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 918e45268915..a57988d617e9 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -2324,11 +2324,11 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
2324 | SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF), | 2324 | SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF), |
2325 | 2325 | ||
2326 | SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), | 2326 | SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), |
2327 | SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), | ||
2328 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), | ||
2329 | SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), | 2327 | SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), |
2330 | SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), | 2328 | SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), |
2329 | SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), | ||
2331 | SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), | 2330 | SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), |
2331 | SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), | ||
2332 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), | 2332 | SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), |
2333 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), | 2333 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), |
2334 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), | 2334 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), |
diff --git a/sound/usb/mixer_us16x08.c b/sound/usb/mixer_us16x08.c index dc48eedea92e..26ed23b18b77 100644 --- a/sound/usb/mixer_us16x08.c +++ b/sound/usb/mixer_us16x08.c | |||
@@ -698,16 +698,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol, | |||
698 | struct snd_usb_audio *chip = elem->head.mixer->chip; | 698 | struct snd_usb_audio *chip = elem->head.mixer->chip; |
699 | struct snd_us16x08_meter_store *store = elem->private_data; | 699 | struct snd_us16x08_meter_store *store = elem->private_data; |
700 | u8 meter_urb[64]; | 700 | u8 meter_urb[64]; |
701 | char tmp[sizeof(mix_init_msg2)] = {0}; | ||
702 | 701 | ||
703 | switch (kcontrol->private_value) { | 702 | switch (kcontrol->private_value) { |
704 | case 0: | 703 | case 0: { |
705 | snd_us16x08_send_urb(chip, (char *)mix_init_msg1, | 704 | char tmp[sizeof(mix_init_msg1)]; |
706 | sizeof(mix_init_msg1)); | 705 | |
706 | memcpy(tmp, mix_init_msg1, sizeof(mix_init_msg1)); | ||
707 | snd_us16x08_send_urb(chip, tmp, 4); | ||
707 | snd_us16x08_recv_urb(chip, meter_urb, | 708 | snd_us16x08_recv_urb(chip, meter_urb, |
708 | sizeof(meter_urb)); | 709 | sizeof(meter_urb)); |
709 | kcontrol->private_value++; | 710 | kcontrol->private_value++; |
710 | break; | 711 | break; |
712 | } | ||
711 | case 1: | 713 | case 1: |
712 | snd_us16x08_recv_urb(chip, meter_urb, | 714 | snd_us16x08_recv_urb(chip, meter_urb, |
713 | sizeof(meter_urb)); | 715 | sizeof(meter_urb)); |
@@ -718,15 +720,18 @@ static int snd_us16x08_meter_get(struct snd_kcontrol *kcontrol, | |||
718 | sizeof(meter_urb)); | 720 | sizeof(meter_urb)); |
719 | kcontrol->private_value++; | 721 | kcontrol->private_value++; |
720 | break; | 722 | break; |
721 | case 3: | 723 | case 3: { |
724 | char tmp[sizeof(mix_init_msg2)]; | ||
725 | |||
722 | memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2)); | 726 | memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2)); |
723 | tmp[2] = snd_get_meter_comp_index(store); | 727 | tmp[2] = snd_get_meter_comp_index(store); |
724 | snd_us16x08_send_urb(chip, tmp, sizeof(mix_init_msg2)); | 728 | snd_us16x08_send_urb(chip, tmp, 10); |
725 | snd_us16x08_recv_urb(chip, meter_urb, | 729 | snd_us16x08_recv_urb(chip, meter_urb, |
726 | sizeof(meter_urb)); | 730 | sizeof(meter_urb)); |
727 | kcontrol->private_value = 0; | 731 | kcontrol->private_value = 0; |
728 | break; | 732 | break; |
729 | } | 733 | } |
734 | } | ||
730 | 735 | ||
731 | for (set = 0; set < 6; set++) | 736 | for (set = 0; set < 6; set++) |
732 | get_meter_levels_from_urb(set, store, meter_urb); | 737 | get_meter_levels_from_urb(set, store, meter_urb); |
@@ -1135,7 +1140,7 @@ static const struct snd_us16x08_control_params eq_controls[] = { | |||
1135 | .control_id = SND_US16X08_ID_EQLOWMIDWIDTH, | 1140 | .control_id = SND_US16X08_ID_EQLOWMIDWIDTH, |
1136 | .type = USB_MIXER_U8, | 1141 | .type = USB_MIXER_U8, |
1137 | .num_channels = 16, | 1142 | .num_channels = 16, |
1138 | .name = "EQ MidQLow Q", | 1143 | .name = "EQ MidLow Q", |
1139 | }, | 1144 | }, |
1140 | { /* EQ mid high gain */ | 1145 | { /* EQ mid high gain */ |
1141 | .kcontrol_new = &snd_us16x08_eq_gain_ctl, | 1146 | .kcontrol_new = &snd_us16x08_eq_gain_ctl, |
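The meter-get fix above replaces a single scratch buffer sized for mix_init_msg2 with a per-case buffer sized for the message actually being sent, copied out of the const array before the URB is queued. A standalone sketch of that pattern (not the driver code; the message bytes are made up and a print stub replaces snd_us16x08_send_urb()):

#include <stdio.h>
#include <string.h>

/* Stand-ins for the device init messages; their lengths differ, which is
 * why each switch case gets its own correctly sized buffer. */
static const char mix_init_msg1[] = { 0x61, 0x02, 0x04, 0x62 };
static const char mix_init_msg2[] = { 0x61, 0x03, 0x04, 0x68, 0x01,
				      0x64, 0x02, 0x4b, 0x60, 0x62 };

/* Stub: the real driver hands the buffer to the USB layer, which expects
 * writable memory, hence the memcpy out of the const arrays. */
static void send_urb_stub(char *buf, int len)
{
	printf("sending %d bytes, first byte 0x%02x\n", len, (unsigned char)buf[0]);
}

int main(void)
{
	{	/* like case 0: scratch buffer sized for msg1, not msg2 */
		char tmp[sizeof(mix_init_msg1)];

		memcpy(tmp, mix_init_msg1, sizeof(mix_init_msg1));
		send_urb_stub(tmp, sizeof(tmp));
	}
	{	/* like case 3: separate buffer, patched before sending */
		char tmp[sizeof(mix_init_msg2)];

		memcpy(tmp, mix_init_msg2, sizeof(mix_init_msg2));
		tmp[2] = 0x05;	/* illustrative meter/comp index for this request */
		send_urb_stub(tmp, sizeof(tmp));
	}
	return 0;
}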
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 6ebd3e6a1fd1..5e3c673fa3f4 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h | |||
@@ -27,6 +27,8 @@ | |||
27 | #define __KVM_HAVE_IRQ_LINE | 27 | #define __KVM_HAVE_IRQ_LINE |
28 | #define __KVM_HAVE_READONLY_MEM | 28 | #define __KVM_HAVE_READONLY_MEM |
29 | 29 | ||
30 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
31 | |||
30 | #define KVM_REG_SIZE(id) \ | 32 | #define KVM_REG_SIZE(id) \ |
31 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) | 33 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) |
32 | 34 | ||
@@ -114,6 +116,8 @@ struct kvm_debug_exit_arch { | |||
114 | }; | 116 | }; |
115 | 117 | ||
116 | struct kvm_sync_regs { | 118 | struct kvm_sync_regs { |
119 | /* Used with KVM_CAP_ARM_USER_IRQ */ | ||
120 | __u64 device_irq_level; | ||
117 | }; | 121 | }; |
118 | 122 | ||
119 | struct kvm_arch_memory_slot { | 123 | struct kvm_arch_memory_slot { |
@@ -192,13 +196,17 @@ struct kvm_arch_memory_slot { | |||
192 | #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 | 196 | #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 |
193 | #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 | 197 | #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 |
194 | #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 | 198 | #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 |
199 | #define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 | ||
195 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 | 200 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 |
196 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ | 201 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ |
197 | (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) | 202 | (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) |
198 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff | 203 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff |
199 | #define VGIC_LEVEL_INFO_LINE_LEVEL 0 | 204 | #define VGIC_LEVEL_INFO_LINE_LEVEL 0 |
200 | 205 | ||
201 | #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 | 206 | #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 |
207 | #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 | ||
208 | #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 | ||
209 | #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 | ||
202 | 210 | ||
203 | /* KVM_IRQ_LINE irq field index values */ | 211 | /* KVM_IRQ_LINE irq field index values */ |
204 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 | 212 | #define KVM_ARM_IRQ_TYPE_SHIFT 24 |
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h index c2860358ae3e..70eea2ecc663 100644 --- a/tools/arch/arm64/include/uapi/asm/kvm.h +++ b/tools/arch/arm64/include/uapi/asm/kvm.h | |||
@@ -39,6 +39,8 @@ | |||
39 | #define __KVM_HAVE_IRQ_LINE | 39 | #define __KVM_HAVE_IRQ_LINE |
40 | #define __KVM_HAVE_READONLY_MEM | 40 | #define __KVM_HAVE_READONLY_MEM |
41 | 41 | ||
42 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
43 | |||
42 | #define KVM_REG_SIZE(id) \ | 44 | #define KVM_REG_SIZE(id) \ |
43 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) | 45 | (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) |
44 | 46 | ||
@@ -143,6 +145,8 @@ struct kvm_debug_exit_arch { | |||
143 | #define KVM_GUESTDBG_USE_HW (1 << 17) | 145 | #define KVM_GUESTDBG_USE_HW (1 << 17) |
144 | 146 | ||
145 | struct kvm_sync_regs { | 147 | struct kvm_sync_regs { |
148 | /* Used with KVM_CAP_ARM_USER_IRQ */ | ||
149 | __u64 device_irq_level; | ||
146 | }; | 150 | }; |
147 | 151 | ||
148 | struct kvm_arch_memory_slot { | 152 | struct kvm_arch_memory_slot { |
@@ -212,13 +216,17 @@ struct kvm_arch_memory_slot { | |||
212 | #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 | 216 | #define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 |
213 | #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 | 217 | #define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 |
214 | #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 | 218 | #define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 |
219 | #define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 | ||
215 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 | 220 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 |
216 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ | 221 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ |
217 | (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) | 222 | (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) |
218 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff | 223 | #define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff |
219 | #define VGIC_LEVEL_INFO_LINE_LEVEL 0 | 224 | #define VGIC_LEVEL_INFO_LINE_LEVEL 0 |
220 | 225 | ||
221 | #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 | 226 | #define KVM_DEV_ARM_VGIC_CTRL_INIT 0 |
227 | #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 | ||
228 | #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 | ||
229 | #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 | ||
222 | 230 | ||
223 | /* Device Control API on vcpu fd */ | 231 | /* Device Control API on vcpu fd */ |
224 | #define KVM_ARM_VCPU_PMU_V3_CTRL 0 | 232 | #define KVM_ARM_VCPU_PMU_V3_CTRL 0 |
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h index 4edbe4bb0e8b..07fbeb927834 100644 --- a/tools/arch/powerpc/include/uapi/asm/kvm.h +++ b/tools/arch/powerpc/include/uapi/asm/kvm.h | |||
@@ -29,6 +29,9 @@ | |||
29 | #define __KVM_HAVE_IRQ_LINE | 29 | #define __KVM_HAVE_IRQ_LINE |
30 | #define __KVM_HAVE_GUEST_DEBUG | 30 | #define __KVM_HAVE_GUEST_DEBUG |
31 | 31 | ||
32 | /* Not always available, but if it is, this is the correct offset. */ | ||
33 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 | ||
34 | |||
32 | struct kvm_regs { | 35 | struct kvm_regs { |
33 | __u64 pc; | 36 | __u64 pc; |
34 | __u64 cr; | 37 | __u64 cr; |
diff --git a/tools/arch/s390/include/uapi/asm/kvm.h b/tools/arch/s390/include/uapi/asm/kvm.h index 7f4fd65e9208..3dd2a1d308dd 100644 --- a/tools/arch/s390/include/uapi/asm/kvm.h +++ b/tools/arch/s390/include/uapi/asm/kvm.h | |||
@@ -26,6 +26,8 @@ | |||
26 | #define KVM_DEV_FLIC_ADAPTER_REGISTER 6 | 26 | #define KVM_DEV_FLIC_ADAPTER_REGISTER 6 |
27 | #define KVM_DEV_FLIC_ADAPTER_MODIFY 7 | 27 | #define KVM_DEV_FLIC_ADAPTER_MODIFY 7 |
28 | #define KVM_DEV_FLIC_CLEAR_IO_IRQ 8 | 28 | #define KVM_DEV_FLIC_CLEAR_IO_IRQ 8 |
29 | #define KVM_DEV_FLIC_AISM 9 | ||
30 | #define KVM_DEV_FLIC_AIRQ_INJECT 10 | ||
29 | /* | 31 | /* |
30 | * We can have up to 4*64k pending subchannels + 8 adapter interrupts, | 32 | * We can have up to 4*64k pending subchannels + 8 adapter interrupts, |
31 | * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. | 33 | * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts. |
@@ -41,7 +43,14 @@ struct kvm_s390_io_adapter { | |||
41 | __u8 isc; | 43 | __u8 isc; |
42 | __u8 maskable; | 44 | __u8 maskable; |
43 | __u8 swap; | 45 | __u8 swap; |
44 | __u8 pad; | 46 | __u8 flags; |
47 | }; | ||
48 | |||
49 | #define KVM_S390_ADAPTER_SUPPRESSIBLE 0x01 | ||
50 | |||
51 | struct kvm_s390_ais_req { | ||
52 | __u8 isc; | ||
53 | __u16 mode; | ||
45 | }; | 54 | }; |
46 | 55 | ||
47 | #define KVM_S390_IO_ADAPTER_MASK 1 | 56 | #define KVM_S390_IO_ADAPTER_MASK 1 |
@@ -110,6 +119,7 @@ struct kvm_s390_vm_cpu_machine { | |||
110 | #define KVM_S390_VM_CPU_FEAT_CMMA 10 | 119 | #define KVM_S390_VM_CPU_FEAT_CMMA 10 |
111 | #define KVM_S390_VM_CPU_FEAT_PFMFI 11 | 120 | #define KVM_S390_VM_CPU_FEAT_PFMFI 11 |
112 | #define KVM_S390_VM_CPU_FEAT_SIGPIF 12 | 121 | #define KVM_S390_VM_CPU_FEAT_SIGPIF 12 |
122 | #define KVM_S390_VM_CPU_FEAT_KSS 13 | ||
113 | struct kvm_s390_vm_cpu_feat { | 123 | struct kvm_s390_vm_cpu_feat { |
114 | __u64 feat[16]; | 124 | __u64 feat[16]; |
115 | }; | 125 | }; |
@@ -198,6 +208,10 @@ struct kvm_guest_debug_arch { | |||
198 | #define KVM_SYNC_VRS (1UL << 6) | 208 | #define KVM_SYNC_VRS (1UL << 6) |
199 | #define KVM_SYNC_RICCB (1UL << 7) | 209 | #define KVM_SYNC_RICCB (1UL << 7) |
200 | #define KVM_SYNC_FPRS (1UL << 8) | 210 | #define KVM_SYNC_FPRS (1UL << 8) |
211 | #define KVM_SYNC_GSCB (1UL << 9) | ||
212 | /* length and alignment of the sdnx as a power of two */ | ||
213 | #define SDNXC 8 | ||
214 | #define SDNXL (1UL << SDNXC) | ||
201 | /* definition of registers in kvm_run */ | 215 | /* definition of registers in kvm_run */ |
202 | struct kvm_sync_regs { | 216 | struct kvm_sync_regs { |
203 | __u64 prefix; /* prefix register */ | 217 | __u64 prefix; /* prefix register */ |
@@ -218,8 +232,16 @@ struct kvm_sync_regs { | |||
218 | }; | 232 | }; |
219 | __u8 reserved[512]; /* for future vector expansion */ | 233 | __u8 reserved[512]; /* for future vector expansion */ |
220 | __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ | 234 | __u32 fpc; /* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */ |
221 | __u8 padding[52]; /* riccb needs to be 64byte aligned */ | 235 | __u8 padding1[52]; /* riccb needs to be 64byte aligned */ |
222 | __u8 riccb[64]; /* runtime instrumentation controls block */ | 236 | __u8 riccb[64]; /* runtime instrumentation controls block */ |
237 | __u8 padding2[192]; /* sdnx needs to be 256byte aligned */ | ||
238 | union { | ||
239 | __u8 sdnx[SDNXL]; /* state description annex */ | ||
240 | struct { | ||
241 | __u64 reserved1[2]; | ||
242 | __u64 gscb[4]; | ||
243 | }; | ||
244 | }; | ||
223 | }; | 245 | }; |
224 | 246 | ||
225 | #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) | 247 | #define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1) |
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 0fe00446f9ca..2701e5f8145b 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h | |||
@@ -202,6 +202,8 @@ | |||
202 | #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ | 202 | #define X86_FEATURE_AVX512_4VNNIW (7*32+16) /* AVX-512 Neural Network Instructions */ |
203 | #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ | 203 | #define X86_FEATURE_AVX512_4FMAPS (7*32+17) /* AVX-512 Multiply Accumulation Single precision */ |
204 | 204 | ||
205 | #define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */ | ||
206 | |||
205 | /* Virtualization flags: Linux defined, word 8 */ | 207 | /* Virtualization flags: Linux defined, word 8 */ |
206 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ | 208 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
207 | #define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ | 209 | #define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ |
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h index 85599ad4d024..5dff775af7cd 100644 --- a/tools/arch/x86/include/asm/disabled-features.h +++ b/tools/arch/x86/include/asm/disabled-features.h | |||
@@ -36,6 +36,12 @@ | |||
36 | # define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31)) | 36 | # define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31)) |
37 | #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ | 37 | #endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */ |
38 | 38 | ||
39 | #ifdef CONFIG_X86_5LEVEL | ||
40 | # define DISABLE_LA57 0 | ||
41 | #else | ||
42 | # define DISABLE_LA57 (1<<(X86_FEATURE_LA57 & 31)) | ||
43 | #endif | ||
44 | |||
39 | /* | 45 | /* |
40 | * Make sure to add features to the correct mask | 46 | * Make sure to add features to the correct mask |
41 | */ | 47 | */ |
@@ -55,7 +61,7 @@ | |||
55 | #define DISABLED_MASK13 0 | 61 | #define DISABLED_MASK13 0 |
56 | #define DISABLED_MASK14 0 | 62 | #define DISABLED_MASK14 0 |
57 | #define DISABLED_MASK15 0 | 63 | #define DISABLED_MASK15 0 |
58 | #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE) | 64 | #define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE|DISABLE_LA57) |
59 | #define DISABLED_MASK17 0 | 65 | #define DISABLED_MASK17 0 |
60 | #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) | 66 | #define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) |
61 | 67 | ||
diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index fac9a5c0abe9..d91ba04dd007 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h | |||
@@ -53,6 +53,12 @@ | |||
53 | # define NEED_MOVBE 0 | 53 | # define NEED_MOVBE 0 |
54 | #endif | 54 | #endif |
55 | 55 | ||
56 | #ifdef CONFIG_X86_5LEVEL | ||
57 | # define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31)) | ||
58 | #else | ||
59 | # define NEED_LA57 0 | ||
60 | #endif | ||
61 | |||
56 | #ifdef CONFIG_X86_64 | 62 | #ifdef CONFIG_X86_64 |
57 | #ifdef CONFIG_PARAVIRT | 63 | #ifdef CONFIG_PARAVIRT |
58 | /* Paravirtualized systems may not have PSE or PGE available */ | 64 | /* Paravirtualized systems may not have PSE or PGE available */ |
@@ -98,7 +104,7 @@ | |||
98 | #define REQUIRED_MASK13 0 | 104 | #define REQUIRED_MASK13 0 |
99 | #define REQUIRED_MASK14 0 | 105 | #define REQUIRED_MASK14 0 |
100 | #define REQUIRED_MASK15 0 | 106 | #define REQUIRED_MASK15 0 |
101 | #define REQUIRED_MASK16 0 | 107 | #define REQUIRED_MASK16 (NEED_LA57) |
102 | #define REQUIRED_MASK17 0 | 108 | #define REQUIRED_MASK17 0 |
103 | #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) | 109 | #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 18) |
104 | 110 | ||
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index 739c0c594022..c2824d02ba37 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h | |||
@@ -9,6 +9,9 @@ | |||
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/ioctl.h> | 10 | #include <linux/ioctl.h> |
11 | 11 | ||
12 | #define KVM_PIO_PAGE_OFFSET 1 | ||
13 | #define KVM_COALESCED_MMIO_PAGE_OFFSET 2 | ||
14 | |||
12 | #define DE_VECTOR 0 | 15 | #define DE_VECTOR 0 |
13 | #define DB_VECTOR 1 | 16 | #define DB_VECTOR 1 |
14 | #define BP_VECTOR 3 | 17 | #define BP_VECTOR 3 |
diff --git a/tools/arch/x86/include/uapi/asm/vmx.h b/tools/arch/x86/include/uapi/asm/vmx.h index 14458658e988..690a2dcf4078 100644 --- a/tools/arch/x86/include/uapi/asm/vmx.h +++ b/tools/arch/x86/include/uapi/asm/vmx.h | |||
@@ -76,7 +76,11 @@ | |||
76 | #define EXIT_REASON_WBINVD 54 | 76 | #define EXIT_REASON_WBINVD 54 |
77 | #define EXIT_REASON_XSETBV 55 | 77 | #define EXIT_REASON_XSETBV 55 |
78 | #define EXIT_REASON_APIC_WRITE 56 | 78 | #define EXIT_REASON_APIC_WRITE 56 |
79 | #define EXIT_REASON_RDRAND 57 | ||
79 | #define EXIT_REASON_INVPCID 58 | 80 | #define EXIT_REASON_INVPCID 58 |
81 | #define EXIT_REASON_VMFUNC 59 | ||
82 | #define EXIT_REASON_ENCLS 60 | ||
83 | #define EXIT_REASON_RDSEED 61 | ||
80 | #define EXIT_REASON_PML_FULL 62 | 84 | #define EXIT_REASON_PML_FULL 62 |
81 | #define EXIT_REASON_XSAVES 63 | 85 | #define EXIT_REASON_XSAVES 63 |
82 | #define EXIT_REASON_XRSTORS 64 | 86 | #define EXIT_REASON_XRSTORS 64 |
@@ -90,6 +94,7 @@ | |||
90 | { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ | 94 | { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ |
91 | { EXIT_REASON_CPUID, "CPUID" }, \ | 95 | { EXIT_REASON_CPUID, "CPUID" }, \ |
92 | { EXIT_REASON_HLT, "HLT" }, \ | 96 | { EXIT_REASON_HLT, "HLT" }, \ |
97 | { EXIT_REASON_INVD, "INVD" }, \ | ||
93 | { EXIT_REASON_INVLPG, "INVLPG" }, \ | 98 | { EXIT_REASON_INVLPG, "INVLPG" }, \ |
94 | { EXIT_REASON_RDPMC, "RDPMC" }, \ | 99 | { EXIT_REASON_RDPMC, "RDPMC" }, \ |
95 | { EXIT_REASON_RDTSC, "RDTSC" }, \ | 100 | { EXIT_REASON_RDTSC, "RDTSC" }, \ |
@@ -108,6 +113,8 @@ | |||
108 | { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \ | 113 | { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \ |
109 | { EXIT_REASON_MSR_READ, "MSR_READ" }, \ | 114 | { EXIT_REASON_MSR_READ, "MSR_READ" }, \ |
110 | { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ | 115 | { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ |
116 | { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ | ||
117 | { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \ | ||
111 | { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ | 118 | { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ |
112 | { EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \ | 119 | { EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \ |
113 | { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ | 120 | { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ |
@@ -115,20 +122,24 @@ | |||
115 | { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ | 122 | { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ |
116 | { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \ | 123 | { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \ |
117 | { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ | 124 | { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ |
118 | { EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \ | 125 | { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ |
119 | { EXIT_REASON_LDTR_TR, "LDTR_TR" }, \ | 126 | { EXIT_REASON_GDTR_IDTR, "GDTR_IDTR" }, \ |
127 | { EXIT_REASON_LDTR_TR, "LDTR_TR" }, \ | ||
120 | { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ | 128 | { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ |
121 | { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ | 129 | { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ |
122 | { EXIT_REASON_INVEPT, "INVEPT" }, \ | 130 | { EXIT_REASON_INVEPT, "INVEPT" }, \ |
131 | { EXIT_REASON_RDTSCP, "RDTSCP" }, \ | ||
123 | { EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \ | 132 | { EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \ |
133 | { EXIT_REASON_INVVPID, "INVVPID" }, \ | ||
124 | { EXIT_REASON_WBINVD, "WBINVD" }, \ | 134 | { EXIT_REASON_WBINVD, "WBINVD" }, \ |
135 | { EXIT_REASON_XSETBV, "XSETBV" }, \ | ||
125 | { EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \ | 136 | { EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \ |
126 | { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ | 137 | { EXIT_REASON_RDRAND, "RDRAND" }, \ |
127 | { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ | ||
128 | { EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \ | ||
129 | { EXIT_REASON_INVD, "INVD" }, \ | ||
130 | { EXIT_REASON_INVVPID, "INVVPID" }, \ | ||
131 | { EXIT_REASON_INVPCID, "INVPCID" }, \ | 138 | { EXIT_REASON_INVPCID, "INVPCID" }, \ |
139 | { EXIT_REASON_VMFUNC, "VMFUNC" }, \ | ||
140 | { EXIT_REASON_ENCLS, "ENCLS" }, \ | ||
141 | { EXIT_REASON_RDSEED, "RDSEED" }, \ | ||
142 | { EXIT_REASON_PML_FULL, "PML_FULL" }, \ | ||
132 | { EXIT_REASON_XSAVES, "XSAVES" }, \ | 143 | { EXIT_REASON_XSAVES, "XSAVES" }, \ |
133 | { EXIT_REASON_XRSTORS, "XRSTORS" } | 144 | { EXIT_REASON_XRSTORS, "XRSTORS" } |
134 | 145 | ||
diff --git a/tools/include/uapi/linux/stat.h b/tools/include/uapi/linux/stat.h index d538897b8e08..17b10304c393 100644 --- a/tools/include/uapi/linux/stat.h +++ b/tools/include/uapi/linux/stat.h | |||
@@ -48,17 +48,13 @@ | |||
48 | * tv_sec holds the number of seconds before (negative) or after (positive) | 48 | * tv_sec holds the number of seconds before (negative) or after (positive) |
49 | * 00:00:00 1st January 1970 UTC. | 49 | * 00:00:00 1st January 1970 UTC. |
50 | * | 50 | * |
51 | * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is | 51 | * tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time. |
52 | * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time. | ||
53 | * | ||
54 | * Note that if both tv_sec and tv_nsec are non-zero, then the two values must | ||
55 | * either be both positive or both negative. | ||
56 | * | 52 | * |
57 | * __reserved is held in case we need a yet finer resolution. | 53 | * __reserved is held in case we need a yet finer resolution. |
58 | */ | 54 | */ |
59 | struct statx_timestamp { | 55 | struct statx_timestamp { |
60 | __s64 tv_sec; | 56 | __s64 tv_sec; |
61 | __s32 tv_nsec; | 57 | __u32 tv_nsec; |
62 | __s32 __reserved; | 58 | __s32 __reserved; |
63 | }; | 59 | }; |
64 | 60 | ||
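After this change tv_nsec is an unsigned 0..999,999,999 offset applied after tv_sec, which matches struct timespec semantics even for times before the epoch. A minimal userspace sketch (the struct is redeclared locally as a stand-in for the uapi header, not included from it):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Local stand-in for struct statx_timestamp after this change. */
struct statx_timestamp_stub {
	int64_t  tv_sec;
	uint32_t tv_nsec;
	int32_t  reserved;
};

int main(void)
{
	/* 0.25s before the epoch: tv_sec = -1 plus a positive 750,000,000ns
	 * offset, exactly as a struct timespec would express it. */
	struct statx_timestamp_stub ts = { .tv_sec = -1, .tv_nsec = 750000000 };
	struct timespec t = { .tv_sec = ts.tv_sec, .tv_nsec = ts.tv_nsec };

	printf("tv_sec=%lld tv_nsec=%ld\n", (long long)t.tv_sec, t.tv_nsec);
	return 0;
}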
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index cb0eda3925e6..3517e204a2b3 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt | |||
@@ -311,6 +311,10 @@ include::itrace.txt[] | |||
311 | Set the maximum number of program blocks to print with brstackasm for | 311 | Set the maximum number of program blocks to print with brstackasm for |
312 | each sample. | 312 | each sample. |
313 | 313 | ||
314 | --inline:: | ||
315 | If a callgraph address belongs to an inlined function, the inline stack | ||
316 | will be printed. Each entry has function name and file/line. | ||
317 | |||
314 | SEE ALSO | 318 | SEE ALSO |
315 | -------- | 319 | -------- |
316 | linkperf:perf-record[1], linkperf:perf-script-perl[1], | 320 | linkperf:perf-record[1], linkperf:perf-script-perl[1], |
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index d05aec491cff..4761b0d7fcb5 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -2494,6 +2494,8 @@ int cmd_script(int argc, const char **argv) | |||
2494 | "Enable kernel symbol demangling"), | 2494 | "Enable kernel symbol demangling"), |
2495 | OPT_STRING(0, "time", &script.time_str, "str", | 2495 | OPT_STRING(0, "time", &script.time_str, "str", |
2496 | "Time span of interest (start,stop)"), | 2496 | "Time span of interest (start,stop)"), |
2497 | OPT_BOOLEAN(0, "inline", &symbol_conf.inline_name, | ||
2498 | "Show inline function"), | ||
2497 | OPT_END() | 2499 | OPT_END() |
2498 | }; | 2500 | }; |
2499 | const char * const script_subcommands[] = { "record", "report", NULL }; | 2501 | const char * const script_subcommands[] = { "record", "report", NULL }; |
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index 59addd52d9cd..ddb2c6fbdf91 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c | |||
@@ -210,6 +210,8 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b, | |||
210 | return 0; | 210 | return 0; |
211 | 211 | ||
212 | ret = b->callchain->max_depth - a->callchain->max_depth; | 212 | ret = b->callchain->max_depth - a->callchain->max_depth; |
213 | if (callchain_param.order == ORDER_CALLER) | ||
214 | ret = -ret; | ||
213 | } | 215 | } |
214 | return ret; | 216 | return ret; |
215 | } | 217 | } |
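A toy version of the comparator tweak above, with plain ints standing in for the hist entries' callchain max_depth (a sketch, not the perf code): the result is negated for caller-ordered callchains so the accumulated sort stays consistent with the displayed chain direction.

#include <stdio.h>

enum order { ORDER_CALLEE, ORDER_CALLER };

/* Stand-in for the depth comparison in __hpp__sort_acc(). */
static int cmp_depth(int depth_a, int depth_b, enum order o)
{
	int ret = depth_b - depth_a;

	if (o == ORDER_CALLER)
		ret = -ret;	/* flip direction for caller order */
	return ret;
}

int main(void)
{
	printf("callee: %d\n", cmp_depth(3, 5, ORDER_CALLEE)); /*  2 */
	printf("caller: %d\n", cmp_depth(3, 5, ORDER_CALLER)); /* -2 */
	return 0;
}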
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 81fc29ac798f..b4204b43ed58 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
@@ -621,14 +621,19 @@ enum match_result { | |||
621 | static enum match_result match_chain_srcline(struct callchain_cursor_node *node, | 621 | static enum match_result match_chain_srcline(struct callchain_cursor_node *node, |
622 | struct callchain_list *cnode) | 622 | struct callchain_list *cnode) |
623 | { | 623 | { |
624 | char *left = get_srcline(cnode->ms.map->dso, | 624 | char *left = NULL; |
625 | char *right = NULL; | ||
626 | enum match_result ret = MATCH_EQ; | ||
627 | int cmp; | ||
628 | |||
629 | if (cnode->ms.map) | ||
630 | left = get_srcline(cnode->ms.map->dso, | ||
625 | map__rip_2objdump(cnode->ms.map, cnode->ip), | 631 | map__rip_2objdump(cnode->ms.map, cnode->ip), |
626 | cnode->ms.sym, true, false); | 632 | cnode->ms.sym, true, false); |
627 | char *right = get_srcline(node->map->dso, | 633 | if (node->map) |
634 | right = get_srcline(node->map->dso, | ||
628 | map__rip_2objdump(node->map, node->ip), | 635 | map__rip_2objdump(node->map, node->ip), |
629 | node->sym, true, false); | 636 | node->sym, true, false); |
630 | enum match_result ret = MATCH_EQ; | ||
631 | int cmp; | ||
632 | 637 | ||
633 | if (left && right) | 638 | if (left && right) |
634 | cmp = strcmp(left, right); | 639 | cmp = strcmp(left, right); |
diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c index e415aee6a245..583f3a602506 100644 --- a/tools/perf/util/evsel_fprintf.c +++ b/tools/perf/util/evsel_fprintf.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include "map.h" | 7 | #include "map.h" |
8 | #include "strlist.h" | 8 | #include "strlist.h" |
9 | #include "symbol.h" | 9 | #include "symbol.h" |
10 | #include "srcline.h" | ||
10 | 11 | ||
11 | static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) | 12 | static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...) |
12 | { | 13 | { |
@@ -168,6 +169,38 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment, | |||
168 | if (!print_oneline) | 169 | if (!print_oneline) |
169 | printed += fprintf(fp, "\n"); | 170 | printed += fprintf(fp, "\n"); |
170 | 171 | ||
172 | if (symbol_conf.inline_name && node->map) { | ||
173 | struct inline_node *inode; | ||
174 | |||
175 | addr = map__rip_2objdump(node->map, node->ip), | ||
176 | inode = dso__parse_addr_inlines(node->map->dso, addr); | ||
177 | |||
178 | if (inode) { | ||
179 | struct inline_list *ilist; | ||
180 | |||
181 | list_for_each_entry(ilist, &inode->val, list) { | ||
182 | if (print_arrow) | ||
183 | printed += fprintf(fp, " <-"); | ||
184 | |||
185 | /* IP is same, just skip it */ | ||
186 | if (print_ip) | ||
187 | printed += fprintf(fp, "%c%16s", | ||
188 | s, ""); | ||
189 | if (print_sym) | ||
190 | printed += fprintf(fp, " %s", | ||
191 | ilist->funcname); | ||
192 | if (print_srcline) | ||
193 | printed += fprintf(fp, "\n %s:%d", | ||
194 | ilist->filename, | ||
195 | ilist->line_nr); | ||
196 | if (!print_oneline) | ||
197 | printed += fprintf(fp, "\n"); | ||
198 | } | ||
199 | |||
200 | inline_node__delete(inode); | ||
201 | } | ||
202 | } | ||
203 | |||
171 | if (symbol_conf.bt_stop_list && | 204 | if (symbol_conf.bt_stop_list && |
172 | node->sym && | 205 | node->sym && |
173 | strlist__has_entry(symbol_conf.bt_stop_list, | 206 | strlist__has_entry(symbol_conf.bt_stop_list, |
diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c index df051a52393c..ebc88a74e67b 100644 --- a/tools/perf/util/srcline.c +++ b/tools/perf/util/srcline.c | |||
@@ -56,7 +56,10 @@ static int inline_list__append(char *filename, char *funcname, int line_nr, | |||
56 | } | 56 | } |
57 | } | 57 | } |
58 | 58 | ||
59 | list_add_tail(&ilist->list, &node->val); | 59 | if (callchain_param.order == ORDER_CALLEE) |
60 | list_add_tail(&ilist->list, &node->val); | ||
61 | else | ||
62 | list_add(&ilist->list, &node->val); | ||
60 | 63 | ||
61 | return 0; | 64 | return 0; |
62 | } | 65 | } |
@@ -200,12 +203,14 @@ static void addr2line_cleanup(struct a2l_data *a2l) | |||
200 | 203 | ||
201 | #define MAX_INLINE_NEST 1024 | 204 | #define MAX_INLINE_NEST 1024 |
202 | 205 | ||
203 | static void inline_list__reverse(struct inline_node *node) | 206 | static int inline_list__append_dso_a2l(struct dso *dso, |
207 | struct inline_node *node) | ||
204 | { | 208 | { |
205 | struct inline_list *ilist, *n; | 209 | struct a2l_data *a2l = dso->a2l; |
210 | char *funcname = a2l->funcname ? strdup(a2l->funcname) : NULL; | ||
211 | char *filename = a2l->filename ? strdup(a2l->filename) : NULL; | ||
206 | 212 | ||
207 | list_for_each_entry_safe_reverse(ilist, n, &node->val, list) | 213 | return inline_list__append(filename, funcname, a2l->line, node, dso); |
208 | list_move_tail(&ilist->list, &node->val); | ||
209 | } | 214 | } |
210 | 215 | ||
211 | static int addr2line(const char *dso_name, u64 addr, | 216 | static int addr2line(const char *dso_name, u64 addr, |
@@ -230,36 +235,36 @@ static int addr2line(const char *dso_name, u64 addr, | |||
230 | 235 | ||
231 | bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l); | 236 | bfd_map_over_sections(a2l->abfd, find_address_in_section, a2l); |
232 | 237 | ||
233 | if (a2l->found && unwind_inlines) { | 238 | if (!a2l->found) |
239 | return 0; | ||
240 | |||
241 | if (unwind_inlines) { | ||
234 | int cnt = 0; | 242 | int cnt = 0; |
235 | 243 | ||
244 | if (node && inline_list__append_dso_a2l(dso, node)) | ||
245 | return 0; | ||
246 | |||
236 | while (bfd_find_inliner_info(a2l->abfd, &a2l->filename, | 247 | while (bfd_find_inliner_info(a2l->abfd, &a2l->filename, |
237 | &a2l->funcname, &a2l->line) && | 248 | &a2l->funcname, &a2l->line) && |
238 | cnt++ < MAX_INLINE_NEST) { | 249 | cnt++ < MAX_INLINE_NEST) { |
239 | 250 | ||
240 | if (node != NULL) { | 251 | if (node != NULL) { |
241 | if (inline_list__append(strdup(a2l->filename), | 252 | if (inline_list__append_dso_a2l(dso, node)) |
242 | strdup(a2l->funcname), | ||
243 | a2l->line, node, | ||
244 | dso) != 0) | ||
245 | return 0; | 253 | return 0; |
254 | // found at least one inline frame | ||
255 | ret = 1; | ||
246 | } | 256 | } |
247 | } | 257 | } |
258 | } | ||
248 | 259 | ||
249 | if ((node != NULL) && | 260 | if (file) { |
250 | (callchain_param.order != ORDER_CALLEE)) { | 261 | *file = a2l->filename ? strdup(a2l->filename) : NULL; |
251 | inline_list__reverse(node); | 262 | ret = *file ? 1 : 0; |
252 | } | ||
253 | } | 263 | } |
254 | 264 | ||
255 | if (a2l->found && a2l->filename) { | 265 | if (line) |
256 | *file = strdup(a2l->filename); | ||
257 | *line = a2l->line; | 266 | *line = a2l->line; |
258 | 267 | ||
259 | if (*file) | ||
260 | ret = 1; | ||
261 | } | ||
262 | |||
263 | return ret; | 268 | return ret; |
264 | } | 269 | } |
265 | 270 | ||
@@ -278,8 +283,6 @@ void dso__free_a2l(struct dso *dso) | |||
278 | static struct inline_node *addr2inlines(const char *dso_name, u64 addr, | 283 | static struct inline_node *addr2inlines(const char *dso_name, u64 addr, |
279 | struct dso *dso) | 284 | struct dso *dso) |
280 | { | 285 | { |
281 | char *file = NULL; | ||
282 | unsigned int line = 0; | ||
283 | struct inline_node *node; | 286 | struct inline_node *node; |
284 | 287 | ||
285 | node = zalloc(sizeof(*node)); | 288 | node = zalloc(sizeof(*node)); |
@@ -291,7 +294,7 @@ static struct inline_node *addr2inlines(const char *dso_name, u64 addr, | |||
291 | INIT_LIST_HEAD(&node->val); | 294 | INIT_LIST_HEAD(&node->val); |
292 | node->addr = addr; | 295 | node->addr = addr; |
293 | 296 | ||
294 | if (!addr2line(dso_name, addr, &file, &line, dso, TRUE, node)) | 297 | if (!addr2line(dso_name, addr, NULL, NULL, dso, TRUE, node)) |
295 | goto out_free_inline_node; | 298 | goto out_free_inline_node; |
296 | 299 | ||
297 | if (list_empty(&node->val)) | 300 | if (list_empty(&node->val)) |
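The inline_list__append() change above stops building the inline chain callee-first and reversing it afterwards; each parsed entry now lands at the tail for callee order and at the head for caller order. A self-contained sketch of that insertion policy with a throwaway singly linked list (names are stand-ins, and list_add()/list_add_tail() are replaced by plain pointer juggling; allocations are not freed in this demo):

#include <stdio.h>
#include <stdlib.h>

enum order { ORDER_CALLEE, ORDER_CALLER };

struct node { const char *func; struct node *next; };

/* Append at the tail for callee order, insert at the head for caller order. */
static struct node *add(struct node *head, const char *func, enum order o)
{
	struct node *n = calloc(1, sizeof(*n));

	n->func = func;
	if (o == ORDER_CALLER) {	/* head insertion reverses parse order */
		n->next = head;
		return n;
	}
	if (!head)			/* first tail append */
		return n;
	for (struct node *t = head; ; t = t->next) {
		if (!t->next) {
			t->next = n;
			break;
		}
	}
	return head;
}

int main(void)
{
	/* addr2line reports the innermost inlined frame first */
	const char *frames[] = { "inlined_leaf", "inlined_mid", "outer" };
	struct node *callee = NULL, *caller = NULL;

	for (int i = 0; i < 3; i++) {
		callee = add(callee, frames[i], ORDER_CALLEE);
		caller = add(caller, frames[i], ORDER_CALLER);
	}
	printf("callee order:");
	for (struct node *n = callee; n; n = n->next)
		printf(" %s", n->func);
	printf("\ncaller order:");
	for (struct node *n = caller; n; n = n->next)
		printf(" %s", n->func);
	printf("\n");
	return 0;
}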
diff --git a/tools/perf/util/unwind-libdw.c b/tools/perf/util/unwind-libdw.c index f90e11a555b2..943a06291587 100644 --- a/tools/perf/util/unwind-libdw.c +++ b/tools/perf/util/unwind-libdw.c | |||
@@ -168,12 +168,16 @@ frame_callback(Dwfl_Frame *state, void *arg) | |||
168 | { | 168 | { |
169 | struct unwind_info *ui = arg; | 169 | struct unwind_info *ui = arg; |
170 | Dwarf_Addr pc; | 170 | Dwarf_Addr pc; |
171 | bool isactivation; | ||
171 | 172 | ||
172 | if (!dwfl_frame_pc(state, &pc, NULL)) { | 173 | if (!dwfl_frame_pc(state, &pc, &isactivation)) { |
173 | pr_err("%s", dwfl_errmsg(-1)); | 174 | pr_err("%s", dwfl_errmsg(-1)); |
174 | return DWARF_CB_ABORT; | 175 | return DWARF_CB_ABORT; |
175 | } | 176 | } |
176 | 177 | ||
178 | if (!isactivation) | ||
179 | --pc; | ||
180 | |||
177 | return entry(pc, ui) || !(--ui->max_stack) ? | 181 | return entry(pc, ui) || !(--ui->max_stack) ? |
178 | DWARF_CB_ABORT : DWARF_CB_OK; | 182 | DWARF_CB_ABORT : DWARF_CB_OK; |
179 | } | 183 | } |
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c index f8455bed6e65..672c2ada9357 100644 --- a/tools/perf/util/unwind-libunwind-local.c +++ b/tools/perf/util/unwind-libunwind-local.c | |||
@@ -692,6 +692,17 @@ static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, | |||
692 | 692 | ||
693 | while (!ret && (unw_step(&c) > 0) && i < max_stack) { | 693 | while (!ret && (unw_step(&c) > 0) && i < max_stack) { |
694 | unw_get_reg(&c, UNW_REG_IP, &ips[i]); | 694 | unw_get_reg(&c, UNW_REG_IP, &ips[i]); |
695 | |||
696 | /* | ||

697 | * Decrement the IP for any non-activation frames. | ||
698 | * This is required to properly find the srcline | ||
699 | * for caller frames. | ||
700 | * See also the documentation for dwfl_frame_pc(), | ||
701 | * which this code tries to replicate. | ||
702 | */ | ||
703 | if (unw_is_signal_frame(&c) <= 0) | ||
704 | --ips[i]; | ||
705 | |||
695 | ++i; | 706 | ++i; |
696 | } | 707 | } |
697 | 708 | ||
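Both unwinder backends now apply the same correction the comment above describes: a return address points just past the call instruction, so it is stepped back by one before the source-line lookup, except for activation frames (e.g. signal frames) whose saved PC already points at the interrupted instruction. A freestanding sketch with a fake address-to-line table standing in for addr2line/get_srcline():

#include <stdbool.h>
#include <stdio.h>

/* Fake lookup table: instructions at 0x100..0x103 belong to line 10 (the
 * call), everything from 0x104 on belongs to line 11 (the next statement). */
static int addr_to_line_stub(unsigned long addr)
{
	return addr < 0x104 ? 10 : 11;
}

/* Step the IP back for non-activation frames before resolving the line. */
static int frame_srcline(unsigned long ip, bool is_activation)
{
	if (!is_activation)
		--ip;
	return addr_to_line_stub(ip);
}

int main(void)
{
	printf("caller frame at 0x104 -> line %d\n", frame_srcline(0x104, false)); /* 10 */
	printf("signal frame at 0x104 -> line %d\n", frame_srcline(0x104, true));  /* 11 */
	return 0;
}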
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc new file mode 100644 index 000000000000..f4d1ff785d67 --- /dev/null +++ b/tools/testing/selftests/ftrace/test.d/kprobe/multiple_kprobes.tc | |||
@@ -0,0 +1,21 @@ | |||
1 | #!/bin/sh | ||
2 | # description: Register/unregister many kprobe events | ||
3 | |||
4 | # ftrace fentry skip size depends on the machine architecture. | ||
5 | # Currently HAVE_KPROBES_ON_FTRACE is defined on x86 and powerpc | ||
6 | case `uname -m` in | ||
7 | x86_64|i[3456]86) OFFS=5;; | ||
8 | ppc*) OFFS=4;; | ||
9 | *) OFFS=0;; | ||
10 | esac | ||
11 | |||
12 | echo "Setup up to 256 kprobes" | ||
13 | grep t /proc/kallsyms | cut -f3 -d" " | grep -v .*\\..* | \ | ||
14 | head -n 256 | while read i; do echo p ${i}+${OFFS} ; done > kprobe_events ||: | ||
15 | |||
16 | echo 1 > events/kprobes/enable | ||
17 | echo 0 > events/kprobes/enable | ||
18 | echo > kprobe_events | ||
19 | echo "Waiting for unoptimizing & freeing" | ||
20 | sleep 5 | ||
21 | echo "Done" | ||
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c index d9c49f41515e..e79ccd6aada1 100644 --- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c +++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c | |||
@@ -42,12 +42,12 @@ int test_body(void) | |||
42 | printf("Check DSCR TM context switch: "); | 42 | printf("Check DSCR TM context switch: "); |
43 | fflush(stdout); | 43 | fflush(stdout); |
44 | for (;;) { | 44 | for (;;) { |
45 | rv = 1; | ||
46 | asm __volatile__ ( | 45 | asm __volatile__ ( |
47 | /* set a known value into the DSCR */ | 46 | /* set a known value into the DSCR */ |
48 | "ld 3, %[dscr1];" | 47 | "ld 3, %[dscr1];" |
49 | "mtspr %[sprn_dscr], 3;" | 48 | "mtspr %[sprn_dscr], 3;" |
50 | 49 | ||
50 | "li %[rv], 1;" | ||
51 | /* start and suspend a transaction */ | 51 | /* start and suspend a transaction */ |
52 | "tbegin.;" | 52 | "tbegin.;" |
53 | "beq 1f;" | 53 | "beq 1f;" |
diff --git a/usr/Kconfig b/usr/Kconfig index c0c48507e44e..ad0543e21760 100644 --- a/usr/Kconfig +++ b/usr/Kconfig | |||
@@ -220,6 +220,7 @@ config INITRAMFS_COMPRESSION_LZ4 | |||
220 | endchoice | 220 | endchoice |
221 | 221 | ||
222 | config INITRAMFS_COMPRESSION | 222 | config INITRAMFS_COMPRESSION |
223 | depends on INITRAMFS_SOURCE!="" | ||
223 | string | 224 | string |
224 | default "" if INITRAMFS_COMPRESSION_NONE | 225 | default "" if INITRAMFS_COMPRESSION_NONE |
225 | default ".gz" if INITRAMFS_COMPRESSION_GZIP | 226 | default ".gz" if INITRAMFS_COMPRESSION_GZIP |