diff options
768 files changed, 7450 insertions, 3939 deletions
@@ -842,10 +842,9 @@ D: ax25-utils maintainer. | |||
842 | 842 | ||
843 | N: Helge Deller | 843 | N: Helge Deller |
844 | E: deller@gmx.de | 844 | E: deller@gmx.de |
845 | E: hdeller@redhat.de | 845 | W: http://www.parisc-linux.org/ |
846 | D: PA-RISC Linux hacker, LASI-, ASP-, WAX-, LCD/LED-driver | 846 | D: PA-RISC Linux architecture maintainer |
847 | S: Schimmelsrain 1 | 847 | D: LASI-, ASP-, WAX-, LCD/LED-driver |
848 | S: D-69231 Rauenberg | ||
849 | S: Germany | 848 | S: Germany |
850 | 849 | ||
851 | N: Jean Delvare | 850 | N: Jean Delvare |
@@ -1361,7 +1360,7 @@ S: Stellenbosch, Western Cape | |||
1361 | S: South Africa | 1360 | S: South Africa |
1362 | 1361 | ||
1363 | N: Grant Grundler | 1362 | N: Grant Grundler |
1364 | E: grundler@parisc-linux.org | 1363 | E: grantgrundler@gmail.com |
1365 | W: http://obmouse.sourceforge.net/ | 1364 | W: http://obmouse.sourceforge.net/ |
1366 | W: http://www.parisc-linux.org/ | 1365 | W: http://www.parisc-linux.org/ |
1367 | D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver | 1366 | D: obmouse - rewrote Olivier Florent's Omnibook 600 "pop-up" mouse driver |
@@ -2492,7 +2491,7 @@ S: Syracuse, New York 13206 | |||
2492 | S: USA | 2491 | S: USA |
2493 | 2492 | ||
2494 | N: Kyle McMartin | 2493 | N: Kyle McMartin |
2495 | E: kyle@parisc-linux.org | 2494 | E: kyle@mcmartin.ca |
2496 | D: Linux/PARISC hacker | 2495 | D: Linux/PARISC hacker |
2497 | D: AD1889 sound driver | 2496 | D: AD1889 sound driver |
2498 | S: Ottawa, Canada | 2497 | S: Ottawa, Canada |
@@ -3780,14 +3779,13 @@ S: 21513 Conradia Ct | |||
3780 | S: Cupertino, CA 95014 | 3779 | S: Cupertino, CA 95014 |
3781 | S: USA | 3780 | S: USA |
3782 | 3781 | ||
3783 | N: Thibaut Varene | 3782 | N: Thibaut Varène |
3784 | E: T-Bone@parisc-linux.org | 3783 | E: hacks+kernel@slashdirt.org |
3785 | W: http://www.parisc-linux.org/~varenet/ | 3784 | W: http://hacks.slashdirt.org/ |
3786 | P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063 | ||
3787 | D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits | 3785 | D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits |
3788 | D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there | 3786 | D: Some ARM at91rm9200 bits, S1D13XXX FB driver, random patches here and there |
3789 | D: AD1889 sound driver | 3787 | D: AD1889 sound driver |
3790 | S: Paris, France | 3788 | S: France |
3791 | 3789 | ||
3792 | N: Heikki Vatiainen | 3790 | N: Heikki Vatiainen |
3793 | E: hessu@cs.tut.fi | 3791 | E: hessu@cs.tut.fi |
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io index 9b642669cb16..169fe08a649b 100644 --- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io +++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io | |||
@@ -24,7 +24,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ | |||
24 | cpld3_version | 24 | cpld3_version |
25 | 25 | ||
26 | Date: November 2018 | 26 | Date: November 2018 |
27 | KernelVersion: 4.21 | 27 | KernelVersion: 5.0 |
28 | Contact: Vadim Pasternak <vadimp@mellanox.com> | 28 | Contact: Vadim Pasternak <vadimp@mellanox.com> |
29 | Description: These files show with which CPLD versions have been burned | 29 | Description: These files show with which CPLD versions have been burned |
30 | on LED board. | 30 | on LED board. |
@@ -35,7 +35,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ | |||
35 | jtag_enable | 35 | jtag_enable |
36 | 36 | ||
37 | Date: November 2018 | 37 | Date: November 2018 |
38 | KernelVersion: 4.21 | 38 | KernelVersion: 5.0 |
39 | Contact: Vadim Pasternak <vadimp@mellanox.com> | 39 | Contact: Vadim Pasternak <vadimp@mellanox.com> |
40 | Description: These files enable and disable the access to the JTAG domain. | 40 | Description: These files enable and disable the access to the JTAG domain. |
41 | By default access to the JTAG domain is disabled. | 41 | By default access to the JTAG domain is disabled. |
@@ -105,7 +105,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ | |||
105 | reset_voltmon_upgrade_fail | 105 | reset_voltmon_upgrade_fail |
106 | 106 | ||
107 | Date: November 2018 | 107 | Date: November 2018 |
108 | KernelVersion: 4.21 | 108 | KernelVersion: 5.0 |
109 | Contact: Vadim Pasternak <vadimp@mellanox.com> | 109 | Contact: Vadim Pasternak <vadimp@mellanox.com> |
110 | Description: These files show the system reset cause, as following: ComEx | 110 | Description: These files show the system reset cause, as following: ComEx |
111 | power fail, reset from ComEx, system platform reset, reset | 111 | power fail, reset from ComEx, system platform reset, reset |
diff --git a/Documentation/admin-guide/README.rst b/Documentation/admin-guide/README.rst index 0797eec76be1..47e577264198 100644 --- a/Documentation/admin-guide/README.rst +++ b/Documentation/admin-guide/README.rst | |||
@@ -1,9 +1,9 @@ | |||
1 | .. _readme: | 1 | .. _readme: |
2 | 2 | ||
3 | Linux kernel release 4.x <http://kernel.org/> | 3 | Linux kernel release 5.x <http://kernel.org/> |
4 | ============================================= | 4 | ============================================= |
5 | 5 | ||
6 | These are the release notes for Linux version 4. Read them carefully, | 6 | These are the release notes for Linux version 5. Read them carefully, |
7 | as they tell you what this is all about, explain how to install the | 7 | as they tell you what this is all about, explain how to install the |
8 | kernel, and what to do if something goes wrong. | 8 | kernel, and what to do if something goes wrong. |
9 | 9 | ||
@@ -63,7 +63,7 @@ Installing the kernel source | |||
63 | directory where you have permissions (e.g. your home directory) and | 63 | directory where you have permissions (e.g. your home directory) and |
64 | unpack it:: | 64 | unpack it:: |
65 | 65 | ||
66 | xz -cd linux-4.X.tar.xz | tar xvf - | 66 | xz -cd linux-5.x.tar.xz | tar xvf - |
67 | 67 | ||
68 | Replace "X" with the version number of the latest kernel. | 68 | Replace "X" with the version number of the latest kernel. |
69 | 69 | ||
@@ -72,26 +72,26 @@ Installing the kernel source | |||
72 | files. They should match the library, and not get messed up by | 72 | files. They should match the library, and not get messed up by |
73 | whatever the kernel-du-jour happens to be. | 73 | whatever the kernel-du-jour happens to be. |
74 | 74 | ||
75 | - You can also upgrade between 4.x releases by patching. Patches are | 75 | - You can also upgrade between 5.x releases by patching. Patches are |
76 | distributed in the xz format. To install by patching, get all the | 76 | distributed in the xz format. To install by patching, get all the |
77 | newer patch files, enter the top level directory of the kernel source | 77 | newer patch files, enter the top level directory of the kernel source |
78 | (linux-4.X) and execute:: | 78 | (linux-5.x) and execute:: |
79 | 79 | ||
80 | xz -cd ../patch-4.x.xz | patch -p1 | 80 | xz -cd ../patch-5.x.xz | patch -p1 |
81 | 81 | ||
82 | Replace "x" for all versions bigger than the version "X" of your current | 82 | Replace "x" for all versions bigger than the version "x" of your current |
83 | source tree, **in_order**, and you should be ok. You may want to remove | 83 | source tree, **in_order**, and you should be ok. You may want to remove |
84 | the backup files (some-file-name~ or some-file-name.orig), and make sure | 84 | the backup files (some-file-name~ or some-file-name.orig), and make sure |
85 | that there are no failed patches (some-file-name# or some-file-name.rej). | 85 | that there are no failed patches (some-file-name# or some-file-name.rej). |
86 | If there are, either you or I have made a mistake. | 86 | If there are, either you or I have made a mistake. |
87 | 87 | ||
88 | Unlike patches for the 4.x kernels, patches for the 4.x.y kernels | 88 | Unlike patches for the 5.x kernels, patches for the 5.x.y kernels |
89 | (also known as the -stable kernels) are not incremental but instead apply | 89 | (also known as the -stable kernels) are not incremental but instead apply |
90 | directly to the base 4.x kernel. For example, if your base kernel is 4.0 | 90 | directly to the base 5.x kernel. For example, if your base kernel is 5.0 |
91 | and you want to apply the 4.0.3 patch, you must not first apply the 4.0.1 | 91 | and you want to apply the 5.0.3 patch, you must not first apply the 5.0.1 |
92 | and 4.0.2 patches. Similarly, if you are running kernel version 4.0.2 and | 92 | and 5.0.2 patches. Similarly, if you are running kernel version 5.0.2 and |
93 | want to jump to 4.0.3, you must first reverse the 4.0.2 patch (that is, | 93 | want to jump to 5.0.3, you must first reverse the 5.0.2 patch (that is, |
94 | patch -R) **before** applying the 4.0.3 patch. You can read more on this in | 94 | patch -R) **before** applying the 5.0.3 patch. You can read more on this in |
95 | :ref:`Documentation/process/applying-patches.rst <applying_patches>`. | 95 | :ref:`Documentation/process/applying-patches.rst <applying_patches>`. |
96 | 96 | ||
97 | Alternatively, the script patch-kernel can be used to automate this | 97 | Alternatively, the script patch-kernel can be used to automate this |
@@ -114,7 +114,7 @@ Installing the kernel source | |||
114 | Software requirements | 114 | Software requirements |
115 | --------------------- | 115 | --------------------- |
116 | 116 | ||
117 | Compiling and running the 4.x kernels requires up-to-date | 117 | Compiling and running the 5.x kernels requires up-to-date |
118 | versions of various software packages. Consult | 118 | versions of various software packages. Consult |
119 | :ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers | 119 | :ref:`Documentation/process/changes.rst <changes>` for the minimum version numbers |
120 | required and how to get updates for these packages. Beware that using | 120 | required and how to get updates for these packages. Beware that using |
@@ -132,12 +132,12 @@ Build directory for the kernel | |||
132 | place for the output files (including .config). | 132 | place for the output files (including .config). |
133 | Example:: | 133 | Example:: |
134 | 134 | ||
135 | kernel source code: /usr/src/linux-4.X | 135 | kernel source code: /usr/src/linux-5.x |
136 | build directory: /home/name/build/kernel | 136 | build directory: /home/name/build/kernel |
137 | 137 | ||
138 | To configure and build the kernel, use:: | 138 | To configure and build the kernel, use:: |
139 | 139 | ||
140 | cd /usr/src/linux-4.X | 140 | cd /usr/src/linux-5.x |
141 | make O=/home/name/build/kernel menuconfig | 141 | make O=/home/name/build/kernel menuconfig |
142 | make O=/home/name/build/kernel | 142 | make O=/home/name/build/kernel |
143 | sudo make O=/home/name/build/kernel modules_install install | 143 | sudo make O=/home/name/build/kernel modules_install install |
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b799bcf67d7b..858b6c0b9a15 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
@@ -1696,12 +1696,11 @@ | |||
1696 | By default, super page will be supported if Intel IOMMU | 1696 | By default, super page will be supported if Intel IOMMU |
1697 | has the capability. With this option, super page will | 1697 | has the capability. With this option, super page will |
1698 | not be supported. | 1698 | not be supported. |
1699 | sm_off [Default Off] | 1699 | sm_on [Default Off] |
1700 | By default, scalable mode will be supported if the | 1700 | By default, scalable mode will be disabled even if the |
1701 | hardware advertises that it has support for the scalable | 1701 | hardware advertises that it has support for the scalable |
1702 | mode translation. With this option set, scalable mode | 1702 | mode translation. With this option set, scalable mode |
1703 | will not be used even on hardware which claims to support | 1703 | will be used on hardware which claims to support it. |
1704 | it. | ||
1705 | tboot_noforce [Default Off] | 1704 | tboot_noforce [Default Off] |
1706 | Do not force the Intel IOMMU enabled under tboot. | 1705 | Do not force the Intel IOMMU enabled under tboot. |
1707 | By default, tboot will force Intel IOMMU on, which | 1706 | By default, tboot will force Intel IOMMU on, which |
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 25170ad7d25b..101f2b2c69ad 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt | |||
@@ -533,16 +533,12 @@ Bridge VLAN filtering | |||
533 | function that the driver has to call for each VLAN the given port is a member | 533 | function that the driver has to call for each VLAN the given port is a member |
534 | of. A switchdev object is used to carry the VID and bridge flags. | 534 | of. A switchdev object is used to carry the VID and bridge flags. |
535 | 535 | ||
536 | - port_fdb_prepare: bridge layer function invoked when the bridge prepares the | ||
537 | installation of a Forwarding Database entry. If the operation is not | ||
538 | supported, this function should return -EOPNOTSUPP to inform the bridge code | ||
539 | to fallback to a software implementation. No hardware setup must be done in | ||
540 | this function. See port_fdb_add for this and details. | ||
541 | |||
542 | - port_fdb_add: bridge layer function invoked when the bridge wants to install a | 536 | - port_fdb_add: bridge layer function invoked when the bridge wants to install a |
543 | Forwarding Database entry, the switch hardware should be programmed with the | 537 | Forwarding Database entry, the switch hardware should be programmed with the |
544 | specified address in the specified VLAN Id in the forwarding database | 538 | specified address in the specified VLAN Id in the forwarding database |
545 | associated with this VLAN ID | 539 | associated with this VLAN ID. If the operation is not supported, this |
540 | function should return -EOPNOTSUPP to inform the bridge code to fallback to | ||
541 | a software implementation. | ||
546 | 542 | ||
547 | Note: VLAN ID 0 corresponds to the port private database, which, in the context | 543 | Note: VLAN ID 0 corresponds to the port private database, which, in the context |
548 | of DSA, would be the its port-based VLAN, used by the associated bridge device. | 544 | of DSA, would be the its port-based VLAN, used by the associated bridge device. |
diff --git a/Documentation/networking/msg_zerocopy.rst b/Documentation/networking/msg_zerocopy.rst index fe46d4867e2d..18c1415e7bfa 100644 --- a/Documentation/networking/msg_zerocopy.rst +++ b/Documentation/networking/msg_zerocopy.rst | |||
@@ -7,7 +7,7 @@ Intro | |||
7 | ===== | 7 | ===== |
8 | 8 | ||
9 | The MSG_ZEROCOPY flag enables copy avoidance for socket send calls. | 9 | The MSG_ZEROCOPY flag enables copy avoidance for socket send calls. |
10 | The feature is currently implemented for TCP sockets. | 10 | The feature is currently implemented for TCP and UDP sockets. |
11 | 11 | ||
12 | 12 | ||
13 | Opportunity and Caveats | 13 | Opportunity and Caveats |
diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt index 355c6d8ef8ad..b203d1334822 100644 --- a/Documentation/networking/operstates.txt +++ b/Documentation/networking/operstates.txt | |||
@@ -22,8 +22,9 @@ and changeable from userspace under certain rules. | |||
22 | 2. Querying from userspace | 22 | 2. Querying from userspace |
23 | 23 | ||
24 | Both admin and operational state can be queried via the netlink | 24 | Both admin and operational state can be queried via the netlink |
25 | operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK | 25 | operation RTM_GETLINK. It is also possible to subscribe to RTNLGRP_LINK |
26 | to be notified of updates. This is important for setting from userspace. | 26 | to be notified of updates while the interface is admin up. This is |
27 | important for setting from userspace. | ||
27 | 28 | ||
28 | These values contain interface state: | 29 | These values contain interface state: |
29 | 30 | ||
@@ -101,8 +102,9 @@ because some driver controlled protocol establishment has to | |||
101 | complete. Corresponding functions are netif_dormant_on() to set the | 102 | complete. Corresponding functions are netif_dormant_on() to set the |
102 | flag, netif_dormant_off() to clear it and netif_dormant() to query. | 103 | flag, netif_dormant_off() to clear it and netif_dormant() to query. |
103 | 104 | ||
104 | On device allocation, networking core sets the flags equivalent to | 105 | On device allocation, both flags __LINK_STATE_NOCARRIER and |
105 | netif_carrier_ok() and !netif_dormant(). | 106 | __LINK_STATE_DORMANT are cleared, so the effective state is equivalent |
107 | to netif_carrier_ok() and !netif_dormant(). | ||
106 | 108 | ||
107 | 109 | ||
108 | Whenever the driver CHANGES one of these flags, a workqueue event is | 110 | Whenever the driver CHANGES one of these flags, a workqueue event is |
@@ -133,11 +135,11 @@ netif_carrier_ok() && !netif_dormant() is set by the | |||
133 | driver. Afterwards, the userspace application can set IFLA_OPERSTATE | 135 | driver. Afterwards, the userspace application can set IFLA_OPERSTATE |
134 | to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set | 136 | to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set |
135 | netif_carrier_off() or netif_dormant_on(). Changes made by userspace | 137 | netif_carrier_off() or netif_dormant_on(). Changes made by userspace |
136 | are multicasted on the netlink group RTMGRP_LINK. | 138 | are multicasted on the netlink group RTNLGRP_LINK. |
137 | 139 | ||
138 | So basically a 802.1X supplicant interacts with the kernel like this: | 140 | So basically a 802.1X supplicant interacts with the kernel like this: |
139 | 141 | ||
140 | -subscribe to RTMGRP_LINK | 142 | -subscribe to RTNLGRP_LINK |
141 | -set IFLA_LINKMODE to 1 via RTM_SETLINK | 143 | -set IFLA_LINKMODE to 1 via RTM_SETLINK |
142 | -query RTM_GETLINK once to get initial state | 144 | -query RTM_GETLINK once to get initial state |
143 | -if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until | 145 | -if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until |
diff --git a/Documentation/networking/switchdev.txt b/Documentation/networking/switchdev.txt index 82236a17b5e6..97b7ca8b9b86 100644 --- a/Documentation/networking/switchdev.txt +++ b/Documentation/networking/switchdev.txt | |||
@@ -92,11 +92,11 @@ device. | |||
92 | Switch ID | 92 | Switch ID |
93 | ^^^^^^^^^ | 93 | ^^^^^^^^^ |
94 | 94 | ||
95 | The switchdev driver must implement the switchdev op switchdev_port_attr_get | 95 | The switchdev driver must implement the net_device operation |
96 | for SWITCHDEV_ATTR_ID_PORT_PARENT_ID for each port netdev, returning the same | 96 | ndo_get_port_parent_id for each port netdev, returning the same physical ID for |
97 | physical ID for each port of a switch. The ID must be unique between switches | 97 | each port of a switch. The ID must be unique between switches on the same |
98 | on the same system. The ID does not need to be unique between switches on | 98 | system. The ID does not need to be unique between switches on different |
99 | different systems. | 99 | systems. |
100 | 100 | ||
101 | The switch ID is used to locate ports on a switch and to know if aggregated | 101 | The switch ID is used to locate ports on a switch and to know if aggregated |
102 | ports belong to the same switch. | 102 | ports belong to the same switch. |
diff --git a/Documentation/process/applying-patches.rst b/Documentation/process/applying-patches.rst index dc2ddc345044..fbb9297e6360 100644 --- a/Documentation/process/applying-patches.rst +++ b/Documentation/process/applying-patches.rst | |||
@@ -216,14 +216,14 @@ You can use the ``interdiff`` program (http://cyberelk.net/tim/patchutils/) to | |||
216 | generate a patch representing the differences between two patches and then | 216 | generate a patch representing the differences between two patches and then |
217 | apply the result. | 217 | apply the result. |
218 | 218 | ||
219 | This will let you move from something like 4.7.2 to 4.7.3 in a single | 219 | This will let you move from something like 5.7.2 to 5.7.3 in a single |
220 | step. The -z flag to interdiff will even let you feed it patches in gzip or | 220 | step. The -z flag to interdiff will even let you feed it patches in gzip or |
221 | bzip2 compressed form directly without the use of zcat or bzcat or manual | 221 | bzip2 compressed form directly without the use of zcat or bzcat or manual |
222 | decompression. | 222 | decompression. |
223 | 223 | ||
224 | Here's how you'd go from 4.7.2 to 4.7.3 in a single step:: | 224 | Here's how you'd go from 5.7.2 to 5.7.3 in a single step:: |
225 | 225 | ||
226 | interdiff -z ../patch-4.7.2.gz ../patch-4.7.3.gz | patch -p1 | 226 | interdiff -z ../patch-5.7.2.gz ../patch-5.7.3.gz | patch -p1 |
227 | 227 | ||
228 | Although interdiff may save you a step or two you are generally advised to | 228 | Although interdiff may save you a step or two you are generally advised to |
229 | do the additional steps since interdiff can get things wrong in some cases. | 229 | do the additional steps since interdiff can get things wrong in some cases. |
@@ -245,62 +245,67 @@ The patches are available at http://kernel.org/ | |||
245 | Most recent patches are linked from the front page, but they also have | 245 | Most recent patches are linked from the front page, but they also have |
246 | specific homes. | 246 | specific homes. |
247 | 247 | ||
248 | The 4.x.y (-stable) and 4.x patches live at | 248 | The 5.x.y (-stable) and 5.x patches live at |
249 | 249 | ||
250 | https://www.kernel.org/pub/linux/kernel/v4.x/ | 250 | https://www.kernel.org/pub/linux/kernel/v5.x/ |
251 | 251 | ||
252 | The -rc patches live at | 252 | The -rc patches are not stored on the webserver but are generated on |
253 | demand from git tags such as | ||
253 | 254 | ||
254 | https://www.kernel.org/pub/linux/kernel/v4.x/testing/ | 255 | https://git.kernel.org/torvalds/p/v5.1-rc1/v5.0 |
255 | 256 | ||
257 | The stable -rc patches live at | ||
256 | 258 | ||
257 | The 4.x kernels | 259 | https://www.kernel.org/pub/linux/kernel/v5.x/stable-review/ |
260 | |||
261 | |||
262 | The 5.x kernels | ||
258 | =============== | 263 | =============== |
259 | 264 | ||
260 | These are the base stable releases released by Linus. The highest numbered | 265 | These are the base stable releases released by Linus. The highest numbered |
261 | release is the most recent. | 266 | release is the most recent. |
262 | 267 | ||
263 | If regressions or other serious flaws are found, then a -stable fix patch | 268 | If regressions or other serious flaws are found, then a -stable fix patch |
264 | will be released (see below) on top of this base. Once a new 4.x base | 269 | will be released (see below) on top of this base. Once a new 5.x base |
265 | kernel is released, a patch is made available that is a delta between the | 270 | kernel is released, a patch is made available that is a delta between the |
266 | previous 4.x kernel and the new one. | 271 | previous 5.x kernel and the new one. |
267 | 272 | ||
268 | To apply a patch moving from 4.6 to 4.7, you'd do the following (note | 273 | To apply a patch moving from 5.6 to 5.7, you'd do the following (note |
269 | that such patches do **NOT** apply on top of 4.x.y kernels but on top of the | 274 | that such patches do **NOT** apply on top of 5.x.y kernels but on top of the |
270 | base 4.x kernel -- if you need to move from 4.x.y to 4.x+1 you need to | 275 | base 5.x kernel -- if you need to move from 5.x.y to 5.x+1 you need to |
271 | first revert the 4.x.y patch). | 276 | first revert the 5.x.y patch). |
272 | 277 | ||
273 | Here are some examples:: | 278 | Here are some examples:: |
274 | 279 | ||
275 | # moving from 4.6 to 4.7 | 280 | # moving from 5.6 to 5.7 |
276 | 281 | ||
277 | $ cd ~/linux-4.6 # change to kernel source dir | 282 | $ cd ~/linux-5.6 # change to kernel source dir |
278 | $ patch -p1 < ../patch-4.7 # apply the 4.7 patch | 283 | $ patch -p1 < ../patch-5.7 # apply the 5.7 patch |
279 | $ cd .. | 284 | $ cd .. |
280 | $ mv linux-4.6 linux-4.7 # rename source dir | 285 | $ mv linux-5.6 linux-5.7 # rename source dir |
281 | 286 | ||
282 | # moving from 4.6.1 to 4.7 | 287 | # moving from 5.6.1 to 5.7 |
283 | 288 | ||
284 | $ cd ~/linux-4.6.1 # change to kernel source dir | 289 | $ cd ~/linux-5.6.1 # change to kernel source dir |
285 | $ patch -p1 -R < ../patch-4.6.1 # revert the 4.6.1 patch | 290 | $ patch -p1 -R < ../patch-5.6.1 # revert the 5.6.1 patch |
286 | # source dir is now 4.6 | 291 | # source dir is now 5.6 |
287 | $ patch -p1 < ../patch-4.7 # apply new 4.7 patch | 292 | $ patch -p1 < ../patch-5.7 # apply new 5.7 patch |
288 | $ cd .. | 293 | $ cd .. |
289 | $ mv linux-4.6.1 linux-4.7 # rename source dir | 294 | $ mv linux-5.6.1 linux-5.7 # rename source dir |
290 | 295 | ||
291 | 296 | ||
292 | The 4.x.y kernels | 297 | The 5.x.y kernels |
293 | ================= | 298 | ================= |
294 | 299 | ||
295 | Kernels with 3-digit versions are -stable kernels. They contain small(ish) | 300 | Kernels with 3-digit versions are -stable kernels. They contain small(ish) |
296 | critical fixes for security problems or significant regressions discovered | 301 | critical fixes for security problems or significant regressions discovered |
297 | in a given 4.x kernel. | 302 | in a given 5.x kernel. |
298 | 303 | ||
299 | This is the recommended branch for users who want the most recent stable | 304 | This is the recommended branch for users who want the most recent stable |
300 | kernel and are not interested in helping test development/experimental | 305 | kernel and are not interested in helping test development/experimental |
301 | versions. | 306 | versions. |
302 | 307 | ||
303 | If no 4.x.y kernel is available, then the highest numbered 4.x kernel is | 308 | If no 5.x.y kernel is available, then the highest numbered 5.x kernel is |
304 | the current stable kernel. | 309 | the current stable kernel. |
305 | 310 | ||
306 | .. note:: | 311 | .. note:: |
@@ -308,23 +313,23 @@ the current stable kernel. | |||
308 | The -stable team usually do make incremental patches available as well | 313 | The -stable team usually do make incremental patches available as well |
309 | as patches against the latest mainline release, but I only cover the | 314 | as patches against the latest mainline release, but I only cover the |
310 | non-incremental ones below. The incremental ones can be found at | 315 | non-incremental ones below. The incremental ones can be found at |
311 | https://www.kernel.org/pub/linux/kernel/v4.x/incr/ | 316 | https://www.kernel.org/pub/linux/kernel/v5.x/incr/ |
312 | 317 | ||
313 | These patches are not incremental, meaning that for example the 4.7.3 | 318 | These patches are not incremental, meaning that for example the 5.7.3 |
314 | patch does not apply on top of the 4.7.2 kernel source, but rather on top | 319 | patch does not apply on top of the 5.7.2 kernel source, but rather on top |
315 | of the base 4.7 kernel source. | 320 | of the base 5.7 kernel source. |
316 | 321 | ||
317 | So, in order to apply the 4.7.3 patch to your existing 4.7.2 kernel | 322 | So, in order to apply the 5.7.3 patch to your existing 5.7.2 kernel |
318 | source you have to first back out the 4.7.2 patch (so you are left with a | 323 | source you have to first back out the 5.7.2 patch (so you are left with a |
319 | base 4.7 kernel source) and then apply the new 4.7.3 patch. | 324 | base 5.7 kernel source) and then apply the new 5.7.3 patch. |
320 | 325 | ||
321 | Here's a small example:: | 326 | Here's a small example:: |
322 | 327 | ||
323 | $ cd ~/linux-4.7.2 # change to the kernel source dir | 328 | $ cd ~/linux-5.7.2 # change to the kernel source dir |
324 | $ patch -p1 -R < ../patch-4.7.2 # revert the 4.7.2 patch | 329 | $ patch -p1 -R < ../patch-5.7.2 # revert the 5.7.2 patch |
325 | $ patch -p1 < ../patch-4.7.3 # apply the new 4.7.3 patch | 330 | $ patch -p1 < ../patch-5.7.3 # apply the new 5.7.3 patch |
326 | $ cd .. | 331 | $ cd .. |
327 | $ mv linux-4.7.2 linux-4.7.3 # rename the kernel source dir | 332 | $ mv linux-5.7.2 linux-5.7.3 # rename the kernel source dir |
328 | 333 | ||
329 | The -rc kernels | 334 | The -rc kernels |
330 | =============== | 335 | =============== |
@@ -343,38 +348,38 @@ This is a good branch to run for people who want to help out testing | |||
343 | development kernels but do not want to run some of the really experimental | 348 | development kernels but do not want to run some of the really experimental |
344 | stuff (such people should see the sections about -next and -mm kernels below). | 349 | stuff (such people should see the sections about -next and -mm kernels below). |
345 | 350 | ||
346 | The -rc patches are not incremental, they apply to a base 4.x kernel, just | 351 | The -rc patches are not incremental, they apply to a base 5.x kernel, just |
347 | like the 4.x.y patches described above. The kernel version before the -rcN | 352 | like the 5.x.y patches described above. The kernel version before the -rcN |
348 | suffix denotes the version of the kernel that this -rc kernel will eventually | 353 | suffix denotes the version of the kernel that this -rc kernel will eventually |
349 | turn into. | 354 | turn into. |
350 | 355 | ||
351 | So, 4.8-rc5 means that this is the fifth release candidate for the 4.8 | 356 | So, 5.8-rc5 means that this is the fifth release candidate for the 5.8 |
352 | kernel and the patch should be applied on top of the 4.7 kernel source. | 357 | kernel and the patch should be applied on top of the 5.7 kernel source. |
353 | 358 | ||
354 | Here are 3 examples of how to apply these patches:: | 359 | Here are 3 examples of how to apply these patches:: |
355 | 360 | ||
356 | # first an example of moving from 4.7 to 4.8-rc3 | 361 | # first an example of moving from 5.7 to 5.8-rc3 |
357 | 362 | ||
358 | $ cd ~/linux-4.7 # change to the 4.7 source dir | 363 | $ cd ~/linux-5.7 # change to the 5.7 source dir |
359 | $ patch -p1 < ../patch-4.8-rc3 # apply the 4.8-rc3 patch | 364 | $ patch -p1 < ../patch-5.8-rc3 # apply the 5.8-rc3 patch |
360 | $ cd .. | 365 | $ cd .. |
361 | $ mv linux-4.7 linux-4.8-rc3 # rename the source dir | 366 | $ mv linux-5.7 linux-5.8-rc3 # rename the source dir |
362 | 367 | ||
363 | # now let's move from 4.8-rc3 to 4.8-rc5 | 368 | # now let's move from 5.8-rc3 to 5.8-rc5 |
364 | 369 | ||
365 | $ cd ~/linux-4.8-rc3 # change to the 4.8-rc3 dir | 370 | $ cd ~/linux-5.8-rc3 # change to the 5.8-rc3 dir |
366 | $ patch -p1 -R < ../patch-4.8-rc3 # revert the 4.8-rc3 patch | 371 | $ patch -p1 -R < ../patch-5.8-rc3 # revert the 5.8-rc3 patch |
367 | $ patch -p1 < ../patch-4.8-rc5 # apply the new 4.8-rc5 patch | 372 | $ patch -p1 < ../patch-5.8-rc5 # apply the new 5.8-rc5 patch |
368 | $ cd .. | 373 | $ cd .. |
369 | $ mv linux-4.8-rc3 linux-4.8-rc5 # rename the source dir | 374 | $ mv linux-5.8-rc3 linux-5.8-rc5 # rename the source dir |
370 | 375 | ||
371 | # finally let's try and move from 4.7.3 to 4.8-rc5 | 376 | # finally let's try and move from 5.7.3 to 5.8-rc5 |
372 | 377 | ||
373 | $ cd ~/linux-4.7.3 # change to the kernel source dir | 378 | $ cd ~/linux-5.7.3 # change to the kernel source dir |
374 | $ patch -p1 -R < ../patch-4.7.3 # revert the 4.7.3 patch | 379 | $ patch -p1 -R < ../patch-5.7.3 # revert the 5.7.3 patch |
375 | $ patch -p1 < ../patch-4.8-rc5 # apply new 4.8-rc5 patch | 380 | $ patch -p1 < ../patch-5.8-rc5 # apply new 5.8-rc5 patch |
376 | $ cd .. | 381 | $ cd .. |
377 | $ mv linux-4.7.3 linux-4.8-rc5 # rename the kernel source dir | 382 | $ mv linux-5.7.3 linux-5.8-rc5 # rename the kernel source dir |
378 | 383 | ||
379 | 384 | ||
380 | The -mm patches and the linux-next tree | 385 | The -mm patches and the linux-next tree |
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt index 58649bd4fcfc..ebc679bcb2dc 100644 --- a/Documentation/sysctl/fs.txt +++ b/Documentation/sysctl/fs.txt | |||
@@ -80,7 +80,9 @@ nonzero when shrink_dcache_pages() has been called and the | |||
80 | dcache isn't pruned yet. | 80 | dcache isn't pruned yet. |
81 | 81 | ||
82 | nr_negative shows the number of unused dentries that are also | 82 | nr_negative shows the number of unused dentries that are also |
83 | negative dentries which do not mapped to actual files. | 83 | negative dentries which do not map to any files. Instead, |
84 | they help speeding up rejection of non-existing files provided | ||
85 | by the users. | ||
84 | 86 | ||
85 | ============================================================== | 87 | ============================================================== |
86 | 88 | ||
diff --git a/Documentation/translations/it_IT/admin-guide/README.rst b/Documentation/translations/it_IT/admin-guide/README.rst index 80f5ffc94a9e..b37166817842 100644 --- a/Documentation/translations/it_IT/admin-guide/README.rst +++ b/Documentation/translations/it_IT/admin-guide/README.rst | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | .. _it_readme: | 5 | .. _it_readme: |
6 | 6 | ||
7 | Rilascio del kernel Linux 4.x <http://kernel.org/> | 7 | Rilascio del kernel Linux 5.x <http://kernel.org/> |
8 | =================================================== | 8 | =================================================== |
9 | 9 | ||
10 | .. warning:: | 10 | .. warning:: |
diff --git a/MAINTAINERS b/MAINTAINERS index 8b95aa5363dd..de1cf31863a7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -409,8 +409,7 @@ F: drivers/platform/x86/wmi.c | |||
409 | F: include/uapi/linux/wmi.h | 409 | F: include/uapi/linux/wmi.h |
410 | 410 | ||
411 | AD1889 ALSA SOUND DRIVER | 411 | AD1889 ALSA SOUND DRIVER |
412 | M: Thibaut Varene <T-Bone@parisc-linux.org> | 412 | W: https://parisc.wiki.kernel.org/index.php/AD1889 |
413 | W: http://wiki.parisc-linux.org/AD1889 | ||
414 | L: linux-parisc@vger.kernel.org | 413 | L: linux-parisc@vger.kernel.org |
415 | S: Maintained | 414 | S: Maintained |
416 | F: sound/pci/ad1889.* | 415 | F: sound/pci/ad1889.* |
@@ -2849,8 +2848,11 @@ F: include/uapi/linux/if_bonding.h | |||
2849 | BPF (Safe dynamic programs and tools) | 2848 | BPF (Safe dynamic programs and tools) |
2850 | M: Alexei Starovoitov <ast@kernel.org> | 2849 | M: Alexei Starovoitov <ast@kernel.org> |
2851 | M: Daniel Borkmann <daniel@iogearbox.net> | 2850 | M: Daniel Borkmann <daniel@iogearbox.net> |
2851 | R: Martin KaFai Lau <kafai@fb.com> | ||
2852 | R: Song Liu <songliubraving@fb.com> | ||
2853 | R: Yonghong Song <yhs@fb.com> | ||
2852 | L: netdev@vger.kernel.org | 2854 | L: netdev@vger.kernel.org |
2853 | L: linux-kernel@vger.kernel.org | 2855 | L: bpf@vger.kernel.org |
2854 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git | 2856 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git |
2855 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git | 2857 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git |
2856 | Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 | 2858 | Q: https://patchwork.ozlabs.org/project/netdev/list/?delegate=77147 |
@@ -2874,10 +2876,13 @@ F: samples/bpf/ | |||
2874 | F: tools/bpf/ | 2876 | F: tools/bpf/ |
2875 | F: tools/lib/bpf/ | 2877 | F: tools/lib/bpf/ |
2876 | F: tools/testing/selftests/bpf/ | 2878 | F: tools/testing/selftests/bpf/ |
2879 | K: bpf | ||
2880 | N: bpf | ||
2877 | 2881 | ||
2878 | BPF JIT for ARM | 2882 | BPF JIT for ARM |
2879 | M: Shubham Bansal <illusionist.neo@gmail.com> | 2883 | M: Shubham Bansal <illusionist.neo@gmail.com> |
2880 | L: netdev@vger.kernel.org | 2884 | L: netdev@vger.kernel.org |
2885 | L: bpf@vger.kernel.org | ||
2881 | S: Maintained | 2886 | S: Maintained |
2882 | F: arch/arm/net/ | 2887 | F: arch/arm/net/ |
2883 | 2888 | ||
@@ -2886,18 +2891,21 @@ M: Daniel Borkmann <daniel@iogearbox.net> | |||
2886 | M: Alexei Starovoitov <ast@kernel.org> | 2891 | M: Alexei Starovoitov <ast@kernel.org> |
2887 | M: Zi Shen Lim <zlim.lnx@gmail.com> | 2892 | M: Zi Shen Lim <zlim.lnx@gmail.com> |
2888 | L: netdev@vger.kernel.org | 2893 | L: netdev@vger.kernel.org |
2894 | L: bpf@vger.kernel.org | ||
2889 | S: Supported | 2895 | S: Supported |
2890 | F: arch/arm64/net/ | 2896 | F: arch/arm64/net/ |
2891 | 2897 | ||
2892 | BPF JIT for MIPS (32-BIT AND 64-BIT) | 2898 | BPF JIT for MIPS (32-BIT AND 64-BIT) |
2893 | M: Paul Burton <paul.burton@mips.com> | 2899 | M: Paul Burton <paul.burton@mips.com> |
2894 | L: netdev@vger.kernel.org | 2900 | L: netdev@vger.kernel.org |
2901 | L: bpf@vger.kernel.org | ||
2895 | S: Maintained | 2902 | S: Maintained |
2896 | F: arch/mips/net/ | 2903 | F: arch/mips/net/ |
2897 | 2904 | ||
2898 | BPF JIT for NFP NICs | 2905 | BPF JIT for NFP NICs |
2899 | M: Jakub Kicinski <jakub.kicinski@netronome.com> | 2906 | M: Jakub Kicinski <jakub.kicinski@netronome.com> |
2900 | L: netdev@vger.kernel.org | 2907 | L: netdev@vger.kernel.org |
2908 | L: bpf@vger.kernel.org | ||
2901 | S: Supported | 2909 | S: Supported |
2902 | F: drivers/net/ethernet/netronome/nfp/bpf/ | 2910 | F: drivers/net/ethernet/netronome/nfp/bpf/ |
2903 | 2911 | ||
@@ -2905,6 +2913,7 @@ BPF JIT for POWERPC (32-BIT AND 64-BIT) | |||
2905 | M: Naveen N. Rao <naveen.n.rao@linux.ibm.com> | 2913 | M: Naveen N. Rao <naveen.n.rao@linux.ibm.com> |
2906 | M: Sandipan Das <sandipan@linux.ibm.com> | 2914 | M: Sandipan Das <sandipan@linux.ibm.com> |
2907 | L: netdev@vger.kernel.org | 2915 | L: netdev@vger.kernel.org |
2916 | L: bpf@vger.kernel.org | ||
2908 | S: Maintained | 2917 | S: Maintained |
2909 | F: arch/powerpc/net/ | 2918 | F: arch/powerpc/net/ |
2910 | 2919 | ||
@@ -2912,6 +2921,7 @@ BPF JIT for S390 | |||
2912 | M: Martin Schwidefsky <schwidefsky@de.ibm.com> | 2921 | M: Martin Schwidefsky <schwidefsky@de.ibm.com> |
2913 | M: Heiko Carstens <heiko.carstens@de.ibm.com> | 2922 | M: Heiko Carstens <heiko.carstens@de.ibm.com> |
2914 | L: netdev@vger.kernel.org | 2923 | L: netdev@vger.kernel.org |
2924 | L: bpf@vger.kernel.org | ||
2915 | S: Maintained | 2925 | S: Maintained |
2916 | F: arch/s390/net/ | 2926 | F: arch/s390/net/ |
2917 | X: arch/s390/net/pnet.c | 2927 | X: arch/s390/net/pnet.c |
@@ -2919,12 +2929,14 @@ X: arch/s390/net/pnet.c | |||
2919 | BPF JIT for SPARC (32-BIT AND 64-BIT) | 2929 | BPF JIT for SPARC (32-BIT AND 64-BIT) |
2920 | M: David S. Miller <davem@davemloft.net> | 2930 | M: David S. Miller <davem@davemloft.net> |
2921 | L: netdev@vger.kernel.org | 2931 | L: netdev@vger.kernel.org |
2932 | L: bpf@vger.kernel.org | ||
2922 | S: Maintained | 2933 | S: Maintained |
2923 | F: arch/sparc/net/ | 2934 | F: arch/sparc/net/ |
2924 | 2935 | ||
2925 | BPF JIT for X86 32-BIT | 2936 | BPF JIT for X86 32-BIT |
2926 | M: Wang YanQing <udknight@gmail.com> | 2937 | M: Wang YanQing <udknight@gmail.com> |
2927 | L: netdev@vger.kernel.org | 2938 | L: netdev@vger.kernel.org |
2939 | L: bpf@vger.kernel.org | ||
2928 | S: Maintained | 2940 | S: Maintained |
2929 | F: arch/x86/net/bpf_jit_comp32.c | 2941 | F: arch/x86/net/bpf_jit_comp32.c |
2930 | 2942 | ||
@@ -2932,6 +2944,7 @@ BPF JIT for X86 64-BIT | |||
2932 | M: Alexei Starovoitov <ast@kernel.org> | 2944 | M: Alexei Starovoitov <ast@kernel.org> |
2933 | M: Daniel Borkmann <daniel@iogearbox.net> | 2945 | M: Daniel Borkmann <daniel@iogearbox.net> |
2934 | L: netdev@vger.kernel.org | 2946 | L: netdev@vger.kernel.org |
2947 | L: bpf@vger.kernel.org | ||
2935 | S: Supported | 2948 | S: Supported |
2936 | F: arch/x86/net/ | 2949 | F: arch/x86/net/ |
2937 | X: arch/x86/net/bpf_jit_comp32.c | 2950 | X: arch/x86/net/bpf_jit_comp32.c |
@@ -3386,9 +3399,8 @@ F: Documentation/media/v4l-drivers/cafe_ccic* | |||
3386 | F: drivers/media/platform/marvell-ccic/ | 3399 | F: drivers/media/platform/marvell-ccic/ |
3387 | 3400 | ||
3388 | CAIF NETWORK LAYER | 3401 | CAIF NETWORK LAYER |
3389 | M: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> | ||
3390 | L: netdev@vger.kernel.org | 3402 | L: netdev@vger.kernel.org |
3391 | S: Supported | 3403 | S: Orphan |
3392 | F: Documentation/networking/caif/ | 3404 | F: Documentation/networking/caif/ |
3393 | F: drivers/net/caif/ | 3405 | F: drivers/net/caif/ |
3394 | F: include/uapi/linux/caif/ | 3406 | F: include/uapi/linux/caif/ |
@@ -5182,7 +5194,7 @@ DRM DRIVERS FOR XEN | |||
5182 | M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> | 5194 | M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> |
5183 | T: git git://anongit.freedesktop.org/drm/drm-misc | 5195 | T: git git://anongit.freedesktop.org/drm/drm-misc |
5184 | L: dri-devel@lists.freedesktop.org | 5196 | L: dri-devel@lists.freedesktop.org |
5185 | L: xen-devel@lists.xen.org | 5197 | L: xen-devel@lists.xenproject.org (moderated for non-subscribers) |
5186 | S: Supported | 5198 | S: Supported |
5187 | F: drivers/gpu/drm/xen/ | 5199 | F: drivers/gpu/drm/xen/ |
5188 | F: Documentation/gpu/xen-front.rst | 5200 | F: Documentation/gpu/xen-front.rst |
@@ -6147,7 +6159,7 @@ FREESCALE SOC SOUND DRIVERS | |||
6147 | M: Timur Tabi <timur@kernel.org> | 6159 | M: Timur Tabi <timur@kernel.org> |
6148 | M: Nicolin Chen <nicoleotsuka@gmail.com> | 6160 | M: Nicolin Chen <nicoleotsuka@gmail.com> |
6149 | M: Xiubo Li <Xiubo.Lee@gmail.com> | 6161 | M: Xiubo Li <Xiubo.Lee@gmail.com> |
6150 | R: Fabio Estevam <fabio.estevam@nxp.com> | 6162 | R: Fabio Estevam <festevam@gmail.com> |
6151 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 6163 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
6152 | L: linuxppc-dev@lists.ozlabs.org | 6164 | L: linuxppc-dev@lists.ozlabs.org |
6153 | S: Maintained | 6165 | S: Maintained |
@@ -8483,6 +8495,7 @@ L7 BPF FRAMEWORK | |||
8483 | M: John Fastabend <john.fastabend@gmail.com> | 8495 | M: John Fastabend <john.fastabend@gmail.com> |
8484 | M: Daniel Borkmann <daniel@iogearbox.net> | 8496 | M: Daniel Borkmann <daniel@iogearbox.net> |
8485 | L: netdev@vger.kernel.org | 8497 | L: netdev@vger.kernel.org |
8498 | L: bpf@vger.kernel.org | ||
8486 | S: Maintained | 8499 | S: Maintained |
8487 | F: include/linux/skmsg.h | 8500 | F: include/linux/skmsg.h |
8488 | F: net/core/skmsg.c | 8501 | F: net/core/skmsg.c |
@@ -10894,7 +10907,7 @@ F: include/linux/nvmem-consumer.h | |||
10894 | F: include/linux/nvmem-provider.h | 10907 | F: include/linux/nvmem-provider.h |
10895 | 10908 | ||
10896 | NXP SGTL5000 DRIVER | 10909 | NXP SGTL5000 DRIVER |
10897 | M: Fabio Estevam <fabio.estevam@nxp.com> | 10910 | M: Fabio Estevam <festevam@gmail.com> |
10898 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 10911 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
10899 | S: Maintained | 10912 | S: Maintained |
10900 | F: Documentation/devicetree/bindings/sound/sgtl5000.txt | 10913 | F: Documentation/devicetree/bindings/sound/sgtl5000.txt |
@@ -11308,10 +11321,12 @@ F: include/dt-bindings/ | |||
11308 | 11321 | ||
11309 | OPENCORES I2C BUS DRIVER | 11322 | OPENCORES I2C BUS DRIVER |
11310 | M: Peter Korsgaard <peter@korsgaard.com> | 11323 | M: Peter Korsgaard <peter@korsgaard.com> |
11324 | M: Andrew Lunn <andrew@lunn.ch> | ||
11311 | L: linux-i2c@vger.kernel.org | 11325 | L: linux-i2c@vger.kernel.org |
11312 | S: Maintained | 11326 | S: Maintained |
11313 | F: Documentation/i2c/busses/i2c-ocores | 11327 | F: Documentation/i2c/busses/i2c-ocores |
11314 | F: drivers/i2c/busses/i2c-ocores.c | 11328 | F: drivers/i2c/busses/i2c-ocores.c |
11329 | F: include/linux/platform_data/i2c-ocores.h | ||
11315 | 11330 | ||
11316 | OPENRISC ARCHITECTURE | 11331 | OPENRISC ARCHITECTURE |
11317 | M: Jonas Bonn <jonas@southpole.se> | 11332 | M: Jonas Bonn <jonas@southpole.se> |
@@ -11482,7 +11497,7 @@ F: Documentation/blockdev/paride.txt | |||
11482 | F: drivers/block/paride/ | 11497 | F: drivers/block/paride/ |
11483 | 11498 | ||
11484 | PARISC ARCHITECTURE | 11499 | PARISC ARCHITECTURE |
11485 | M: "James E.J. Bottomley" <jejb@parisc-linux.org> | 11500 | M: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com> |
11486 | M: Helge Deller <deller@gmx.de> | 11501 | M: Helge Deller <deller@gmx.de> |
11487 | L: linux-parisc@vger.kernel.org | 11502 | L: linux-parisc@vger.kernel.org |
11488 | W: http://www.parisc-linux.org/ | 11503 | W: http://www.parisc-linux.org/ |
@@ -12869,6 +12884,13 @@ F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt | |||
12869 | F: drivers/net/dsa/realtek-smi* | 12884 | F: drivers/net/dsa/realtek-smi* |
12870 | F: drivers/net/dsa/rtl83* | 12885 | F: drivers/net/dsa/rtl83* |
12871 | 12886 | ||
12887 | REDPINE WIRELESS DRIVER | ||
12888 | M: Amitkumar Karwar <amitkarwar@gmail.com> | ||
12889 | M: Siva Rebbagondla <siva8118@gmail.com> | ||
12890 | L: linux-wireless@vger.kernel.org | ||
12891 | S: Maintained | ||
12892 | F: drivers/net/wireless/rsi/ | ||
12893 | |||
12872 | REGISTER MAP ABSTRACTION | 12894 | REGISTER MAP ABSTRACTION |
12873 | M: Mark Brown <broonie@kernel.org> | 12895 | M: Mark Brown <broonie@kernel.org> |
12874 | L: linux-kernel@vger.kernel.org | 12896 | L: linux-kernel@vger.kernel.org |
@@ -13697,6 +13719,15 @@ L: netdev@vger.kernel.org | |||
13697 | S: Supported | 13719 | S: Supported |
13698 | F: drivers/net/ethernet/sfc/ | 13720 | F: drivers/net/ethernet/sfc/ |
13699 | 13721 | ||
13722 | SFF/SFP/SFP+ MODULE SUPPORT | ||
13723 | M: Russell King <linux@armlinux.org.uk> | ||
13724 | L: netdev@vger.kernel.org | ||
13725 | S: Maintained | ||
13726 | F: drivers/net/phy/phylink.c | ||
13727 | F: drivers/net/phy/sfp* | ||
13728 | F: include/linux/phylink.h | ||
13729 | F: include/linux/sfp.h | ||
13730 | |||
13700 | SGI GRU DRIVER | 13731 | SGI GRU DRIVER |
13701 | M: Dimitri Sivanich <sivanich@sgi.com> | 13732 | M: Dimitri Sivanich <sivanich@sgi.com> |
13702 | S: Maintained | 13733 | S: Maintained |
@@ -16692,6 +16723,7 @@ M: Jesper Dangaard Brouer <hawk@kernel.org> | |||
16692 | M: John Fastabend <john.fastabend@gmail.com> | 16723 | M: John Fastabend <john.fastabend@gmail.com> |
16693 | L: netdev@vger.kernel.org | 16724 | L: netdev@vger.kernel.org |
16694 | L: xdp-newbies@vger.kernel.org | 16725 | L: xdp-newbies@vger.kernel.org |
16726 | L: bpf@vger.kernel.org | ||
16695 | S: Supported | 16727 | S: Supported |
16696 | F: net/core/xdp.c | 16728 | F: net/core/xdp.c |
16697 | F: include/net/xdp.h | 16729 | F: include/net/xdp.h |
@@ -16705,6 +16737,7 @@ XDP SOCKETS (AF_XDP) | |||
16705 | M: Björn Töpel <bjorn.topel@intel.com> | 16737 | M: Björn Töpel <bjorn.topel@intel.com> |
16706 | M: Magnus Karlsson <magnus.karlsson@intel.com> | 16738 | M: Magnus Karlsson <magnus.karlsson@intel.com> |
16707 | L: netdev@vger.kernel.org | 16739 | L: netdev@vger.kernel.org |
16740 | L: bpf@vger.kernel.org | ||
16708 | S: Maintained | 16741 | S: Maintained |
16709 | F: kernel/bpf/xskmap.c | 16742 | F: kernel/bpf/xskmap.c |
16710 | F: net/xdp/ | 16743 | F: net/xdp/ |
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 5 | 2 | VERSION = 5 |
3 | PATCHLEVEL = 0 | 3 | PATCHLEVEL = 0 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc5 | 5 | EXTRAVERSION = -rc8 |
6 | NAME = Shy Crocodile | 6 | NAME = Shy Crocodile |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h index 4d17cacd1462..432402c8e47f 100644 --- a/arch/alpha/include/asm/irq.h +++ b/arch/alpha/include/asm/irq.h | |||
@@ -56,15 +56,15 @@ | |||
56 | 56 | ||
57 | #elif defined(CONFIG_ALPHA_DP264) || \ | 57 | #elif defined(CONFIG_ALPHA_DP264) || \ |
58 | defined(CONFIG_ALPHA_LYNX) || \ | 58 | defined(CONFIG_ALPHA_LYNX) || \ |
59 | defined(CONFIG_ALPHA_SHARK) || \ | 59 | defined(CONFIG_ALPHA_SHARK) |
60 | defined(CONFIG_ALPHA_EIGER) | ||
61 | # define NR_IRQS 64 | 60 | # define NR_IRQS 64 |
62 | 61 | ||
63 | #elif defined(CONFIG_ALPHA_TITAN) | 62 | #elif defined(CONFIG_ALPHA_TITAN) |
64 | #define NR_IRQS 80 | 63 | #define NR_IRQS 80 |
65 | 64 | ||
66 | #elif defined(CONFIG_ALPHA_RAWHIDE) || \ | 65 | #elif defined(CONFIG_ALPHA_RAWHIDE) || \ |
67 | defined(CONFIG_ALPHA_TAKARA) | 66 | defined(CONFIG_ALPHA_TAKARA) || \ |
67 | defined(CONFIG_ALPHA_EIGER) | ||
68 | # define NR_IRQS 128 | 68 | # define NR_IRQS 128 |
69 | 69 | ||
70 | #elif defined(CONFIG_ALPHA_WILDFIRE) | 70 | #elif defined(CONFIG_ALPHA_WILDFIRE) |
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index d73dc473fbb9..188fc9256baf 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c | |||
@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm) | |||
78 | /* Macro for exception fixup code to access integer registers. */ | 78 | /* Macro for exception fixup code to access integer registers. */ |
79 | #define dpf_reg(r) \ | 79 | #define dpf_reg(r) \ |
80 | (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ | 80 | (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ |
81 | (r) <= 18 ? (r)+8 : (r)-10]) | 81 | (r) <= 18 ? (r)+10 : (r)-10]) |
82 | 82 | ||
83 | asmlinkage void | 83 | asmlinkage void |
84 | do_page_fault(unsigned long address, unsigned long mmcsr, | 84 | do_page_fault(unsigned long address, unsigned long mmcsr, |
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index 376366a7db81..d750b302d5ab 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig | |||
@@ -191,7 +191,6 @@ config NR_CPUS | |||
191 | 191 | ||
192 | config ARC_SMP_HALT_ON_RESET | 192 | config ARC_SMP_HALT_ON_RESET |
193 | bool "Enable Halt-on-reset boot mode" | 193 | bool "Enable Halt-on-reset boot mode" |
194 | default y if ARC_UBOOT_SUPPORT | ||
195 | help | 194 | help |
196 | In SMP configuration cores can be configured as Halt-on-reset | 195 | In SMP configuration cores can be configured as Halt-on-reset |
197 | or they could all start at same time. For Halt-on-reset, non | 196 | or they could all start at same time. For Halt-on-reset, non |
@@ -407,6 +406,14 @@ config ARC_HAS_ACCL_REGS | |||
407 | (also referred to as r58:r59). These can also be used by gcc as GPR so | 406 | (also referred to as r58:r59). These can also be used by gcc as GPR so |
408 | kernel needs to save/restore per process | 407 | kernel needs to save/restore per process |
409 | 408 | ||
409 | config ARC_IRQ_NO_AUTOSAVE | ||
410 | bool "Disable hardware autosave regfile on interrupts" | ||
411 | default n | ||
412 | help | ||
413 | On HS cores, taken interrupt auto saves the regfile on stack. | ||
414 | This is programmable and can be optionally disabled in which case | ||
415 | software INTERRUPT_PROLOGUE/EPILGUE do the needed work | ||
416 | |||
410 | endif # ISA_ARCV2 | 417 | endif # ISA_ARCV2 |
411 | 418 | ||
412 | endmenu # "ARC CPU Configuration" | 419 | endmenu # "ARC CPU Configuration" |
@@ -515,17 +522,6 @@ config ARC_DBG_TLB_PARANOIA | |||
515 | 522 | ||
516 | endif | 523 | endif |
517 | 524 | ||
518 | config ARC_UBOOT_SUPPORT | ||
519 | bool "Support uboot arg Handling" | ||
520 | help | ||
521 | ARC Linux by default checks for uboot provided args as pointers to | ||
522 | external cmdline or DTB. This however breaks in absence of uboot, | ||
523 | when booting from Metaware debugger directly, as the registers are | ||
524 | not zeroed out on reset by mdb and/or ARCv2 based cores. The bogus | ||
525 | registers look like uboot args to kernel which then chokes. | ||
526 | So only enable the uboot arg checking/processing if users are sure | ||
527 | of uboot being in play. | ||
528 | |||
529 | config ARC_BUILTIN_DTB_NAME | 525 | config ARC_BUILTIN_DTB_NAME |
530 | string "Built in DTB" | 526 | string "Built in DTB" |
531 | help | 527 | help |
diff --git a/arch/arc/configs/nps_defconfig b/arch/arc/configs/nps_defconfig index 6e84060e7c90..621f59407d76 100644 --- a/arch/arc/configs/nps_defconfig +++ b/arch/arc/configs/nps_defconfig | |||
@@ -31,7 +31,6 @@ CONFIG_ARC_CACHE_LINE_SHIFT=5 | |||
31 | # CONFIG_ARC_HAS_LLSC is not set | 31 | # CONFIG_ARC_HAS_LLSC is not set |
32 | CONFIG_ARC_KVADDR_SIZE=402 | 32 | CONFIG_ARC_KVADDR_SIZE=402 |
33 | CONFIG_ARC_EMUL_UNALIGNED=y | 33 | CONFIG_ARC_EMUL_UNALIGNED=y |
34 | CONFIG_ARC_UBOOT_SUPPORT=y | ||
35 | CONFIG_PREEMPT=y | 34 | CONFIG_PREEMPT=y |
36 | CONFIG_NET=y | 35 | CONFIG_NET=y |
37 | CONFIG_UNIX=y | 36 | CONFIG_UNIX=y |
diff --git a/arch/arc/configs/vdk_hs38_defconfig b/arch/arc/configs/vdk_hs38_defconfig index 1e59a2e9c602..e447ace6fa1c 100644 --- a/arch/arc/configs/vdk_hs38_defconfig +++ b/arch/arc/configs/vdk_hs38_defconfig | |||
@@ -13,7 +13,6 @@ CONFIG_PARTITION_ADVANCED=y | |||
13 | CONFIG_ARC_PLAT_AXS10X=y | 13 | CONFIG_ARC_PLAT_AXS10X=y |
14 | CONFIG_AXS103=y | 14 | CONFIG_AXS103=y |
15 | CONFIG_ISA_ARCV2=y | 15 | CONFIG_ISA_ARCV2=y |
16 | CONFIG_ARC_UBOOT_SUPPORT=y | ||
17 | CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38" | 16 | CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38" |
18 | CONFIG_PREEMPT=y | 17 | CONFIG_PREEMPT=y |
19 | CONFIG_NET=y | 18 | CONFIG_NET=y |
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig index b5c3f6c54b03..c82cdb10aaf4 100644 --- a/arch/arc/configs/vdk_hs38_smp_defconfig +++ b/arch/arc/configs/vdk_hs38_smp_defconfig | |||
@@ -15,8 +15,6 @@ CONFIG_AXS103=y | |||
15 | CONFIG_ISA_ARCV2=y | 15 | CONFIG_ISA_ARCV2=y |
16 | CONFIG_SMP=y | 16 | CONFIG_SMP=y |
17 | # CONFIG_ARC_TIMERS_64BIT is not set | 17 | # CONFIG_ARC_TIMERS_64BIT is not set |
18 | # CONFIG_ARC_SMP_HALT_ON_RESET is not set | ||
19 | CONFIG_ARC_UBOOT_SUPPORT=y | ||
20 | CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" | 18 | CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp" |
21 | CONFIG_PREEMPT=y | 19 | CONFIG_PREEMPT=y |
22 | CONFIG_NET=y | 20 | CONFIG_NET=y |
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index f1b86cef0905..a27eafdc8260 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h | |||
@@ -151,6 +151,14 @@ struct bcr_isa_arcv2 { | |||
151 | #endif | 151 | #endif |
152 | }; | 152 | }; |
153 | 153 | ||
154 | struct bcr_uarch_build_arcv2 { | ||
155 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
156 | unsigned int pad:8, prod:8, maj:8, min:8; | ||
157 | #else | ||
158 | unsigned int min:8, maj:8, prod:8, pad:8; | ||
159 | #endif | ||
160 | }; | ||
161 | |||
154 | struct bcr_mpy { | 162 | struct bcr_mpy { |
155 | #ifdef CONFIG_CPU_BIG_ENDIAN | 163 | #ifdef CONFIG_CPU_BIG_ENDIAN |
156 | unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; | 164 | unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8; |
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h index f393b663413e..2ad77fb43639 100644 --- a/arch/arc/include/asm/cache.h +++ b/arch/arc/include/asm/cache.h | |||
@@ -52,6 +52,17 @@ | |||
52 | #define cache_line_size() SMP_CACHE_BYTES | 52 | #define cache_line_size() SMP_CACHE_BYTES |
53 | #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES | 53 | #define ARCH_DMA_MINALIGN SMP_CACHE_BYTES |
54 | 54 | ||
55 | /* | ||
56 | * Make sure slab-allocated buffers are 64-bit aligned when atomic64_t uses | ||
57 | * ARCv2 64-bit atomics (LLOCKD/SCONDD). This guarantess runtime 64-bit | ||
58 | * alignment for any atomic64_t embedded in buffer. | ||
59 | * Default ARCH_SLAB_MINALIGN is __alignof__(long long) which has a relaxed | ||
60 | * value of 4 (and not 8) in ARC ABI. | ||
61 | */ | ||
62 | #if defined(CONFIG_ARC_HAS_LL64) && defined(CONFIG_ARC_HAS_LLSC) | ||
63 | #define ARCH_SLAB_MINALIGN 8 | ||
64 | #endif | ||
65 | |||
55 | extern void arc_cache_init(void); | 66 | extern void arc_cache_init(void); |
56 | extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); | 67 | extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len); |
57 | extern void read_decode_cache_bcr(void); | 68 | extern void read_decode_cache_bcr(void); |
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h index 309f4e6721b3..225e7df2d8ed 100644 --- a/arch/arc/include/asm/entry-arcv2.h +++ b/arch/arc/include/asm/entry-arcv2.h | |||
@@ -17,6 +17,33 @@ | |||
17 | ; | 17 | ; |
18 | ; Now manually save: r12, sp, fp, gp, r25 | 18 | ; Now manually save: r12, sp, fp, gp, r25 |
19 | 19 | ||
20 | #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE | ||
21 | .ifnc \called_from, exception | ||
22 | st.as r9, [sp, -10] ; save r9 in it's final stack slot | ||
23 | sub sp, sp, 12 ; skip JLI, LDI, EI | ||
24 | |||
25 | PUSH lp_count | ||
26 | PUSHAX lp_start | ||
27 | PUSHAX lp_end | ||
28 | PUSH blink | ||
29 | |||
30 | PUSH r11 | ||
31 | PUSH r10 | ||
32 | |||
33 | sub sp, sp, 4 ; skip r9 | ||
34 | |||
35 | PUSH r8 | ||
36 | PUSH r7 | ||
37 | PUSH r6 | ||
38 | PUSH r5 | ||
39 | PUSH r4 | ||
40 | PUSH r3 | ||
41 | PUSH r2 | ||
42 | PUSH r1 | ||
43 | PUSH r0 | ||
44 | .endif | ||
45 | #endif | ||
46 | |||
20 | #ifdef CONFIG_ARC_HAS_ACCL_REGS | 47 | #ifdef CONFIG_ARC_HAS_ACCL_REGS |
21 | PUSH r59 | 48 | PUSH r59 |
22 | PUSH r58 | 49 | PUSH r58 |
@@ -86,6 +113,33 @@ | |||
86 | POP r59 | 113 | POP r59 |
87 | #endif | 114 | #endif |
88 | 115 | ||
116 | #ifdef CONFIG_ARC_IRQ_NO_AUTOSAVE | ||
117 | .ifnc \called_from, exception | ||
118 | POP r0 | ||
119 | POP r1 | ||
120 | POP r2 | ||
121 | POP r3 | ||
122 | POP r4 | ||
123 | POP r5 | ||
124 | POP r6 | ||
125 | POP r7 | ||
126 | POP r8 | ||
127 | POP r9 | ||
128 | POP r10 | ||
129 | POP r11 | ||
130 | |||
131 | POP blink | ||
132 | POPAX lp_end | ||
133 | POPAX lp_start | ||
134 | |||
135 | POP r9 | ||
136 | mov lp_count, r9 | ||
137 | |||
138 | add sp, sp, 12 ; skip JLI, LDI, EI | ||
139 | ld.as r9, [sp, -10] ; reload r9 which got clobbered | ||
140 | .endif | ||
141 | #endif | ||
142 | |||
89 | .endm | 143 | .endm |
90 | 144 | ||
91 | /*------------------------------------------------------------------------*/ | 145 | /*------------------------------------------------------------------------*/ |
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index c9173c02081c..eabc3efa6c6d 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h | |||
@@ -207,7 +207,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) | |||
207 | */ | 207 | */ |
208 | "=&r" (tmp), "+r" (to), "+r" (from) | 208 | "=&r" (tmp), "+r" (to), "+r" (from) |
209 | : | 209 | : |
210 | : "lp_count", "lp_start", "lp_end", "memory"); | 210 | : "lp_count", "memory"); |
211 | 211 | ||
212 | return n; | 212 | return n; |
213 | } | 213 | } |
@@ -433,7 +433,7 @@ raw_copy_to_user(void __user *to, const void *from, unsigned long n) | |||
433 | */ | 433 | */ |
434 | "=&r" (tmp), "+r" (to), "+r" (from) | 434 | "=&r" (tmp), "+r" (to), "+r" (from) |
435 | : | 435 | : |
436 | : "lp_count", "lp_start", "lp_end", "memory"); | 436 | : "lp_count", "memory"); |
437 | 437 | ||
438 | return n; | 438 | return n; |
439 | } | 439 | } |
@@ -653,7 +653,7 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n) | |||
653 | " .previous \n" | 653 | " .previous \n" |
654 | : "+r"(d_char), "+r"(res) | 654 | : "+r"(d_char), "+r"(res) |
655 | : "i"(0) | 655 | : "i"(0) |
656 | : "lp_count", "lp_start", "lp_end", "memory"); | 656 | : "lp_count", "memory"); |
657 | 657 | ||
658 | return res; | 658 | return res; |
659 | } | 659 | } |
@@ -686,7 +686,7 @@ __arc_strncpy_from_user(char *dst, const char __user *src, long count) | |||
686 | " .previous \n" | 686 | " .previous \n" |
687 | : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) | 687 | : "+r"(res), "+r"(dst), "+r"(src), "=r"(val) |
688 | : "g"(-EFAULT), "r"(count) | 688 | : "g"(-EFAULT), "r"(count) |
689 | : "lp_count", "lp_start", "lp_end", "memory"); | 689 | : "lp_count", "memory"); |
690 | 690 | ||
691 | return res; | 691 | return res; |
692 | } | 692 | } |
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S index cc558a25b8fa..562089d62d9d 100644 --- a/arch/arc/kernel/entry-arcv2.S +++ b/arch/arc/kernel/entry-arcv2.S | |||
@@ -209,7 +209,9 @@ restore_regs: | |||
209 | ;####### Return from Intr ####### | 209 | ;####### Return from Intr ####### |
210 | 210 | ||
211 | debug_marker_l1: | 211 | debug_marker_l1: |
212 | bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot | 212 | ; bbit1.nt r0, STATUS_DE_BIT, .Lintr_ret_to_delay_slot |
213 | btst r0, STATUS_DE_BIT ; Z flag set if bit clear | ||
214 | bnz .Lintr_ret_to_delay_slot ; branch if STATUS_DE_BIT set | ||
213 | 215 | ||
214 | .Lisr_ret_fast_path: | 216 | .Lisr_ret_fast_path: |
215 | ; Handle special case #1: (Entry via Exception, Return via IRQ) | 217 | ; Handle special case #1: (Entry via Exception, Return via IRQ) |
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S index 8b90d25a15cc..30e090625916 100644 --- a/arch/arc/kernel/head.S +++ b/arch/arc/kernel/head.S | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/entry.h> | 17 | #include <asm/entry.h> |
18 | #include <asm/arcregs.h> | 18 | #include <asm/arcregs.h> |
19 | #include <asm/cache.h> | 19 | #include <asm/cache.h> |
20 | #include <asm/irqflags.h> | ||
20 | 21 | ||
21 | .macro CPU_EARLY_SETUP | 22 | .macro CPU_EARLY_SETUP |
22 | 23 | ||
@@ -47,6 +48,15 @@ | |||
47 | sr r5, [ARC_REG_DC_CTRL] | 48 | sr r5, [ARC_REG_DC_CTRL] |
48 | 49 | ||
49 | 1: | 50 | 1: |
51 | |||
52 | #ifdef CONFIG_ISA_ARCV2 | ||
53 | ; Unaligned access is disabled at reset, so re-enable early as | ||
54 | ; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access | ||
55 | ; by default | ||
56 | lr r5, [status32] | ||
57 | bset r5, r5, STATUS_AD_BIT | ||
58 | kflag r5 | ||
59 | #endif | ||
50 | .endm | 60 | .endm |
51 | 61 | ||
52 | .section .init.text, "ax",@progbits | 62 | .section .init.text, "ax",@progbits |
@@ -90,15 +100,13 @@ ENTRY(stext) | |||
90 | st.ab 0, [r5, 4] | 100 | st.ab 0, [r5, 4] |
91 | 1: | 101 | 1: |
92 | 102 | ||
93 | #ifdef CONFIG_ARC_UBOOT_SUPPORT | ||
94 | ; Uboot - kernel ABI | 103 | ; Uboot - kernel ABI |
95 | ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 | 104 | ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 |
96 | ; r1 = magic number (board identity, unused as of now | 105 | ; r1 = magic number (always zero as of now) |
97 | ; r2 = pointer to uboot provided cmdline or external DTB in mem | 106 | ; r2 = pointer to uboot provided cmdline or external DTB in mem |
98 | ; These are handled later in setup_arch() | 107 | ; These are handled later in handle_uboot_args() |
99 | st r0, [@uboot_tag] | 108 | st r0, [@uboot_tag] |
100 | st r2, [@uboot_arg] | 109 | st r2, [@uboot_arg] |
101 | #endif | ||
102 | 110 | ||
103 | ; setup "current" tsk and optionally cache it in dedicated r25 | 111 | ; setup "current" tsk and optionally cache it in dedicated r25 |
104 | mov r9, @init_task | 112 | mov r9, @init_task |
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c index 067ea362fb3e..cf18b3e5a934 100644 --- a/arch/arc/kernel/intc-arcv2.c +++ b/arch/arc/kernel/intc-arcv2.c | |||
@@ -49,11 +49,13 @@ void arc_init_IRQ(void) | |||
49 | 49 | ||
50 | *(unsigned int *)&ictrl = 0; | 50 | *(unsigned int *)&ictrl = 0; |
51 | 51 | ||
52 | #ifndef CONFIG_ARC_IRQ_NO_AUTOSAVE | ||
52 | ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */ | 53 | ictrl.save_nr_gpr_pairs = 6; /* r0 to r11 (r12 saved manually) */ |
53 | ictrl.save_blink = 1; | 54 | ictrl.save_blink = 1; |
54 | ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */ | 55 | ictrl.save_lp_regs = 1; /* LP_COUNT, LP_START, LP_END */ |
55 | ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */ | 56 | ictrl.save_u_to_u = 0; /* user ctxt saved on kernel stack */ |
56 | ictrl.save_idx_regs = 1; /* JLI, LDI, EI */ | 57 | ictrl.save_idx_regs = 1; /* JLI, LDI, EI */ |
58 | #endif | ||
57 | 59 | ||
58 | WRITE_AUX(AUX_IRQ_CTRL, ictrl); | 60 | WRITE_AUX(AUX_IRQ_CTRL, ictrl); |
59 | 61 | ||
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index feb90093e6b1..7b2340996cf8 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c | |||
@@ -199,20 +199,36 @@ static void read_arc_build_cfg_regs(void) | |||
199 | cpu->bpu.ret_stk = 4 << bpu.rse; | 199 | cpu->bpu.ret_stk = 4 << bpu.rse; |
200 | 200 | ||
201 | if (cpu->core.family >= 0x54) { | 201 | if (cpu->core.family >= 0x54) { |
202 | unsigned int exec_ctrl; | ||
203 | 202 | ||
204 | READ_BCR(AUX_EXEC_CTRL, exec_ctrl); | 203 | struct bcr_uarch_build_arcv2 uarch; |
205 | cpu->extn.dual_enb = !(exec_ctrl & 1); | ||
206 | 204 | ||
207 | /* dual issue always present for this core */ | 205 | /* |
208 | cpu->extn.dual = 1; | 206 | * The first 0x54 core (uarch maj:min 0:1 or 0:2) was |
207 | * dual issue only (HS4x). But next uarch rev (1:0) | ||
208 | * allows it be configured for single issue (HS3x) | ||
209 | * Ensure we fiddle with dual issue only on HS4x | ||
210 | */ | ||
211 | READ_BCR(ARC_REG_MICRO_ARCH_BCR, uarch); | ||
212 | |||
213 | if (uarch.prod == 4) { | ||
214 | unsigned int exec_ctrl; | ||
215 | |||
216 | /* dual issue hardware always present */ | ||
217 | cpu->extn.dual = 1; | ||
218 | |||
219 | READ_BCR(AUX_EXEC_CTRL, exec_ctrl); | ||
220 | |||
221 | /* dual issue hardware enabled ? */ | ||
222 | cpu->extn.dual_enb = !(exec_ctrl & 1); | ||
223 | |||
224 | } | ||
209 | } | 225 | } |
210 | } | 226 | } |
211 | 227 | ||
212 | READ_BCR(ARC_REG_AP_BCR, ap); | 228 | READ_BCR(ARC_REG_AP_BCR, ap); |
213 | if (ap.ver) { | 229 | if (ap.ver) { |
214 | cpu->extn.ap_num = 2 << ap.num; | 230 | cpu->extn.ap_num = 2 << ap.num; |
215 | cpu->extn.ap_full = !!ap.min; | 231 | cpu->extn.ap_full = !ap.min; |
216 | } | 232 | } |
217 | 233 | ||
218 | READ_BCR(ARC_REG_SMART_BCR, bcr); | 234 | READ_BCR(ARC_REG_SMART_BCR, bcr); |
@@ -462,43 +478,78 @@ void setup_processor(void) | |||
462 | arc_chk_core_config(); | 478 | arc_chk_core_config(); |
463 | } | 479 | } |
464 | 480 | ||
465 | static inline int is_kernel(unsigned long addr) | 481 | static inline bool uboot_arg_invalid(unsigned long addr) |
466 | { | 482 | { |
467 | if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) | 483 | /* |
468 | return 1; | 484 | * Check that it is a untranslated address (although MMU is not enabled |
469 | return 0; | 485 | * yet, it being a high address ensures this is not by fluke) |
486 | */ | ||
487 | if (addr < PAGE_OFFSET) | ||
488 | return true; | ||
489 | |||
490 | /* Check that address doesn't clobber resident kernel image */ | ||
491 | return addr >= (unsigned long)_stext && addr <= (unsigned long)_end; | ||
470 | } | 492 | } |
471 | 493 | ||
472 | void __init setup_arch(char **cmdline_p) | 494 | #define IGNORE_ARGS "Ignore U-boot args: " |
495 | |||
496 | /* uboot_tag values for U-boot - kernel ABI revision 0; see head.S */ | ||
497 | #define UBOOT_TAG_NONE 0 | ||
498 | #define UBOOT_TAG_CMDLINE 1 | ||
499 | #define UBOOT_TAG_DTB 2 | ||
500 | |||
501 | void __init handle_uboot_args(void) | ||
473 | { | 502 | { |
474 | #ifdef CONFIG_ARC_UBOOT_SUPPORT | 503 | bool use_embedded_dtb = true; |
475 | /* make sure that uboot passed pointer to cmdline/dtb is valid */ | 504 | bool append_cmdline = false; |
476 | if (uboot_tag && is_kernel((unsigned long)uboot_arg)) | 505 | |
477 | panic("Invalid uboot arg\n"); | 506 | /* check that we know this tag */ |
478 | 507 | if (uboot_tag != UBOOT_TAG_NONE && | |
479 | /* See if u-boot passed an external Device Tree blob */ | 508 | uboot_tag != UBOOT_TAG_CMDLINE && |
480 | machine_desc = setup_machine_fdt(uboot_arg); /* uboot_tag == 2 */ | 509 | uboot_tag != UBOOT_TAG_DTB) { |
481 | if (!machine_desc) | 510 | pr_warn(IGNORE_ARGS "invalid uboot tag: '%08x'\n", uboot_tag); |
482 | #endif | 511 | goto ignore_uboot_args; |
483 | { | 512 | } |
484 | /* No, so try the embedded one */ | 513 | |
514 | if (uboot_tag != UBOOT_TAG_NONE && | ||
515 | uboot_arg_invalid((unsigned long)uboot_arg)) { | ||
516 | pr_warn(IGNORE_ARGS "invalid uboot arg: '%px'\n", uboot_arg); | ||
517 | goto ignore_uboot_args; | ||
518 | } | ||
519 | |||
520 | /* see if U-boot passed an external Device Tree blob */ | ||
521 | if (uboot_tag == UBOOT_TAG_DTB) { | ||
522 | machine_desc = setup_machine_fdt((void *)uboot_arg); | ||
523 | |||
524 | /* external Device Tree blob is invalid - use embedded one */ | ||
525 | use_embedded_dtb = !machine_desc; | ||
526 | } | ||
527 | |||
528 | if (uboot_tag == UBOOT_TAG_CMDLINE) | ||
529 | append_cmdline = true; | ||
530 | |||
531 | ignore_uboot_args: | ||
532 | |||
533 | if (use_embedded_dtb) { | ||
485 | machine_desc = setup_machine_fdt(__dtb_start); | 534 | machine_desc = setup_machine_fdt(__dtb_start); |
486 | if (!machine_desc) | 535 | if (!machine_desc) |
487 | panic("Embedded DT invalid\n"); | 536 | panic("Embedded DT invalid\n"); |
537 | } | ||
488 | 538 | ||
489 | /* | 539 | /* |
490 | * If we are here, it is established that @uboot_arg didn't | 540 | * NOTE: @boot_command_line is populated by setup_machine_fdt() so this |
491 | * point to DT blob. Instead if u-boot says it is cmdline, | 541 | * append processing can only happen after. |
492 | * append to embedded DT cmdline. | 542 | */ |
493 | * setup_machine_fdt() would have populated @boot_command_line | 543 | if (append_cmdline) { |
494 | */ | 544 | /* Ensure a whitespace between the 2 cmdlines */ |
495 | if (uboot_tag == 1) { | 545 | strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); |
496 | /* Ensure a whitespace between the 2 cmdlines */ | 546 | strlcat(boot_command_line, uboot_arg, COMMAND_LINE_SIZE); |
497 | strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); | ||
498 | strlcat(boot_command_line, uboot_arg, | ||
499 | COMMAND_LINE_SIZE); | ||
500 | } | ||
501 | } | 547 | } |
548 | } | ||
549 | |||
550 | void __init setup_arch(char **cmdline_p) | ||
551 | { | ||
552 | handle_uboot_args(); | ||
502 | 553 | ||
503 | /* Save unparsed command line copy for /proc/cmdline */ | 554 | /* Save unparsed command line copy for /proc/cmdline */ |
504 | *cmdline_p = boot_command_line; | 555 | *cmdline_p = boot_command_line; |
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S index d61044dd8b58..ea14b0bf3116 100644 --- a/arch/arc/lib/memcpy-archs.S +++ b/arch/arc/lib/memcpy-archs.S | |||
@@ -25,15 +25,11 @@ | |||
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | #ifdef CONFIG_ARC_HAS_LL64 | 27 | #ifdef CONFIG_ARC_HAS_LL64 |
28 | # define PREFETCH_READ(RX) prefetch [RX, 56] | ||
29 | # define PREFETCH_WRITE(RX) prefetchw [RX, 64] | ||
30 | # define LOADX(DST,RX) ldd.ab DST, [RX, 8] | 28 | # define LOADX(DST,RX) ldd.ab DST, [RX, 8] |
31 | # define STOREX(SRC,RX) std.ab SRC, [RX, 8] | 29 | # define STOREX(SRC,RX) std.ab SRC, [RX, 8] |
32 | # define ZOLSHFT 5 | 30 | # define ZOLSHFT 5 |
33 | # define ZOLAND 0x1F | 31 | # define ZOLAND 0x1F |
34 | #else | 32 | #else |
35 | # define PREFETCH_READ(RX) prefetch [RX, 28] | ||
36 | # define PREFETCH_WRITE(RX) prefetchw [RX, 32] | ||
37 | # define LOADX(DST,RX) ld.ab DST, [RX, 4] | 33 | # define LOADX(DST,RX) ld.ab DST, [RX, 4] |
38 | # define STOREX(SRC,RX) st.ab SRC, [RX, 4] | 34 | # define STOREX(SRC,RX) st.ab SRC, [RX, 4] |
39 | # define ZOLSHFT 4 | 35 | # define ZOLSHFT 4 |
@@ -41,8 +37,6 @@ | |||
41 | #endif | 37 | #endif |
42 | 38 | ||
43 | ENTRY_CFI(memcpy) | 39 | ENTRY_CFI(memcpy) |
44 | prefetch [r1] ; Prefetch the read location | ||
45 | prefetchw [r0] ; Prefetch the write location | ||
46 | mov.f 0, r2 | 40 | mov.f 0, r2 |
47 | ;;; if size is zero | 41 | ;;; if size is zero |
48 | jz.d [blink] | 42 | jz.d [blink] |
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy) | |||
72 | lpnz @.Lcopy32_64bytes | 66 | lpnz @.Lcopy32_64bytes |
73 | ;; LOOP START | 67 | ;; LOOP START |
74 | LOADX (r6, r1) | 68 | LOADX (r6, r1) |
75 | PREFETCH_READ (r1) | ||
76 | PREFETCH_WRITE (r3) | ||
77 | LOADX (r8, r1) | 69 | LOADX (r8, r1) |
78 | LOADX (r10, r1) | 70 | LOADX (r10, r1) |
79 | LOADX (r4, r1) | 71 | LOADX (r4, r1) |
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy) | |||
117 | lpnz @.Lcopy8bytes_1 | 109 | lpnz @.Lcopy8bytes_1 |
118 | ;; LOOP START | 110 | ;; LOOP START |
119 | ld.ab r6, [r1, 4] | 111 | ld.ab r6, [r1, 4] |
120 | prefetch [r1, 28] ;Prefetch the next read location | ||
121 | ld.ab r8, [r1,4] | 112 | ld.ab r8, [r1,4] |
122 | prefetchw [r3, 32] ;Prefetch the next write location | ||
123 | 113 | ||
124 | SHIFT_1 (r7, r6, 24) | 114 | SHIFT_1 (r7, r6, 24) |
125 | or r7, r7, r5 | 115 | or r7, r7, r5 |
@@ -162,9 +152,7 @@ ENTRY_CFI(memcpy) | |||
162 | lpnz @.Lcopy8bytes_2 | 152 | lpnz @.Lcopy8bytes_2 |
163 | ;; LOOP START | 153 | ;; LOOP START |
164 | ld.ab r6, [r1, 4] | 154 | ld.ab r6, [r1, 4] |
165 | prefetch [r1, 28] ;Prefetch the next read location | ||
166 | ld.ab r8, [r1,4] | 155 | ld.ab r8, [r1,4] |
167 | prefetchw [r3, 32] ;Prefetch the next write location | ||
168 | 156 | ||
169 | SHIFT_1 (r7, r6, 16) | 157 | SHIFT_1 (r7, r6, 16) |
170 | or r7, r7, r5 | 158 | or r7, r7, r5 |
@@ -204,9 +192,7 @@ ENTRY_CFI(memcpy) | |||
204 | lpnz @.Lcopy8bytes_3 | 192 | lpnz @.Lcopy8bytes_3 |
205 | ;; LOOP START | 193 | ;; LOOP START |
206 | ld.ab r6, [r1, 4] | 194 | ld.ab r6, [r1, 4] |
207 | prefetch [r1, 28] ;Prefetch the next read location | ||
208 | ld.ab r8, [r1,4] | 195 | ld.ab r8, [r1,4] |
209 | prefetchw [r3, 32] ;Prefetch the next write location | ||
210 | 196 | ||
211 | SHIFT_1 (r7, r6, 8) | 197 | SHIFT_1 (r7, r6, 8) |
212 | or r7, r7, r5 | 198 | or r7, r7, r5 |
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index f25c085b9874..23e00216e5a5 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig | |||
@@ -9,6 +9,7 @@ menuconfig ARC_SOC_HSDK | |||
9 | bool "ARC HS Development Kit SOC" | 9 | bool "ARC HS Development Kit SOC" |
10 | depends on ISA_ARCV2 | 10 | depends on ISA_ARCV2 |
11 | select ARC_HAS_ACCL_REGS | 11 | select ARC_HAS_ACCL_REGS |
12 | select ARC_IRQ_NO_AUTOSAVE | ||
12 | select CLK_HSDK | 13 | select CLK_HSDK |
13 | select RESET_HSDK | 14 | select RESET_HSDK |
14 | select HAVE_PCI | 15 | select HAVE_PCI |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 664e918e2624..26524b75970a 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -1400,6 +1400,7 @@ config NR_CPUS | |||
1400 | config HOTPLUG_CPU | 1400 | config HOTPLUG_CPU |
1401 | bool "Support for hot-pluggable CPUs" | 1401 | bool "Support for hot-pluggable CPUs" |
1402 | depends on SMP | 1402 | depends on SMP |
1403 | select GENERIC_IRQ_MIGRATION | ||
1403 | help | 1404 | help |
1404 | Say Y here to experiment with turning CPUs off and on. CPUs | 1405 | Say Y here to experiment with turning CPUs off and on. CPUs |
1405 | can be controlled through /sys/devices/system/cpu. | 1406 | can be controlled through /sys/devices/system/cpu. |
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts index b67f5fee1469..dce5be5df97b 100644 --- a/arch/arm/boot/dts/am335x-evm.dts +++ b/arch/arm/boot/dts/am335x-evm.dts | |||
@@ -729,7 +729,7 @@ | |||
729 | 729 | ||
730 | &cpsw_emac0 { | 730 | &cpsw_emac0 { |
731 | phy-handle = <ðphy0>; | 731 | phy-handle = <ðphy0>; |
732 | phy-mode = "rgmii-txid"; | 732 | phy-mode = "rgmii-id"; |
733 | }; | 733 | }; |
734 | 734 | ||
735 | &tscadc { | 735 | &tscadc { |
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts index 172c0224e7f6..b128998097ce 100644 --- a/arch/arm/boot/dts/am335x-evmsk.dts +++ b/arch/arm/boot/dts/am335x-evmsk.dts | |||
@@ -651,13 +651,13 @@ | |||
651 | 651 | ||
652 | &cpsw_emac0 { | 652 | &cpsw_emac0 { |
653 | phy-handle = <ðphy0>; | 653 | phy-handle = <ðphy0>; |
654 | phy-mode = "rgmii-txid"; | 654 | phy-mode = "rgmii-id"; |
655 | dual_emac_res_vlan = <1>; | 655 | dual_emac_res_vlan = <1>; |
656 | }; | 656 | }; |
657 | 657 | ||
658 | &cpsw_emac1 { | 658 | &cpsw_emac1 { |
659 | phy-handle = <ðphy1>; | 659 | phy-handle = <ðphy1>; |
660 | phy-mode = "rgmii-txid"; | 660 | phy-mode = "rgmii-id"; |
661 | dual_emac_res_vlan = <2>; | 661 | dual_emac_res_vlan = <2>; |
662 | }; | 662 | }; |
663 | 663 | ||
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts index d0fd68873689..5b250060f6dd 100644 --- a/arch/arm/boot/dts/am335x-shc.dts +++ b/arch/arm/boot/dts/am335x-shc.dts | |||
@@ -215,7 +215,7 @@ | |||
215 | pinctrl-names = "default"; | 215 | pinctrl-names = "default"; |
216 | pinctrl-0 = <&mmc1_pins>; | 216 | pinctrl-0 = <&mmc1_pins>; |
217 | bus-width = <0x4>; | 217 | bus-width = <0x4>; |
218 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; | 218 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; |
219 | cd-inverted; | 219 | cd-inverted; |
220 | max-frequency = <26000000>; | 220 | max-frequency = <26000000>; |
221 | vmmc-supply = <&vmmcsd_fixed>; | 221 | vmmc-supply = <&vmmcsd_fixed>; |
diff --git a/arch/arm/boot/dts/armada-xp-db.dts b/arch/arm/boot/dts/armada-xp-db.dts index f3ac7483afed..5d04dc68cf57 100644 --- a/arch/arm/boot/dts/armada-xp-db.dts +++ b/arch/arm/boot/dts/armada-xp-db.dts | |||
@@ -144,30 +144,32 @@ | |||
144 | status = "okay"; | 144 | status = "okay"; |
145 | }; | 145 | }; |
146 | 146 | ||
147 | nand@d0000 { | 147 | nand-controller@d0000 { |
148 | status = "okay"; | 148 | status = "okay"; |
149 | label = "pxa3xx_nand-0"; | ||
150 | num-cs = <1>; | ||
151 | marvell,nand-keep-config; | ||
152 | nand-on-flash-bbt; | ||
153 | |||
154 | partitions { | ||
155 | compatible = "fixed-partitions"; | ||
156 | #address-cells = <1>; | ||
157 | #size-cells = <1>; | ||
158 | |||
159 | partition@0 { | ||
160 | label = "U-Boot"; | ||
161 | reg = <0 0x800000>; | ||
162 | }; | ||
163 | partition@800000 { | ||
164 | label = "Linux"; | ||
165 | reg = <0x800000 0x800000>; | ||
166 | }; | ||
167 | partition@1000000 { | ||
168 | label = "Filesystem"; | ||
169 | reg = <0x1000000 0x3f000000>; | ||
170 | 149 | ||
150 | nand@0 { | ||
151 | reg = <0>; | ||
152 | label = "pxa3xx_nand-0"; | ||
153 | nand-rb = <0>; | ||
154 | nand-on-flash-bbt; | ||
155 | |||
156 | partitions { | ||
157 | compatible = "fixed-partitions"; | ||
158 | #address-cells = <1>; | ||
159 | #size-cells = <1>; | ||
160 | |||
161 | partition@0 { | ||
162 | label = "U-Boot"; | ||
163 | reg = <0 0x800000>; | ||
164 | }; | ||
165 | partition@800000 { | ||
166 | label = "Linux"; | ||
167 | reg = <0x800000 0x800000>; | ||
168 | }; | ||
169 | partition@1000000 { | ||
170 | label = "Filesystem"; | ||
171 | reg = <0x1000000 0x3f000000>; | ||
172 | }; | ||
171 | }; | 173 | }; |
172 | }; | 174 | }; |
173 | }; | 175 | }; |
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts index 1139e9469a83..b4cca507cf13 100644 --- a/arch/arm/boot/dts/armada-xp-gp.dts +++ b/arch/arm/boot/dts/armada-xp-gp.dts | |||
@@ -160,12 +160,15 @@ | |||
160 | status = "okay"; | 160 | status = "okay"; |
161 | }; | 161 | }; |
162 | 162 | ||
163 | nand@d0000 { | 163 | nand-controller@d0000 { |
164 | status = "okay"; | 164 | status = "okay"; |
165 | label = "pxa3xx_nand-0"; | 165 | |
166 | num-cs = <1>; | 166 | nand@0 { |
167 | marvell,nand-keep-config; | 167 | reg = <0>; |
168 | nand-on-flash-bbt; | 168 | label = "pxa3xx_nand-0"; |
169 | nand-rb = <0>; | ||
170 | nand-on-flash-bbt; | ||
171 | }; | ||
169 | }; | 172 | }; |
170 | }; | 173 | }; |
171 | 174 | ||
diff --git a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts index bbbb38888bb8..87dcb502f72d 100644 --- a/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts +++ b/arch/arm/boot/dts/armada-xp-lenovo-ix4-300d.dts | |||
@@ -81,49 +81,52 @@ | |||
81 | 81 | ||
82 | }; | 82 | }; |
83 | 83 | ||
84 | nand@d0000 { | 84 | nand-controller@d0000 { |
85 | status = "okay"; | 85 | status = "okay"; |
86 | label = "pxa3xx_nand-0"; | ||
87 | num-cs = <1>; | ||
88 | marvell,nand-keep-config; | ||
89 | nand-on-flash-bbt; | ||
90 | |||
91 | partitions { | ||
92 | compatible = "fixed-partitions"; | ||
93 | #address-cells = <1>; | ||
94 | #size-cells = <1>; | ||
95 | |||
96 | partition@0 { | ||
97 | label = "u-boot"; | ||
98 | reg = <0x00000000 0x000e0000>; | ||
99 | read-only; | ||
100 | }; | ||
101 | |||
102 | partition@e0000 { | ||
103 | label = "u-boot-env"; | ||
104 | reg = <0x000e0000 0x00020000>; | ||
105 | read-only; | ||
106 | }; | ||
107 | |||
108 | partition@100000 { | ||
109 | label = "u-boot-env2"; | ||
110 | reg = <0x00100000 0x00020000>; | ||
111 | read-only; | ||
112 | }; | ||
113 | |||
114 | partition@120000 { | ||
115 | label = "zImage"; | ||
116 | reg = <0x00120000 0x00400000>; | ||
117 | }; | ||
118 | |||
119 | partition@520000 { | ||
120 | label = "initrd"; | ||
121 | reg = <0x00520000 0x00400000>; | ||
122 | }; | ||
123 | 86 | ||
124 | partition@e00000 { | 87 | nand@0 { |
125 | label = "boot"; | 88 | reg = <0>; |
126 | reg = <0x00e00000 0x3f200000>; | 89 | label = "pxa3xx_nand-0"; |
90 | nand-rb = <0>; | ||
91 | nand-on-flash-bbt; | ||
92 | |||
93 | partitions { | ||
94 | compatible = "fixed-partitions"; | ||
95 | #address-cells = <1>; | ||
96 | #size-cells = <1>; | ||
97 | |||
98 | partition@0 { | ||
99 | label = "u-boot"; | ||
100 | reg = <0x00000000 0x000e0000>; | ||
101 | read-only; | ||
102 | }; | ||
103 | |||
104 | partition@e0000 { | ||
105 | label = "u-boot-env"; | ||
106 | reg = <0x000e0000 0x00020000>; | ||
107 | read-only; | ||
108 | }; | ||
109 | |||
110 | partition@100000 { | ||
111 | label = "u-boot-env2"; | ||
112 | reg = <0x00100000 0x00020000>; | ||
113 | read-only; | ||
114 | }; | ||
115 | |||
116 | partition@120000 { | ||
117 | label = "zImage"; | ||
118 | reg = <0x00120000 0x00400000>; | ||
119 | }; | ||
120 | |||
121 | partition@520000 { | ||
122 | label = "initrd"; | ||
123 | reg = <0x00520000 0x00400000>; | ||
124 | }; | ||
125 | |||
126 | partition@e00000 { | ||
127 | label = "boot"; | ||
128 | reg = <0x00e00000 0x3f200000>; | ||
129 | }; | ||
127 | }; | 130 | }; |
128 | }; | 131 | }; |
129 | }; | 132 | }; |
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index 47aa53ba6b92..559659b399d0 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi | |||
@@ -476,7 +476,7 @@ | |||
476 | clocksource: timer@20000 { | 476 | clocksource: timer@20000 { |
477 | compatible = "ti,da830-timer"; | 477 | compatible = "ti,da830-timer"; |
478 | reg = <0x20000 0x1000>; | 478 | reg = <0x20000 0x1000>; |
479 | interrupts = <12>, <13>; | 479 | interrupts = <21>, <22>; |
480 | interrupt-names = "tint12", "tint34"; | 480 | interrupt-names = "tint12", "tint34"; |
481 | clocks = <&pll0_auxclk>; | 481 | clocks = <&pll0_auxclk>; |
482 | }; | 482 | }; |
diff --git a/arch/arm/boot/dts/imx6q-pistachio.dts b/arch/arm/boot/dts/imx6q-pistachio.dts index 5edf858c8b86..a31b17eaf51c 100644 --- a/arch/arm/boot/dts/imx6q-pistachio.dts +++ b/arch/arm/boot/dts/imx6q-pistachio.dts | |||
@@ -103,7 +103,7 @@ | |||
103 | power { | 103 | power { |
104 | label = "Power Button"; | 104 | label = "Power Button"; |
105 | gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; | 105 | gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; |
106 | gpio-key,wakeup; | 106 | wakeup-source; |
107 | linux,code = <KEY_POWER>; | 107 | linux,code = <KEY_POWER>; |
108 | }; | 108 | }; |
109 | }; | 109 | }; |
diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts index d8163705363e..4a31a415f88e 100644 --- a/arch/arm/boot/dts/imx6sll-evk.dts +++ b/arch/arm/boot/dts/imx6sll-evk.dts | |||
@@ -309,7 +309,7 @@ | |||
309 | pinctrl-2 = <&pinctrl_usdhc3_200mhz>; | 309 | pinctrl-2 = <&pinctrl_usdhc3_200mhz>; |
310 | cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; | 310 | cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; |
311 | keep-power-in-suspend; | 311 | keep-power-in-suspend; |
312 | enable-sdio-wakeup; | 312 | wakeup-source; |
313 | vmmc-supply = <®_sd3_vmmc>; | 313 | vmmc-supply = <®_sd3_vmmc>; |
314 | status = "okay"; | 314 | status = "okay"; |
315 | }; | 315 | }; |
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 272ff6133ec1..d1375d3650fd 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi | |||
@@ -467,7 +467,7 @@ | |||
467 | }; | 467 | }; |
468 | 468 | ||
469 | gpt: gpt@2098000 { | 469 | gpt: gpt@2098000 { |
470 | compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt"; | 470 | compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt"; |
471 | reg = <0x02098000 0x4000>; | 471 | reg = <0x02098000 0x4000>; |
472 | interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; | 472 | interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; |
473 | clocks = <&clks IMX6SX_CLK_GPT_BUS>, | 473 | clocks = <&clks IMX6SX_CLK_GPT_BUS>, |
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index e4645f612712..2ab74860d962 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi | |||
@@ -274,7 +274,7 @@ | |||
274 | compatible = "amlogic,meson6-dwmac", "snps,dwmac"; | 274 | compatible = "amlogic,meson6-dwmac", "snps,dwmac"; |
275 | reg = <0xc9410000 0x10000 | 275 | reg = <0xc9410000 0x10000 |
276 | 0xc1108108 0x4>; | 276 | 0xc1108108 0x4>; |
277 | interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>; | 277 | interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; |
278 | interrupt-names = "macirq"; | 278 | interrupt-names = "macirq"; |
279 | status = "disabled"; | 279 | status = "disabled"; |
280 | }; | 280 | }; |
diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts index 0872f6e3abf5..d50fc2f60fa3 100644 --- a/arch/arm/boot/dts/meson8b-ec100.dts +++ b/arch/arm/boot/dts/meson8b-ec100.dts | |||
@@ -205,8 +205,7 @@ | |||
205 | cap-sd-highspeed; | 205 | cap-sd-highspeed; |
206 | disable-wp; | 206 | disable-wp; |
207 | 207 | ||
208 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 208 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
209 | cd-inverted; | ||
210 | 209 | ||
211 | vmmc-supply = <&vcc_3v3>; | 210 | vmmc-supply = <&vcc_3v3>; |
212 | }; | 211 | }; |
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts index 58669abda259..0f0a46ddf3ff 100644 --- a/arch/arm/boot/dts/meson8b-odroidc1.dts +++ b/arch/arm/boot/dts/meson8b-odroidc1.dts | |||
@@ -221,7 +221,6 @@ | |||
221 | /* Realtek RTL8211F (0x001cc916) */ | 221 | /* Realtek RTL8211F (0x001cc916) */ |
222 | eth_phy: ethernet-phy@0 { | 222 | eth_phy: ethernet-phy@0 { |
223 | reg = <0>; | 223 | reg = <0>; |
224 | eee-broken-1000t; | ||
225 | interrupt-parent = <&gpio_intc>; | 224 | interrupt-parent = <&gpio_intc>; |
226 | /* GPIOH_3 */ | 225 | /* GPIOH_3 */ |
227 | interrupts = <17 IRQ_TYPE_LEVEL_LOW>; | 226 | interrupts = <17 IRQ_TYPE_LEVEL_LOW>; |
@@ -273,8 +272,7 @@ | |||
273 | cap-sd-highspeed; | 272 | cap-sd-highspeed; |
274 | disable-wp; | 273 | disable-wp; |
275 | 274 | ||
276 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 275 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
277 | cd-inverted; | ||
278 | 276 | ||
279 | vmmc-supply = <&tflash_vdd>; | 277 | vmmc-supply = <&tflash_vdd>; |
280 | vqmmc-supply = <&tf_io>; | 278 | vqmmc-supply = <&tf_io>; |
diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts index f5853610b20b..6ac02beb5fa7 100644 --- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts +++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts | |||
@@ -206,8 +206,7 @@ | |||
206 | cap-sd-highspeed; | 206 | cap-sd-highspeed; |
207 | disable-wp; | 207 | disable-wp; |
208 | 208 | ||
209 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 209 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
210 | cd-inverted; | ||
211 | 210 | ||
212 | vmmc-supply = <&vcc_3v3>; | 211 | vmmc-supply = <&vcc_3v3>; |
213 | }; | 212 | }; |
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index ddc7a7bb33c0..f57acf8f66b9 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi | |||
@@ -105,7 +105,7 @@ | |||
105 | interrupts-extended = < | 105 | interrupts-extended = < |
106 | &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 | 106 | &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 |
107 | &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 | 107 | &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 |
108 | &cpcap 48 1 | 108 | &cpcap 48 0 |
109 | >; | 109 | >; |
110 | interrupt-names = | 110 | interrupt-names = |
111 | "id_ground", "id_float", "se0conn", "vbusvld", | 111 | "id_ground", "id_float", "se0conn", "vbusvld", |
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index e53d32691308..93b420934e8e 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi | |||
@@ -714,11 +714,7 @@ | |||
714 | 714 | ||
715 | vdda-supply = <&vdac>; | 715 | vdda-supply = <&vdac>; |
716 | 716 | ||
717 | #address-cells = <1>; | ||
718 | #size-cells = <0>; | ||
719 | |||
720 | port { | 717 | port { |
721 | reg = <0>; | ||
722 | venc_out: endpoint { | 718 | venc_out: endpoint { |
723 | remote-endpoint = <&opa_in>; | 719 | remote-endpoint = <&opa_in>; |
724 | ti,channels = <1>; | 720 | ti,channels = <1>; |
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 182a53991c90..826920e6b878 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts | |||
@@ -814,7 +814,7 @@ | |||
814 | /* For debugging, it is often good idea to remove this GPIO. | 814 | /* For debugging, it is often good idea to remove this GPIO. |
815 | It means you can remove back cover (to reboot by removing | 815 | It means you can remove back cover (to reboot by removing |
816 | battery) and still use the MMC card. */ | 816 | battery) and still use the MMC card. */ |
817 | cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ | 817 | cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ |
818 | }; | 818 | }; |
819 | 819 | ||
820 | /* most boards use vaux3, only some old versions use vmmc2 instead */ | 820 | /* most boards use vaux3, only some old versions use vmmc2 instead */ |
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index 0d9b85317529..e142e6c70a59 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi | |||
@@ -370,6 +370,19 @@ | |||
370 | compatible = "ti,omap2-onenand"; | 370 | compatible = "ti,omap2-onenand"; |
371 | reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ | 371 | reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ |
372 | 372 | ||
373 | /* | ||
374 | * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported | ||
375 | * bootloader set values when booted with v4.19 using both N950 | ||
376 | * and N9 devices (OneNAND Manufacturer: Samsung): | ||
377 | * | ||
378 | * gpmc cs0 before gpmc_cs_program_settings: | ||
379 | * cs0 GPMC_CS_CONFIG1: 0xfd001202 | ||
380 | * cs0 GPMC_CS_CONFIG2: 0x00181800 | ||
381 | * cs0 GPMC_CS_CONFIG3: 0x00030300 | ||
382 | * cs0 GPMC_CS_CONFIG4: 0x18001804 | ||
383 | * cs0 GPMC_CS_CONFIG5: 0x03171d1d | ||
384 | * cs0 GPMC_CS_CONFIG6: 0x97080000 | ||
385 | */ | ||
373 | gpmc,sync-read; | 386 | gpmc,sync-read; |
374 | gpmc,sync-write; | 387 | gpmc,sync-write; |
375 | gpmc,burst-length = <16>; | 388 | gpmc,burst-length = <16>; |
@@ -379,26 +392,27 @@ | |||
379 | gpmc,device-width = <2>; | 392 | gpmc,device-width = <2>; |
380 | gpmc,mux-add-data = <2>; | 393 | gpmc,mux-add-data = <2>; |
381 | gpmc,cs-on-ns = <0>; | 394 | gpmc,cs-on-ns = <0>; |
382 | gpmc,cs-rd-off-ns = <87>; | 395 | gpmc,cs-rd-off-ns = <122>; |
383 | gpmc,cs-wr-off-ns = <87>; | 396 | gpmc,cs-wr-off-ns = <122>; |
384 | gpmc,adv-on-ns = <0>; | 397 | gpmc,adv-on-ns = <0>; |
385 | gpmc,adv-rd-off-ns = <10>; | 398 | gpmc,adv-rd-off-ns = <15>; |
386 | gpmc,adv-wr-off-ns = <10>; | 399 | gpmc,adv-wr-off-ns = <15>; |
387 | gpmc,oe-on-ns = <15>; | 400 | gpmc,oe-on-ns = <20>; |
388 | gpmc,oe-off-ns = <87>; | 401 | gpmc,oe-off-ns = <122>; |
389 | gpmc,we-on-ns = <0>; | 402 | gpmc,we-on-ns = <0>; |
390 | gpmc,we-off-ns = <87>; | 403 | gpmc,we-off-ns = <122>; |
391 | gpmc,rd-cycle-ns = <112>; | 404 | gpmc,rd-cycle-ns = <148>; |
392 | gpmc,wr-cycle-ns = <112>; | 405 | gpmc,wr-cycle-ns = <148>; |
393 | gpmc,access-ns = <81>; | 406 | gpmc,access-ns = <117>; |
394 | gpmc,page-burst-access-ns = <15>; | 407 | gpmc,page-burst-access-ns = <15>; |
395 | gpmc,bus-turnaround-ns = <0>; | 408 | gpmc,bus-turnaround-ns = <0>; |
396 | gpmc,cycle2cycle-delay-ns = <0>; | 409 | gpmc,cycle2cycle-delay-ns = <0>; |
397 | gpmc,wait-monitoring-ns = <0>; | 410 | gpmc,wait-monitoring-ns = <0>; |
398 | gpmc,clk-activation-ns = <5>; | 411 | gpmc,clk-activation-ns = <10>; |
399 | gpmc,wr-data-mux-bus-ns = <30>; | 412 | gpmc,wr-data-mux-bus-ns = <40>; |
400 | gpmc,wr-access-ns = <81>; | 413 | gpmc,wr-access-ns = <117>; |
401 | gpmc,sync-clk-ps = <15000>; | 414 | |
415 | gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */ | ||
402 | 416 | ||
403 | /* | 417 | /* |
404 | * MTD partition table corresponding to Nokia's MeeGo 1.2 | 418 | * MTD partition table corresponding to Nokia's MeeGo 1.2 |
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts index 04758a2a87f0..67d77eee9433 100644 --- a/arch/arm/boot/dts/omap4-droid4-xt894.dts +++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts | |||
@@ -644,6 +644,17 @@ | |||
644 | }; | 644 | }; |
645 | }; | 645 | }; |
646 | 646 | ||
647 | /* Configure pwm clock source for timers 8 & 9 */ | ||
648 | &timer8 { | ||
649 | assigned-clocks = <&abe_clkctrl OMAP4_TIMER8_CLKCTRL 24>; | ||
650 | assigned-clock-parents = <&sys_clkin_ck>; | ||
651 | }; | ||
652 | |||
653 | &timer9 { | ||
654 | assigned-clocks = <&l4_per_clkctrl OMAP4_TIMER9_CLKCTRL 24>; | ||
655 | assigned-clock-parents = <&sys_clkin_ck>; | ||
656 | }; | ||
657 | |||
647 | /* | 658 | /* |
648 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for | 659 | * As uart1 is wired to mdm6600 with rts and cts, we can use the cts pin for |
649 | * uart1 wakeirq. | 660 | * uart1 wakeirq. |
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index bc853ebeda22..61a06f6add3c 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi | |||
@@ -317,7 +317,8 @@ | |||
317 | 317 | ||
318 | palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { | 318 | palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { |
319 | pinctrl-single,pins = < | 319 | pinctrl-single,pins = < |
320 | OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) /* sys_nirq1 */ | 320 | /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ |
321 | OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) | ||
321 | >; | 322 | >; |
322 | }; | 323 | }; |
323 | 324 | ||
@@ -385,7 +386,8 @@ | |||
385 | 386 | ||
386 | palmas: palmas@48 { | 387 | palmas: palmas@48 { |
387 | compatible = "ti,palmas"; | 388 | compatible = "ti,palmas"; |
388 | interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ | 389 | /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ |
390 | interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>; | ||
389 | reg = <0x48>; | 391 | reg = <0x48>; |
390 | interrupt-controller; | 392 | interrupt-controller; |
391 | #interrupt-cells = <2>; | 393 | #interrupt-cells = <2>; |
@@ -651,7 +653,8 @@ | |||
651 | pinctrl-names = "default"; | 653 | pinctrl-names = "default"; |
652 | pinctrl-0 = <&twl6040_pins>; | 654 | pinctrl-0 = <&twl6040_pins>; |
653 | 655 | ||
654 | interrupts = <GIC_SPI 119 IRQ_TYPE_NONE>; /* IRQ_SYS_2N cascaded to gic */ | 656 | /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ |
657 | interrupts = <GIC_SPI 119 IRQ_TYPE_LEVEL_LOW>; | ||
655 | 658 | ||
656 | /* audpwron gpio defined in the board specific dts */ | 659 | /* audpwron gpio defined in the board specific dts */ |
657 | 660 | ||
diff --git a/arch/arm/boot/dts/omap5-cm-t54.dts b/arch/arm/boot/dts/omap5-cm-t54.dts index 5e21fb430a65..e78d3718f145 100644 --- a/arch/arm/boot/dts/omap5-cm-t54.dts +++ b/arch/arm/boot/dts/omap5-cm-t54.dts | |||
@@ -181,6 +181,13 @@ | |||
181 | OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ | 181 | OMAP5_IOPAD(0x0042, PIN_INPUT_PULLDOWN | MUX_MODE6) /* llib_wakereqin.gpio1_wk15 */ |
182 | >; | 182 | >; |
183 | }; | 183 | }; |
184 | |||
185 | palmas_sys_nirq_pins: pinmux_palmas_sys_nirq_pins { | ||
186 | pinctrl-single,pins = < | ||
187 | /* sys_nirq1 is pulled down as the SoC is inverting it for GIC */ | ||
188 | OMAP5_IOPAD(0x068, PIN_INPUT_PULLUP | MUX_MODE0) | ||
189 | >; | ||
190 | }; | ||
184 | }; | 191 | }; |
185 | 192 | ||
186 | &omap5_pmx_core { | 193 | &omap5_pmx_core { |
@@ -414,8 +421,11 @@ | |||
414 | 421 | ||
415 | palmas: palmas@48 { | 422 | palmas: palmas@48 { |
416 | compatible = "ti,palmas"; | 423 | compatible = "ti,palmas"; |
417 | interrupts = <GIC_SPI 7 IRQ_TYPE_NONE>; /* IRQ_SYS_1N */ | ||
418 | reg = <0x48>; | 424 | reg = <0x48>; |
425 | pinctrl-0 = <&palmas_sys_nirq_pins>; | ||
426 | pinctrl-names = "default"; | ||
427 | /* sys_nirq/ext_sys_irq pins get inverted at mpuss wakeupgen */ | ||
428 | interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_LOW>; | ||
419 | interrupt-controller; | 429 | interrupt-controller; |
420 | #interrupt-cells = <2>; | 430 | #interrupt-cells = <2>; |
421 | ti,system-power-controller; | 431 | ti,system-power-controller; |
diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi index 9c7e309d9c2c..0960348002ad 100644 --- a/arch/arm/boot/dts/omap5-l4.dtsi +++ b/arch/arm/boot/dts/omap5-l4.dtsi | |||
@@ -1046,8 +1046,6 @@ | |||
1046 | <SYSC_IDLE_SMART>, | 1046 | <SYSC_IDLE_SMART>, |
1047 | <SYSC_IDLE_SMART_WKUP>; | 1047 | <SYSC_IDLE_SMART_WKUP>; |
1048 | ti,syss-mask = <1>; | 1048 | ti,syss-mask = <1>; |
1049 | ti,no-reset-on-init; | ||
1050 | ti,no-idle-on-init; | ||
1051 | /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */ | 1049 | /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */ |
1052 | clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>; | 1050 | clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>; |
1053 | clock-names = "fck"; | 1051 | clock-names = "fck"; |
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi index 3cc33f7ff7fe..3adc158a40bb 100644 --- a/arch/arm/boot/dts/r8a7743.dtsi +++ b/arch/arm/boot/dts/r8a7743.dtsi | |||
@@ -1681,15 +1681,12 @@ | |||
1681 | 1681 | ||
1682 | du: display@feb00000 { | 1682 | du: display@feb00000 { |
1683 | compatible = "renesas,du-r8a7743"; | 1683 | compatible = "renesas,du-r8a7743"; |
1684 | reg = <0 0xfeb00000 0 0x40000>, | 1684 | reg = <0 0xfeb00000 0 0x40000>; |
1685 | <0 0xfeb90000 0 0x1c>; | ||
1686 | reg-names = "du", "lvds.0"; | ||
1687 | interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, | 1685 | interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, |
1688 | <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; | 1686 | <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; |
1689 | clocks = <&cpg CPG_MOD 724>, | 1687 | clocks = <&cpg CPG_MOD 724>, |
1690 | <&cpg CPG_MOD 723>, | 1688 | <&cpg CPG_MOD 723>; |
1691 | <&cpg CPG_MOD 726>; | 1689 | clock-names = "du.0", "du.1"; |
1692 | clock-names = "du.0", "du.1", "lvds.0"; | ||
1693 | status = "disabled"; | 1690 | status = "disabled"; |
1694 | 1691 | ||
1695 | ports { | 1692 | ports { |
@@ -1704,6 +1701,33 @@ | |||
1704 | port@1 { | 1701 | port@1 { |
1705 | reg = <1>; | 1702 | reg = <1>; |
1706 | du_out_lvds0: endpoint { | 1703 | du_out_lvds0: endpoint { |
1704 | remote-endpoint = <&lvds0_in>; | ||
1705 | }; | ||
1706 | }; | ||
1707 | }; | ||
1708 | }; | ||
1709 | |||
1710 | lvds0: lvds@feb90000 { | ||
1711 | compatible = "renesas,r8a7743-lvds"; | ||
1712 | reg = <0 0xfeb90000 0 0x1c>; | ||
1713 | clocks = <&cpg CPG_MOD 726>; | ||
1714 | power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; | ||
1715 | resets = <&cpg 726>; | ||
1716 | status = "disabled"; | ||
1717 | |||
1718 | ports { | ||
1719 | #address-cells = <1>; | ||
1720 | #size-cells = <0>; | ||
1721 | |||
1722 | port@0 { | ||
1723 | reg = <0>; | ||
1724 | lvds0_in: endpoint { | ||
1725 | remote-endpoint = <&du_out_lvds0>; | ||
1726 | }; | ||
1727 | }; | ||
1728 | port@1 { | ||
1729 | reg = <1>; | ||
1730 | lvds0_out: endpoint { | ||
1707 | }; | 1731 | }; |
1708 | }; | 1732 | }; |
1709 | }; | 1733 | }; |
diff --git a/arch/arm/boot/dts/rk3188.dtsi b/arch/arm/boot/dts/rk3188.dtsi index 4acb501dd3f8..3ed49898f4b2 100644 --- a/arch/arm/boot/dts/rk3188.dtsi +++ b/arch/arm/boot/dts/rk3188.dtsi | |||
@@ -719,7 +719,6 @@ | |||
719 | pm_qos = <&qos_lcdc0>, | 719 | pm_qos = <&qos_lcdc0>, |
720 | <&qos_lcdc1>, | 720 | <&qos_lcdc1>, |
721 | <&qos_cif0>, | 721 | <&qos_cif0>, |
722 | <&qos_cif1>, | ||
723 | <&qos_ipp>, | 722 | <&qos_ipp>, |
724 | <&qos_rga>; | 723 | <&qos_rga>; |
725 | }; | 724 | }; |
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 353d90f99b40..13304b8c5139 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi | |||
@@ -216,6 +216,7 @@ | |||
216 | #clock-cells = <0>; | 216 | #clock-cells = <0>; |
217 | compatible = "fixed-clock"; | 217 | compatible = "fixed-clock"; |
218 | clock-frequency = <24000000>; | 218 | clock-frequency = <24000000>; |
219 | clock-output-names = "osc24M"; | ||
219 | }; | 220 | }; |
220 | 221 | ||
221 | osc32k: clk-32k { | 222 | osc32k: clk-32k { |
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts index 5d23667dc2d2..25540b7694d5 100644 --- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts +++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts | |||
@@ -53,7 +53,7 @@ | |||
53 | 53 | ||
54 | aliases { | 54 | aliases { |
55 | serial0 = &uart0; | 55 | serial0 = &uart0; |
56 | /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */ | 56 | ethernet0 = &emac; |
57 | ethernet1 = &sdiowifi; | 57 | ethernet1 = &sdiowifi; |
58 | }; | 58 | }; |
59 | 59 | ||
diff --git a/arch/arm/boot/dts/tegra124-nyan.dtsi b/arch/arm/boot/dts/tegra124-nyan.dtsi index d5f11d6d987e..bc85b6a166c7 100644 --- a/arch/arm/boot/dts/tegra124-nyan.dtsi +++ b/arch/arm/boot/dts/tegra124-nyan.dtsi | |||
@@ -13,10 +13,25 @@ | |||
13 | stdout-path = "serial0:115200n8"; | 13 | stdout-path = "serial0:115200n8"; |
14 | }; | 14 | }; |
15 | 15 | ||
16 | memory@80000000 { | 16 | /* |
17 | * Note that recent version of the device tree compiler (starting with | ||
18 | * version 1.4.2) warn about this node containing a reg property, but | ||
19 | * missing a unit-address. However, the bootloader on these Chromebook | ||
20 | * devices relies on the full name of this node to be exactly /memory. | ||
21 | * Adding the unit-address causes the bootloader to create a /memory | ||
22 | * node and write the memory bank configuration to that node, which in | ||
23 | * turn leads the kernel to believe that the device has 2 GiB of | ||
24 | * memory instead of the amount detected by the bootloader. | ||
25 | * | ||
26 | * The name of this node is effectively ABI and must not be changed. | ||
27 | */ | ||
28 | memory { | ||
29 | device_type = "memory"; | ||
17 | reg = <0x0 0x80000000 0x0 0x80000000>; | 30 | reg = <0x0 0x80000000 0x0 0x80000000>; |
18 | }; | 31 | }; |
19 | 32 | ||
33 | /delete-node/ memory@80000000; | ||
34 | |||
20 | host1x@50000000 { | 35 | host1x@50000000 { |
21 | hdmi@54280000 { | 36 | hdmi@54280000 { |
22 | status = "okay"; | 37 | status = "okay"; |
diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts index 689c8930dce3..b08d561d6748 100644 --- a/arch/arm/boot/dts/vf610-bk4.dts +++ b/arch/arm/boot/dts/vf610-bk4.dts | |||
@@ -110,11 +110,11 @@ | |||
110 | bus-num = <3>; | 110 | bus-num = <3>; |
111 | status = "okay"; | 111 | status = "okay"; |
112 | spi-slave; | 112 | spi-slave; |
113 | #address-cells = <0>; | ||
113 | 114 | ||
114 | slave@0 { | 115 | slave { |
115 | compatible = "lwn,bk4"; | 116 | compatible = "lwn,bk4"; |
116 | spi-max-frequency = <30000000>; | 117 | spi-max-frequency = <30000000>; |
117 | reg = <0>; | ||
118 | }; | 118 | }; |
119 | }; | 119 | }; |
120 | 120 | ||
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h index c883fcbe93b6..46d41140df27 100644 --- a/arch/arm/include/asm/irq.h +++ b/arch/arm/include/asm/irq.h | |||
@@ -25,7 +25,6 @@ | |||
25 | #ifndef __ASSEMBLY__ | 25 | #ifndef __ASSEMBLY__ |
26 | struct irqaction; | 26 | struct irqaction; |
27 | struct pt_regs; | 27 | struct pt_regs; |
28 | extern void migrate_irqs(void); | ||
29 | 28 | ||
30 | extern void asm_do_IRQ(unsigned int, struct pt_regs *); | 29 | extern void asm_do_IRQ(unsigned int, struct pt_regs *); |
31 | void handle_IRQ(unsigned int, struct pt_regs *); | 30 | void handle_IRQ(unsigned int, struct pt_regs *); |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index ca56537b61bc..50e89869178a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -48,6 +48,7 @@ | |||
48 | #define KVM_REQ_SLEEP \ | 48 | #define KVM_REQ_SLEEP \ |
49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) | 49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) | 50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) |
51 | #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) | ||
51 | 52 | ||
52 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); | 53 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
53 | 54 | ||
@@ -147,6 +148,13 @@ struct kvm_cpu_context { | |||
147 | 148 | ||
148 | typedef struct kvm_cpu_context kvm_cpu_context_t; | 149 | typedef struct kvm_cpu_context kvm_cpu_context_t; |
149 | 150 | ||
151 | struct vcpu_reset_state { | ||
152 | unsigned long pc; | ||
153 | unsigned long r0; | ||
154 | bool be; | ||
155 | bool reset; | ||
156 | }; | ||
157 | |||
150 | struct kvm_vcpu_arch { | 158 | struct kvm_vcpu_arch { |
151 | struct kvm_cpu_context ctxt; | 159 | struct kvm_cpu_context ctxt; |
152 | 160 | ||
@@ -186,6 +194,8 @@ struct kvm_vcpu_arch { | |||
186 | /* Cache some mmu pages needed inside spinlock regions */ | 194 | /* Cache some mmu pages needed inside spinlock regions */ |
187 | struct kvm_mmu_memory_cache mmu_page_cache; | 195 | struct kvm_mmu_memory_cache mmu_page_cache; |
188 | 196 | ||
197 | struct vcpu_reset_state reset_state; | ||
198 | |||
189 | /* Detect first run of a vcpu */ | 199 | /* Detect first run of a vcpu */ |
190 | bool has_run_once; | 200 | bool has_run_once; |
191 | }; | 201 | }; |
diff --git a/arch/arm/include/asm/stage2_pgtable.h b/arch/arm/include/asm/stage2_pgtable.h index c4b1d4fb1797..de2089501b8b 100644 --- a/arch/arm/include/asm/stage2_pgtable.h +++ b/arch/arm/include/asm/stage2_pgtable.h | |||
@@ -76,4 +76,9 @@ static inline bool kvm_stage2_has_pud(struct kvm *kvm) | |||
76 | #define S2_PMD_MASK PMD_MASK | 76 | #define S2_PMD_MASK PMD_MASK |
77 | #define S2_PMD_SIZE PMD_SIZE | 77 | #define S2_PMD_SIZE PMD_SIZE |
78 | 78 | ||
79 | static inline bool kvm_stage2_has_pmd(struct kvm *kvm) | ||
80 | { | ||
81 | return true; | ||
82 | } | ||
83 | |||
79 | #endif /* __ARM_S2_PGTABLE_H_ */ | 84 | #endif /* __ARM_S2_PGTABLE_H_ */ |
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 9908dacf9229..844861368cd5 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/smp.h> | 31 | #include <linux/smp.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
34 | #include <linux/ratelimit.h> | ||
35 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
36 | #include <linux/list.h> | 35 | #include <linux/list.h> |
37 | #include <linux/kallsyms.h> | 36 | #include <linux/kallsyms.h> |
@@ -109,64 +108,3 @@ int __init arch_probe_nr_irqs(void) | |||
109 | return nr_irqs; | 108 | return nr_irqs; |
110 | } | 109 | } |
111 | #endif | 110 | #endif |
112 | |||
113 | #ifdef CONFIG_HOTPLUG_CPU | ||
114 | static bool migrate_one_irq(struct irq_desc *desc) | ||
115 | { | ||
116 | struct irq_data *d = irq_desc_get_irq_data(desc); | ||
117 | const struct cpumask *affinity = irq_data_get_affinity_mask(d); | ||
118 | struct irq_chip *c; | ||
119 | bool ret = false; | ||
120 | |||
121 | /* | ||
122 | * If this is a per-CPU interrupt, or the affinity does not | ||
123 | * include this CPU, then we have nothing to do. | ||
124 | */ | ||
125 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) | ||
126 | return false; | ||
127 | |||
128 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
129 | affinity = cpu_online_mask; | ||
130 | ret = true; | ||
131 | } | ||
132 | |||
133 | c = irq_data_get_irq_chip(d); | ||
134 | if (!c->irq_set_affinity) | ||
135 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); | ||
136 | else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) | ||
137 | cpumask_copy(irq_data_get_affinity_mask(d), affinity); | ||
138 | |||
139 | return ret; | ||
140 | } | ||
141 | |||
142 | /* | ||
143 | * The current CPU has been marked offline. Migrate IRQs off this CPU. | ||
144 | * If the affinity settings do not allow other CPUs, force them onto any | ||
145 | * available CPU. | ||
146 | * | ||
147 | * Note: we must iterate over all IRQs, whether they have an attached | ||
148 | * action structure or not, as we need to get chained interrupts too. | ||
149 | */ | ||
150 | void migrate_irqs(void) | ||
151 | { | ||
152 | unsigned int i; | ||
153 | struct irq_desc *desc; | ||
154 | unsigned long flags; | ||
155 | |||
156 | local_irq_save(flags); | ||
157 | |||
158 | for_each_irq_desc(i, desc) { | ||
159 | bool affinity_broken; | ||
160 | |||
161 | raw_spin_lock(&desc->lock); | ||
162 | affinity_broken = migrate_one_irq(desc); | ||
163 | raw_spin_unlock(&desc->lock); | ||
164 | |||
165 | if (affinity_broken) | ||
166 | pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n", | ||
167 | i, smp_processor_id()); | ||
168 | } | ||
169 | |||
170 | local_irq_restore(flags); | ||
171 | } | ||
172 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 3bf82232b1be..1d6f5ea522f4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -254,7 +254,7 @@ int __cpu_disable(void) | |||
254 | /* | 254 | /* |
255 | * OK - migrate IRQs away from this CPU | 255 | * OK - migrate IRQs away from this CPU |
256 | */ | 256 | */ |
257 | migrate_irqs(); | 257 | irq_migrate_all_off_this_cpu(); |
258 | 258 | ||
259 | /* | 259 | /* |
260 | * Flush user cache and TLB mappings, and then remove this CPU | 260 | * Flush user cache and TLB mappings, and then remove this CPU |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 222c1635bc7a..e8bd288fd5be 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -1450,6 +1450,6 @@ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) | |||
1450 | reset_coproc_regs(vcpu, table, num); | 1450 | reset_coproc_regs(vcpu, table, num); |
1451 | 1451 | ||
1452 | for (num = 1; num < NR_CP15_REGS; num++) | 1452 | for (num = 1; num < NR_CP15_REGS; num++) |
1453 | if (vcpu_cp15(vcpu, num) == 0x42424242) | 1453 | WARN(vcpu_cp15(vcpu, num) == 0x42424242, |
1454 | panic("Didn't reset vcpu_cp15(vcpu, %zi)", num); | 1454 | "Didn't reset vcpu_cp15(vcpu, %zi)", num); |
1455 | } | 1455 | } |
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c index 5ed0c3ee33d6..e53327912adc 100644 --- a/arch/arm/kvm/reset.c +++ b/arch/arm/kvm/reset.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <asm/cputype.h> | 26 | #include <asm/cputype.h> |
27 | #include <asm/kvm_arm.h> | 27 | #include <asm/kvm_arm.h> |
28 | #include <asm/kvm_coproc.h> | 28 | #include <asm/kvm_coproc.h> |
29 | #include <asm/kvm_emulate.h> | ||
29 | 30 | ||
30 | #include <kvm/arm_arch_timer.h> | 31 | #include <kvm/arm_arch_timer.h> |
31 | 32 | ||
@@ -69,6 +70,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
69 | /* Reset CP15 registers */ | 70 | /* Reset CP15 registers */ |
70 | kvm_reset_coprocs(vcpu); | 71 | kvm_reset_coprocs(vcpu); |
71 | 72 | ||
73 | /* | ||
74 | * Additional reset state handling that PSCI may have imposed on us. | ||
75 | * Must be done after all the sys_reg reset. | ||
76 | */ | ||
77 | if (READ_ONCE(vcpu->arch.reset_state.reset)) { | ||
78 | unsigned long target_pc = vcpu->arch.reset_state.pc; | ||
79 | |||
80 | /* Gracefully handle Thumb2 entry point */ | ||
81 | if (target_pc & 1) { | ||
82 | target_pc &= ~1UL; | ||
83 | vcpu_set_thumb(vcpu); | ||
84 | } | ||
85 | |||
86 | /* Propagate caller endianness */ | ||
87 | if (vcpu->arch.reset_state.be) | ||
88 | kvm_vcpu_set_be(vcpu); | ||
89 | |||
90 | *vcpu_pc(vcpu) = target_pc; | ||
91 | vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); | ||
92 | |||
93 | vcpu->arch.reset_state.reset = false; | ||
94 | } | ||
95 | |||
72 | /* Reset arch_timer context */ | 96 | /* Reset arch_timer context */ |
73 | return kvm_timer_vcpu_reset(vcpu); | 97 | return kvm_timer_vcpu_reset(vcpu); |
74 | } | 98 | } |
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index 3b73813c6b04..23e8c93515d4 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c | |||
@@ -75,8 +75,7 @@ void __init n2100_map_io(void) | |||
75 | /* | 75 | /* |
76 | * N2100 PCI. | 76 | * N2100 PCI. |
77 | */ | 77 | */ |
78 | static int __init | 78 | static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
79 | n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
80 | { | 79 | { |
81 | int irq; | 80 | int irq; |
82 | 81 | ||
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index a8b291f00109..dae514c8276a 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c | |||
@@ -152,6 +152,10 @@ static int omap_enter_idle_coupled(struct cpuidle_device *dev, | |||
152 | mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && | 152 | mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) && |
153 | (cx->mpu_logic_state == PWRDM_POWER_OFF); | 153 | (cx->mpu_logic_state == PWRDM_POWER_OFF); |
154 | 154 | ||
155 | /* Enter broadcast mode for periodic timers */ | ||
156 | tick_broadcast_enable(); | ||
157 | |||
158 | /* Enter broadcast mode for one-shot timers */ | ||
155 | tick_broadcast_enter(); | 159 | tick_broadcast_enter(); |
156 | 160 | ||
157 | /* | 161 | /* |
@@ -218,15 +222,6 @@ fail: | |||
218 | return index; | 222 | return index; |
219 | } | 223 | } |
220 | 224 | ||
221 | /* | ||
222 | * For each cpu, setup the broadcast timer because local timers | ||
223 | * stops for the states above C1. | ||
224 | */ | ||
225 | static void omap_setup_broadcast_timer(void *arg) | ||
226 | { | ||
227 | tick_broadcast_enable(); | ||
228 | } | ||
229 | |||
230 | static struct cpuidle_driver omap4_idle_driver = { | 225 | static struct cpuidle_driver omap4_idle_driver = { |
231 | .name = "omap4_idle", | 226 | .name = "omap4_idle", |
232 | .owner = THIS_MODULE, | 227 | .owner = THIS_MODULE, |
@@ -319,8 +314,5 @@ int __init omap4_idle_init(void) | |||
319 | if (!cpu_clkdm[0] || !cpu_clkdm[1]) | 314 | if (!cpu_clkdm[0] || !cpu_clkdm[1]) |
320 | return -ENODEV; | 315 | return -ENODEV; |
321 | 316 | ||
322 | /* Configure the broadcast timer on each cpu */ | ||
323 | on_each_cpu(omap_setup_broadcast_timer, NULL, 1); | ||
324 | |||
325 | return cpuidle_register(idle_driver, cpu_online_mask); | 317 | return cpuidle_register(idle_driver, cpu_online_mask); |
326 | } | 318 | } |
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index f86b72d1d59e..1444b4b4bd9f 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
@@ -83,6 +83,7 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) | |||
83 | u32 enable_mask, enable_shift; | 83 | u32 enable_mask, enable_shift; |
84 | u32 pipd_mask, pipd_shift; | 84 | u32 pipd_mask, pipd_shift; |
85 | u32 reg; | 85 | u32 reg; |
86 | int ret; | ||
86 | 87 | ||
87 | if (dsi_id == 0) { | 88 | if (dsi_id == 0) { |
88 | enable_mask = OMAP4_DSI1_LANEENABLE_MASK; | 89 | enable_mask = OMAP4_DSI1_LANEENABLE_MASK; |
@@ -98,7 +99,11 @@ static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes) | |||
98 | return -ENODEV; | 99 | return -ENODEV; |
99 | } | 100 | } |
100 | 101 | ||
101 | regmap_read(omap4_dsi_mux_syscon, OMAP4_DSIPHY_SYSCON_OFFSET, ®); | 102 | ret = regmap_read(omap4_dsi_mux_syscon, |
103 | OMAP4_DSIPHY_SYSCON_OFFSET, | ||
104 | ®); | ||
105 | if (ret) | ||
106 | return ret; | ||
102 | 107 | ||
103 | reg &= ~enable_mask; | 108 | reg &= ~enable_mask; |
104 | reg &= ~pipd_mask; | 109 | reg &= ~pipd_mask; |
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c index fc5fb776a710..17558be4bf0a 100644 --- a/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/arch/arm/mach-omap2/omap-wakeupgen.c | |||
@@ -50,6 +50,9 @@ | |||
50 | #define OMAP4_NR_BANKS 4 | 50 | #define OMAP4_NR_BANKS 4 |
51 | #define OMAP4_NR_IRQS 128 | 51 | #define OMAP4_NR_IRQS 128 |
52 | 52 | ||
53 | #define SYS_NIRQ1_EXT_SYS_IRQ_1 7 | ||
54 | #define SYS_NIRQ2_EXT_SYS_IRQ_2 119 | ||
55 | |||
53 | static void __iomem *wakeupgen_base; | 56 | static void __iomem *wakeupgen_base; |
54 | static void __iomem *sar_base; | 57 | static void __iomem *sar_base; |
55 | static DEFINE_RAW_SPINLOCK(wakeupgen_lock); | 58 | static DEFINE_RAW_SPINLOCK(wakeupgen_lock); |
@@ -153,6 +156,37 @@ static void wakeupgen_unmask(struct irq_data *d) | |||
153 | irq_chip_unmask_parent(d); | 156 | irq_chip_unmask_parent(d); |
154 | } | 157 | } |
155 | 158 | ||
159 | /* | ||
160 | * The sys_nirq pins bypass peripheral modules and are wired directly | ||
161 | * to MPUSS wakeupgen. They get automatically inverted for GIC. | ||
162 | */ | ||
163 | static int wakeupgen_irq_set_type(struct irq_data *d, unsigned int type) | ||
164 | { | ||
165 | bool inverted = false; | ||
166 | |||
167 | switch (type) { | ||
168 | case IRQ_TYPE_LEVEL_LOW: | ||
169 | type &= ~IRQ_TYPE_LEVEL_MASK; | ||
170 | type |= IRQ_TYPE_LEVEL_HIGH; | ||
171 | inverted = true; | ||
172 | break; | ||
173 | case IRQ_TYPE_EDGE_FALLING: | ||
174 | type &= ~IRQ_TYPE_EDGE_BOTH; | ||
175 | type |= IRQ_TYPE_EDGE_RISING; | ||
176 | inverted = true; | ||
177 | break; | ||
178 | default: | ||
179 | break; | ||
180 | } | ||
181 | |||
182 | if (inverted && d->hwirq != SYS_NIRQ1_EXT_SYS_IRQ_1 && | ||
183 | d->hwirq != SYS_NIRQ2_EXT_SYS_IRQ_2) | ||
184 | pr_warn("wakeupgen: irq%li polarity inverted in dts\n", | ||
185 | d->hwirq); | ||
186 | |||
187 | return irq_chip_set_type_parent(d, type); | ||
188 | } | ||
189 | |||
156 | #ifdef CONFIG_HOTPLUG_CPU | 190 | #ifdef CONFIG_HOTPLUG_CPU |
157 | static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); | 191 | static DEFINE_PER_CPU(u32 [MAX_NR_REG_BANKS], irqmasks); |
158 | 192 | ||
@@ -446,7 +480,7 @@ static struct irq_chip wakeupgen_chip = { | |||
446 | .irq_mask = wakeupgen_mask, | 480 | .irq_mask = wakeupgen_mask, |
447 | .irq_unmask = wakeupgen_unmask, | 481 | .irq_unmask = wakeupgen_unmask, |
448 | .irq_retrigger = irq_chip_retrigger_hierarchy, | 482 | .irq_retrigger = irq_chip_retrigger_hierarchy, |
449 | .irq_set_type = irq_chip_set_type_parent, | 483 | .irq_set_type = wakeupgen_irq_set_type, |
450 | .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, | 484 | .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND, |
451 | #ifdef CONFIG_SMP | 485 | #ifdef CONFIG_SMP |
452 | .irq_set_affinity = irq_chip_set_affinity_parent, | 486 | .irq_set_affinity = irq_chip_set_affinity_parent, |
diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c index 028e50c6383f..a32c3b631484 100644 --- a/arch/arm/mach-tango/pm.c +++ b/arch/arm/mach-tango/pm.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/suspend.h> | 3 | #include <linux/suspend.h> |
4 | #include <asm/suspend.h> | 4 | #include <asm/suspend.h> |
5 | #include "smc.h" | 5 | #include "smc.h" |
6 | #include "pm.h" | ||
6 | 7 | ||
7 | static int tango_pm_powerdown(unsigned long arg) | 8 | static int tango_pm_powerdown(unsigned long arg) |
8 | { | 9 | { |
@@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = { | |||
24 | .valid = suspend_valid_only_mem, | 25 | .valid = suspend_valid_only_mem, |
25 | }; | 26 | }; |
26 | 27 | ||
27 | static int __init tango_pm_init(void) | 28 | void __init tango_pm_init(void) |
28 | { | 29 | { |
29 | suspend_set_ops(&tango_pm_ops); | 30 | suspend_set_ops(&tango_pm_ops); |
30 | return 0; | ||
31 | } | 31 | } |
32 | |||
33 | late_initcall(tango_pm_init); | ||
diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h new file mode 100644 index 000000000000..35ea705a0ee2 --- /dev/null +++ b/arch/arm/mach-tango/pm.h | |||
@@ -0,0 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #ifdef CONFIG_SUSPEND | ||
4 | void __init tango_pm_init(void); | ||
5 | #else | ||
6 | #define tango_pm_init NULL | ||
7 | #endif | ||
diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c index 677dd7b5efd9..824f90737b04 100644 --- a/arch/arm/mach-tango/setup.c +++ b/arch/arm/mach-tango/setup.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <asm/mach/arch.h> | 2 | #include <asm/mach/arch.h> |
3 | #include <asm/hardware/cache-l2x0.h> | 3 | #include <asm/hardware/cache-l2x0.h> |
4 | #include "smc.h" | 4 | #include "smc.h" |
5 | #include "pm.h" | ||
5 | 6 | ||
6 | static void tango_l2c_write(unsigned long val, unsigned int reg) | 7 | static void tango_l2c_write(unsigned long val, unsigned int reg) |
7 | { | 8 | { |
@@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT") | |||
15 | .dt_compat = tango_dt_compat, | 16 | .dt_compat = tango_dt_compat, |
16 | .l2c_aux_mask = ~0, | 17 | .l2c_aux_mask = ~0, |
17 | .l2c_write_sec = tango_l2c_write, | 18 | .l2c_write_sec = tango_l2c_write, |
19 | .init_late = tango_pm_init, | ||
18 | MACHINE_END | 20 | MACHINE_END |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f1e2922e447c..1e3e08a1c456 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -2390,4 +2390,6 @@ void arch_teardown_dma_ops(struct device *dev) | |||
2390 | return; | 2390 | return; |
2391 | 2391 | ||
2392 | arm_teardown_iommu_dma_ops(dev); | 2392 | arm_teardown_iommu_dma_ops(dev); |
2393 | /* Let arch_setup_dma_ops() start again from scratch upon re-probe */ | ||
2394 | set_dma_ops(dev, NULL); | ||
2393 | } | 2395 | } |
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c index ed36dcab80f1..f51919974183 100644 --- a/arch/arm/plat-pxa/ssp.c +++ b/arch/arm/plat-pxa/ssp.c | |||
@@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) | |||
190 | if (ssp == NULL) | 190 | if (ssp == NULL) |
191 | return -ENODEV; | 191 | return -ENODEV; |
192 | 192 | ||
193 | iounmap(ssp->mmio_base); | ||
194 | |||
195 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 193 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
196 | release_mem_region(res->start, resource_size(res)); | 194 | release_mem_region(res->start, resource_size(res)); |
197 | 195 | ||
@@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) | |||
201 | list_del(&ssp->node); | 199 | list_del(&ssp->node); |
202 | mutex_unlock(&ssp_lock); | 200 | mutex_unlock(&ssp_lock); |
203 | 201 | ||
204 | kfree(ssp); | ||
205 | return 0; | 202 | return 0; |
206 | } | 203 | } |
207 | 204 | ||
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c index 2c118a6ab358..0dc23fc227ed 100644 --- a/arch/arm/probes/kprobes/opt-arm.c +++ b/arch/arm/probes/kprobes/opt-arm.c | |||
@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *or | |||
247 | } | 247 | } |
248 | 248 | ||
249 | /* Copy arch-dep-instance from template. */ | 249 | /* Copy arch-dep-instance from template. */ |
250 | memcpy(code, (unsigned char *)optprobe_template_entry, | 250 | memcpy(code, (unsigned long *)&optprobe_template_entry, |
251 | TMPL_END_IDX * sizeof(kprobe_opcode_t)); | 251 | TMPL_END_IDX * sizeof(kprobe_opcode_t)); |
252 | 252 | ||
253 | /* Adjust buffer according to instruction. */ | 253 | /* Adjust buffer according to instruction. */ |
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index cb44aa290e73..e1d44b903dfc 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <linux/of_address.h> | 7 | #include <linux/of_address.h> |
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/dma-mapping.h> | ||
11 | #include <linux/vmalloc.h> | 10 | #include <linux/vmalloc.h> |
12 | #include <linux/swiotlb.h> | 11 | #include <linux/swiotlb.h> |
13 | 12 | ||
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts index b0c64f75792c..8974b5a1d3b1 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts | |||
@@ -188,6 +188,7 @@ | |||
188 | reg = <0x3a3>; | 188 | reg = <0x3a3>; |
189 | interrupt-parent = <&r_intc>; | 189 | interrupt-parent = <&r_intc>; |
190 | interrupts = <0 IRQ_TYPE_LEVEL_LOW>; | 190 | interrupts = <0 IRQ_TYPE_LEVEL_LOW>; |
191 | x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */ | ||
191 | }; | 192 | }; |
192 | }; | 193 | }; |
193 | 194 | ||
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 837a03dee875..2abb335145a6 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | |||
@@ -390,7 +390,7 @@ | |||
390 | }; | 390 | }; |
391 | 391 | ||
392 | video-codec@1c0e000 { | 392 | video-codec@1c0e000 { |
393 | compatible = "allwinner,sun50i-h5-video-engine"; | 393 | compatible = "allwinner,sun50i-a64-video-engine"; |
394 | reg = <0x01c0e000 0x1000>; | 394 | reg = <0x01c0e000 0x1000>; |
395 | clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, | 395 | clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, |
396 | <&ccu CLK_DRAM_VE>; | 396 | <&ccu CLK_DRAM_VE>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi index e14e0ce7e89f..016641a41694 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi | |||
@@ -187,8 +187,7 @@ | |||
187 | max-frequency = <100000000>; | 187 | max-frequency = <100000000>; |
188 | disable-wp; | 188 | disable-wp; |
189 | 189 | ||
190 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 190 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
191 | cd-inverted; | ||
192 | 191 | ||
193 | vmmc-supply = <&vddao_3v3>; | 192 | vmmc-supply = <&vddao_3v3>; |
194 | vqmmc-supply = <&vddio_boot>; | 193 | vqmmc-supply = <&vddio_boot>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts index 8cd50b75171d..ade2ee09ae96 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts | |||
@@ -305,8 +305,7 @@ | |||
305 | max-frequency = <200000000>; | 305 | max-frequency = <200000000>; |
306 | disable-wp; | 306 | disable-wp; |
307 | 307 | ||
308 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 308 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
309 | cd-inverted; | ||
310 | 309 | ||
311 | vmmc-supply = <&vddio_ao3v3>; | 310 | vmmc-supply = <&vddio_ao3v3>; |
312 | vqmmc-supply = <&vddio_tf>; | 311 | vqmmc-supply = <&vddio_tf>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts index 4cf7f6e80c6a..25105ac96d55 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts | |||
@@ -238,8 +238,7 @@ | |||
238 | max-frequency = <100000000>; | 238 | max-frequency = <100000000>; |
239 | disable-wp; | 239 | disable-wp; |
240 | 240 | ||
241 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 241 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
242 | cd-inverted; | ||
243 | 242 | ||
244 | vmmc-supply = <&vddao_3v3>; | 243 | vmmc-supply = <&vddao_3v3>; |
245 | vqmmc-supply = <&vddio_card>; | 244 | vqmmc-supply = <&vddio_card>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 2e1cd5e3a246..1cc9dc68ef00 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | |||
@@ -258,8 +258,7 @@ | |||
258 | max-frequency = <100000000>; | 258 | max-frequency = <100000000>; |
259 | disable-wp; | 259 | disable-wp; |
260 | 260 | ||
261 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 261 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
262 | cd-inverted; | ||
263 | 262 | ||
264 | vmmc-supply = <&tflash_vdd>; | 263 | vmmc-supply = <&tflash_vdd>; |
265 | vqmmc-supply = <&tf_io>; | 264 | vqmmc-supply = <&tf_io>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi index ce862266b9aa..0be0f2a5d2fe 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi | |||
@@ -196,8 +196,7 @@ | |||
196 | max-frequency = <100000000>; | 196 | max-frequency = <100000000>; |
197 | disable-wp; | 197 | disable-wp; |
198 | 198 | ||
199 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 199 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
200 | cd-inverted; | ||
201 | 200 | ||
202 | vmmc-supply = <&vddao_3v3>; | 201 | vmmc-supply = <&vddao_3v3>; |
203 | vqmmc-supply = <&vddio_card>; | 202 | vqmmc-supply = <&vddio_card>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index 93a4acf2c46c..ad4d50bd9d77 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi | |||
@@ -154,8 +154,7 @@ | |||
154 | max-frequency = <100000000>; | 154 | max-frequency = <100000000>; |
155 | disable-wp; | 155 | disable-wp; |
156 | 156 | ||
157 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 157 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
158 | cd-inverted; | ||
159 | 158 | ||
160 | vmmc-supply = <&vcc_3v3>; | 159 | vmmc-supply = <&vcc_3v3>; |
161 | }; | 160 | }; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi index ec09bb5792b7..2d2db783c44c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi | |||
@@ -211,8 +211,7 @@ | |||
211 | max-frequency = <100000000>; | 211 | max-frequency = <100000000>; |
212 | disable-wp; | 212 | disable-wp; |
213 | 213 | ||
214 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 214 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
215 | cd-inverted; | ||
216 | 215 | ||
217 | vmmc-supply = <&vddao_3v3>; | 216 | vmmc-supply = <&vddao_3v3>; |
218 | vqmmc-supply = <&vcc_3v3>; | 217 | vqmmc-supply = <&vcc_3v3>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts index f1c410e2da2b..796baea7a0bf 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts | |||
@@ -131,8 +131,7 @@ | |||
131 | max-frequency = <100000000>; | 131 | max-frequency = <100000000>; |
132 | disable-wp; | 132 | disable-wp; |
133 | 133 | ||
134 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 134 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
135 | cd-inverted; | ||
136 | 135 | ||
137 | vmmc-supply = <&vddao_3v3>; | 136 | vmmc-supply = <&vddao_3v3>; |
138 | vqmmc-supply = <&vddio_card>; | 137 | vqmmc-supply = <&vddio_card>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index db293440e4ca..255cede7b447 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts | |||
@@ -238,8 +238,7 @@ | |||
238 | max-frequency = <100000000>; | 238 | max-frequency = <100000000>; |
239 | disable-wp; | 239 | disable-wp; |
240 | 240 | ||
241 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 241 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
242 | cd-inverted; | ||
243 | 242 | ||
244 | vmmc-supply = <&vcc_3v3>; | 243 | vmmc-supply = <&vcc_3v3>; |
245 | vqmmc-supply = <&vcc_card>; | 244 | vqmmc-supply = <&vcc_card>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index 6739697be1de..9cbdb85fb591 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts | |||
@@ -183,8 +183,7 @@ | |||
183 | max-frequency = <100000000>; | 183 | max-frequency = <100000000>; |
184 | disable-wp; | 184 | disable-wp; |
185 | 185 | ||
186 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 186 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
187 | cd-inverted; | ||
188 | 187 | ||
189 | vmmc-supply = <&vddao_3v3>; | 188 | vmmc-supply = <&vddao_3v3>; |
190 | vqmmc-supply = <&vddio_card>; | 189 | vqmmc-supply = <&vddio_card>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index a1b31013ab6e..bc811a2faf42 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi | |||
@@ -137,8 +137,7 @@ | |||
137 | max-frequency = <100000000>; | 137 | max-frequency = <100000000>; |
138 | disable-wp; | 138 | disable-wp; |
139 | 139 | ||
140 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 140 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
141 | cd-inverted; | ||
142 | 141 | ||
143 | vmmc-supply = <&vddao_3v3>; | 142 | vmmc-supply = <&vddao_3v3>; |
144 | vqmmc-supply = <&vddio_boot>; | 143 | vqmmc-supply = <&vddio_boot>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index 3c3a667a8df8..3f086ed7de05 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts | |||
@@ -356,8 +356,7 @@ | |||
356 | max-frequency = <100000000>; | 356 | max-frequency = <100000000>; |
357 | disable-wp; | 357 | disable-wp; |
358 | 358 | ||
359 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 359 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
360 | cd-inverted; | ||
361 | 360 | ||
362 | vmmc-supply = <&vddao_3v3>; | 361 | vmmc-supply = <&vddao_3v3>; |
363 | vqmmc-supply = <&vddio_boot>; | 362 | vqmmc-supply = <&vddio_boot>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts index f7a1cffab4a8..8acfd40090d2 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts | |||
@@ -147,8 +147,7 @@ | |||
147 | max-frequency = <100000000>; | 147 | max-frequency = <100000000>; |
148 | disable-wp; | 148 | disable-wp; |
149 | 149 | ||
150 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 150 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
151 | cd-inverted; | ||
152 | 151 | ||
153 | vmmc-supply = <&vddao_3v3>; | 152 | vmmc-supply = <&vddao_3v3>; |
154 | vqmmc-supply = <&vddio_boot>; | 153 | vqmmc-supply = <&vddio_boot>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts index 7212dc4531e4..7fa20a8ede17 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts | |||
@@ -170,8 +170,7 @@ | |||
170 | max-frequency = <100000000>; | 170 | max-frequency = <100000000>; |
171 | disable-wp; | 171 | disable-wp; |
172 | 172 | ||
173 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 173 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
174 | cd-inverted; | ||
175 | 174 | ||
176 | vmmc-supply = <&vddao_3v3>; | 175 | vmmc-supply = <&vddao_3v3>; |
177 | vqmmc-supply = <&vddio_boot>; | 176 | vqmmc-supply = <&vddio_boot>; |
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts index 64acccc4bfcb..f74b13aa5aa5 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-evk.dts +++ b/arch/arm64/boot/dts/freescale/imx8mq-evk.dts | |||
@@ -227,34 +227,34 @@ | |||
227 | 227 | ||
228 | pinctrl_usdhc1_100mhz: usdhc1-100grp { | 228 | pinctrl_usdhc1_100mhz: usdhc1-100grp { |
229 | fsl,pins = < | 229 | fsl,pins = < |
230 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x85 | 230 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x8d |
231 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc5 | 231 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xcd |
232 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc5 | 232 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xcd |
233 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc5 | 233 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xcd |
234 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc5 | 234 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xcd |
235 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc5 | 235 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xcd |
236 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc5 | 236 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xcd |
237 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc5 | 237 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xcd |
238 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc5 | 238 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xcd |
239 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc5 | 239 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xcd |
240 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x85 | 240 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x8d |
241 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 | 241 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 |
242 | >; | 242 | >; |
243 | }; | 243 | }; |
244 | 244 | ||
245 | pinctrl_usdhc1_200mhz: usdhc1-200grp { | 245 | pinctrl_usdhc1_200mhz: usdhc1-200grp { |
246 | fsl,pins = < | 246 | fsl,pins = < |
247 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x87 | 247 | MX8MQ_IOMUXC_SD1_CLK_USDHC1_CLK 0x9f |
248 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xc7 | 248 | MX8MQ_IOMUXC_SD1_CMD_USDHC1_CMD 0xdf |
249 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xc7 | 249 | MX8MQ_IOMUXC_SD1_DATA0_USDHC1_DATA0 0xdf |
250 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xc7 | 250 | MX8MQ_IOMUXC_SD1_DATA1_USDHC1_DATA1 0xdf |
251 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xc7 | 251 | MX8MQ_IOMUXC_SD1_DATA2_USDHC1_DATA2 0xdf |
252 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xc7 | 252 | MX8MQ_IOMUXC_SD1_DATA3_USDHC1_DATA3 0xdf |
253 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xc7 | 253 | MX8MQ_IOMUXC_SD1_DATA4_USDHC1_DATA4 0xdf |
254 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xc7 | 254 | MX8MQ_IOMUXC_SD1_DATA5_USDHC1_DATA5 0xdf |
255 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xc7 | 255 | MX8MQ_IOMUXC_SD1_DATA6_USDHC1_DATA6 0xdf |
256 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xc7 | 256 | MX8MQ_IOMUXC_SD1_DATA7_USDHC1_DATA7 0xdf |
257 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x87 | 257 | MX8MQ_IOMUXC_SD1_STROBE_USDHC1_STROBE 0x9f |
258 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 | 258 | MX8MQ_IOMUXC_SD1_RESET_B_USDHC1_RESET_B 0xc1 |
259 | >; | 259 | >; |
260 | }; | 260 | }; |
diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index 8e9d6d5ed7b2..b6d31499fb43 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi | |||
@@ -360,6 +360,8 @@ | |||
360 | <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, | 360 | <&clk IMX8MQ_CLK_NAND_USDHC_BUS>, |
361 | <&clk IMX8MQ_CLK_USDHC1_ROOT>; | 361 | <&clk IMX8MQ_CLK_USDHC1_ROOT>; |
362 | clock-names = "ipg", "ahb", "per"; | 362 | clock-names = "ipg", "ahb", "per"; |
363 | assigned-clocks = <&clk IMX8MQ_CLK_USDHC1>; | ||
364 | assigned-clock-rates = <400000000>; | ||
363 | fsl,tuning-start-tap = <20>; | 365 | fsl,tuning-start-tap = <20>; |
364 | fsl,tuning-step = <2>; | 366 | fsl,tuning-step = <2>; |
365 | bus-width = <4>; | 367 | bus-width = <4>; |
diff --git a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts index 5b4a9609e31f..2468762283a5 100644 --- a/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts +++ b/arch/arm64/boot/dts/marvell/armada-8040-clearfog-gt-8k.dts | |||
@@ -351,7 +351,7 @@ | |||
351 | reg = <0>; | 351 | reg = <0>; |
352 | pinctrl-names = "default"; | 352 | pinctrl-names = "default"; |
353 | pinctrl-0 = <&cp0_copper_eth_phy_reset>; | 353 | pinctrl-0 = <&cp0_copper_eth_phy_reset>; |
354 | reset-gpios = <&cp1_gpio1 11 GPIO_ACTIVE_LOW>; | 354 | reset-gpios = <&cp0_gpio2 11 GPIO_ACTIVE_LOW>; |
355 | reset-assert-us = <10000>; | 355 | reset-assert-us = <10000>; |
356 | }; | 356 | }; |
357 | 357 | ||
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 99b7495455a6..838e32cc14c9 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi | |||
@@ -404,7 +404,7 @@ | |||
404 | }; | 404 | }; |
405 | 405 | ||
406 | intc: interrupt-controller@9bc0000 { | 406 | intc: interrupt-controller@9bc0000 { |
407 | compatible = "arm,gic-v3"; | 407 | compatible = "qcom,msm8996-gic-v3", "arm,gic-v3"; |
408 | #interrupt-cells = <3>; | 408 | #interrupt-cells = <3>; |
409 | interrupt-controller; | 409 | interrupt-controller; |
410 | #redistributor-regions = <1>; | 410 | #redistributor-regions = <1>; |
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi index 20745a8528c5..719ed9d9067d 100644 --- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi | |||
@@ -1011,6 +1011,9 @@ | |||
1011 | <&cpg CPG_CORE R8A774A1_CLK_S3D1>, | 1011 | <&cpg CPG_CORE R8A774A1_CLK_S3D1>, |
1012 | <&scif_clk>; | 1012 | <&scif_clk>; |
1013 | clock-names = "fck", "brg_int", "scif_clk"; | 1013 | clock-names = "fck", "brg_int", "scif_clk"; |
1014 | dmas = <&dmac1 0x13>, <&dmac1 0x12>, | ||
1015 | <&dmac2 0x13>, <&dmac2 0x12>; | ||
1016 | dma-names = "tx", "rx", "tx", "rx"; | ||
1014 | power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; | 1017 | power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; |
1015 | resets = <&cpg 310>; | 1018 | resets = <&cpg 310>; |
1016 | status = "disabled"; | 1019 | status = "disabled"; |
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi index afedbf5728ec..0648d12778ed 100644 --- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi | |||
@@ -1262,6 +1262,9 @@ | |||
1262 | <&cpg CPG_CORE R8A7796_CLK_S3D1>, | 1262 | <&cpg CPG_CORE R8A7796_CLK_S3D1>, |
1263 | <&scif_clk>; | 1263 | <&scif_clk>; |
1264 | clock-names = "fck", "brg_int", "scif_clk"; | 1264 | clock-names = "fck", "brg_int", "scif_clk"; |
1265 | dmas = <&dmac1 0x13>, <&dmac1 0x12>, | ||
1266 | <&dmac2 0x13>, <&dmac2 0x12>; | ||
1267 | dma-names = "tx", "rx", "tx", "rx"; | ||
1265 | power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; | 1268 | power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; |
1266 | resets = <&cpg 310>; | 1269 | resets = <&cpg 310>; |
1267 | status = "disabled"; | 1270 | status = "disabled"; |
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi index 6dc9b1fef830..4b3730f640ef 100644 --- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi | |||
@@ -1068,6 +1068,9 @@ | |||
1068 | <&cpg CPG_CORE R8A77965_CLK_S3D1>, | 1068 | <&cpg CPG_CORE R8A77965_CLK_S3D1>, |
1069 | <&scif_clk>; | 1069 | <&scif_clk>; |
1070 | clock-names = "fck", "brg_int", "scif_clk"; | 1070 | clock-names = "fck", "brg_int", "scif_clk"; |
1071 | dmas = <&dmac1 0x13>, <&dmac1 0x12>, | ||
1072 | <&dmac2 0x13>, <&dmac2 0x12>; | ||
1073 | dma-names = "tx", "rx", "tx", "rx"; | ||
1071 | power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; | 1074 | power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; |
1072 | resets = <&cpg 310>; | 1075 | resets = <&cpg 310>; |
1073 | status = "disabled"; | 1076 | status = "disabled"; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts index bd937d68ca3b..040b36ef0dd2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts +++ b/arch/arm64/boot/dts/rockchip/rk3328-rock64.dts | |||
@@ -40,6 +40,7 @@ | |||
40 | pinctrl-0 = <&usb30_host_drv>; | 40 | pinctrl-0 = <&usb30_host_drv>; |
41 | regulator-name = "vcc_host_5v"; | 41 | regulator-name = "vcc_host_5v"; |
42 | regulator-always-on; | 42 | regulator-always-on; |
43 | regulator-boot-on; | ||
43 | vin-supply = <&vcc_sys>; | 44 | vin-supply = <&vcc_sys>; |
44 | }; | 45 | }; |
45 | 46 | ||
@@ -51,6 +52,7 @@ | |||
51 | pinctrl-0 = <&usb20_host_drv>; | 52 | pinctrl-0 = <&usb20_host_drv>; |
52 | regulator-name = "vcc_host1_5v"; | 53 | regulator-name = "vcc_host1_5v"; |
53 | regulator-always-on; | 54 | regulator-always-on; |
55 | regulator-boot-on; | ||
54 | vin-supply = <&vcc_sys>; | 56 | vin-supply = <&vcc_sys>; |
55 | }; | 57 | }; |
56 | 58 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts index 1ee0dc0d9f10..d1cf404b8708 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts | |||
@@ -22,7 +22,7 @@ | |||
22 | backlight = <&backlight>; | 22 | backlight = <&backlight>; |
23 | power-supply = <&pp3300_disp>; | 23 | power-supply = <&pp3300_disp>; |
24 | 24 | ||
25 | ports { | 25 | port { |
26 | panel_in_edp: endpoint { | 26 | panel_in_edp: endpoint { |
27 | remote-endpoint = <&edp_out_panel>; | 27 | remote-endpoint = <&edp_out_panel>; |
28 | }; | 28 | }; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts index 81e73103fa78..15e254a77391 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-kevin.dts | |||
@@ -43,7 +43,7 @@ | |||
43 | backlight = <&backlight>; | 43 | backlight = <&backlight>; |
44 | power-supply = <&pp3300_disp>; | 44 | power-supply = <&pp3300_disp>; |
45 | 45 | ||
46 | ports { | 46 | port { |
47 | panel_in_edp: endpoint { | 47 | panel_in_edp: endpoint { |
48 | remote-endpoint = <&edp_out_panel>; | 48 | remote-endpoint = <&edp_out_panel>; |
49 | }; | 49 | }; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts index 0b8f1edbd746..b48a63c3efc3 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire-excavator.dts | |||
@@ -91,7 +91,7 @@ | |||
91 | pinctrl-0 = <&lcd_panel_reset>; | 91 | pinctrl-0 = <&lcd_panel_reset>; |
92 | power-supply = <&vcc3v3_s0>; | 92 | power-supply = <&vcc3v3_s0>; |
93 | 93 | ||
94 | ports { | 94 | port { |
95 | panel_in_edp: endpoint { | 95 | panel_in_edp: endpoint { |
96 | remote-endpoint = <&edp_out_panel>; | 96 | remote-endpoint = <&edp_out_panel>; |
97 | }; | 97 | }; |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 7732d0ba4e60..da3fc7324d68 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -48,6 +48,7 @@ | |||
48 | #define KVM_REQ_SLEEP \ | 48 | #define KVM_REQ_SLEEP \ |
49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) | 49 | KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) |
50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) | 50 | #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1) |
51 | #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2) | ||
51 | 52 | ||
52 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); | 53 | DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use); |
53 | 54 | ||
@@ -208,6 +209,13 @@ struct kvm_cpu_context { | |||
208 | 209 | ||
209 | typedef struct kvm_cpu_context kvm_cpu_context_t; | 210 | typedef struct kvm_cpu_context kvm_cpu_context_t; |
210 | 211 | ||
212 | struct vcpu_reset_state { | ||
213 | unsigned long pc; | ||
214 | unsigned long r0; | ||
215 | bool be; | ||
216 | bool reset; | ||
217 | }; | ||
218 | |||
211 | struct kvm_vcpu_arch { | 219 | struct kvm_vcpu_arch { |
212 | struct kvm_cpu_context ctxt; | 220 | struct kvm_cpu_context ctxt; |
213 | 221 | ||
@@ -297,6 +305,9 @@ struct kvm_vcpu_arch { | |||
297 | /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ | 305 | /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ |
298 | u64 vsesr_el2; | 306 | u64 vsesr_el2; |
299 | 307 | ||
308 | /* Additional reset state */ | ||
309 | struct vcpu_reset_state reset_state; | ||
310 | |||
300 | /* True when deferrable sysregs are loaded on the physical CPU, | 311 | /* True when deferrable sysregs are loaded on the physical CPU, |
301 | * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ | 312 | * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */ |
302 | bool sysregs_loaded_on_cpu; | 313 | bool sysregs_loaded_on_cpu; |
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index e1ec947e7c0c..0c656850eeea 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -332,6 +332,17 @@ static inline void *phys_to_virt(phys_addr_t x) | |||
332 | #define virt_addr_valid(kaddr) \ | 332 | #define virt_addr_valid(kaddr) \ |
333 | (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) | 333 | (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) |
334 | 334 | ||
335 | /* | ||
336 | * Given that the GIC architecture permits ITS implementations that can only be | ||
337 | * configured with a LPI table address once, GICv3 systems with many CPUs may | ||
338 | * end up reserving a lot of different regions after a kexec for their LPI | ||
339 | * tables (one per CPU), as we are forced to reuse the same memory after kexec | ||
340 | * (and thus reserve it persistently with EFI beforehand) | ||
341 | */ | ||
342 | #if defined(CONFIG_EFI) && defined(CONFIG_ARM_GIC_V3_ITS) | ||
343 | # define INIT_MEMBLOCK_RESERVED_REGIONS (INIT_MEMBLOCK_REGIONS + NR_CPUS + 1) | ||
344 | #endif | ||
345 | |||
335 | #include <asm-generic/memory_model.h> | 346 | #include <asm-generic/memory_model.h> |
336 | 347 | ||
337 | #endif | 348 | #endif |
diff --git a/arch/arm64/include/asm/neon-intrinsics.h b/arch/arm64/include/asm/neon-intrinsics.h index 2ba6c6b9541f..71abfc7612b2 100644 --- a/arch/arm64/include/asm/neon-intrinsics.h +++ b/arch/arm64/include/asm/neon-intrinsics.h | |||
@@ -36,4 +36,8 @@ | |||
36 | #include <arm_neon.h> | 36 | #include <arm_neon.h> |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | #ifdef CONFIG_CC_IS_CLANG | ||
40 | #pragma clang diagnostic ignored "-Wincompatible-pointer-types" | ||
41 | #endif | ||
42 | |||
39 | #endif /* __ASM_NEON_INTRINSICS_H */ | 43 | #endif /* __ASM_NEON_INTRINSICS_H */ |
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 15d79a8e5e5e..eecf7927dab0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -539,8 +539,7 @@ set_hcr: | |||
539 | /* GICv3 system register access */ | 539 | /* GICv3 system register access */ |
540 | mrs x0, id_aa64pfr0_el1 | 540 | mrs x0, id_aa64pfr0_el1 |
541 | ubfx x0, x0, #24, #4 | 541 | ubfx x0, x0, #24, #4 |
542 | cmp x0, #1 | 542 | cbz x0, 3f |
543 | b.ne 3f | ||
544 | 543 | ||
545 | mrs_s x0, SYS_ICC_SRE_EL2 | 544 | mrs_s x0, SYS_ICC_SRE_EL2 |
546 | orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 | 545 | orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 |
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index f2c211a6229b..58871333737a 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c | |||
@@ -120,10 +120,12 @@ static int create_dtb(struct kimage *image, | |||
120 | { | 120 | { |
121 | void *buf; | 121 | void *buf; |
122 | size_t buf_size; | 122 | size_t buf_size; |
123 | size_t cmdline_len; | ||
123 | int ret; | 124 | int ret; |
124 | 125 | ||
126 | cmdline_len = cmdline ? strlen(cmdline) : 0; | ||
125 | buf_size = fdt_totalsize(initial_boot_params) | 127 | buf_size = fdt_totalsize(initial_boot_params) |
126 | + strlen(cmdline) + DTB_EXTRA_SPACE; | 128 | + cmdline_len + DTB_EXTRA_SPACE; |
127 | 129 | ||
128 | for (;;) { | 130 | for (;;) { |
129 | buf = vmalloc(buf_size); | 131 | buf = vmalloc(buf_size); |
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9dce33b0e260..ddaea0fd2fa4 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
@@ -1702,19 +1702,20 @@ void syscall_trace_exit(struct pt_regs *regs) | |||
1702 | } | 1702 | } |
1703 | 1703 | ||
1704 | /* | 1704 | /* |
1705 | * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487C.a | 1705 | * SPSR_ELx bits which are always architecturally RES0 per ARM DDI 0487D.a. |
1706 | * We also take into account DIT (bit 24), which is not yet documented, and | 1706 | * We permit userspace to set SSBS (AArch64 bit 12, AArch32 bit 23) which is |
1707 | * treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may be | 1707 | * not described in ARM DDI 0487D.a. |
1708 | * allocated an EL0 meaning in future. | 1708 | * We treat PAN and UAO as RES0 bits, as they are meaningless at EL0, and may |
1709 | * be allocated an EL0 meaning in future. | ||
1709 | * Userspace cannot use these until they have an architectural meaning. | 1710 | * Userspace cannot use these until they have an architectural meaning. |
1710 | * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. | 1711 | * Note that this follows the SPSR_ELx format, not the AArch32 PSR format. |
1711 | * We also reserve IL for the kernel; SS is handled dynamically. | 1712 | * We also reserve IL for the kernel; SS is handled dynamically. |
1712 | */ | 1713 | */ |
1713 | #define SPSR_EL1_AARCH64_RES0_BITS \ | 1714 | #define SPSR_EL1_AARCH64_RES0_BITS \ |
1714 | (GENMASK_ULL(63,32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ | 1715 | (GENMASK_ULL(63, 32) | GENMASK_ULL(27, 25) | GENMASK_ULL(23, 22) | \ |
1715 | GENMASK_ULL(20, 10) | GENMASK_ULL(5, 5)) | 1716 | GENMASK_ULL(20, 13) | GENMASK_ULL(11, 10) | GENMASK_ULL(5, 5)) |
1716 | #define SPSR_EL1_AARCH32_RES0_BITS \ | 1717 | #define SPSR_EL1_AARCH32_RES0_BITS \ |
1717 | (GENMASK_ULL(63,32) | GENMASK_ULL(23, 22) | GENMASK_ULL(20,20)) | 1718 | (GENMASK_ULL(63, 32) | GENMASK_ULL(22, 22) | GENMASK_ULL(20, 20)) |
1718 | 1719 | ||
1719 | static int valid_compat_regs(struct user_pt_regs *regs) | 1720 | static int valid_compat_regs(struct user_pt_regs *regs) |
1720 | { | 1721 | { |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4b0e1231625c..009849328289 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -313,7 +313,6 @@ void __init setup_arch(char **cmdline_p) | |||
313 | arm64_memblock_init(); | 313 | arm64_memblock_init(); |
314 | 314 | ||
315 | paging_init(); | 315 | paging_init(); |
316 | efi_apply_persistent_mem_reservations(); | ||
317 | 316 | ||
318 | acpi_table_upgrade(); | 317 | acpi_table_upgrade(); |
319 | 318 | ||
@@ -340,6 +339,9 @@ void __init setup_arch(char **cmdline_p) | |||
340 | smp_init_cpus(); | 339 | smp_init_cpus(); |
341 | smp_build_mpidr_hash(); | 340 | smp_build_mpidr_hash(); |
342 | 341 | ||
342 | /* Init percpu seeds for random tags after cpus are set up. */ | ||
343 | kasan_init_tags(); | ||
344 | |||
343 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN | 345 | #ifdef CONFIG_ARM64_SW_TTBR0_PAN |
344 | /* | 346 | /* |
345 | * Make sure init_thread_info.ttbr0 always generates translation | 347 | * Make sure init_thread_info.ttbr0 always generates translation |
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index b0b1478094b4..421ebf6f7086 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <kvm/arm_psci.h> | 23 | #include <kvm/arm_psci.h> |
24 | 24 | ||
25 | #include <asm/cpufeature.h> | 25 | #include <asm/cpufeature.h> |
26 | #include <asm/kprobes.h> | ||
26 | #include <asm/kvm_asm.h> | 27 | #include <asm/kvm_asm.h> |
27 | #include <asm/kvm_emulate.h> | 28 | #include <asm/kvm_emulate.h> |
28 | #include <asm/kvm_host.h> | 29 | #include <asm/kvm_host.h> |
@@ -107,6 +108,7 @@ static void activate_traps_vhe(struct kvm_vcpu *vcpu) | |||
107 | 108 | ||
108 | write_sysreg(kvm_get_hyp_vector(), vbar_el1); | 109 | write_sysreg(kvm_get_hyp_vector(), vbar_el1); |
109 | } | 110 | } |
111 | NOKPROBE_SYMBOL(activate_traps_vhe); | ||
110 | 112 | ||
111 | static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) | 113 | static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu) |
112 | { | 114 | { |
@@ -154,6 +156,7 @@ static void deactivate_traps_vhe(void) | |||
154 | write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); | 156 | write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1); |
155 | write_sysreg(vectors, vbar_el1); | 157 | write_sysreg(vectors, vbar_el1); |
156 | } | 158 | } |
159 | NOKPROBE_SYMBOL(deactivate_traps_vhe); | ||
157 | 160 | ||
158 | static void __hyp_text __deactivate_traps_nvhe(void) | 161 | static void __hyp_text __deactivate_traps_nvhe(void) |
159 | { | 162 | { |
@@ -513,6 +516,7 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) | |||
513 | 516 | ||
514 | return exit_code; | 517 | return exit_code; |
515 | } | 518 | } |
519 | NOKPROBE_SYMBOL(kvm_vcpu_run_vhe); | ||
516 | 520 | ||
517 | /* Switch to the guest for legacy non-VHE systems */ | 521 | /* Switch to the guest for legacy non-VHE systems */ |
518 | int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) | 522 | int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu) |
@@ -620,6 +624,7 @@ static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par, | |||
620 | read_sysreg_el2(esr), read_sysreg_el2(far), | 624 | read_sysreg_el2(esr), read_sysreg_el2(far), |
621 | read_sysreg(hpfar_el2), par, vcpu); | 625 | read_sysreg(hpfar_el2), par, vcpu); |
622 | } | 626 | } |
627 | NOKPROBE_SYMBOL(__hyp_call_panic_vhe); | ||
623 | 628 | ||
624 | void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) | 629 | void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt) |
625 | { | 630 | { |
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c index 68d6f7c3b237..b426e2cf973c 100644 --- a/arch/arm64/kvm/hyp/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/sysreg-sr.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/compiler.h> | 18 | #include <linux/compiler.h> |
19 | #include <linux/kvm_host.h> | 19 | #include <linux/kvm_host.h> |
20 | 20 | ||
21 | #include <asm/kprobes.h> | ||
21 | #include <asm/kvm_asm.h> | 22 | #include <asm/kvm_asm.h> |
22 | #include <asm/kvm_emulate.h> | 23 | #include <asm/kvm_emulate.h> |
23 | #include <asm/kvm_hyp.h> | 24 | #include <asm/kvm_hyp.h> |
@@ -98,12 +99,14 @@ void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt) | |||
98 | { | 99 | { |
99 | __sysreg_save_common_state(ctxt); | 100 | __sysreg_save_common_state(ctxt); |
100 | } | 101 | } |
102 | NOKPROBE_SYMBOL(sysreg_save_host_state_vhe); | ||
101 | 103 | ||
102 | void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) | 104 | void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt) |
103 | { | 105 | { |
104 | __sysreg_save_common_state(ctxt); | 106 | __sysreg_save_common_state(ctxt); |
105 | __sysreg_save_el2_return_state(ctxt); | 107 | __sysreg_save_el2_return_state(ctxt); |
106 | } | 108 | } |
109 | NOKPROBE_SYMBOL(sysreg_save_guest_state_vhe); | ||
107 | 110 | ||
108 | static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) | 111 | static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt) |
109 | { | 112 | { |
@@ -188,12 +191,14 @@ void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt) | |||
188 | { | 191 | { |
189 | __sysreg_restore_common_state(ctxt); | 192 | __sysreg_restore_common_state(ctxt); |
190 | } | 193 | } |
194 | NOKPROBE_SYMBOL(sysreg_restore_host_state_vhe); | ||
191 | 195 | ||
192 | void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) | 196 | void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt) |
193 | { | 197 | { |
194 | __sysreg_restore_common_state(ctxt); | 198 | __sysreg_restore_common_state(ctxt); |
195 | __sysreg_restore_el2_return_state(ctxt); | 199 | __sysreg_restore_el2_return_state(ctxt); |
196 | } | 200 | } |
201 | NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe); | ||
197 | 202 | ||
198 | void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) | 203 | void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu) |
199 | { | 204 | { |
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index b72a3dd56204..f16a5f8ff2b4 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <asm/kvm_arm.h> | 32 | #include <asm/kvm_arm.h> |
33 | #include <asm/kvm_asm.h> | 33 | #include <asm/kvm_asm.h> |
34 | #include <asm/kvm_coproc.h> | 34 | #include <asm/kvm_coproc.h> |
35 | #include <asm/kvm_emulate.h> | ||
35 | #include <asm/kvm_mmu.h> | 36 | #include <asm/kvm_mmu.h> |
36 | 37 | ||
37 | /* Maximum phys_shift supported for any VM on this host */ | 38 | /* Maximum phys_shift supported for any VM on this host */ |
@@ -105,16 +106,33 @@ int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
105 | * This function finds the right table above and sets the registers on | 106 | * This function finds the right table above and sets the registers on |
106 | * the virtual CPU struct to their architecturally defined reset | 107 | * the virtual CPU struct to their architecturally defined reset |
107 | * values. | 108 | * values. |
109 | * | ||
110 | * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT | ||
111 | * ioctl or as part of handling a request issued by another VCPU in the PSCI | ||
112 | * handling code. In the first case, the VCPU will not be loaded, and in the | ||
113 | * second case the VCPU will be loaded. Because this function operates purely | ||
114 | * on the memory-backed valus of system registers, we want to do a full put if | ||
115 | * we were loaded (handling a request) and load the values back at the end of | ||
116 | * the function. Otherwise we leave the state alone. In both cases, we | ||
117 | * disable preemption around the vcpu reset as we would otherwise race with | ||
118 | * preempt notifiers which also call put/load. | ||
108 | */ | 119 | */ |
109 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | 120 | int kvm_reset_vcpu(struct kvm_vcpu *vcpu) |
110 | { | 121 | { |
111 | const struct kvm_regs *cpu_reset; | 122 | const struct kvm_regs *cpu_reset; |
123 | int ret = -EINVAL; | ||
124 | bool loaded; | ||
125 | |||
126 | preempt_disable(); | ||
127 | loaded = (vcpu->cpu != -1); | ||
128 | if (loaded) | ||
129 | kvm_arch_vcpu_put(vcpu); | ||
112 | 130 | ||
113 | switch (vcpu->arch.target) { | 131 | switch (vcpu->arch.target) { |
114 | default: | 132 | default: |
115 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { | 133 | if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features)) { |
116 | if (!cpu_has_32bit_el1()) | 134 | if (!cpu_has_32bit_el1()) |
117 | return -EINVAL; | 135 | goto out; |
118 | cpu_reset = &default_regs_reset32; | 136 | cpu_reset = &default_regs_reset32; |
119 | } else { | 137 | } else { |
120 | cpu_reset = &default_regs_reset; | 138 | cpu_reset = &default_regs_reset; |
@@ -129,6 +147,29 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
129 | /* Reset system registers */ | 147 | /* Reset system registers */ |
130 | kvm_reset_sys_regs(vcpu); | 148 | kvm_reset_sys_regs(vcpu); |
131 | 149 | ||
150 | /* | ||
151 | * Additional reset state handling that PSCI may have imposed on us. | ||
152 | * Must be done after all the sys_reg reset. | ||
153 | */ | ||
154 | if (vcpu->arch.reset_state.reset) { | ||
155 | unsigned long target_pc = vcpu->arch.reset_state.pc; | ||
156 | |||
157 | /* Gracefully handle Thumb2 entry point */ | ||
158 | if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { | ||
159 | target_pc &= ~1UL; | ||
160 | vcpu_set_thumb(vcpu); | ||
161 | } | ||
162 | |||
163 | /* Propagate caller endianness */ | ||
164 | if (vcpu->arch.reset_state.be) | ||
165 | kvm_vcpu_set_be(vcpu); | ||
166 | |||
167 | *vcpu_pc(vcpu) = target_pc; | ||
168 | vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0); | ||
169 | |||
170 | vcpu->arch.reset_state.reset = false; | ||
171 | } | ||
172 | |||
132 | /* Reset PMU */ | 173 | /* Reset PMU */ |
133 | kvm_pmu_vcpu_reset(vcpu); | 174 | kvm_pmu_vcpu_reset(vcpu); |
134 | 175 | ||
@@ -137,7 +178,12 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) | |||
137 | vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; | 178 | vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG; |
138 | 179 | ||
139 | /* Reset timer */ | 180 | /* Reset timer */ |
140 | return kvm_timer_vcpu_reset(vcpu); | 181 | ret = kvm_timer_vcpu_reset(vcpu); |
182 | out: | ||
183 | if (loaded) | ||
184 | kvm_arch_vcpu_load(vcpu, smp_processor_id()); | ||
185 | preempt_enable(); | ||
186 | return ret; | ||
141 | } | 187 | } |
142 | 188 | ||
143 | void kvm_set_ipa_limit(void) | 189 | void kvm_set_ipa_limit(void) |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e3e37228ae4e..c936aa40c3f4 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -314,12 +314,29 @@ static bool trap_raz_wi(struct kvm_vcpu *vcpu, | |||
314 | return read_zero(vcpu, p); | 314 | return read_zero(vcpu, p); |
315 | } | 315 | } |
316 | 316 | ||
317 | static bool trap_undef(struct kvm_vcpu *vcpu, | 317 | /* |
318 | struct sys_reg_params *p, | 318 | * ARMv8.1 mandates at least a trivial LORegion implementation, where all the |
319 | const struct sys_reg_desc *r) | 319 | * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0 |
320 | * system, these registers should UNDEF. LORID_EL1 being a RO register, we | ||
321 | * treat it separately. | ||
322 | */ | ||
323 | static bool trap_loregion(struct kvm_vcpu *vcpu, | ||
324 | struct sys_reg_params *p, | ||
325 | const struct sys_reg_desc *r) | ||
320 | { | 326 | { |
321 | kvm_inject_undefined(vcpu); | 327 | u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); |
322 | return false; | 328 | u32 sr = sys_reg((u32)r->Op0, (u32)r->Op1, |
329 | (u32)r->CRn, (u32)r->CRm, (u32)r->Op2); | ||
330 | |||
331 | if (!(val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))) { | ||
332 | kvm_inject_undefined(vcpu); | ||
333 | return false; | ||
334 | } | ||
335 | |||
336 | if (p->is_write && sr == SYS_LORID_EL1) | ||
337 | return write_to_read_only(vcpu, p, r); | ||
338 | |||
339 | return trap_raz_wi(vcpu, p, r); | ||
323 | } | 340 | } |
324 | 341 | ||
325 | static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, | 342 | static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, |
@@ -1048,11 +1065,6 @@ static u64 read_id_reg(struct sys_reg_desc const *r, bool raz) | |||
1048 | if (val & ptrauth_mask) | 1065 | if (val & ptrauth_mask) |
1049 | kvm_debug("ptrauth unsupported for guests, suppressing\n"); | 1066 | kvm_debug("ptrauth unsupported for guests, suppressing\n"); |
1050 | val &= ~ptrauth_mask; | 1067 | val &= ~ptrauth_mask; |
1051 | } else if (id == SYS_ID_AA64MMFR1_EL1) { | ||
1052 | if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT)) | ||
1053 | kvm_debug("LORegions unsupported for guests, suppressing\n"); | ||
1054 | |||
1055 | val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT); | ||
1056 | } | 1068 | } |
1057 | 1069 | ||
1058 | return val; | 1070 | return val; |
@@ -1338,11 +1350,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
1338 | { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, | 1350 | { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, |
1339 | { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, | 1351 | { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 }, |
1340 | 1352 | ||
1341 | { SYS_DESC(SYS_LORSA_EL1), trap_undef }, | 1353 | { SYS_DESC(SYS_LORSA_EL1), trap_loregion }, |
1342 | { SYS_DESC(SYS_LOREA_EL1), trap_undef }, | 1354 | { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, |
1343 | { SYS_DESC(SYS_LORN_EL1), trap_undef }, | 1355 | { SYS_DESC(SYS_LORN_EL1), trap_loregion }, |
1344 | { SYS_DESC(SYS_LORC_EL1), trap_undef }, | 1356 | { SYS_DESC(SYS_LORC_EL1), trap_loregion }, |
1345 | { SYS_DESC(SYS_LORID_EL1), trap_undef }, | 1357 | { SYS_DESC(SYS_LORID_EL1), trap_loregion }, |
1346 | 1358 | ||
1347 | { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, | 1359 | { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 }, |
1348 | { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, | 1360 | { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, |
@@ -2596,7 +2608,9 @@ void kvm_reset_sys_regs(struct kvm_vcpu *vcpu) | |||
2596 | table = get_target_table(vcpu->arch.target, true, &num); | 2608 | table = get_target_table(vcpu->arch.target, true, &num); |
2597 | reset_sys_reg_descs(vcpu, table, num); | 2609 | reset_sys_reg_descs(vcpu, table, num); |
2598 | 2610 | ||
2599 | for (num = 1; num < NR_SYS_REGS; num++) | 2611 | for (num = 1; num < NR_SYS_REGS; num++) { |
2600 | if (__vcpu_sys_reg(vcpu, num) == 0x4242424242424242) | 2612 | if (WARN(__vcpu_sys_reg(vcpu, num) == 0x4242424242424242, |
2601 | panic("Didn't reset __vcpu_sys_reg(%zi)", num); | 2613 | "Didn't reset __vcpu_sys_reg(%zi)\n", num)) |
2614 | break; | ||
2615 | } | ||
2602 | } | 2616 | } |
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index fcb1f2a6d7c6..99bb8facb5cb 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c | |||
@@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, | |||
286 | 286 | ||
287 | } | 287 | } |
288 | 288 | ||
289 | static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start) | 289 | static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start, |
290 | unsigned long end) | ||
290 | { | 291 | { |
291 | pte_t *ptep = pte_offset_kernel(pmdp, 0UL); | 292 | unsigned long addr = start; |
292 | unsigned long addr; | 293 | pte_t *ptep = pte_offset_kernel(pmdp, start); |
293 | unsigned i; | ||
294 | 294 | ||
295 | for (i = 0; i < PTRS_PER_PTE; i++, ptep++) { | 295 | do { |
296 | addr = start + i * PAGE_SIZE; | ||
297 | note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); | 296 | note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); |
298 | } | 297 | } while (ptep++, addr += PAGE_SIZE, addr != end); |
299 | } | 298 | } |
300 | 299 | ||
301 | static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start) | 300 | static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start, |
301 | unsigned long end) | ||
302 | { | 302 | { |
303 | pmd_t *pmdp = pmd_offset(pudp, 0UL); | 303 | unsigned long next, addr = start; |
304 | unsigned long addr; | 304 | pmd_t *pmdp = pmd_offset(pudp, start); |
305 | unsigned i; | ||
306 | 305 | ||
307 | for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { | 306 | do { |
308 | pmd_t pmd = READ_ONCE(*pmdp); | 307 | pmd_t pmd = READ_ONCE(*pmdp); |
308 | next = pmd_addr_end(addr, end); | ||
309 | 309 | ||
310 | addr = start + i * PMD_SIZE; | ||
311 | if (pmd_none(pmd) || pmd_sect(pmd)) { | 310 | if (pmd_none(pmd) || pmd_sect(pmd)) { |
312 | note_page(st, addr, 3, pmd_val(pmd)); | 311 | note_page(st, addr, 3, pmd_val(pmd)); |
313 | } else { | 312 | } else { |
314 | BUG_ON(pmd_bad(pmd)); | 313 | BUG_ON(pmd_bad(pmd)); |
315 | walk_pte(st, pmdp, addr); | 314 | walk_pte(st, pmdp, addr, next); |
316 | } | 315 | } |
317 | } | 316 | } while (pmdp++, addr = next, addr != end); |
318 | } | 317 | } |
319 | 318 | ||
320 | static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start) | 319 | static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start, |
320 | unsigned long end) | ||
321 | { | 321 | { |
322 | pud_t *pudp = pud_offset(pgdp, 0UL); | 322 | unsigned long next, addr = start; |
323 | unsigned long addr; | 323 | pud_t *pudp = pud_offset(pgdp, start); |
324 | unsigned i; | ||
325 | 324 | ||
326 | for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { | 325 | do { |
327 | pud_t pud = READ_ONCE(*pudp); | 326 | pud_t pud = READ_ONCE(*pudp); |
327 | next = pud_addr_end(addr, end); | ||
328 | 328 | ||
329 | addr = start + i * PUD_SIZE; | ||
330 | if (pud_none(pud) || pud_sect(pud)) { | 329 | if (pud_none(pud) || pud_sect(pud)) { |
331 | note_page(st, addr, 2, pud_val(pud)); | 330 | note_page(st, addr, 2, pud_val(pud)); |
332 | } else { | 331 | } else { |
333 | BUG_ON(pud_bad(pud)); | 332 | BUG_ON(pud_bad(pud)); |
334 | walk_pmd(st, pudp, addr); | 333 | walk_pmd(st, pudp, addr, next); |
335 | } | 334 | } |
336 | } | 335 | } while (pudp++, addr = next, addr != end); |
337 | } | 336 | } |
338 | 337 | ||
339 | static void walk_pgd(struct pg_state *st, struct mm_struct *mm, | 338 | static void walk_pgd(struct pg_state *st, struct mm_struct *mm, |
340 | unsigned long start) | 339 | unsigned long start) |
341 | { | 340 | { |
342 | pgd_t *pgdp = pgd_offset(mm, 0UL); | 341 | unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0; |
343 | unsigned i; | 342 | unsigned long next, addr = start; |
344 | unsigned long addr; | 343 | pgd_t *pgdp = pgd_offset(mm, start); |
345 | 344 | ||
346 | for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) { | 345 | do { |
347 | pgd_t pgd = READ_ONCE(*pgdp); | 346 | pgd_t pgd = READ_ONCE(*pgdp); |
347 | next = pgd_addr_end(addr, end); | ||
348 | 348 | ||
349 | addr = start + i * PGDIR_SIZE; | ||
350 | if (pgd_none(pgd)) { | 349 | if (pgd_none(pgd)) { |
351 | note_page(st, addr, 1, pgd_val(pgd)); | 350 | note_page(st, addr, 1, pgd_val(pgd)); |
352 | } else { | 351 | } else { |
353 | BUG_ON(pgd_bad(pgd)); | 352 | BUG_ON(pgd_bad(pgd)); |
354 | walk_pud(st, pgdp, addr); | 353 | walk_pud(st, pgdp, addr, next); |
355 | } | 354 | } |
356 | } | 355 | } while (pgdp++, addr = next, addr != end); |
357 | } | 356 | } |
358 | 357 | ||
359 | void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) | 358 | void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) |
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 4b55b15707a3..f37a86d2a69d 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c | |||
@@ -252,8 +252,6 @@ void __init kasan_init(void) | |||
252 | memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); | 252 | memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); |
253 | cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); | 253 | cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); |
254 | 254 | ||
255 | kasan_init_tags(); | ||
256 | |||
257 | /* At this point kasan is fully initialized. Enable error messages */ | 255 | /* At this point kasan is fully initialized. Enable error messages */ |
258 | init_task.kasan_depth = 0; | 256 | init_task.kasan_depth = 0; |
259 | pr_info("KernelAddressSanitizer initialized\n"); | 257 | pr_info("KernelAddressSanitizer initialized\n"); |
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h index edfcbb25fd9f..dcea277c09ae 100644 --- a/arch/csky/include/asm/pgtable.h +++ b/arch/csky/include/asm/pgtable.h | |||
@@ -45,8 +45,8 @@ | |||
45 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) | 45 | ((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset_t(address)) |
46 | #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) | 46 | #define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT)) |
47 | #define pte_clear(mm, addr, ptep) set_pte((ptep), \ | 47 | #define pte_clear(mm, addr, ptep) set_pte((ptep), \ |
48 | (((unsigned int)addr&0x80000000)?__pte(1):__pte(0))) | 48 | (((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0))) |
49 | #define pte_none(pte) (!(pte_val(pte)&0xfffffffe)) | 49 | #define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL)) |
50 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) | 50 | #define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT) |
51 | #define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) | 51 | #define pte_pfn(x) ((unsigned long)((x).pte_low >> PAGE_SHIFT)) |
52 | #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ | 52 | #define pfn_pte(pfn, prot) __pte(((unsigned long long)(pfn) << PAGE_SHIFT) \ |
@@ -241,6 +241,11 @@ static inline pte_t pte_mkyoung(pte_t pte) | |||
241 | 241 | ||
242 | #define pgd_index(address) ((address) >> PGDIR_SHIFT) | 242 | #define pgd_index(address) ((address) >> PGDIR_SHIFT) |
243 | 243 | ||
244 | #define __HAVE_PHYS_MEM_ACCESS_PROT | ||
245 | struct file; | ||
246 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
247 | unsigned long size, pgprot_t vma_prot); | ||
248 | |||
244 | /* | 249 | /* |
245 | * Macro to make mark a page protection value as "uncacheable". Note | 250 | * Macro to make mark a page protection value as "uncacheable". Note |
246 | * that "protection" is really a misnomer here as the protection value | 251 | * that "protection" is really a misnomer here as the protection value |
diff --git a/arch/csky/include/asm/processor.h b/arch/csky/include/asm/processor.h index 8f454810514f..21e0bd5293dd 100644 --- a/arch/csky/include/asm/processor.h +++ b/arch/csky/include/asm/processor.h | |||
@@ -49,7 +49,7 @@ struct thread_struct { | |||
49 | }; | 49 | }; |
50 | 50 | ||
51 | #define INIT_THREAD { \ | 51 | #define INIT_THREAD { \ |
52 | .ksp = (unsigned long) init_thread_union.stack + THREAD_SIZE, \ | 52 | .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \ |
53 | .sr = DEFAULT_PSR_VALUE, \ | 53 | .sr = DEFAULT_PSR_VALUE, \ |
54 | } | 54 | } |
55 | 55 | ||
@@ -95,7 +95,7 @@ unsigned long get_wchan(struct task_struct *p); | |||
95 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) | 95 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->usp) |
96 | 96 | ||
97 | #define task_pt_regs(p) \ | 97 | #define task_pt_regs(p) \ |
98 | ((struct pt_regs *)(THREAD_SIZE + p->stack) - 1) | 98 | ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1) |
99 | 99 | ||
100 | #define cpu_relax() barrier() | 100 | #define cpu_relax() barrier() |
101 | 101 | ||
diff --git a/arch/csky/kernel/dumpstack.c b/arch/csky/kernel/dumpstack.c index 659253e9989c..d67f9777cfd9 100644 --- a/arch/csky/kernel/dumpstack.c +++ b/arch/csky/kernel/dumpstack.c | |||
@@ -38,7 +38,11 @@ void show_stack(struct task_struct *task, unsigned long *stack) | |||
38 | if (task) | 38 | if (task) |
39 | stack = (unsigned long *)thread_saved_fp(task); | 39 | stack = (unsigned long *)thread_saved_fp(task); |
40 | else | 40 | else |
41 | #ifdef CONFIG_STACKTRACE | ||
42 | asm volatile("mov %0, r8\n":"=r"(stack)::"memory"); | ||
43 | #else | ||
41 | stack = (unsigned long *)&stack; | 44 | stack = (unsigned long *)&stack; |
45 | #endif | ||
42 | } | 46 | } |
43 | 47 | ||
44 | show_trace(stack); | 48 | show_trace(stack); |
diff --git a/arch/csky/kernel/ptrace.c b/arch/csky/kernel/ptrace.c index 57f1afe19a52..f2f12fff36f7 100644 --- a/arch/csky/kernel/ptrace.c +++ b/arch/csky/kernel/ptrace.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/ptrace.h> | 8 | #include <linux/ptrace.h> |
9 | #include <linux/regset.h> | 9 | #include <linux/regset.h> |
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <linux/sched/task_stack.h> | ||
11 | #include <linux/signal.h> | 12 | #include <linux/signal.h> |
12 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
13 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
@@ -159,7 +160,7 @@ static int fpr_set(struct task_struct *target, | |||
159 | static const struct user_regset csky_regsets[] = { | 160 | static const struct user_regset csky_regsets[] = { |
160 | [REGSET_GPR] = { | 161 | [REGSET_GPR] = { |
161 | .core_note_type = NT_PRSTATUS, | 162 | .core_note_type = NT_PRSTATUS, |
162 | .n = ELF_NGREG, | 163 | .n = sizeof(struct pt_regs) / sizeof(u32), |
163 | .size = sizeof(u32), | 164 | .size = sizeof(u32), |
164 | .align = sizeof(u32), | 165 | .align = sizeof(u32), |
165 | .get = &gpr_get, | 166 | .get = &gpr_get, |
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c index ddc4dd79f282..b07a534b3062 100644 --- a/arch/csky/kernel/smp.c +++ b/arch/csky/kernel/smp.c | |||
@@ -160,7 +160,8 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
160 | { | 160 | { |
161 | unsigned long mask = 1 << cpu; | 161 | unsigned long mask = 1 << cpu; |
162 | 162 | ||
163 | secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8; | 163 | secondary_stack = |
164 | (unsigned int) task_stack_page(tidle) + THREAD_SIZE - 8; | ||
164 | secondary_hint = mfcr("cr31"); | 165 | secondary_hint = mfcr("cr31"); |
165 | secondary_ccr = mfcr("cr18"); | 166 | secondary_ccr = mfcr("cr18"); |
166 | 167 | ||
diff --git a/arch/csky/mm/ioremap.c b/arch/csky/mm/ioremap.c index cb7c03e5cd21..8473b6bdf512 100644 --- a/arch/csky/mm/ioremap.c +++ b/arch/csky/mm/ioremap.c | |||
@@ -46,3 +46,17 @@ void iounmap(void __iomem *addr) | |||
46 | vunmap((void *)((unsigned long)addr & PAGE_MASK)); | 46 | vunmap((void *)((unsigned long)addr & PAGE_MASK)); |
47 | } | 47 | } |
48 | EXPORT_SYMBOL(iounmap); | 48 | EXPORT_SYMBOL(iounmap); |
49 | |||
50 | pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | ||
51 | unsigned long size, pgprot_t vma_prot) | ||
52 | { | ||
53 | if (!pfn_valid(pfn)) { | ||
54 | vma_prot.pgprot |= _PAGE_SO; | ||
55 | return pgprot_noncached(vma_prot); | ||
56 | } else if (file->f_flags & O_SYNC) { | ||
57 | return pgprot_noncached(vma_prot); | ||
58 | } | ||
59 | |||
60 | return vma_prot; | ||
61 | } | ||
62 | EXPORT_SYMBOL(phys_mem_access_prot); | ||
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index 38049357d6d3..40712e49381b 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c | |||
@@ -155,18 +155,22 @@ out: | |||
155 | static int __init nfhd_init(void) | 155 | static int __init nfhd_init(void) |
156 | { | 156 | { |
157 | u32 blocks, bsize; | 157 | u32 blocks, bsize; |
158 | int ret; | ||
158 | int i; | 159 | int i; |
159 | 160 | ||
160 | nfhd_id = nf_get_id("XHDI"); | 161 | nfhd_id = nf_get_id("XHDI"); |
161 | if (!nfhd_id) | 162 | if (!nfhd_id) |
162 | return -ENODEV; | 163 | return -ENODEV; |
163 | 164 | ||
164 | major_num = register_blkdev(major_num, "nfhd"); | 165 | ret = register_blkdev(major_num, "nfhd"); |
165 | if (major_num <= 0) { | 166 | if (ret < 0) { |
166 | pr_warn("nfhd: unable to get major number\n"); | 167 | pr_warn("nfhd: unable to get major number\n"); |
167 | return major_num; | 168 | return ret; |
168 | } | 169 | } |
169 | 170 | ||
171 | if (!major_num) | ||
172 | major_num = ret; | ||
173 | |||
170 | for (i = NFHD_DEV_OFFSET; i < 24; i++) { | 174 | for (i = NFHD_DEV_OFFSET; i < 24; i++) { |
171 | if (nfhd_get_capacity(i, 0, &blocks, &bsize)) | 175 | if (nfhd_get_capacity(i, 0, &blocks, &bsize)) |
172 | continue; | 176 | continue; |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 0d14f51d0002..a84c24d894aa 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -1403,6 +1403,21 @@ config LOONGSON3_ENHANCEMENT | |||
1403 | please say 'N' here. If you want a high-performance kernel to run on | 1403 | please say 'N' here. If you want a high-performance kernel to run on |
1404 | new Loongson 3 machines only, please say 'Y' here. | 1404 | new Loongson 3 machines only, please say 'Y' here. |
1405 | 1405 | ||
1406 | config CPU_LOONGSON3_WORKAROUNDS | ||
1407 | bool "Old Loongson 3 LLSC Workarounds" | ||
1408 | default y if SMP | ||
1409 | depends on CPU_LOONGSON3 | ||
1410 | help | ||
1411 | Loongson 3 processors have the llsc issues which require workarounds. | ||
1412 | Without workarounds the system may hang unexpectedly. | ||
1413 | |||
1414 | Newer Loongson 3 will fix these issues and no workarounds are needed. | ||
1415 | The workarounds have no significant side effect on them but may | ||
1416 | decrease the performance of the system so this option should be | ||
1417 | disabled unless the kernel is intended to be run on old systems. | ||
1418 | |||
1419 | If unsure, please say Y. | ||
1420 | |||
1406 | config CPU_LOONGSON2E | 1421 | config CPU_LOONGSON2E |
1407 | bool "Loongson 2E" | 1422 | bool "Loongson 2E" |
1408 | depends on SYS_HAS_CPU_LOONGSON2E | 1423 | depends on SYS_HAS_CPU_LOONGSON2E |
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index 50cff3cbcc6d..4f7b1fa31cf5 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts | |||
@@ -76,7 +76,7 @@ | |||
76 | status = "okay"; | 76 | status = "okay"; |
77 | 77 | ||
78 | pinctrl-names = "default"; | 78 | pinctrl-names = "default"; |
79 | pinctrl-0 = <&pins_uart2>; | 79 | pinctrl-0 = <&pins_uart3>; |
80 | }; | 80 | }; |
81 | 81 | ||
82 | &uart4 { | 82 | &uart4 { |
@@ -196,9 +196,9 @@ | |||
196 | bias-disable; | 196 | bias-disable; |
197 | }; | 197 | }; |
198 | 198 | ||
199 | pins_uart2: uart2 { | 199 | pins_uart3: uart3 { |
200 | function = "uart2"; | 200 | function = "uart3"; |
201 | groups = "uart2-data", "uart2-hwflow"; | 201 | groups = "uart3-data", "uart3-hwflow"; |
202 | bias-disable; | 202 | bias-disable; |
203 | }; | 203 | }; |
204 | 204 | ||
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi index 6fb16fd24035..2beb78a62b7d 100644 --- a/arch/mips/boot/dts/ingenic/jz4740.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi | |||
@@ -161,7 +161,7 @@ | |||
161 | #dma-cells = <2>; | 161 | #dma-cells = <2>; |
162 | 162 | ||
163 | interrupt-parent = <&intc>; | 163 | interrupt-parent = <&intc>; |
164 | interrupts = <29>; | 164 | interrupts = <20>; |
165 | 165 | ||
166 | clocks = <&cgu JZ4740_CLK_DMA>; | 166 | clocks = <&cgu JZ4740_CLK_DMA>; |
167 | 167 | ||
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts index 2152b7ba65fb..cc8dbea0911f 100644 --- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts +++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts | |||
@@ -90,11 +90,11 @@ | |||
90 | interrupts = <0>; | 90 | interrupts = <0>; |
91 | }; | 91 | }; |
92 | 92 | ||
93 | axi_i2c: i2c@10A00000 { | 93 | axi_i2c: i2c@10a00000 { |
94 | compatible = "xlnx,xps-iic-2.00.a"; | 94 | compatible = "xlnx,xps-iic-2.00.a"; |
95 | interrupt-parent = <&axi_intc>; | 95 | interrupt-parent = <&axi_intc>; |
96 | interrupts = <4>; | 96 | interrupts = <4>; |
97 | reg = < 0x10A00000 0x10000 >; | 97 | reg = < 0x10a00000 0x10000 >; |
98 | clocks = <&ext>; | 98 | clocks = <&ext>; |
99 | xlnx,clk-freq = <0x5f5e100>; | 99 | xlnx,clk-freq = <0x5f5e100>; |
100 | xlnx,family = "Artix7"; | 100 | xlnx,family = "Artix7"; |
@@ -106,9 +106,9 @@ | |||
106 | #address-cells = <1>; | 106 | #address-cells = <1>; |
107 | #size-cells = <0>; | 107 | #size-cells = <0>; |
108 | 108 | ||
109 | ad7420@4B { | 109 | ad7420@4b { |
110 | compatible = "adi,adt7420"; | 110 | compatible = "adi,adt7420"; |
111 | reg = <0x4B>; | 111 | reg = <0x4b>; |
112 | }; | 112 | }; |
113 | } ; | 113 | } ; |
114 | }; | 114 | }; |
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 43fcd35e2957..94096299fc56 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h | |||
@@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ | |||
58 | if (kernel_uses_llsc) { \ | 58 | if (kernel_uses_llsc) { \ |
59 | int temp; \ | 59 | int temp; \ |
60 | \ | 60 | \ |
61 | loongson_llsc_mb(); \ | ||
61 | __asm__ __volatile__( \ | 62 | __asm__ __volatile__( \ |
62 | " .set push \n" \ | 63 | " .set push \n" \ |
63 | " .set "MIPS_ISA_LEVEL" \n" \ | 64 | " .set "MIPS_ISA_LEVEL" \n" \ |
@@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ | |||
85 | if (kernel_uses_llsc) { \ | 86 | if (kernel_uses_llsc) { \ |
86 | int temp; \ | 87 | int temp; \ |
87 | \ | 88 | \ |
89 | loongson_llsc_mb(); \ | ||
88 | __asm__ __volatile__( \ | 90 | __asm__ __volatile__( \ |
89 | " .set push \n" \ | 91 | " .set push \n" \ |
90 | " .set "MIPS_ISA_LEVEL" \n" \ | 92 | " .set "MIPS_ISA_LEVEL" \n" \ |
@@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ | |||
118 | if (kernel_uses_llsc) { \ | 120 | if (kernel_uses_llsc) { \ |
119 | int temp; \ | 121 | int temp; \ |
120 | \ | 122 | \ |
123 | loongson_llsc_mb(); \ | ||
121 | __asm__ __volatile__( \ | 124 | __asm__ __volatile__( \ |
122 | " .set push \n" \ | 125 | " .set push \n" \ |
123 | " .set "MIPS_ISA_LEVEL" \n" \ | 126 | " .set "MIPS_ISA_LEVEL" \n" \ |
@@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ | |||
256 | if (kernel_uses_llsc) { \ | 259 | if (kernel_uses_llsc) { \ |
257 | long temp; \ | 260 | long temp; \ |
258 | \ | 261 | \ |
262 | loongson_llsc_mb(); \ | ||
259 | __asm__ __volatile__( \ | 263 | __asm__ __volatile__( \ |
260 | " .set push \n" \ | 264 | " .set push \n" \ |
261 | " .set "MIPS_ISA_LEVEL" \n" \ | 265 | " .set "MIPS_ISA_LEVEL" \n" \ |
@@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ | |||
283 | if (kernel_uses_llsc) { \ | 287 | if (kernel_uses_llsc) { \ |
284 | long temp; \ | 288 | long temp; \ |
285 | \ | 289 | \ |
290 | loongson_llsc_mb(); \ | ||
286 | __asm__ __volatile__( \ | 291 | __asm__ __volatile__( \ |
287 | " .set push \n" \ | 292 | " .set push \n" \ |
288 | " .set "MIPS_ISA_LEVEL" \n" \ | 293 | " .set "MIPS_ISA_LEVEL" \n" \ |
@@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ | |||
316 | if (kernel_uses_llsc) { \ | 321 | if (kernel_uses_llsc) { \ |
317 | long temp; \ | 322 | long temp; \ |
318 | \ | 323 | \ |
324 | loongson_llsc_mb(); \ | ||
319 | __asm__ __volatile__( \ | 325 | __asm__ __volatile__( \ |
320 | " .set push \n" \ | 326 | " .set push \n" \ |
321 | " .set "MIPS_ISA_LEVEL" \n" \ | 327 | " .set "MIPS_ISA_LEVEL" \n" \ |
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index a5eb1bb199a7..b7f6ac5e513c 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h | |||
@@ -222,6 +222,42 @@ | |||
222 | #define __smp_mb__before_atomic() __smp_mb__before_llsc() | 222 | #define __smp_mb__before_atomic() __smp_mb__before_llsc() |
223 | #define __smp_mb__after_atomic() smp_llsc_mb() | 223 | #define __smp_mb__after_atomic() smp_llsc_mb() |
224 | 224 | ||
225 | /* | ||
226 | * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load, | ||
227 | * store or pref) in between an ll & sc can cause the sc instruction to | ||
228 | * erroneously succeed, breaking atomicity. Whilst it's unusual to write code | ||
229 | * containing such sequences, this bug bites harder than we might otherwise | ||
230 | * expect due to reordering & speculation: | ||
231 | * | ||
232 | * 1) A memory access appearing prior to the ll in program order may actually | ||
233 | * be executed after the ll - this is the reordering case. | ||
234 | * | ||
235 | * In order to avoid this we need to place a memory barrier (ie. a sync | ||
236 | * instruction) prior to every ll instruction, in between it & any earlier | ||
237 | * memory access instructions. Many of these cases are already covered by | ||
238 | * smp_mb__before_llsc() but for the remaining cases, typically ones in | ||
239 | * which multiple CPUs may operate on a memory location but ordering is not | ||
240 | * usually guaranteed, we use loongson_llsc_mb() below. | ||
241 | * | ||
242 | * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later. | ||
243 | * | ||
244 | * 2) If a conditional branch exists between an ll & sc with a target outside | ||
245 | * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg() | ||
246 | * or similar, then misprediction of the branch may allow speculative | ||
247 | * execution of memory accesses from outside of the ll-sc loop. | ||
248 | * | ||
249 | * In order to avoid this we need a memory barrier (ie. a sync instruction) | ||
250 | * at each affected branch target, for which we also use loongson_llsc_mb() | ||
251 | * defined below. | ||
252 | * | ||
253 | * This case affects all current Loongson 3 CPUs. | ||
254 | */ | ||
255 | #ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */ | ||
256 | #define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") | ||
257 | #else | ||
258 | #define loongson_llsc_mb() do { } while (0) | ||
259 | #endif | ||
260 | |||
225 | #include <asm-generic/barrier.h> | 261 | #include <asm-generic/barrier.h> |
226 | 262 | ||
227 | #endif /* __ASM_BARRIER_H */ | 263 | #endif /* __ASM_BARRIER_H */ |
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index c4675957b21b..830c93a010c3 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h | |||
@@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
69 | : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); | 69 | : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); |
70 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) | 70 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
71 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { | 71 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { |
72 | loongson_llsc_mb(); | ||
72 | do { | 73 | do { |
73 | __asm__ __volatile__( | 74 | __asm__ __volatile__( |
74 | " " __LL "%0, %1 # set_bit \n" | 75 | " " __LL "%0, %1 # set_bit \n" |
@@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
79 | } while (unlikely(!temp)); | 80 | } while (unlikely(!temp)); |
80 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ | 81 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ |
81 | } else if (kernel_uses_llsc) { | 82 | } else if (kernel_uses_llsc) { |
83 | loongson_llsc_mb(); | ||
82 | do { | 84 | do { |
83 | __asm__ __volatile__( | 85 | __asm__ __volatile__( |
84 | " .set push \n" | 86 | " .set push \n" |
@@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
123 | : "ir" (~(1UL << bit))); | 125 | : "ir" (~(1UL << bit))); |
124 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) | 126 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
125 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { | 127 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { |
128 | loongson_llsc_mb(); | ||
126 | do { | 129 | do { |
127 | __asm__ __volatile__( | 130 | __asm__ __volatile__( |
128 | " " __LL "%0, %1 # clear_bit \n" | 131 | " " __LL "%0, %1 # clear_bit \n" |
@@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
133 | } while (unlikely(!temp)); | 136 | } while (unlikely(!temp)); |
134 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ | 137 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ |
135 | } else if (kernel_uses_llsc) { | 138 | } else if (kernel_uses_llsc) { |
139 | loongson_llsc_mb(); | ||
136 | do { | 140 | do { |
137 | __asm__ __volatile__( | 141 | __asm__ __volatile__( |
138 | " .set push \n" | 142 | " .set push \n" |
@@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
193 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 197 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
194 | unsigned long temp; | 198 | unsigned long temp; |
195 | 199 | ||
200 | loongson_llsc_mb(); | ||
196 | do { | 201 | do { |
197 | __asm__ __volatile__( | 202 | __asm__ __volatile__( |
198 | " .set push \n" | 203 | " .set push \n" |
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index c14d798f3888..b83b0397462d 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h | |||
@@ -50,6 +50,7 @@ | |||
50 | "i" (-EFAULT) \ | 50 | "i" (-EFAULT) \ |
51 | : "memory"); \ | 51 | : "memory"); \ |
52 | } else if (cpu_has_llsc) { \ | 52 | } else if (cpu_has_llsc) { \ |
53 | loongson_llsc_mb(); \ | ||
53 | __asm__ __volatile__( \ | 54 | __asm__ __volatile__( \ |
54 | " .set push \n" \ | 55 | " .set push \n" \ |
55 | " .set noat \n" \ | 56 | " .set noat \n" \ |
@@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
163 | "i" (-EFAULT) | 164 | "i" (-EFAULT) |
164 | : "memory"); | 165 | : "memory"); |
165 | } else if (cpu_has_llsc) { | 166 | } else if (cpu_has_llsc) { |
167 | loongson_llsc_mb(); | ||
166 | __asm__ __volatile__( | 168 | __asm__ __volatile__( |
167 | "# futex_atomic_cmpxchg_inatomic \n" | 169 | "# futex_atomic_cmpxchg_inatomic \n" |
168 | " .set push \n" | 170 | " .set push \n" |
@@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
192 | : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), | 194 | : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), |
193 | "i" (-EFAULT) | 195 | "i" (-EFAULT) |
194 | : "memory"); | 196 | : "memory"); |
197 | loongson_llsc_mb(); | ||
195 | } else | 198 | } else |
196 | return -ENOSYS; | 199 | return -ENOSYS; |
197 | 200 | ||
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 57933fc8fd98..910851c62db3 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h | |||
@@ -228,6 +228,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) | |||
228 | : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) | 228 | : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) |
229 | : [global] "r" (page_global)); | 229 | : [global] "r" (page_global)); |
230 | } else if (kernel_uses_llsc) { | 230 | } else if (kernel_uses_llsc) { |
231 | loongson_llsc_mb(); | ||
231 | __asm__ __volatile__ ( | 232 | __asm__ __volatile__ ( |
232 | " .set push \n" | 233 | " .set push \n" |
233 | " .set "MIPS_ISA_ARCH_LEVEL" \n" | 234 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
@@ -242,6 +243,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) | |||
242 | " .set pop \n" | 243 | " .set pop \n" |
243 | : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) | 244 | : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) |
244 | : [global] "r" (page_global)); | 245 | : [global] "r" (page_global)); |
246 | loongson_llsc_mb(); | ||
245 | } | 247 | } |
246 | #else /* !CONFIG_SMP */ | 248 | #else /* !CONFIG_SMP */ |
247 | if (pte_none(*buddy)) | 249 | if (pte_none(*buddy)) |
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 8f5bd04f320a..7f3f136572de 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c | |||
@@ -457,5 +457,5 @@ void mips_cm_error_report(void) | |||
457 | } | 457 | } |
458 | 458 | ||
459 | /* reprime cause register */ | 459 | /* reprime cause register */ |
460 | write_gcr_error_cause(0); | 460 | write_gcr_error_cause(cm_error); |
461 | } | 461 | } |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 6829a064aac8..339870ed92f7 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size) | |||
371 | static int get_frame_info(struct mips_frame_info *info) | 371 | static int get_frame_info(struct mips_frame_info *info) |
372 | { | 372 | { |
373 | bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); | 373 | bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); |
374 | union mips_instruction insn, *ip, *ip_end; | 374 | union mips_instruction insn, *ip; |
375 | const unsigned int max_insns = 128; | 375 | const unsigned int max_insns = 128; |
376 | unsigned int last_insn_size = 0; | 376 | unsigned int last_insn_size = 0; |
377 | unsigned int i; | 377 | unsigned int i; |
@@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info) | |||
384 | if (!ip) | 384 | if (!ip) |
385 | goto err; | 385 | goto err; |
386 | 386 | ||
387 | ip_end = (void *)ip + info->func_size; | 387 | for (i = 0; i < max_insns; i++) { |
388 | |||
389 | for (i = 0; i < max_insns && ip < ip_end; i++) { | ||
390 | ip = (void *)ip + last_insn_size; | 388 | ip = (void *)ip + last_insn_size; |
389 | |||
391 | if (is_mmips && mm_insn_16bit(ip->halfword[0])) { | 390 | if (is_mmips && mm_insn_16bit(ip->halfword[0])) { |
392 | insn.word = ip->halfword[0] << 16; | 391 | insn.word = ip->halfword[0] << 16; |
393 | last_insn_size = 2; | 392 | last_insn_size = 2; |
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform index 0fce4608aa88..c1a4d4dc4665 100644 --- a/arch/mips/loongson64/Platform +++ b/arch/mips/loongson64/Platform | |||
@@ -23,6 +23,29 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS | |||
23 | endif | 23 | endif |
24 | 24 | ||
25 | cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap | 25 | cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap |
26 | |||
27 | # | ||
28 | # Some versions of binutils, not currently mainline as of 2019/02/04, support | ||
29 | # an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction | ||
30 | # to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a | ||
31 | # description). | ||
32 | # | ||
33 | # We disable this in order to prevent the assembler meddling with the | ||
34 | # instruction that labels refer to, ie. if we label an ll instruction: | ||
35 | # | ||
36 | # 1: ll v0, 0(a0) | ||
37 | # | ||
38 | # ...then with the assembler fix applied the label may actually point at a sync | ||
39 | # instruction inserted by the assembler, and if we were using the label in an | ||
40 | # exception table the table would no longer contain the address of the ll | ||
41 | # instruction. | ||
42 | # | ||
43 | # Avoid this by explicitly disabling that assembler behaviour. If upstream | ||
44 | # binutils does not merge support for the flag then we can revisit & remove | ||
45 | # this later - for now it ensures vendor toolchains don't cause problems. | ||
46 | # | ||
47 | cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,) | ||
48 | |||
26 | # | 49 | # |
27 | # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a | 50 | # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a |
28 | # as MIPS64 R2; older versions as just R1. This leaves the possibility open | 51 | # as MIPS64 R2; older versions as just R1. This leaves the possibility open |
diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c index a60715e11306..b26892ce871c 100644 --- a/arch/mips/loongson64/common/reset.c +++ b/arch/mips/loongson64/common/reset.c | |||
@@ -59,7 +59,12 @@ static void loongson_poweroff(void) | |||
59 | { | 59 | { |
60 | #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE | 60 | #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE |
61 | mach_prepare_shutdown(); | 61 | mach_prepare_shutdown(); |
62 | unreachable(); | 62 | |
63 | /* | ||
64 | * It needs a wait loop here, but mips/kernel/reset.c already calls | ||
65 | * a generic delay loop, machine_hang(), so simply return. | ||
66 | */ | ||
67 | return; | ||
63 | #else | 68 | #else |
64 | void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; | 69 | void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; |
65 | 70 | ||
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 37b1cb246332..65b6e85447b1 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -932,6 +932,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, | |||
932 | * to mimic that here by taking a load/istream page | 932 | * to mimic that here by taking a load/istream page |
933 | * fault. | 933 | * fault. |
934 | */ | 934 | */ |
935 | if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) | ||
936 | uasm_i_sync(p, 0); | ||
935 | UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); | 937 | UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); |
936 | uasm_i_jr(p, ptr); | 938 | uasm_i_jr(p, ptr); |
937 | 939 | ||
@@ -1646,6 +1648,8 @@ static void | |||
1646 | iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) | 1648 | iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) |
1647 | { | 1649 | { |
1648 | #ifdef CONFIG_SMP | 1650 | #ifdef CONFIG_SMP |
1651 | if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) | ||
1652 | uasm_i_sync(p, 0); | ||
1649 | # ifdef CONFIG_PHYS_ADDR_T_64BIT | 1653 | # ifdef CONFIG_PHYS_ADDR_T_64BIT |
1650 | if (cpu_has_64bits) | 1654 | if (cpu_has_64bits) |
1651 | uasm_i_lld(p, pte, 0, ptr); | 1655 | uasm_i_lld(p, pte, 0, ptr); |
@@ -2259,6 +2263,8 @@ static void build_r4000_tlb_load_handler(void) | |||
2259 | #endif | 2263 | #endif |
2260 | 2264 | ||
2261 | uasm_l_nopage_tlbl(&l, p); | 2265 | uasm_l_nopage_tlbl(&l, p); |
2266 | if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) | ||
2267 | uasm_i_sync(&p, 0); | ||
2262 | build_restore_work_registers(&p); | 2268 | build_restore_work_registers(&p); |
2263 | #ifdef CONFIG_CPU_MICROMIPS | 2269 | #ifdef CONFIG_CPU_MICROMIPS |
2264 | if ((unsigned long)tlb_do_page_fault_0 & 1) { | 2270 | if ((unsigned long)tlb_do_page_fault_0 & 1) { |
@@ -2313,6 +2319,8 @@ static void build_r4000_tlb_store_handler(void) | |||
2313 | #endif | 2319 | #endif |
2314 | 2320 | ||
2315 | uasm_l_nopage_tlbs(&l, p); | 2321 | uasm_l_nopage_tlbs(&l, p); |
2322 | if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) | ||
2323 | uasm_i_sync(&p, 0); | ||
2316 | build_restore_work_registers(&p); | 2324 | build_restore_work_registers(&p); |
2317 | #ifdef CONFIG_CPU_MICROMIPS | 2325 | #ifdef CONFIG_CPU_MICROMIPS |
2318 | if ((unsigned long)tlb_do_page_fault_1 & 1) { | 2326 | if ((unsigned long)tlb_do_page_fault_1 & 1) { |
@@ -2368,6 +2376,8 @@ static void build_r4000_tlb_modify_handler(void) | |||
2368 | #endif | 2376 | #endif |
2369 | 2377 | ||
2370 | uasm_l_nopage_tlbm(&l, p); | 2378 | uasm_l_nopage_tlbm(&l, p); |
2379 | if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) | ||
2380 | uasm_i_sync(&p, 0); | ||
2371 | build_restore_work_registers(&p); | 2381 | build_restore_work_registers(&p); |
2372 | #ifdef CONFIG_CPU_MICROMIPS | 2382 | #ifdef CONFIG_CPU_MICROMIPS |
2373 | if ((unsigned long)tlb_do_page_fault_1 & 1) { | 2383 | if ((unsigned long)tlb_do_page_fault_1 & 1) { |
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index b16710a8a9e7..76e9bf88d3b9 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c | |||
@@ -79,8 +79,6 @@ enum reg_val_type { | |||
79 | REG_64BIT_32BIT, | 79 | REG_64BIT_32BIT, |
80 | /* 32-bit compatible, need truncation for 64-bit ops. */ | 80 | /* 32-bit compatible, need truncation for 64-bit ops. */ |
81 | REG_32BIT, | 81 | REG_32BIT, |
82 | /* 32-bit zero extended. */ | ||
83 | REG_32BIT_ZERO_EX, | ||
84 | /* 32-bit no sign/zero extension needed. */ | 82 | /* 32-bit no sign/zero extension needed. */ |
85 | REG_32BIT_POS | 83 | REG_32BIT_POS |
86 | }; | 84 | }; |
@@ -343,12 +341,15 @@ static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg) | |||
343 | const struct bpf_prog *prog = ctx->skf; | 341 | const struct bpf_prog *prog = ctx->skf; |
344 | int stack_adjust = ctx->stack_size; | 342 | int stack_adjust = ctx->stack_size; |
345 | int store_offset = stack_adjust - 8; | 343 | int store_offset = stack_adjust - 8; |
344 | enum reg_val_type td; | ||
346 | int r0 = MIPS_R_V0; | 345 | int r0 = MIPS_R_V0; |
347 | 346 | ||
348 | if (dest_reg == MIPS_R_RA && | 347 | if (dest_reg == MIPS_R_RA) { |
349 | get_reg_val_type(ctx, prog->len, BPF_REG_0) == REG_32BIT_ZERO_EX) | ||
350 | /* Don't let zero extended value escape. */ | 348 | /* Don't let zero extended value escape. */ |
351 | emit_instr(ctx, sll, r0, r0, 0); | 349 | td = get_reg_val_type(ctx, prog->len, BPF_REG_0); |
350 | if (td == REG_64BIT) | ||
351 | emit_instr(ctx, sll, r0, r0, 0); | ||
352 | } | ||
352 | 353 | ||
353 | if (ctx->flags & EBPF_SAVE_RA) { | 354 | if (ctx->flags & EBPF_SAVE_RA) { |
354 | emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); | 355 | emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP); |
@@ -692,7 +693,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
692 | if (dst < 0) | 693 | if (dst < 0) |
693 | return dst; | 694 | return dst; |
694 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 695 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
695 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 696 | if (td == REG_64BIT) { |
696 | /* sign extend */ | 697 | /* sign extend */ |
697 | emit_instr(ctx, sll, dst, dst, 0); | 698 | emit_instr(ctx, sll, dst, dst, 0); |
698 | } | 699 | } |
@@ -707,7 +708,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
707 | if (dst < 0) | 708 | if (dst < 0) |
708 | return dst; | 709 | return dst; |
709 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 710 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
710 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 711 | if (td == REG_64BIT) { |
711 | /* sign extend */ | 712 | /* sign extend */ |
712 | emit_instr(ctx, sll, dst, dst, 0); | 713 | emit_instr(ctx, sll, dst, dst, 0); |
713 | } | 714 | } |
@@ -721,7 +722,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
721 | if (dst < 0) | 722 | if (dst < 0) |
722 | return dst; | 723 | return dst; |
723 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 724 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
724 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) | 725 | if (td == REG_64BIT) |
725 | /* sign extend */ | 726 | /* sign extend */ |
726 | emit_instr(ctx, sll, dst, dst, 0); | 727 | emit_instr(ctx, sll, dst, dst, 0); |
727 | if (insn->imm == 1) { | 728 | if (insn->imm == 1) { |
@@ -860,13 +861,13 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
860 | if (src < 0 || dst < 0) | 861 | if (src < 0 || dst < 0) |
861 | return -EINVAL; | 862 | return -EINVAL; |
862 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); | 863 | td = get_reg_val_type(ctx, this_idx, insn->dst_reg); |
863 | if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) { | 864 | if (td == REG_64BIT) { |
864 | /* sign extend */ | 865 | /* sign extend */ |
865 | emit_instr(ctx, sll, dst, dst, 0); | 866 | emit_instr(ctx, sll, dst, dst, 0); |
866 | } | 867 | } |
867 | did_move = false; | 868 | did_move = false; |
868 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); | 869 | ts = get_reg_val_type(ctx, this_idx, insn->src_reg); |
869 | if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) { | 870 | if (ts == REG_64BIT) { |
870 | int tmp_reg = MIPS_R_AT; | 871 | int tmp_reg = MIPS_R_AT; |
871 | 872 | ||
872 | if (bpf_op == BPF_MOV) { | 873 | if (bpf_op == BPF_MOV) { |
@@ -1254,8 +1255,7 @@ jeq_common: | |||
1254 | if (insn->imm == 64 && td == REG_32BIT) | 1255 | if (insn->imm == 64 && td == REG_32BIT) |
1255 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); | 1256 | emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32); |
1256 | 1257 | ||
1257 | if (insn->imm != 64 && | 1258 | if (insn->imm != 64 && td == REG_64BIT) { |
1258 | (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) { | ||
1259 | /* sign extend */ | 1259 | /* sign extend */ |
1260 | emit_instr(ctx, sll, dst, dst, 0); | 1260 | emit_instr(ctx, sll, dst, dst, 0); |
1261 | } | 1261 | } |
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 5017d5843c5a..fc29b85cfa92 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c | |||
@@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void) | |||
568 | if (octeon_has_feature(OCTEON_FEATURE_PCIE)) | 568 | if (octeon_has_feature(OCTEON_FEATURE_PCIE)) |
569 | return 0; | 569 | return 0; |
570 | 570 | ||
571 | if (!octeon_is_pci_host()) { | ||
572 | pr_notice("Not in host mode, PCI Controller not initialized\n"); | ||
573 | return 0; | ||
574 | } | ||
575 | |||
571 | /* Point pcibios_map_irq() to the PCI version of it */ | 576 | /* Point pcibios_map_irq() to the PCI version of it */ |
572 | octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; | 577 | octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; |
573 | 578 | ||
@@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void) | |||
579 | else | 584 | else |
580 | octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; | 585 | octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; |
581 | 586 | ||
582 | if (!octeon_is_pci_host()) { | ||
583 | pr_notice("Not in host mode, PCI Controller not initialized\n"); | ||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | /* PCI I/O and PCI MEM values */ | 587 | /* PCI I/O and PCI MEM values */ |
588 | set_io_port_base(OCTEON_PCI_IOSPACE_BASE); | 588 | set_io_port_base(OCTEON_PCI_IOSPACE_BASE); |
589 | ioport_resource.start = 0; | 589 | ioport_resource.start = 0; |
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index f6fd340e39c2..0ede4deb8181 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile | |||
@@ -8,6 +8,7 @@ ccflags-vdso := \ | |||
8 | $(filter -E%,$(KBUILD_CFLAGS)) \ | 8 | $(filter -E%,$(KBUILD_CFLAGS)) \ |
9 | $(filter -mmicromips,$(KBUILD_CFLAGS)) \ | 9 | $(filter -mmicromips,$(KBUILD_CFLAGS)) \ |
10 | $(filter -march=%,$(KBUILD_CFLAGS)) \ | 10 | $(filter -march=%,$(KBUILD_CFLAGS)) \ |
11 | $(filter -m%-float,$(KBUILD_CFLAGS)) \ | ||
11 | -D__VDSO__ | 12 | -D__VDSO__ |
12 | 13 | ||
13 | ifdef CONFIG_CC_IS_CLANG | 14 | ifdef CONFIG_CC_IS_CLANG |
@@ -129,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE | |||
129 | $(call cmd,force_checksrc) | 130 | $(call cmd,force_checksrc) |
130 | $(call if_changed_rule,cc_o_c) | 131 | $(call if_changed_rule,cc_o_c) |
131 | 132 | ||
132 | $(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32 | 133 | $(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32 |
133 | $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE | 134 | $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE |
134 | $(call if_changed_dep,cpp_lds_S) | 135 | $(call if_changed_dep,cpp_lds_S) |
135 | 136 | ||
@@ -169,7 +170,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE | |||
169 | $(call cmd,force_checksrc) | 170 | $(call cmd,force_checksrc) |
170 | $(call if_changed_rule,cc_o_c) | 171 | $(call if_changed_rule,cc_o_c) |
171 | 172 | ||
172 | $(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32 | 173 | $(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32 |
173 | $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE | 174 | $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE |
174 | $(call if_changed_dep,cpp_lds_S) | 175 | $(call if_changed_dep,cpp_lds_S) |
175 | 176 | ||
diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index 2582df1c529b..0964c236e3e5 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c | |||
@@ -308,15 +308,29 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
308 | 308 | ||
309 | long do_syscall_trace_enter(struct pt_regs *regs) | 309 | long do_syscall_trace_enter(struct pt_regs *regs) |
310 | { | 310 | { |
311 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | 311 | if (test_thread_flag(TIF_SYSCALL_TRACE)) { |
312 | tracehook_report_syscall_entry(regs)) { | 312 | int rc = tracehook_report_syscall_entry(regs); |
313 | |||
313 | /* | 314 | /* |
314 | * Tracing decided this syscall should not happen or the | 315 | * As tracesys_next does not set %r28 to -ENOSYS |
315 | * debugger stored an invalid system call number. Skip | 316 | * when %r20 is set to -1, initialize it here. |
316 | * the system call and the system call restart handling. | ||
317 | */ | 317 | */ |
318 | regs->gr[20] = -1UL; | 318 | regs->gr[28] = -ENOSYS; |
319 | goto out; | 319 | |
320 | if (rc) { | ||
321 | /* | ||
322 | * A nonzero return code from | ||
323 | * tracehook_report_syscall_entry() tells us | ||
324 | * to prevent the syscall execution. Skip | ||
325 | * the syscall call and the syscall restart handling. | ||
326 | * | ||
327 | * Note that the tracer may also just change | ||
328 | * regs->gr[20] to an invalid syscall number, | ||
329 | * that is handled by tracesys_next. | ||
330 | */ | ||
331 | regs->gr[20] = -1UL; | ||
332 | return -1; | ||
333 | } | ||
320 | } | 334 | } |
321 | 335 | ||
322 | /* Do the secure computing check after ptrace. */ | 336 | /* Do the secure computing check after ptrace. */ |
@@ -340,7 +354,6 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
340 | regs->gr[24] & 0xffffffff, | 354 | regs->gr[24] & 0xffffffff, |
341 | regs->gr[23] & 0xffffffff); | 355 | regs->gr[23] & 0xffffffff); |
342 | 356 | ||
343 | out: | ||
344 | /* | 357 | /* |
345 | * Sign extend the syscall number to 64bit since it may have been | 358 | * Sign extend the syscall number to 64bit since it may have been |
346 | * modified by a compat ptrace call | 359 | * modified by a compat ptrace call |
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 2e6ada28da64..d8c8d7c9df15 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h | |||
@@ -904,7 +904,7 @@ static inline int pud_none(pud_t pud) | |||
904 | 904 | ||
905 | static inline int pud_present(pud_t pud) | 905 | static inline int pud_present(pud_t pud) |
906 | { | 906 | { |
907 | return (pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); | 907 | return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PRESENT)); |
908 | } | 908 | } |
909 | 909 | ||
910 | extern struct page *pud_page(pud_t pud); | 910 | extern struct page *pud_page(pud_t pud); |
@@ -951,7 +951,7 @@ static inline int pgd_none(pgd_t pgd) | |||
951 | 951 | ||
952 | static inline int pgd_present(pgd_t pgd) | 952 | static inline int pgd_present(pgd_t pgd) |
953 | { | 953 | { |
954 | return (pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); | 954 | return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); |
955 | } | 955 | } |
956 | 956 | ||
957 | static inline pte_t pgd_pte(pgd_t pgd) | 957 | static inline pte_t pgd_pte(pgd_t pgd) |
@@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, | |||
1258 | 1258 | ||
1259 | #define pmd_move_must_withdraw pmd_move_must_withdraw | 1259 | #define pmd_move_must_withdraw pmd_move_must_withdraw |
1260 | struct spinlock; | 1260 | struct spinlock; |
1261 | static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, | 1261 | extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, |
1262 | struct spinlock *old_pmd_ptl, | 1262 | struct spinlock *old_pmd_ptl, |
1263 | struct vm_area_struct *vma) | 1263 | struct vm_area_struct *vma); |
1264 | { | 1264 | /* |
1265 | if (radix_enabled()) | 1265 | * Hash translation mode use the deposited table to store hash pte |
1266 | return false; | 1266 | * slot information. |
1267 | /* | 1267 | */ |
1268 | * Archs like ppc64 use pgtable to store per pmd | ||
1269 | * specific information. So when we switch the pmd, | ||
1270 | * we should also withdraw and deposit the pgtable | ||
1271 | */ | ||
1272 | return true; | ||
1273 | } | ||
1274 | |||
1275 | |||
1276 | #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit | 1268 | #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit |
1277 | static inline bool arch_needs_pgtable_deposit(void) | 1269 | static inline bool arch_needs_pgtable_deposit(void) |
1278 | { | 1270 | { |
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index f3c31f5e1026..ecd31569a120 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c | |||
@@ -400,3 +400,25 @@ void arch_report_meminfo(struct seq_file *m) | |||
400 | atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); | 400 | atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); |
401 | } | 401 | } |
402 | #endif /* CONFIG_PROC_FS */ | 402 | #endif /* CONFIG_PROC_FS */ |
403 | |||
404 | /* | ||
405 | * For hash translation mode, we use the deposited table to store hash slot | ||
406 | * information and they are stored at PTRS_PER_PMD offset from related pmd | ||
407 | * location. Hence a pmd move requires deposit and withdraw. | ||
408 | * | ||
409 | * For radix translation with split pmd ptl, we store the deposited table in the | ||
410 | * pmd page. Hence if we have different pmd page we need to withdraw during pmd | ||
411 | * move. | ||
412 | * | ||
413 | * With hash we use deposited table always irrespective of anon or not. | ||
414 | * With radix we use deposited table only for anonymous mapping. | ||
415 | */ | ||
416 | int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, | ||
417 | struct spinlock *old_pmd_ptl, | ||
418 | struct vm_area_struct *vma) | ||
419 | { | ||
420 | if (radix_enabled()) | ||
421 | return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); | ||
422 | |||
423 | return true; | ||
424 | } | ||
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 7db3119f8a5b..145373f0e5dc 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -1593,6 +1593,8 @@ static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs) | |||
1593 | 1593 | ||
1594 | pnv_pci_ioda2_setup_dma_pe(phb, pe); | 1594 | pnv_pci_ioda2_setup_dma_pe(phb, pe); |
1595 | #ifdef CONFIG_IOMMU_API | 1595 | #ifdef CONFIG_IOMMU_API |
1596 | iommu_register_group(&pe->table_group, | ||
1597 | pe->phb->hose->global_number, pe->pe_number); | ||
1596 | pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL); | 1598 | pnv_ioda_setup_bus_iommu_group(pe, &pe->table_group, NULL); |
1597 | #endif | 1599 | #endif |
1598 | } | 1600 | } |
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 45fb70b4bfa7..ef9448a907c6 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -1147,6 +1147,8 @@ static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb, | |||
1147 | return 0; | 1147 | return 0; |
1148 | 1148 | ||
1149 | pe = &phb->ioda.pe_array[pdn->pe_number]; | 1149 | pe = &phb->ioda.pe_array[pdn->pe_number]; |
1150 | if (!pe->table_group.group) | ||
1151 | return 0; | ||
1150 | iommu_add_device(&pe->table_group, dev); | 1152 | iommu_add_device(&pe->table_group, dev); |
1151 | return 0; | 1153 | return 0; |
1152 | case BUS_NOTIFY_DEL_DEVICE: | 1154 | case BUS_NOTIFY_DEL_DEVICE: |
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 7d6457ab5d34..bba281b1fe1b 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c | |||
@@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) | |||
43 | { | 43 | { |
44 | unsigned long ret[PLPAR_HCALL_BUFSIZE]; | 44 | unsigned long ret[PLPAR_HCALL_BUFSIZE]; |
45 | uint64_t rc, token; | 45 | uint64_t rc, token; |
46 | uint64_t saved = 0; | ||
46 | 47 | ||
47 | /* | 48 | /* |
48 | * When the hypervisor cannot map all the requested memory in a single | 49 | * When the hypervisor cannot map all the requested memory in a single |
@@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p) | |||
56 | rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, | 57 | rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, |
57 | p->blocks, BIND_ANY_ADDR, token); | 58 | p->blocks, BIND_ANY_ADDR, token); |
58 | token = ret[0]; | 59 | token = ret[0]; |
60 | if (!saved) | ||
61 | saved = ret[1]; | ||
59 | cond_resched(); | 62 | cond_resched(); |
60 | } while (rc == H_BUSY); | 63 | } while (rc == H_BUSY); |
61 | 64 | ||
@@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) | |||
64 | return -ENXIO; | 67 | return -ENXIO; |
65 | } | 68 | } |
66 | 69 | ||
67 | p->bound_addr = ret[1]; | 70 | p->bound_addr = saved; |
68 | 71 | ||
69 | dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); | 72 | dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); |
70 | 73 | ||
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index 2fa2942be221..470755cb7558 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h | |||
@@ -35,6 +35,12 @@ | |||
35 | #define _PAGE_SPECIAL _PAGE_SOFT | 35 | #define _PAGE_SPECIAL _PAGE_SOFT |
36 | #define _PAGE_TABLE _PAGE_PRESENT | 36 | #define _PAGE_TABLE _PAGE_PRESENT |
37 | 37 | ||
38 | /* | ||
39 | * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to | ||
40 | * distinguish them from swapped out pages | ||
41 | */ | ||
42 | #define _PAGE_PROT_NONE _PAGE_READ | ||
43 | |||
38 | #define _PAGE_PFN_SHIFT 10 | 44 | #define _PAGE_PFN_SHIFT 10 |
39 | 45 | ||
40 | /* Set of bits to preserve across pte_modify() */ | 46 | /* Set of bits to preserve across pte_modify() */ |
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 16301966d65b..a8179a8c1491 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h | |||
@@ -44,7 +44,7 @@ | |||
44 | /* Page protection bits */ | 44 | /* Page protection bits */ |
45 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) | 45 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) |
46 | 46 | ||
47 | #define PAGE_NONE __pgprot(0) | 47 | #define PAGE_NONE __pgprot(_PAGE_PROT_NONE) |
48 | #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) | 48 | #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) |
49 | #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) | 49 | #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) |
50 | #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) | 50 | #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) |
@@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
98 | 98 | ||
99 | static inline int pmd_present(pmd_t pmd) | 99 | static inline int pmd_present(pmd_t pmd) |
100 | { | 100 | { |
101 | return (pmd_val(pmd) & _PAGE_PRESENT); | 101 | return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); |
102 | } | 102 | } |
103 | 103 | ||
104 | static inline int pmd_none(pmd_t pmd) | 104 | static inline int pmd_none(pmd_t pmd) |
@@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr) | |||
178 | 178 | ||
179 | static inline int pte_present(pte_t pte) | 179 | static inline int pte_present(pte_t pte) |
180 | { | 180 | { |
181 | return (pte_val(pte) & _PAGE_PRESENT); | 181 | return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); |
182 | } | 182 | } |
183 | 183 | ||
184 | static inline int pte_none(pte_t pte) | 184 | static inline int pte_none(pte_t pte) |
@@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, | |||
380 | * | 380 | * |
381 | * Format of swap PTE: | 381 | * Format of swap PTE: |
382 | * bit 0: _PAGE_PRESENT (zero) | 382 | * bit 0: _PAGE_PRESENT (zero) |
383 | * bit 1: reserved for future use (zero) | 383 | * bit 1: _PAGE_PROT_NONE (zero) |
384 | * bits 2 to 6: swap type | 384 | * bits 2 to 6: swap type |
385 | * bits 7 to XLEN-1: swap offset | 385 | * bits 7 to XLEN-1: swap offset |
386 | */ | 386 | */ |
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 1e1395d63dab..65df1dfdc303 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S | |||
@@ -18,8 +18,6 @@ | |||
18 | #include <asm/cache.h> | 18 | #include <asm/cache.h> |
19 | #include <asm/thread_info.h> | 19 | #include <asm/thread_info.h> |
20 | 20 | ||
21 | #define MAX_BYTES_PER_LONG 0x10 | ||
22 | |||
23 | OUTPUT_ARCH(riscv) | 21 | OUTPUT_ARCH(riscv) |
24 | ENTRY(_start) | 22 | ENTRY(_start) |
25 | 23 | ||
@@ -76,6 +74,8 @@ SECTIONS | |||
76 | *(.sbss*) | 74 | *(.sbss*) |
77 | } | 75 | } |
78 | 76 | ||
77 | BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) | ||
78 | |||
79 | EXCEPTION_TABLE(0x10) | 79 | EXCEPTION_TABLE(0x10) |
80 | NOTES | 80 | NOTES |
81 | 81 | ||
@@ -83,10 +83,6 @@ SECTIONS | |||
83 | *(.rel.dyn*) | 83 | *(.rel.dyn*) |
84 | } | 84 | } |
85 | 85 | ||
86 | BSS_SECTION(MAX_BYTES_PER_LONG, | ||
87 | MAX_BYTES_PER_LONG, | ||
88 | MAX_BYTES_PER_LONG) | ||
89 | |||
90 | _end = .; | 86 | _end = .; |
91 | 87 | ||
92 | STABS_DEBUG | 88 | STABS_DEBUG |
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index 537f97fde37f..b6796e616812 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S | |||
@@ -30,10 +30,10 @@ | |||
30 | .section .text | 30 | .section .text |
31 | ENTRY(swsusp_arch_suspend) | 31 | ENTRY(swsusp_arch_suspend) |
32 | lg %r1,__LC_NODAT_STACK | 32 | lg %r1,__LC_NODAT_STACK |
33 | aghi %r1,-STACK_FRAME_OVERHEAD | ||
34 | stmg %r6,%r15,__SF_GPRS(%r1) | 33 | stmg %r6,%r15,__SF_GPRS(%r1) |
34 | aghi %r1,-STACK_FRAME_OVERHEAD | ||
35 | stg %r15,__SF_BACKCHAIN(%r1) | 35 | stg %r15,__SF_BACKCHAIN(%r1) |
36 | lgr %r1,%r15 | 36 | lgr %r15,%r1 |
37 | 37 | ||
38 | /* Store FPU registers */ | 38 | /* Store FPU registers */ |
39 | brasl %r14,save_fpu_regs | 39 | brasl %r14,save_fpu_regs |
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c index a153257bf7d9..d62fa148558b 100644 --- a/arch/s390/kvm/vsie.c +++ b/arch/s390/kvm/vsie.c | |||
@@ -297,7 +297,7 @@ static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page) | |||
297 | scb_s->crycbd = 0; | 297 | scb_s->crycbd = 0; |
298 | 298 | ||
299 | apie_h = vcpu->arch.sie_block->eca & ECA_APIE; | 299 | apie_h = vcpu->arch.sie_block->eca & ECA_APIE; |
300 | if (!apie_h && !key_msk) | 300 | if (!apie_h && (!key_msk || fmt_o == CRYCB_FORMAT0)) |
301 | return 0; | 301 | return 0; |
302 | 302 | ||
303 | if (!crycb_addr) | 303 | if (!crycb_addr) |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index a966d7bfac57..4266a4de3160 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -382,7 +382,9 @@ static void zpci_irq_handler(struct airq_struct *airq) | |||
382 | if (ai == -1UL) | 382 | if (ai == -1UL) |
383 | break; | 383 | break; |
384 | inc_irq_stat(IRQIO_MSI); | 384 | inc_irq_stat(IRQIO_MSI); |
385 | airq_iv_lock(aibv, ai); | ||
385 | generic_handle_irq(airq_iv_get_data(aibv, ai)); | 386 | generic_handle_irq(airq_iv_get_data(aibv, ai)); |
387 | airq_iv_unlock(aibv, ai); | ||
386 | } | 388 | } |
387 | } | 389 | } |
388 | } | 390 | } |
@@ -408,7 +410,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
408 | zdev->aisb = aisb; | 410 | zdev->aisb = aisb; |
409 | 411 | ||
410 | /* Create adapter interrupt vector */ | 412 | /* Create adapter interrupt vector */ |
411 | zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA); | 413 | zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK); |
412 | if (!zdev->aibv) | 414 | if (!zdev->aibv) |
413 | return -ENOMEM; | 415 | return -ENOMEM; |
414 | 416 | ||
diff --git a/arch/sh/boot/dts/Makefile b/arch/sh/boot/dts/Makefile index 01d0f7fb14cc..2563d1e532e2 100644 --- a/arch/sh/boot/dts/Makefile +++ b/arch/sh/boot/dts/Makefile | |||
@@ -1,3 +1,3 @@ | |||
1 | ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") | 1 | ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") |
2 | obj-y += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o | 2 | obj-$(CONFIG_USE_BUILTIN_DTB) += $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o |
3 | endif | 3 | endif |
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index f105ae8651c9..f62e347862cc 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
@@ -602,10 +602,12 @@ ENTRY(trampoline_32bit_src) | |||
602 | 3: | 602 | 3: |
603 | /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */ | 603 | /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */ |
604 | pushl %ecx | 604 | pushl %ecx |
605 | pushl %edx | ||
605 | movl $MSR_EFER, %ecx | 606 | movl $MSR_EFER, %ecx |
606 | rdmsr | 607 | rdmsr |
607 | btsl $_EFER_LME, %eax | 608 | btsl $_EFER_LME, %eax |
608 | wrmsr | 609 | wrmsr |
610 | popl %edx | ||
609 | popl %ecx | 611 | popl %ecx |
610 | 612 | ||
611 | /* Enable PAE and LA57 (if required) paging modes */ | 613 | /* Enable PAE and LA57 (if required) paging modes */ |
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 374a19712e20..b684f0294f35 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -2278,6 +2278,19 @@ void perf_check_microcode(void) | |||
2278 | x86_pmu.check_microcode(); | 2278 | x86_pmu.check_microcode(); |
2279 | } | 2279 | } |
2280 | 2280 | ||
2281 | static int x86_pmu_check_period(struct perf_event *event, u64 value) | ||
2282 | { | ||
2283 | if (x86_pmu.check_period && x86_pmu.check_period(event, value)) | ||
2284 | return -EINVAL; | ||
2285 | |||
2286 | if (value && x86_pmu.limit_period) { | ||
2287 | if (x86_pmu.limit_period(event, value) > value) | ||
2288 | return -EINVAL; | ||
2289 | } | ||
2290 | |||
2291 | return 0; | ||
2292 | } | ||
2293 | |||
2281 | static struct pmu pmu = { | 2294 | static struct pmu pmu = { |
2282 | .pmu_enable = x86_pmu_enable, | 2295 | .pmu_enable = x86_pmu_enable, |
2283 | .pmu_disable = x86_pmu_disable, | 2296 | .pmu_disable = x86_pmu_disable, |
@@ -2302,6 +2315,7 @@ static struct pmu pmu = { | |||
2302 | .event_idx = x86_pmu_event_idx, | 2315 | .event_idx = x86_pmu_event_idx, |
2303 | .sched_task = x86_pmu_sched_task, | 2316 | .sched_task = x86_pmu_sched_task, |
2304 | .task_ctx_size = sizeof(struct x86_perf_task_context), | 2317 | .task_ctx_size = sizeof(struct x86_perf_task_context), |
2318 | .check_period = x86_pmu_check_period, | ||
2305 | }; | 2319 | }; |
2306 | 2320 | ||
2307 | void arch_perf_update_userpage(struct perf_event *event, | 2321 | void arch_perf_update_userpage(struct perf_event *event, |
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 40e12cfc87f6..730978dff63f 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
@@ -3559,6 +3559,14 @@ static void free_excl_cntrs(int cpu) | |||
3559 | 3559 | ||
3560 | static void intel_pmu_cpu_dying(int cpu) | 3560 | static void intel_pmu_cpu_dying(int cpu) |
3561 | { | 3561 | { |
3562 | fini_debug_store_on_cpu(cpu); | ||
3563 | |||
3564 | if (x86_pmu.counter_freezing) | ||
3565 | disable_counter_freeze(); | ||
3566 | } | ||
3567 | |||
3568 | static void intel_pmu_cpu_dead(int cpu) | ||
3569 | { | ||
3562 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 3570 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
3563 | struct intel_shared_regs *pc; | 3571 | struct intel_shared_regs *pc; |
3564 | 3572 | ||
@@ -3570,11 +3578,6 @@ static void intel_pmu_cpu_dying(int cpu) | |||
3570 | } | 3578 | } |
3571 | 3579 | ||
3572 | free_excl_cntrs(cpu); | 3580 | free_excl_cntrs(cpu); |
3573 | |||
3574 | fini_debug_store_on_cpu(cpu); | ||
3575 | |||
3576 | if (x86_pmu.counter_freezing) | ||
3577 | disable_counter_freeze(); | ||
3578 | } | 3581 | } |
3579 | 3582 | ||
3580 | static void intel_pmu_sched_task(struct perf_event_context *ctx, | 3583 | static void intel_pmu_sched_task(struct perf_event_context *ctx, |
@@ -3584,6 +3587,11 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx, | |||
3584 | intel_pmu_lbr_sched_task(ctx, sched_in); | 3587 | intel_pmu_lbr_sched_task(ctx, sched_in); |
3585 | } | 3588 | } |
3586 | 3589 | ||
3590 | static int intel_pmu_check_period(struct perf_event *event, u64 value) | ||
3591 | { | ||
3592 | return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0; | ||
3593 | } | ||
3594 | |||
3587 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); | 3595 | PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63"); |
3588 | 3596 | ||
3589 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); | 3597 | PMU_FORMAT_ATTR(ldlat, "config1:0-15"); |
@@ -3663,6 +3671,9 @@ static __initconst const struct x86_pmu core_pmu = { | |||
3663 | .cpu_prepare = intel_pmu_cpu_prepare, | 3671 | .cpu_prepare = intel_pmu_cpu_prepare, |
3664 | .cpu_starting = intel_pmu_cpu_starting, | 3672 | .cpu_starting = intel_pmu_cpu_starting, |
3665 | .cpu_dying = intel_pmu_cpu_dying, | 3673 | .cpu_dying = intel_pmu_cpu_dying, |
3674 | .cpu_dead = intel_pmu_cpu_dead, | ||
3675 | |||
3676 | .check_period = intel_pmu_check_period, | ||
3666 | }; | 3677 | }; |
3667 | 3678 | ||
3668 | static struct attribute *intel_pmu_attrs[]; | 3679 | static struct attribute *intel_pmu_attrs[]; |
@@ -3703,8 +3714,12 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
3703 | .cpu_prepare = intel_pmu_cpu_prepare, | 3714 | .cpu_prepare = intel_pmu_cpu_prepare, |
3704 | .cpu_starting = intel_pmu_cpu_starting, | 3715 | .cpu_starting = intel_pmu_cpu_starting, |
3705 | .cpu_dying = intel_pmu_cpu_dying, | 3716 | .cpu_dying = intel_pmu_cpu_dying, |
3717 | .cpu_dead = intel_pmu_cpu_dead, | ||
3718 | |||
3706 | .guest_get_msrs = intel_guest_get_msrs, | 3719 | .guest_get_msrs = intel_guest_get_msrs, |
3707 | .sched_task = intel_pmu_sched_task, | 3720 | .sched_task = intel_pmu_sched_task, |
3721 | |||
3722 | .check_period = intel_pmu_check_period, | ||
3708 | }; | 3723 | }; |
3709 | 3724 | ||
3710 | static __init void intel_clovertown_quirk(void) | 3725 | static __init void intel_clovertown_quirk(void) |
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c07bee31abe8..b10e04387f38 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c | |||
@@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = { | |||
1222 | .id_table = snbep_uncore_pci_ids, | 1222 | .id_table = snbep_uncore_pci_ids, |
1223 | }; | 1223 | }; |
1224 | 1224 | ||
1225 | #define NODE_ID_MASK 0x7 | ||
1226 | |||
1225 | /* | 1227 | /* |
1226 | * build pci bus to socket mapping | 1228 | * build pci bus to socket mapping |
1227 | */ | 1229 | */ |
@@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool | |||
1243 | err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); | 1245 | err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); |
1244 | if (err) | 1246 | if (err) |
1245 | break; | 1247 | break; |
1246 | nodeid = config; | 1248 | nodeid = config & NODE_ID_MASK; |
1247 | /* get the Node ID mapping */ | 1249 | /* get the Node ID mapping */ |
1248 | err = pci_read_config_dword(ubox_dev, idmap_loc, &config); | 1250 | err = pci_read_config_dword(ubox_dev, idmap_loc, &config); |
1249 | if (err) | 1251 | if (err) |
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 78d7b7031bfc..d46fd6754d92 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h | |||
@@ -646,6 +646,11 @@ struct x86_pmu { | |||
646 | * Intel host/guest support (KVM) | 646 | * Intel host/guest support (KVM) |
647 | */ | 647 | */ |
648 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); | 648 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); |
649 | |||
650 | /* | ||
651 | * Check period value for PERF_EVENT_IOC_PERIOD ioctl. | ||
652 | */ | ||
653 | int (*check_period) (struct perf_event *event, u64 period); | ||
649 | }; | 654 | }; |
650 | 655 | ||
651 | struct x86_perf_task_context { | 656 | struct x86_perf_task_context { |
@@ -857,7 +862,7 @@ static inline int amd_pmu_init(void) | |||
857 | 862 | ||
858 | #ifdef CONFIG_CPU_SUP_INTEL | 863 | #ifdef CONFIG_CPU_SUP_INTEL |
859 | 864 | ||
860 | static inline bool intel_pmu_has_bts(struct perf_event *event) | 865 | static inline bool intel_pmu_has_bts_period(struct perf_event *event, u64 period) |
861 | { | 866 | { |
862 | struct hw_perf_event *hwc = &event->hw; | 867 | struct hw_perf_event *hwc = &event->hw; |
863 | unsigned int hw_event, bts_event; | 868 | unsigned int hw_event, bts_event; |
@@ -868,7 +873,14 @@ static inline bool intel_pmu_has_bts(struct perf_event *event) | |||
868 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; | 873 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; |
869 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); | 874 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); |
870 | 875 | ||
871 | return hw_event == bts_event && hwc->sample_period == 1; | 876 | return hw_event == bts_event && period == 1; |
877 | } | ||
878 | |||
879 | static inline bool intel_pmu_has_bts(struct perf_event *event) | ||
880 | { | ||
881 | struct hw_perf_event *hwc = &event->hw; | ||
882 | |||
883 | return intel_pmu_has_bts_period(event, hwc->sample_period); | ||
872 | } | 884 | } |
873 | 885 | ||
874 | int intel_pmu_save_and_restart(struct perf_event *event); | 886 | int intel_pmu_save_and_restart(struct perf_event *event); |
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index f65b78d32f5e..7dbbe9ffda17 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
@@ -51,7 +51,7 @@ static unsigned long get_dr(int n) | |||
51 | /* | 51 | /* |
52 | * fill in the user structure for a core dump.. | 52 | * fill in the user structure for a core dump.. |
53 | */ | 53 | */ |
54 | static void dump_thread32(struct pt_regs *regs, struct user32 *dump) | 54 | static void fill_dump(struct pt_regs *regs, struct user32 *dump) |
55 | { | 55 | { |
56 | u32 fs, gs; | 56 | u32 fs, gs; |
57 | memset(dump, 0, sizeof(*dump)); | 57 | memset(dump, 0, sizeof(*dump)); |
@@ -157,10 +157,12 @@ static int aout_core_dump(struct coredump_params *cprm) | |||
157 | fs = get_fs(); | 157 | fs = get_fs(); |
158 | set_fs(KERNEL_DS); | 158 | set_fs(KERNEL_DS); |
159 | has_dumped = 1; | 159 | has_dumped = 1; |
160 | |||
161 | fill_dump(cprm->regs, &dump); | ||
162 | |||
160 | strncpy(dump.u_comm, current->comm, sizeof(current->comm)); | 163 | strncpy(dump.u_comm, current->comm, sizeof(current->comm)); |
161 | dump.u_ar0 = offsetof(struct user32, regs); | 164 | dump.u_ar0 = offsetof(struct user32, regs); |
162 | dump.signal = cprm->siginfo->si_signo; | 165 | dump.signal = cprm->siginfo->si_signo; |
163 | dump_thread32(cprm->regs, &dump); | ||
164 | 166 | ||
165 | /* | 167 | /* |
166 | * If the size of the dump file exceeds the rlimit, then see | 168 | * If the size of the dump file exceeds the rlimit, then see |
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index d9a9993af882..9f15384c504a 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h | |||
@@ -52,6 +52,8 @@ | |||
52 | 52 | ||
53 | #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 | 53 | #define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 |
54 | 54 | ||
55 | #define INTEL_FAM6_ICELAKE_MOBILE 0x7E | ||
56 | |||
55 | /* "Small Core" Processors (Atom) */ | 57 | /* "Small Core" Processors (Atom) */ |
56 | 58 | ||
57 | #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ | 59 | #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */ |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 4660ce90de7f..180373360e34 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -299,6 +299,7 @@ union kvm_mmu_extended_role { | |||
299 | unsigned int cr4_smap:1; | 299 | unsigned int cr4_smap:1; |
300 | unsigned int cr4_smep:1; | 300 | unsigned int cr4_smep:1; |
301 | unsigned int cr4_la57:1; | 301 | unsigned int cr4_la57:1; |
302 | unsigned int maxphyaddr:6; | ||
302 | }; | 303 | }; |
303 | }; | 304 | }; |
304 | 305 | ||
@@ -397,6 +398,7 @@ struct kvm_mmu { | |||
397 | void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, | 398 | void (*update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, |
398 | u64 *spte, const void *pte); | 399 | u64 *spte, const void *pte); |
399 | hpa_t root_hpa; | 400 | hpa_t root_hpa; |
401 | gpa_t root_cr3; | ||
400 | union kvm_mmu_role mmu_role; | 402 | union kvm_mmu_role mmu_role; |
401 | u8 root_level; | 403 | u8 root_level; |
402 | u8 shadow_root_level; | 404 | u8 shadow_root_level; |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 40616e805292..2779ace16d23 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
1065 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, | 1065 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, |
1066 | pmd_t *pmdp, pmd_t pmd) | 1066 | pmd_t *pmdp, pmd_t pmd) |
1067 | { | 1067 | { |
1068 | native_set_pmd(pmdp, pmd); | 1068 | set_pmd(pmdp, pmd); |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, | 1071 | static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, |
diff --git a/arch/x86/include/asm/uv/bios.h b/arch/x86/include/asm/uv/bios.h index e652a7cc6186..3f697a9e3f59 100644 --- a/arch/x86/include/asm/uv/bios.h +++ b/arch/x86/include/asm/uv/bios.h | |||
@@ -48,7 +48,8 @@ enum { | |||
48 | BIOS_STATUS_SUCCESS = 0, | 48 | BIOS_STATUS_SUCCESS = 0, |
49 | BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, | 49 | BIOS_STATUS_UNIMPLEMENTED = -ENOSYS, |
50 | BIOS_STATUS_EINVAL = -EINVAL, | 50 | BIOS_STATUS_EINVAL = -EINVAL, |
51 | BIOS_STATUS_UNAVAIL = -EBUSY | 51 | BIOS_STATUS_UNAVAIL = -EBUSY, |
52 | BIOS_STATUS_ABORT = -EINTR, | ||
52 | }; | 53 | }; |
53 | 54 | ||
54 | /* Address map parameters */ | 55 | /* Address map parameters */ |
@@ -167,4 +168,9 @@ extern long system_serial_number; | |||
167 | 168 | ||
168 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ | 169 | extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */ |
169 | 170 | ||
171 | /* | ||
172 | * EFI runtime lock; cf. firmware/efi/runtime-wrappers.c for details | ||
173 | */ | ||
174 | extern struct semaphore __efi_uv_runtime_lock; | ||
175 | |||
170 | #endif /* _ASM_X86_UV_BIOS_H */ | 176 | #endif /* _ASM_X86_UV_BIOS_H */ |
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 672c7225cb1b..6ce290c506d9 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c | |||
@@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, | |||
784 | quirk_no_way_out(i, m, regs); | 784 | quirk_no_way_out(i, m, regs); |
785 | 785 | ||
786 | if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { | 786 | if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { |
787 | m->bank = i; | ||
787 | mce_read_aux(m, i); | 788 | mce_read_aux(m, i); |
788 | *msg = tmp; | 789 | *msg = tmp; |
789 | return 1; | 790 | return 1; |
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index bbffa6c54697..c07958b59f50 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c | |||
@@ -335,6 +335,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
335 | unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; | 335 | unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; |
336 | unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; | 336 | unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0; |
337 | unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0; | 337 | unsigned f_intel_pt = kvm_x86_ops->pt_supported() ? F(INTEL_PT) : 0; |
338 | unsigned f_la57 = 0; | ||
338 | 339 | ||
339 | /* cpuid 1.edx */ | 340 | /* cpuid 1.edx */ |
340 | const u32 kvm_cpuid_1_edx_x86_features = | 341 | const u32 kvm_cpuid_1_edx_x86_features = |
@@ -489,7 +490,10 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, | |||
489 | // TSC_ADJUST is emulated | 490 | // TSC_ADJUST is emulated |
490 | entry->ebx |= F(TSC_ADJUST); | 491 | entry->ebx |= F(TSC_ADJUST); |
491 | entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; | 492 | entry->ecx &= kvm_cpuid_7_0_ecx_x86_features; |
493 | f_la57 = entry->ecx & F(LA57); | ||
492 | cpuid_mask(&entry->ecx, CPUID_7_ECX); | 494 | cpuid_mask(&entry->ecx, CPUID_7_ECX); |
495 | /* Set LA57 based on hardware capability. */ | ||
496 | entry->ecx |= f_la57; | ||
493 | entry->ecx |= f_umip; | 497 | entry->ecx |= f_umip; |
494 | /* PKU is not yet implemented for shadow paging. */ | 498 | /* PKU is not yet implemented for shadow paging. */ |
495 | if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) | 499 | if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE)) |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index da9c42349b1f..f2d1d230d5b8 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -3555,6 +3555,7 @@ void kvm_mmu_free_roots(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, | |||
3555 | &invalid_list); | 3555 | &invalid_list); |
3556 | mmu->root_hpa = INVALID_PAGE; | 3556 | mmu->root_hpa = INVALID_PAGE; |
3557 | } | 3557 | } |
3558 | mmu->root_cr3 = 0; | ||
3558 | } | 3559 | } |
3559 | 3560 | ||
3560 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); | 3561 | kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); |
@@ -3610,6 +3611,7 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu) | |||
3610 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); | 3611 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->pae_root); |
3611 | } else | 3612 | } else |
3612 | BUG(); | 3613 | BUG(); |
3614 | vcpu->arch.mmu->root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); | ||
3613 | 3615 | ||
3614 | return 0; | 3616 | return 0; |
3615 | } | 3617 | } |
@@ -3618,10 +3620,11 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) | |||
3618 | { | 3620 | { |
3619 | struct kvm_mmu_page *sp; | 3621 | struct kvm_mmu_page *sp; |
3620 | u64 pdptr, pm_mask; | 3622 | u64 pdptr, pm_mask; |
3621 | gfn_t root_gfn; | 3623 | gfn_t root_gfn, root_cr3; |
3622 | int i; | 3624 | int i; |
3623 | 3625 | ||
3624 | root_gfn = vcpu->arch.mmu->get_cr3(vcpu) >> PAGE_SHIFT; | 3626 | root_cr3 = vcpu->arch.mmu->get_cr3(vcpu); |
3627 | root_gfn = root_cr3 >> PAGE_SHIFT; | ||
3625 | 3628 | ||
3626 | if (mmu_check_root(vcpu, root_gfn)) | 3629 | if (mmu_check_root(vcpu, root_gfn)) |
3627 | return 1; | 3630 | return 1; |
@@ -3646,7 +3649,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) | |||
3646 | ++sp->root_count; | 3649 | ++sp->root_count; |
3647 | spin_unlock(&vcpu->kvm->mmu_lock); | 3650 | spin_unlock(&vcpu->kvm->mmu_lock); |
3648 | vcpu->arch.mmu->root_hpa = root; | 3651 | vcpu->arch.mmu->root_hpa = root; |
3649 | return 0; | 3652 | goto set_root_cr3; |
3650 | } | 3653 | } |
3651 | 3654 | ||
3652 | /* | 3655 | /* |
@@ -3712,6 +3715,9 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) | |||
3712 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); | 3715 | vcpu->arch.mmu->root_hpa = __pa(vcpu->arch.mmu->lm_root); |
3713 | } | 3716 | } |
3714 | 3717 | ||
3718 | set_root_cr3: | ||
3719 | vcpu->arch.mmu->root_cr3 = root_cr3; | ||
3720 | |||
3715 | return 0; | 3721 | return 0; |
3716 | } | 3722 | } |
3717 | 3723 | ||
@@ -4163,7 +4169,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, | |||
4163 | struct kvm_mmu_root_info root; | 4169 | struct kvm_mmu_root_info root; |
4164 | struct kvm_mmu *mmu = vcpu->arch.mmu; | 4170 | struct kvm_mmu *mmu = vcpu->arch.mmu; |
4165 | 4171 | ||
4166 | root.cr3 = mmu->get_cr3(vcpu); | 4172 | root.cr3 = mmu->root_cr3; |
4167 | root.hpa = mmu->root_hpa; | 4173 | root.hpa = mmu->root_hpa; |
4168 | 4174 | ||
4169 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { | 4175 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) { |
@@ -4176,6 +4182,7 @@ static bool cached_root_available(struct kvm_vcpu *vcpu, gpa_t new_cr3, | |||
4176 | } | 4182 | } |
4177 | 4183 | ||
4178 | mmu->root_hpa = root.hpa; | 4184 | mmu->root_hpa = root.hpa; |
4185 | mmu->root_cr3 = root.cr3; | ||
4179 | 4186 | ||
4180 | return i < KVM_MMU_NUM_PREV_ROOTS; | 4187 | return i < KVM_MMU_NUM_PREV_ROOTS; |
4181 | } | 4188 | } |
@@ -4770,6 +4777,7 @@ static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu) | |||
4770 | ext.cr4_pse = !!is_pse(vcpu); | 4777 | ext.cr4_pse = !!is_pse(vcpu); |
4771 | ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); | 4778 | ext.cr4_pke = !!kvm_read_cr4_bits(vcpu, X86_CR4_PKE); |
4772 | ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); | 4779 | ext.cr4_la57 = !!kvm_read_cr4_bits(vcpu, X86_CR4_LA57); |
4780 | ext.maxphyaddr = cpuid_maxphyaddr(vcpu); | ||
4773 | 4781 | ||
4774 | ext.valid = 1; | 4782 | ext.valid = 1; |
4775 | 4783 | ||
@@ -5516,11 +5524,13 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu) | |||
5516 | vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; | 5524 | vcpu->arch.walk_mmu = &vcpu->arch.root_mmu; |
5517 | 5525 | ||
5518 | vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; | 5526 | vcpu->arch.root_mmu.root_hpa = INVALID_PAGE; |
5527 | vcpu->arch.root_mmu.root_cr3 = 0; | ||
5519 | vcpu->arch.root_mmu.translate_gpa = translate_gpa; | 5528 | vcpu->arch.root_mmu.translate_gpa = translate_gpa; |
5520 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) | 5529 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
5521 | vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; | 5530 | vcpu->arch.root_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; |
5522 | 5531 | ||
5523 | vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; | 5532 | vcpu->arch.guest_mmu.root_hpa = INVALID_PAGE; |
5533 | vcpu->arch.guest_mmu.root_cr3 = 0; | ||
5524 | vcpu->arch.guest_mmu.translate_gpa = translate_gpa; | 5534 | vcpu->arch.guest_mmu.translate_gpa = translate_gpa; |
5525 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) | 5535 | for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) |
5526 | vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; | 5536 | vcpu->arch.guest_mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; |
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 8ff20523661b..d737a51a53ca 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c | |||
@@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu) | |||
211 | if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) | 211 | if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) |
212 | return; | 212 | return; |
213 | 213 | ||
214 | hrtimer_cancel(&vmx->nested.preemption_timer); | ||
214 | vmx->nested.vmxon = false; | 215 | vmx->nested.vmxon = false; |
215 | vmx->nested.smm.vmxon = false; | 216 | vmx->nested.smm.vmxon = false; |
216 | free_vpid(vmx->nested.vpid02); | 217 | free_vpid(vmx->nested.vpid02); |
@@ -2472,6 +2473,10 @@ static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu, | |||
2472 | (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) | 2473 | (nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id)) |
2473 | return -EINVAL; | 2474 | return -EINVAL; |
2474 | 2475 | ||
2476 | if (!nested_cpu_has_preemption_timer(vmcs12) && | ||
2477 | nested_cpu_has_save_preemption_timer(vmcs12)) | ||
2478 | return -EINVAL; | ||
2479 | |||
2475 | if (nested_cpu_has_ept(vmcs12) && | 2480 | if (nested_cpu_has_ept(vmcs12) && |
2476 | !valid_ept_address(vcpu, vmcs12->ept_pointer)) | 2481 | !valid_ept_address(vcpu, vmcs12->ept_pointer)) |
2477 | return -EINVAL; | 2482 | return -EINVAL; |
@@ -5556,9 +5561,11 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps, | |||
5556 | * secondary cpu-based controls. Do not include those that | 5561 | * secondary cpu-based controls. Do not include those that |
5557 | * depend on CPUID bits, they are added later by vmx_cpuid_update. | 5562 | * depend on CPUID bits, they are added later by vmx_cpuid_update. |
5558 | */ | 5563 | */ |
5559 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, | 5564 | if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) |
5560 | msrs->secondary_ctls_low, | 5565 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, |
5561 | msrs->secondary_ctls_high); | 5566 | msrs->secondary_ctls_low, |
5567 | msrs->secondary_ctls_high); | ||
5568 | |||
5562 | msrs->secondary_ctls_low = 0; | 5569 | msrs->secondary_ctls_low = 0; |
5563 | msrs->secondary_ctls_high &= | 5570 | msrs->secondary_ctls_high &= |
5564 | SECONDARY_EXEC_DESC | | 5571 | SECONDARY_EXEC_DESC | |
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 95d618045001..30a6bcd735ec 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c | |||
@@ -863,7 +863,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, | |||
863 | if (!entry_only) | 863 | if (!entry_only) |
864 | j = find_msr(&m->host, msr); | 864 | j = find_msr(&m->host, msr); |
865 | 865 | ||
866 | if (i == NR_AUTOLOAD_MSRS || j == NR_AUTOLOAD_MSRS) { | 866 | if ((i < 0 && m->guest.nr == NR_AUTOLOAD_MSRS) || |
867 | (j < 0 && m->host.nr == NR_AUTOLOAD_MSRS)) { | ||
867 | printk_once(KERN_WARNING "Not enough msr switch entries. " | 868 | printk_once(KERN_WARNING "Not enough msr switch entries. " |
868 | "Can't add msr %x\n", msr); | 869 | "Can't add msr %x\n", msr); |
869 | return; | 870 | return; |
@@ -1193,21 +1194,6 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) | |||
1193 | if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) | 1194 | if (!pi_test_sn(pi_desc) && vcpu->cpu == cpu) |
1194 | return; | 1195 | return; |
1195 | 1196 | ||
1196 | /* | ||
1197 | * First handle the simple case where no cmpxchg is necessary; just | ||
1198 | * allow posting non-urgent interrupts. | ||
1199 | * | ||
1200 | * If the 'nv' field is POSTED_INTR_WAKEUP_VECTOR, do not change | ||
1201 | * PI.NDST: pi_post_block will do it for us and the wakeup_handler | ||
1202 | * expects the VCPU to be on the blocked_vcpu_list that matches | ||
1203 | * PI.NDST. | ||
1204 | */ | ||
1205 | if (pi_desc->nv == POSTED_INTR_WAKEUP_VECTOR || | ||
1206 | vcpu->cpu == cpu) { | ||
1207 | pi_clear_sn(pi_desc); | ||
1208 | return; | ||
1209 | } | ||
1210 | |||
1211 | /* The full case. */ | 1197 | /* The full case. */ |
1212 | do { | 1198 | do { |
1213 | old.control = new.control = pi_desc->control; | 1199 | old.control = new.control = pi_desc->control; |
@@ -1222,6 +1208,17 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu) | |||
1222 | new.sn = 0; | 1208 | new.sn = 0; |
1223 | } while (cmpxchg64(&pi_desc->control, old.control, | 1209 | } while (cmpxchg64(&pi_desc->control, old.control, |
1224 | new.control) != old.control); | 1210 | new.control) != old.control); |
1211 | |||
1212 | /* | ||
1213 | * Clear SN before reading the bitmap. The VT-d firmware | ||
1214 | * writes the bitmap and reads SN atomically (5.2.3 in the | ||
1215 | * spec), so it doesn't really have a memory barrier that | ||
1216 | * pairs with this, but we cannot do that and we need one. | ||
1217 | */ | ||
1218 | smp_mb__after_atomic(); | ||
1219 | |||
1220 | if (!bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS)) | ||
1221 | pi_set_on(pi_desc); | ||
1225 | } | 1222 | } |
1226 | 1223 | ||
1227 | /* | 1224 | /* |
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index 99328954c2fc..0ac0a64c7790 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h | |||
@@ -337,16 +337,16 @@ static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc) | |||
337 | return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); | 337 | return test_and_set_bit(vector, (unsigned long *)pi_desc->pir); |
338 | } | 338 | } |
339 | 339 | ||
340 | static inline void pi_clear_sn(struct pi_desc *pi_desc) | 340 | static inline void pi_set_sn(struct pi_desc *pi_desc) |
341 | { | 341 | { |
342 | return clear_bit(POSTED_INTR_SN, | 342 | return set_bit(POSTED_INTR_SN, |
343 | (unsigned long *)&pi_desc->control); | 343 | (unsigned long *)&pi_desc->control); |
344 | } | 344 | } |
345 | 345 | ||
346 | static inline void pi_set_sn(struct pi_desc *pi_desc) | 346 | static inline void pi_set_on(struct pi_desc *pi_desc) |
347 | { | 347 | { |
348 | return set_bit(POSTED_INTR_SN, | 348 | set_bit(POSTED_INTR_ON, |
349 | (unsigned long *)&pi_desc->control); | 349 | (unsigned long *)&pi_desc->control); |
350 | } | 350 | } |
351 | 351 | ||
352 | static inline void pi_clear_on(struct pi_desc *pi_desc) | 352 | static inline void pi_clear_on(struct pi_desc *pi_desc) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3d27206f6c01..941f932373d0 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -5116,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu, | |||
5116 | { | 5116 | { |
5117 | u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; | 5117 | u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; |
5118 | 5118 | ||
5119 | /* | ||
5120 | * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED | ||
5121 | * is returned, but our callers are not ready for that and they blindly | ||
5122 | * call kvm_inject_page_fault. Ensure that they at least do not leak | ||
5123 | * uninitialized kernel stack memory into cr2 and error code. | ||
5124 | */ | ||
5125 | memset(exception, 0, sizeof(*exception)); | ||
5119 | return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, | 5126 | return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, |
5120 | exception); | 5127 | exception); |
5121 | } | 5128 | } |
@@ -7794,7 +7801,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) | |||
7794 | * 1) We should set ->mode before checking ->requests. Please see | 7801 | * 1) We should set ->mode before checking ->requests. Please see |
7795 | * the comment in kvm_vcpu_exiting_guest_mode(). | 7802 | * the comment in kvm_vcpu_exiting_guest_mode(). |
7796 | * | 7803 | * |
7797 | * 2) For APICv, we should set ->mode before checking PIR.ON. This | 7804 | * 2) For APICv, we should set ->mode before checking PID.ON. This |
7798 | * pairs with the memory barrier implicit in pi_test_and_set_on | 7805 | * pairs with the memory barrier implicit in pi_test_and_set_on |
7799 | * (see vmx_deliver_posted_interrupt). | 7806 | * (see vmx_deliver_posted_interrupt). |
7800 | * | 7807 | * |
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 6521134057e8..856fa409c536 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c | |||
@@ -117,67 +117,11 @@ __visible bool ex_handler_fprestore(const struct exception_table_entry *fixup, | |||
117 | } | 117 | } |
118 | EXPORT_SYMBOL_GPL(ex_handler_fprestore); | 118 | EXPORT_SYMBOL_GPL(ex_handler_fprestore); |
119 | 119 | ||
120 | /* Helper to check whether a uaccess fault indicates a kernel bug. */ | ||
121 | static bool bogus_uaccess(struct pt_regs *regs, int trapnr, | ||
122 | unsigned long fault_addr) | ||
123 | { | ||
124 | /* This is the normal case: #PF with a fault address in userspace. */ | ||
125 | if (trapnr == X86_TRAP_PF && fault_addr < TASK_SIZE_MAX) | ||
126 | return false; | ||
127 | |||
128 | /* | ||
129 | * This code can be reached for machine checks, but only if the #MC | ||
130 | * handler has already decided that it looks like a candidate for fixup. | ||
131 | * This e.g. happens when attempting to access userspace memory which | ||
132 | * the CPU can't access because of uncorrectable bad memory. | ||
133 | */ | ||
134 | if (trapnr == X86_TRAP_MC) | ||
135 | return false; | ||
136 | |||
137 | /* | ||
138 | * There are two remaining exception types we might encounter here: | ||
139 | * - #PF for faulting accesses to kernel addresses | ||
140 | * - #GP for faulting accesses to noncanonical addresses | ||
141 | * Complain about anything else. | ||
142 | */ | ||
143 | if (trapnr != X86_TRAP_PF && trapnr != X86_TRAP_GP) { | ||
144 | WARN(1, "unexpected trap %d in uaccess\n", trapnr); | ||
145 | return false; | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * This is a faulting memory access in kernel space, on a kernel | ||
150 | * address, in a usercopy function. This can e.g. be caused by improper | ||
151 | * use of helpers like __put_user and by improper attempts to access | ||
152 | * userspace addresses in KERNEL_DS regions. | ||
153 | * The one (semi-)legitimate exception are probe_kernel_{read,write}(), | ||
154 | * which can be invoked from places like kgdb, /dev/mem (for reading) | ||
155 | * and privileged BPF code (for reading). | ||
156 | * The probe_kernel_*() functions set the kernel_uaccess_faults_ok flag | ||
157 | * to tell us that faulting on kernel addresses, and even noncanonical | ||
158 | * addresses, in a userspace accessor does not necessarily imply a | ||
159 | * kernel bug, root might just be doing weird stuff. | ||
160 | */ | ||
161 | if (current->kernel_uaccess_faults_ok) | ||
162 | return false; | ||
163 | |||
164 | /* This is bad. Refuse the fixup so that we go into die(). */ | ||
165 | if (trapnr == X86_TRAP_PF) { | ||
166 | pr_emerg("BUG: pagefault on kernel address 0x%lx in non-whitelisted uaccess\n", | ||
167 | fault_addr); | ||
168 | } else { | ||
169 | pr_emerg("BUG: GPF in non-whitelisted uaccess (non-canonical address?)\n"); | ||
170 | } | ||
171 | return true; | ||
172 | } | ||
173 | |||
174 | __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup, | 120 | __visible bool ex_handler_uaccess(const struct exception_table_entry *fixup, |
175 | struct pt_regs *regs, int trapnr, | 121 | struct pt_regs *regs, int trapnr, |
176 | unsigned long error_code, | 122 | unsigned long error_code, |
177 | unsigned long fault_addr) | 123 | unsigned long fault_addr) |
178 | { | 124 | { |
179 | if (bogus_uaccess(regs, trapnr, fault_addr)) | ||
180 | return false; | ||
181 | regs->ip = ex_fixup_addr(fixup); | 125 | regs->ip = ex_fixup_addr(fixup); |
182 | return true; | 126 | return true; |
183 | } | 127 | } |
@@ -188,8 +132,6 @@ __visible bool ex_handler_ext(const struct exception_table_entry *fixup, | |||
188 | unsigned long error_code, | 132 | unsigned long error_code, |
189 | unsigned long fault_addr) | 133 | unsigned long fault_addr) |
190 | { | 134 | { |
191 | if (bogus_uaccess(regs, trapnr, fault_addr)) | ||
192 | return false; | ||
193 | /* Special hack for uaccess_err */ | 135 | /* Special hack for uaccess_err */ |
194 | current->thread.uaccess_err = 1; | 136 | current->thread.uaccess_err = 1; |
195 | regs->ip = ex_fixup_addr(fixup); | 137 | regs->ip = ex_fixup_addr(fixup); |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 4f8972311a77..14e6119838a6 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn) | |||
230 | 230 | ||
231 | #endif | 231 | #endif |
232 | 232 | ||
233 | /* | ||
234 | * See set_mce_nospec(). | ||
235 | * | ||
236 | * Machine check recovery code needs to change cache mode of poisoned pages to | ||
237 | * UC to avoid speculative access logging another error. But passing the | ||
238 | * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a | ||
239 | * speculative access. So we cheat and flip the top bit of the address. This | ||
240 | * works fine for the code that updates the page tables. But at the end of the | ||
241 | * process we need to flush the TLB and cache and the non-canonical address | ||
242 | * causes a #GP fault when used by the INVLPG and CLFLUSH instructions. | ||
243 | * | ||
244 | * But in the common case we already have a canonical address. This code | ||
245 | * will fix the top bit if needed and is a no-op otherwise. | ||
246 | */ | ||
247 | static inline unsigned long fix_addr(unsigned long addr) | ||
248 | { | ||
249 | #ifdef CONFIG_X86_64 | ||
250 | return (long)(addr << 1) >> 1; | ||
251 | #else | ||
252 | return addr; | ||
253 | #endif | ||
254 | } | ||
255 | |||
233 | static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) | 256 | static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) |
234 | { | 257 | { |
235 | if (cpa->flags & CPA_PAGES_ARRAY) { | 258 | if (cpa->flags & CPA_PAGES_ARRAY) { |
@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data) | |||
313 | unsigned int i; | 336 | unsigned int i; |
314 | 337 | ||
315 | for (i = 0; i < cpa->numpages; i++) | 338 | for (i = 0; i < cpa->numpages; i++) |
316 | __flush_tlb_one_kernel(__cpa_addr(cpa, i)); | 339 | __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i))); |
317 | } | 340 | } |
318 | 341 | ||
319 | static void cpa_flush(struct cpa_data *data, int cache) | 342 | static void cpa_flush(struct cpa_data *data, int cache) |
@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache) | |||
347 | * Only flush present addresses: | 370 | * Only flush present addresses: |
348 | */ | 371 | */ |
349 | if (pte && (pte_val(*pte) & _PAGE_PRESENT)) | 372 | if (pte && (pte_val(*pte) & _PAGE_PRESENT)) |
350 | clflush_cache_range_opt((void *)addr, PAGE_SIZE); | 373 | clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE); |
351 | } | 374 | } |
352 | mb(); | 375 | mb(); |
353 | } | 376 | } |
@@ -1627,29 +1650,6 @@ out: | |||
1627 | return ret; | 1650 | return ret; |
1628 | } | 1651 | } |
1629 | 1652 | ||
1630 | /* | ||
1631 | * Machine check recovery code needs to change cache mode of poisoned | ||
1632 | * pages to UC to avoid speculative access logging another error. But | ||
1633 | * passing the address of the 1:1 mapping to set_memory_uc() is a fine | ||
1634 | * way to encourage a speculative access. So we cheat and flip the top | ||
1635 | * bit of the address. This works fine for the code that updates the | ||
1636 | * page tables. But at the end of the process we need to flush the cache | ||
1637 | * and the non-canonical address causes a #GP fault when used by the | ||
1638 | * CLFLUSH instruction. | ||
1639 | * | ||
1640 | * But in the common case we already have a canonical address. This code | ||
1641 | * will fix the top bit if needed and is a no-op otherwise. | ||
1642 | */ | ||
1643 | static inline unsigned long make_addr_canonical_again(unsigned long addr) | ||
1644 | { | ||
1645 | #ifdef CONFIG_X86_64 | ||
1646 | return (long)(addr << 1) >> 1; | ||
1647 | #else | ||
1648 | return addr; | ||
1649 | #endif | ||
1650 | } | ||
1651 | |||
1652 | |||
1653 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, | 1653 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, |
1654 | pgprot_t mask_set, pgprot_t mask_clr, | 1654 | pgprot_t mask_set, pgprot_t mask_clr, |
1655 | int force_split, int in_flag, | 1655 | int force_split, int in_flag, |
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c index 4a6a5a26c582..eb33432f2f24 100644 --- a/arch/x86/platform/uv/bios_uv.c +++ b/arch/x86/platform/uv/bios_uv.c | |||
@@ -29,7 +29,8 @@ | |||
29 | 29 | ||
30 | struct uv_systab *uv_systab; | 30 | struct uv_systab *uv_systab; |
31 | 31 | ||
32 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | 32 | static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
33 | u64 a4, u64 a5) | ||
33 | { | 34 | { |
34 | struct uv_systab *tab = uv_systab; | 35 | struct uv_systab *tab = uv_systab; |
35 | s64 ret; | 36 | s64 ret; |
@@ -51,6 +52,19 @@ s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | |||
51 | 52 | ||
52 | return ret; | 53 | return ret; |
53 | } | 54 | } |
55 | |||
56 | s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5) | ||
57 | { | ||
58 | s64 ret; | ||
59 | |||
60 | if (down_interruptible(&__efi_uv_runtime_lock)) | ||
61 | return BIOS_STATUS_ABORT; | ||
62 | |||
63 | ret = __uv_bios_call(which, a1, a2, a3, a4, a5); | ||
64 | up(&__efi_uv_runtime_lock); | ||
65 | |||
66 | return ret; | ||
67 | } | ||
54 | EXPORT_SYMBOL_GPL(uv_bios_call); | 68 | EXPORT_SYMBOL_GPL(uv_bios_call); |
55 | 69 | ||
56 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | 70 | s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, |
@@ -59,10 +73,15 @@ s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, | |||
59 | unsigned long bios_flags; | 73 | unsigned long bios_flags; |
60 | s64 ret; | 74 | s64 ret; |
61 | 75 | ||
76 | if (down_interruptible(&__efi_uv_runtime_lock)) | ||
77 | return BIOS_STATUS_ABORT; | ||
78 | |||
62 | local_irq_save(bios_flags); | 79 | local_irq_save(bios_flags); |
63 | ret = uv_bios_call(which, a1, a2, a3, a4, a5); | 80 | ret = __uv_bios_call(which, a1, a2, a3, a4, a5); |
64 | local_irq_restore(bios_flags); | 81 | local_irq_restore(bios_flags); |
65 | 82 | ||
83 | up(&__efi_uv_runtime_lock); | ||
84 | |||
66 | return ret; | 85 | return ret; |
67 | } | 86 | } |
68 | 87 | ||
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c index fc714ef402a6..2620baa1f699 100644 --- a/block/blk-iolatency.c +++ b/block/blk-iolatency.c | |||
@@ -72,6 +72,7 @@ | |||
72 | #include <linux/sched/loadavg.h> | 72 | #include <linux/sched/loadavg.h> |
73 | #include <linux/sched/signal.h> | 73 | #include <linux/sched/signal.h> |
74 | #include <trace/events/block.h> | 74 | #include <trace/events/block.h> |
75 | #include <linux/blk-mq.h> | ||
75 | #include "blk-rq-qos.h" | 76 | #include "blk-rq-qos.h" |
76 | #include "blk-stat.h" | 77 | #include "blk-stat.h" |
77 | 78 | ||
@@ -591,6 +592,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) | |||
591 | u64 now = ktime_to_ns(ktime_get()); | 592 | u64 now = ktime_to_ns(ktime_get()); |
592 | bool issue_as_root = bio_issue_as_root_blkg(bio); | 593 | bool issue_as_root = bio_issue_as_root_blkg(bio); |
593 | bool enabled = false; | 594 | bool enabled = false; |
595 | int inflight = 0; | ||
594 | 596 | ||
595 | blkg = bio->bi_blkg; | 597 | blkg = bio->bi_blkg; |
596 | if (!blkg || !bio_flagged(bio, BIO_TRACKED)) | 598 | if (!blkg || !bio_flagged(bio, BIO_TRACKED)) |
@@ -601,6 +603,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) | |||
601 | return; | 603 | return; |
602 | 604 | ||
603 | enabled = blk_iolatency_enabled(iolat->blkiolat); | 605 | enabled = blk_iolatency_enabled(iolat->blkiolat); |
606 | if (!enabled) | ||
607 | return; | ||
608 | |||
604 | while (blkg && blkg->parent) { | 609 | while (blkg && blkg->parent) { |
605 | iolat = blkg_to_lat(blkg); | 610 | iolat = blkg_to_lat(blkg); |
606 | if (!iolat) { | 611 | if (!iolat) { |
@@ -609,8 +614,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio) | |||
609 | } | 614 | } |
610 | rqw = &iolat->rq_wait; | 615 | rqw = &iolat->rq_wait; |
611 | 616 | ||
612 | atomic_dec(&rqw->inflight); | 617 | inflight = atomic_dec_return(&rqw->inflight); |
613 | if (!enabled || iolat->min_lat_nsec == 0) | 618 | WARN_ON_ONCE(inflight < 0); |
619 | if (iolat->min_lat_nsec == 0) | ||
614 | goto next; | 620 | goto next; |
615 | iolatency_record_time(iolat, &bio->bi_issue, now, | 621 | iolatency_record_time(iolat, &bio->bi_issue, now, |
616 | issue_as_root); | 622 | issue_as_root); |
@@ -754,10 +760,13 @@ int blk_iolatency_init(struct request_queue *q) | |||
754 | return 0; | 760 | return 0; |
755 | } | 761 | } |
756 | 762 | ||
757 | static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) | 763 | /* |
764 | * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise | ||
765 | * return 0. | ||
766 | */ | ||
767 | static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) | ||
758 | { | 768 | { |
759 | struct iolatency_grp *iolat = blkg_to_lat(blkg); | 769 | struct iolatency_grp *iolat = blkg_to_lat(blkg); |
760 | struct blk_iolatency *blkiolat = iolat->blkiolat; | ||
761 | u64 oldval = iolat->min_lat_nsec; | 770 | u64 oldval = iolat->min_lat_nsec; |
762 | 771 | ||
763 | iolat->min_lat_nsec = val; | 772 | iolat->min_lat_nsec = val; |
@@ -766,9 +775,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val) | |||
766 | BLKIOLATENCY_MAX_WIN_SIZE); | 775 | BLKIOLATENCY_MAX_WIN_SIZE); |
767 | 776 | ||
768 | if (!oldval && val) | 777 | if (!oldval && val) |
769 | atomic_inc(&blkiolat->enabled); | 778 | return 1; |
770 | if (oldval && !val) | 779 | if (oldval && !val) |
771 | atomic_dec(&blkiolat->enabled); | 780 | return -1; |
781 | return 0; | ||
772 | } | 782 | } |
773 | 783 | ||
774 | static void iolatency_clear_scaling(struct blkcg_gq *blkg) | 784 | static void iolatency_clear_scaling(struct blkcg_gq *blkg) |
@@ -800,6 +810,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, | |||
800 | u64 lat_val = 0; | 810 | u64 lat_val = 0; |
801 | u64 oldval; | 811 | u64 oldval; |
802 | int ret; | 812 | int ret; |
813 | int enable = 0; | ||
803 | 814 | ||
804 | ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); | 815 | ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx); |
805 | if (ret) | 816 | if (ret) |
@@ -834,7 +845,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, | |||
834 | blkg = ctx.blkg; | 845 | blkg = ctx.blkg; |
835 | oldval = iolat->min_lat_nsec; | 846 | oldval = iolat->min_lat_nsec; |
836 | 847 | ||
837 | iolatency_set_min_lat_nsec(blkg, lat_val); | 848 | enable = iolatency_set_min_lat_nsec(blkg, lat_val); |
849 | if (enable) { | ||
850 | WARN_ON_ONCE(!blk_get_queue(blkg->q)); | ||
851 | blkg_get(blkg); | ||
852 | } | ||
853 | |||
838 | if (oldval != iolat->min_lat_nsec) { | 854 | if (oldval != iolat->min_lat_nsec) { |
839 | iolatency_clear_scaling(blkg); | 855 | iolatency_clear_scaling(blkg); |
840 | } | 856 | } |
@@ -842,6 +858,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf, | |||
842 | ret = 0; | 858 | ret = 0; |
843 | out: | 859 | out: |
844 | blkg_conf_finish(&ctx); | 860 | blkg_conf_finish(&ctx); |
861 | if (ret == 0 && enable) { | ||
862 | struct iolatency_grp *tmp = blkg_to_lat(blkg); | ||
863 | struct blk_iolatency *blkiolat = tmp->blkiolat; | ||
864 | |||
865 | blk_mq_freeze_queue(blkg->q); | ||
866 | |||
867 | if (enable == 1) | ||
868 | atomic_inc(&blkiolat->enabled); | ||
869 | else if (enable == -1) | ||
870 | atomic_dec(&blkiolat->enabled); | ||
871 | else | ||
872 | WARN_ON_ONCE(1); | ||
873 | |||
874 | blk_mq_unfreeze_queue(blkg->q); | ||
875 | |||
876 | blkg_put(blkg); | ||
877 | blk_put_queue(blkg->q); | ||
878 | } | ||
845 | return ret ?: nbytes; | 879 | return ret ?: nbytes; |
846 | } | 880 | } |
847 | 881 | ||
@@ -977,8 +1011,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd) | |||
977 | { | 1011 | { |
978 | struct iolatency_grp *iolat = pd_to_lat(pd); | 1012 | struct iolatency_grp *iolat = pd_to_lat(pd); |
979 | struct blkcg_gq *blkg = lat_to_blkg(iolat); | 1013 | struct blkcg_gq *blkg = lat_to_blkg(iolat); |
1014 | struct blk_iolatency *blkiolat = iolat->blkiolat; | ||
1015 | int ret; | ||
980 | 1016 | ||
981 | iolatency_set_min_lat_nsec(blkg, 0); | 1017 | ret = iolatency_set_min_lat_nsec(blkg, 0); |
1018 | if (ret == 1) | ||
1019 | atomic_inc(&blkiolat->enabled); | ||
1020 | if (ret == -1) | ||
1021 | atomic_dec(&blkiolat->enabled); | ||
982 | iolatency_clear_scaling(blkg); | 1022 | iolatency_clear_scaling(blkg); |
983 | } | 1023 | } |
984 | 1024 | ||
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index f8120832ca7b..7921573aebbc 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c | |||
@@ -839,6 +839,9 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = { | |||
839 | static bool debugfs_create_files(struct dentry *parent, void *data, | 839 | static bool debugfs_create_files(struct dentry *parent, void *data, |
840 | const struct blk_mq_debugfs_attr *attr) | 840 | const struct blk_mq_debugfs_attr *attr) |
841 | { | 841 | { |
842 | if (IS_ERR_OR_NULL(parent)) | ||
843 | return false; | ||
844 | |||
842 | d_inode(parent)->i_private = data; | 845 | d_inode(parent)->i_private = data; |
843 | 846 | ||
844 | for (; attr->name; attr++) { | 847 | for (; attr->name; attr++) { |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 8f5b533764ca..9437a5eb07cf 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -737,12 +737,20 @@ static void blk_mq_requeue_work(struct work_struct *work) | |||
737 | spin_unlock_irq(&q->requeue_lock); | 737 | spin_unlock_irq(&q->requeue_lock); |
738 | 738 | ||
739 | list_for_each_entry_safe(rq, next, &rq_list, queuelist) { | 739 | list_for_each_entry_safe(rq, next, &rq_list, queuelist) { |
740 | if (!(rq->rq_flags & RQF_SOFTBARRIER)) | 740 | if (!(rq->rq_flags & (RQF_SOFTBARRIER | RQF_DONTPREP))) |
741 | continue; | 741 | continue; |
742 | 742 | ||
743 | rq->rq_flags &= ~RQF_SOFTBARRIER; | 743 | rq->rq_flags &= ~RQF_SOFTBARRIER; |
744 | list_del_init(&rq->queuelist); | 744 | list_del_init(&rq->queuelist); |
745 | blk_mq_sched_insert_request(rq, true, false, false); | 745 | /* |
746 | * If RQF_DONTPREP, rq has contained some driver specific | ||
747 | * data, so insert it to hctx dispatch list to avoid any | ||
748 | * merge. | ||
749 | */ | ||
750 | if (rq->rq_flags & RQF_DONTPREP) | ||
751 | blk_mq_request_bypass_insert(rq, false); | ||
752 | else | ||
753 | blk_mq_sched_insert_request(rq, true, false, false); | ||
746 | } | 754 | } |
747 | 755 | ||
748 | while (!list_empty(&rq_list)) { | 756 | while (!list_empty(&rq_list)) { |
diff --git a/block/blk-mq.h b/block/blk-mq.h index d943d46b0785..d0b3dd54ef8d 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
@@ -36,7 +36,6 @@ struct blk_mq_ctx { | |||
36 | struct kobject kobj; | 36 | struct kobject kobj; |
37 | } ____cacheline_aligned_in_smp; | 37 | } ____cacheline_aligned_in_smp; |
38 | 38 | ||
39 | void blk_mq_freeze_queue(struct request_queue *q); | ||
40 | void blk_mq_free_queue(struct request_queue *q); | 39 | void blk_mq_free_queue(struct request_queue *q); |
41 | int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); | 40 | int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); |
42 | void blk_mq_wake_waiters(struct request_queue *q); | 41 | void blk_mq_wake_waiters(struct request_queue *q); |
diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 17eb09d222ff..ec78a04eb136 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c | |||
@@ -122,8 +122,10 @@ static void alg_do_release(const struct af_alg_type *type, void *private) | |||
122 | 122 | ||
123 | int af_alg_release(struct socket *sock) | 123 | int af_alg_release(struct socket *sock) |
124 | { | 124 | { |
125 | if (sock->sk) | 125 | if (sock->sk) { |
126 | sock_put(sock->sk); | 126 | sock_put(sock->sk); |
127 | sock->sk = NULL; | ||
128 | } | ||
127 | return 0; | 129 | return 0; |
128 | } | 130 | } |
129 | EXPORT_SYMBOL_GPL(af_alg_release); | 131 | EXPORT_SYMBOL_GPL(af_alg_release); |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 5c093ce01bcd..147f6c7ea59c 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -1029,6 +1029,9 @@ void __init acpi_early_init(void) | |||
1029 | 1029 | ||
1030 | acpi_permanent_mmap = true; | 1030 | acpi_permanent_mmap = true; |
1031 | 1031 | ||
1032 | /* Initialize debug output. Linux does not use ACPICA defaults */ | ||
1033 | acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR; | ||
1034 | |||
1032 | #ifdef CONFIG_X86 | 1035 | #ifdef CONFIG_X86 |
1033 | /* | 1036 | /* |
1034 | * If the machine falls into the DMI check table, | 1037 | * If the machine falls into the DMI check table, |
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index cdfc87629efb..4d2b2ad1ee0e 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -5854,9 +5854,10 @@ static int __init init_binder_device(const char *name) | |||
5854 | static int __init binder_init(void) | 5854 | static int __init binder_init(void) |
5855 | { | 5855 | { |
5856 | int ret; | 5856 | int ret; |
5857 | char *device_name, *device_names, *device_tmp; | 5857 | char *device_name, *device_tmp; |
5858 | struct binder_device *device; | 5858 | struct binder_device *device; |
5859 | struct hlist_node *tmp; | 5859 | struct hlist_node *tmp; |
5860 | char *device_names = NULL; | ||
5860 | 5861 | ||
5861 | ret = binder_alloc_shrinker_init(); | 5862 | ret = binder_alloc_shrinker_init(); |
5862 | if (ret) | 5863 | if (ret) |
@@ -5898,23 +5899,29 @@ static int __init binder_init(void) | |||
5898 | &transaction_log_fops); | 5899 | &transaction_log_fops); |
5899 | } | 5900 | } |
5900 | 5901 | ||
5901 | /* | 5902 | if (strcmp(binder_devices_param, "") != 0) { |
5902 | * Copy the module_parameter string, because we don't want to | 5903 | /* |
5903 | * tokenize it in-place. | 5904 | * Copy the module_parameter string, because we don't want to |
5904 | */ | 5905 | * tokenize it in-place. |
5905 | device_names = kstrdup(binder_devices_param, GFP_KERNEL); | 5906 | */ |
5906 | if (!device_names) { | 5907 | device_names = kstrdup(binder_devices_param, GFP_KERNEL); |
5907 | ret = -ENOMEM; | 5908 | if (!device_names) { |
5908 | goto err_alloc_device_names_failed; | 5909 | ret = -ENOMEM; |
5909 | } | 5910 | goto err_alloc_device_names_failed; |
5911 | } | ||
5910 | 5912 | ||
5911 | device_tmp = device_names; | 5913 | device_tmp = device_names; |
5912 | while ((device_name = strsep(&device_tmp, ","))) { | 5914 | while ((device_name = strsep(&device_tmp, ","))) { |
5913 | ret = init_binder_device(device_name); | 5915 | ret = init_binder_device(device_name); |
5914 | if (ret) | 5916 | if (ret) |
5915 | goto err_init_binder_device_failed; | 5917 | goto err_init_binder_device_failed; |
5918 | } | ||
5916 | } | 5919 | } |
5917 | 5920 | ||
5921 | ret = init_binderfs(); | ||
5922 | if (ret) | ||
5923 | goto err_init_binder_device_failed; | ||
5924 | |||
5918 | return ret; | 5925 | return ret; |
5919 | 5926 | ||
5920 | err_init_binder_device_failed: | 5927 | err_init_binder_device_failed: |
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 7fb97f503ef2..045b3e42d98b 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h | |||
@@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode) | |||
46 | } | 46 | } |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #ifdef CONFIG_ANDROID_BINDERFS | ||
50 | extern int __init init_binderfs(void); | ||
51 | #else | ||
52 | static inline int __init init_binderfs(void) | ||
53 | { | ||
54 | return 0; | ||
55 | } | ||
56 | #endif | ||
57 | |||
49 | #endif /* _LINUX_BINDER_INTERNAL_H */ | 58 | #endif /* _LINUX_BINDER_INTERNAL_H */ |
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 6a2185eb66c5..e773f45d19d9 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c | |||
@@ -395,6 +395,11 @@ static int binderfs_binder_ctl_create(struct super_block *sb) | |||
395 | struct inode *inode = NULL; | 395 | struct inode *inode = NULL; |
396 | struct dentry *root = sb->s_root; | 396 | struct dentry *root = sb->s_root; |
397 | struct binderfs_info *info = sb->s_fs_info; | 397 | struct binderfs_info *info = sb->s_fs_info; |
398 | #if defined(CONFIG_IPC_NS) | ||
399 | bool use_reserve = (info->ipc_ns == &init_ipc_ns); | ||
400 | #else | ||
401 | bool use_reserve = true; | ||
402 | #endif | ||
398 | 403 | ||
399 | device = kzalloc(sizeof(*device), GFP_KERNEL); | 404 | device = kzalloc(sizeof(*device), GFP_KERNEL); |
400 | if (!device) | 405 | if (!device) |
@@ -413,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb) | |||
413 | 418 | ||
414 | /* Reserve a new minor number for the new device. */ | 419 | /* Reserve a new minor number for the new device. */ |
415 | mutex_lock(&binderfs_minors_mutex); | 420 | mutex_lock(&binderfs_minors_mutex); |
416 | minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); | 421 | minor = ida_alloc_max(&binderfs_minors, |
422 | use_reserve ? BINDERFS_MAX_MINOR : | ||
423 | BINDERFS_MAX_MINOR_CAPPED, | ||
424 | GFP_KERNEL); | ||
417 | mutex_unlock(&binderfs_minors_mutex); | 425 | mutex_unlock(&binderfs_minors_mutex); |
418 | if (minor < 0) { | 426 | if (minor < 0) { |
419 | ret = minor; | 427 | ret = minor; |
@@ -542,7 +550,7 @@ static struct file_system_type binder_fs_type = { | |||
542 | .fs_flags = FS_USERNS_MOUNT, | 550 | .fs_flags = FS_USERNS_MOUNT, |
543 | }; | 551 | }; |
544 | 552 | ||
545 | static int __init init_binderfs(void) | 553 | int __init init_binderfs(void) |
546 | { | 554 | { |
547 | int ret; | 555 | int ret; |
548 | 556 | ||
@@ -560,5 +568,3 @@ static int __init init_binderfs(void) | |||
560 | 568 | ||
561 | return ret; | 569 | return ret; |
562 | } | 570 | } |
563 | |||
564 | device_initcall(init_binderfs); | ||
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index b8c3f9e6af89..adf28788cab5 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -4554,6 +4554,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4554 | { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, | 4554 | { "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, }, |
4555 | { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, | 4555 | { "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, }, |
4556 | { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, | 4556 | { "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, }, |
4557 | { "SAMSUNG MZ7TE512HMHP-000L1", "EXT06L0Q", ATA_HORKAGE_NOLPM, }, | ||
4557 | 4558 | ||
4558 | /* devices that don't properly handle queued TRIM commands */ | 4559 | /* devices that don't properly handle queued TRIM commands */ |
4559 | { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | 4560 | { "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | |
diff --git a/drivers/auxdisplay/ht16k33.c b/drivers/auxdisplay/ht16k33.c index a43276c76fc6..21393ec3b9a4 100644 --- a/drivers/auxdisplay/ht16k33.c +++ b/drivers/auxdisplay/ht16k33.c | |||
@@ -509,7 +509,7 @@ static int ht16k33_remove(struct i2c_client *client) | |||
509 | struct ht16k33_priv *priv = i2c_get_clientdata(client); | 509 | struct ht16k33_priv *priv = i2c_get_clientdata(client); |
510 | struct ht16k33_fbdev *fbdev = &priv->fbdev; | 510 | struct ht16k33_fbdev *fbdev = &priv->fbdev; |
511 | 511 | ||
512 | cancel_delayed_work(&fbdev->work); | 512 | cancel_delayed_work_sync(&fbdev->work); |
513 | unregister_framebuffer(fbdev->info); | 513 | unregister_framebuffer(fbdev->info); |
514 | framebuffer_release(fbdev->info); | 514 | framebuffer_release(fbdev->info); |
515 | free_page((unsigned long) fbdev->buffer); | 515 | free_page((unsigned long) fbdev->buffer); |
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index cf78fa6d470d..a7359535caf5 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c | |||
@@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np) | |||
79 | ct_idx = get_cacheinfo_idx(this_leaf->type); | 79 | ct_idx = get_cacheinfo_idx(this_leaf->type); |
80 | propname = cache_type_info[ct_idx].size_prop; | 80 | propname = cache_type_info[ct_idx].size_prop; |
81 | 81 | ||
82 | if (of_property_read_u32(np, propname, &this_leaf->size)) | 82 | of_property_read_u32(np, propname, &this_leaf->size); |
83 | this_leaf->size = 0; | ||
84 | } | 83 | } |
85 | 84 | ||
86 | /* not cache_line_size() because that's a macro in include/linux/cache.h */ | 85 | /* not cache_line_size() because that's a macro in include/linux/cache.h */ |
@@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np) | |||
114 | ct_idx = get_cacheinfo_idx(this_leaf->type); | 113 | ct_idx = get_cacheinfo_idx(this_leaf->type); |
115 | propname = cache_type_info[ct_idx].nr_sets_prop; | 114 | propname = cache_type_info[ct_idx].nr_sets_prop; |
116 | 115 | ||
117 | if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) | 116 | of_property_read_u32(np, propname, &this_leaf->number_of_sets); |
118 | this_leaf->number_of_sets = 0; | ||
119 | } | 117 | } |
120 | 118 | ||
121 | static void cache_associativity(struct cacheinfo *this_leaf) | 119 | static void cache_associativity(struct cacheinfo *this_leaf) |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 0ea2139c50d8..ccd296dbb95c 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -95,7 +95,7 @@ static void __update_runtime_status(struct device *dev, enum rpm_status status) | |||
95 | static void pm_runtime_deactivate_timer(struct device *dev) | 95 | static void pm_runtime_deactivate_timer(struct device *dev) |
96 | { | 96 | { |
97 | if (dev->power.timer_expires > 0) { | 97 | if (dev->power.timer_expires > 0) { |
98 | hrtimer_cancel(&dev->power.suspend_timer); | 98 | hrtimer_try_to_cancel(&dev->power.suspend_timer); |
99 | dev->power.timer_expires = 0; | 99 | dev->power.timer_expires = 0; |
100 | } | 100 | } |
101 | } | 101 | } |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 6f2856c6d0f2..55481b40df9a 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4075,7 +4075,7 @@ static unsigned int floppy_check_events(struct gendisk *disk, | |||
4075 | 4075 | ||
4076 | if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { | 4076 | if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) { |
4077 | if (lock_fdc(drive)) | 4077 | if (lock_fdc(drive)) |
4078 | return -EINTR; | 4078 | return 0; |
4079 | poll_drive(false, 0); | 4079 | poll_drive(false, 0); |
4080 | process_fd_request(); | 4080 | process_fd_request(); |
4081 | } | 4081 | } |
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index f94d33525771..d299ec79e4c3 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c | |||
@@ -781,12 +781,12 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = { | |||
781 | SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, | 781 | SYSC_QUIRK("smartreflex", 0, -1, 0x38, -1, 0x00000000, 0xffffffff, |
782 | SYSC_QUIRK_LEGACY_IDLE), | 782 | SYSC_QUIRK_LEGACY_IDLE), |
783 | SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, | 783 | SYSC_QUIRK("timer", 0, 0, 0x10, 0x14, 0x00000015, 0xffffffff, |
784 | SYSC_QUIRK_LEGACY_IDLE), | 784 | 0), |
785 | /* Some timers on omap4 and later */ | 785 | /* Some timers on omap4 and later */ |
786 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, | 786 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x50002100, 0xffffffff, |
787 | SYSC_QUIRK_LEGACY_IDLE), | 787 | 0), |
788 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, | 788 | SYSC_QUIRK("timer", 0, 0, 0x10, -1, 0x4fff1301, 0xffff00ff, |
789 | SYSC_QUIRK_LEGACY_IDLE), | 789 | 0), |
790 | SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, | 790 | SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff, |
791 | SYSC_QUIRK_LEGACY_IDLE), | 791 | SYSC_QUIRK_LEGACY_IDLE), |
792 | /* Uarts on omap4 and later */ | 792 | /* Uarts on omap4 and later */ |
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c index 2fe225a697df..3487e03d4bc6 100644 --- a/drivers/clk/at91/at91sam9x5.c +++ b/drivers/clk/at91/at91sam9x5.c | |||
@@ -144,8 +144,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, | |||
144 | return; | 144 | return; |
145 | 145 | ||
146 | at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1, | 146 | at91sam9x5_pmc = pmc_data_allocate(PMC_MAIN + 1, |
147 | nck(at91sam9x5_systemck), | 147 | nck(at91sam9x5_systemck), 31, 0); |
148 | nck(at91sam9x35_periphck), 0); | ||
149 | if (!at91sam9x5_pmc) | 148 | if (!at91sam9x5_pmc) |
150 | return; | 149 | return; |
151 | 150 | ||
@@ -210,7 +209,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np, | |||
210 | parent_names[1] = "mainck"; | 209 | parent_names[1] = "mainck"; |
211 | parent_names[2] = "plladivck"; | 210 | parent_names[2] = "plladivck"; |
212 | parent_names[3] = "utmick"; | 211 | parent_names[3] = "utmick"; |
213 | parent_names[4] = "mck"; | 212 | parent_names[4] = "masterck"; |
214 | for (i = 0; i < 2; i++) { | 213 | for (i = 0; i < 2; i++) { |
215 | char name[6]; | 214 | char name[6]; |
216 | 215 | ||
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c index d69ad96fe988..cd0ef7274fdb 100644 --- a/drivers/clk/at91/sama5d2.c +++ b/drivers/clk/at91/sama5d2.c | |||
@@ -240,7 +240,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) | |||
240 | parent_names[1] = "mainck"; | 240 | parent_names[1] = "mainck"; |
241 | parent_names[2] = "plladivck"; | 241 | parent_names[2] = "plladivck"; |
242 | parent_names[3] = "utmick"; | 242 | parent_names[3] = "utmick"; |
243 | parent_names[4] = "mck"; | 243 | parent_names[4] = "masterck"; |
244 | for (i = 0; i < 3; i++) { | 244 | for (i = 0; i < 3; i++) { |
245 | char name[6]; | 245 | char name[6]; |
246 | 246 | ||
@@ -291,7 +291,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np) | |||
291 | parent_names[1] = "mainck"; | 291 | parent_names[1] = "mainck"; |
292 | parent_names[2] = "plladivck"; | 292 | parent_names[2] = "plladivck"; |
293 | parent_names[3] = "utmick"; | 293 | parent_names[3] = "utmick"; |
294 | parent_names[4] = "mck"; | 294 | parent_names[4] = "masterck"; |
295 | parent_names[5] = "audiopll_pmcck"; | 295 | parent_names[5] = "audiopll_pmcck"; |
296 | for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) { | 296 | for (i = 0; i < ARRAY_SIZE(sama5d2_gck); i++) { |
297 | hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, | 297 | hw = at91_clk_register_generated(regmap, &pmc_pcr_lock, |
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c index e358be7f6c8d..b645a9d59cdb 100644 --- a/drivers/clk/at91/sama5d4.c +++ b/drivers/clk/at91/sama5d4.c | |||
@@ -207,7 +207,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np) | |||
207 | parent_names[1] = "mainck"; | 207 | parent_names[1] = "mainck"; |
208 | parent_names[2] = "plladivck"; | 208 | parent_names[2] = "plladivck"; |
209 | parent_names[3] = "utmick"; | 209 | parent_names[3] = "utmick"; |
210 | parent_names[4] = "mck"; | 210 | parent_names[4] = "masterck"; |
211 | for (i = 0; i < 3; i++) { | 211 | for (i = 0; i < 3; i++) { |
212 | char name[6]; | 212 | char name[6]; |
213 | 213 | ||
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 3b97f60540ad..609970c0b666 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c | |||
@@ -264,9 +264,9 @@ static SUNXI_CCU_GATE(ahb1_mmc1_clk, "ahb1-mmc1", "ahb1", | |||
264 | static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1", | 264 | static SUNXI_CCU_GATE(ahb1_mmc2_clk, "ahb1-mmc2", "ahb1", |
265 | 0x060, BIT(10), 0); | 265 | 0x060, BIT(10), 0); |
266 | static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1", | 266 | static SUNXI_CCU_GATE(ahb1_mmc3_clk, "ahb1-mmc3", "ahb1", |
267 | 0x060, BIT(12), 0); | 267 | 0x060, BIT(11), 0); |
268 | static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1", | 268 | static SUNXI_CCU_GATE(ahb1_nand1_clk, "ahb1-nand1", "ahb1", |
269 | 0x060, BIT(13), 0); | 269 | 0x060, BIT(12), 0); |
270 | static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1", | 270 | static SUNXI_CCU_GATE(ahb1_nand0_clk, "ahb1-nand0", "ahb1", |
271 | 0x060, BIT(13), 0); | 271 | 0x060, BIT(13), 0); |
272 | static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1", | 272 | static SUNXI_CCU_GATE(ahb1_sdram_clk, "ahb1-sdram", "ahb1", |
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c index 621b1cd996db..ac12f261f8ca 100644 --- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c +++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c | |||
@@ -542,7 +542,7 @@ static struct ccu_reset_map sun8i_v3s_ccu_resets[] = { | |||
542 | [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, | 542 | [RST_BUS_OHCI0] = { 0x2c0, BIT(29) }, |
543 | 543 | ||
544 | [RST_BUS_VE] = { 0x2c4, BIT(0) }, | 544 | [RST_BUS_VE] = { 0x2c4, BIT(0) }, |
545 | [RST_BUS_TCON0] = { 0x2c4, BIT(3) }, | 545 | [RST_BUS_TCON0] = { 0x2c4, BIT(4) }, |
546 | [RST_BUS_CSI] = { 0x2c4, BIT(8) }, | 546 | [RST_BUS_CSI] = { 0x2c4, BIT(8) }, |
547 | [RST_BUS_DE] = { 0x2c4, BIT(12) }, | 547 | [RST_BUS_DE] = { 0x2c4, BIT(12) }, |
548 | [RST_BUS_DBG] = { 0x2c4, BIT(31) }, | 548 | [RST_BUS_DBG] = { 0x2c4, BIT(31) }, |
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c index 595124074821..c364027638e1 100644 --- a/drivers/clocksource/timer-ti-dm.c +++ b/drivers/clocksource/timer-ti-dm.c | |||
@@ -154,6 +154,10 @@ static int omap_dm_timer_of_set_source(struct omap_dm_timer *timer) | |||
154 | if (IS_ERR(parent)) | 154 | if (IS_ERR(parent)) |
155 | return -ENODEV; | 155 | return -ENODEV; |
156 | 156 | ||
157 | /* Bail out if both clocks point to fck */ | ||
158 | if (clk_is_match(parent, timer->fclk)) | ||
159 | return 0; | ||
160 | |||
157 | ret = clk_set_parent(timer->fclk, parent); | 161 | ret = clk_set_parent(timer->fclk, parent); |
158 | if (ret < 0) | 162 | if (ret < 0) |
159 | pr_err("%s: failed to set parent\n", __func__); | 163 | pr_err("%s: failed to set parent\n", __func__); |
@@ -864,7 +868,6 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
864 | timer->pdev = pdev; | 868 | timer->pdev = pdev; |
865 | 869 | ||
866 | pm_runtime_enable(dev); | 870 | pm_runtime_enable(dev); |
867 | pm_runtime_irq_safe(dev); | ||
868 | 871 | ||
869 | if (!timer->reserved) { | 872 | if (!timer->reserved) { |
870 | ret = pm_runtime_get_sync(dev); | 873 | ret = pm_runtime_get_sync(dev); |
diff --git a/drivers/cpufreq/scmi-cpufreq.c b/drivers/cpufreq/scmi-cpufreq.c index 242c3370544e..9ed46d188cb5 100644 --- a/drivers/cpufreq/scmi-cpufreq.c +++ b/drivers/cpufreq/scmi-cpufreq.c | |||
@@ -187,8 +187,8 @@ static int scmi_cpufreq_exit(struct cpufreq_policy *policy) | |||
187 | 187 | ||
188 | cpufreq_cooling_unregister(priv->cdev); | 188 | cpufreq_cooling_unregister(priv->cdev); |
189 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); | 189 | dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); |
190 | kfree(priv); | ||
191 | dev_pm_opp_remove_all_dynamic(priv->cpu_dev); | 190 | dev_pm_opp_remove_all_dynamic(priv->cpu_dev); |
191 | kfree(priv); | ||
192 | 192 | ||
193 | return 0; | 193 | return 0; |
194 | } | 194 | } |
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 8ada308d72ee..b0125ad65825 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c | |||
@@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
380 | rc = cc_ivgen_init(new_drvdata); | 380 | rc = cc_ivgen_init(new_drvdata); |
381 | if (rc) { | 381 | if (rc) { |
382 | dev_err(dev, "cc_ivgen_init failed\n"); | 382 | dev_err(dev, "cc_ivgen_init failed\n"); |
383 | goto post_power_mgr_err; | 383 | goto post_buf_mgr_err; |
384 | } | 384 | } |
385 | 385 | ||
386 | /* Allocate crypto algs */ | 386 | /* Allocate crypto algs */ |
@@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
403 | goto post_hash_err; | 403 | goto post_hash_err; |
404 | } | 404 | } |
405 | 405 | ||
406 | /* All set, we can allow autosuspend */ | ||
407 | cc_pm_go(new_drvdata); | ||
408 | |||
406 | /* If we got here and FIPS mode is enabled | 409 | /* If we got here and FIPS mode is enabled |
407 | * it means all FIPS test passed, so let TEE | 410 | * it means all FIPS test passed, so let TEE |
408 | * know we're good. | 411 | * know we're good. |
@@ -417,8 +420,6 @@ post_cipher_err: | |||
417 | cc_cipher_free(new_drvdata); | 420 | cc_cipher_free(new_drvdata); |
418 | post_ivgen_err: | 421 | post_ivgen_err: |
419 | cc_ivgen_fini(new_drvdata); | 422 | cc_ivgen_fini(new_drvdata); |
420 | post_power_mgr_err: | ||
421 | cc_pm_fini(new_drvdata); | ||
422 | post_buf_mgr_err: | 423 | post_buf_mgr_err: |
423 | cc_buffer_mgr_fini(new_drvdata); | 424 | cc_buffer_mgr_fini(new_drvdata); |
424 | post_req_mgr_err: | 425 | post_req_mgr_err: |
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index d990f472e89f..6ff7e75ad90e 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c | |||
@@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev) | |||
100 | 100 | ||
101 | int cc_pm_init(struct cc_drvdata *drvdata) | 101 | int cc_pm_init(struct cc_drvdata *drvdata) |
102 | { | 102 | { |
103 | int rc = 0; | ||
104 | struct device *dev = drvdata_to_dev(drvdata); | 103 | struct device *dev = drvdata_to_dev(drvdata); |
105 | 104 | ||
106 | /* must be before the enabling to avoid resdundent suspending */ | 105 | /* must be before the enabling to avoid resdundent suspending */ |
107 | pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); | 106 | pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); |
108 | pm_runtime_use_autosuspend(dev); | 107 | pm_runtime_use_autosuspend(dev); |
109 | /* activate the PM module */ | 108 | /* activate the PM module */ |
110 | rc = pm_runtime_set_active(dev); | 109 | return pm_runtime_set_active(dev); |
111 | if (rc) | 110 | } |
112 | return rc; | ||
113 | /* enable the PM module*/ | ||
114 | pm_runtime_enable(dev); | ||
115 | 111 | ||
116 | return rc; | 112 | /* enable the PM module*/ |
113 | void cc_pm_go(struct cc_drvdata *drvdata) | ||
114 | { | ||
115 | pm_runtime_enable(drvdata_to_dev(drvdata)); | ||
117 | } | 116 | } |
118 | 117 | ||
119 | void cc_pm_fini(struct cc_drvdata *drvdata) | 118 | void cc_pm_fini(struct cc_drvdata *drvdata) |
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index 020a5403c58b..f62624357020 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h | |||
@@ -16,6 +16,7 @@ | |||
16 | extern const struct dev_pm_ops ccree_pm; | 16 | extern const struct dev_pm_ops ccree_pm; |
17 | 17 | ||
18 | int cc_pm_init(struct cc_drvdata *drvdata); | 18 | int cc_pm_init(struct cc_drvdata *drvdata); |
19 | void cc_pm_go(struct cc_drvdata *drvdata); | ||
19 | void cc_pm_fini(struct cc_drvdata *drvdata); | 20 | void cc_pm_fini(struct cc_drvdata *drvdata); |
20 | int cc_pm_suspend(struct device *dev); | 21 | int cc_pm_suspend(struct device *dev); |
21 | int cc_pm_resume(struct device *dev); | 22 | int cc_pm_resume(struct device *dev); |
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata) | |||
29 | return 0; | 30 | return 0; |
30 | } | 31 | } |
31 | 32 | ||
33 | static void cc_pm_go(struct cc_drvdata *drvdata) {} | ||
34 | |||
32 | static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} | 35 | static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} |
33 | 36 | ||
34 | static inline int cc_pm_suspend(struct device *dev) | 37 | static inline int cc_pm_suspend(struct device *dev) |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 4e557684f792..fe69dccfa0c0 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
@@ -203,6 +203,7 @@ struct at_xdmac_chan { | |||
203 | u32 save_cim; | 203 | u32 save_cim; |
204 | u32 save_cnda; | 204 | u32 save_cnda; |
205 | u32 save_cndc; | 205 | u32 save_cndc; |
206 | u32 irq_status; | ||
206 | unsigned long status; | 207 | unsigned long status; |
207 | struct tasklet_struct tasklet; | 208 | struct tasklet_struct tasklet; |
208 | struct dma_slave_config sconfig; | 209 | struct dma_slave_config sconfig; |
@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data) | |||
1580 | struct at_xdmac_desc *desc; | 1581 | struct at_xdmac_desc *desc; |
1581 | u32 error_mask; | 1582 | u32 error_mask; |
1582 | 1583 | ||
1583 | dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", | 1584 | dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", |
1584 | __func__, atchan->status); | 1585 | __func__, atchan->irq_status); |
1585 | 1586 | ||
1586 | error_mask = AT_XDMAC_CIS_RBEIS | 1587 | error_mask = AT_XDMAC_CIS_RBEIS |
1587 | | AT_XDMAC_CIS_WBEIS | 1588 | | AT_XDMAC_CIS_WBEIS |
@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data) | |||
1589 | 1590 | ||
1590 | if (at_xdmac_chan_is_cyclic(atchan)) { | 1591 | if (at_xdmac_chan_is_cyclic(atchan)) { |
1591 | at_xdmac_handle_cyclic(atchan); | 1592 | at_xdmac_handle_cyclic(atchan); |
1592 | } else if ((atchan->status & AT_XDMAC_CIS_LIS) | 1593 | } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) |
1593 | || (atchan->status & error_mask)) { | 1594 | || (atchan->irq_status & error_mask)) { |
1594 | struct dma_async_tx_descriptor *txd; | 1595 | struct dma_async_tx_descriptor *txd; |
1595 | 1596 | ||
1596 | if (atchan->status & AT_XDMAC_CIS_RBEIS) | 1597 | if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) |
1597 | dev_err(chan2dev(&atchan->chan), "read bus error!!!"); | 1598 | dev_err(chan2dev(&atchan->chan), "read bus error!!!"); |
1598 | if (atchan->status & AT_XDMAC_CIS_WBEIS) | 1599 | if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) |
1599 | dev_err(chan2dev(&atchan->chan), "write bus error!!!"); | 1600 | dev_err(chan2dev(&atchan->chan), "write bus error!!!"); |
1600 | if (atchan->status & AT_XDMAC_CIS_ROIS) | 1601 | if (atchan->irq_status & AT_XDMAC_CIS_ROIS) |
1601 | dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); | 1602 | dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); |
1602 | 1603 | ||
1603 | spin_lock(&atchan->lock); | 1604 | spin_lock(&atchan->lock); |
@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) | |||
1652 | atchan = &atxdmac->chan[i]; | 1653 | atchan = &atxdmac->chan[i]; |
1653 | chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); | 1654 | chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); |
1654 | chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); | 1655 | chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); |
1655 | atchan->status = chan_status & chan_imr; | 1656 | atchan->irq_status = chan_status & chan_imr; |
1656 | dev_vdbg(atxdmac->dma.dev, | 1657 | dev_vdbg(atxdmac->dma.dev, |
1657 | "%s: chan%d: imr=0x%x, status=0x%x\n", | 1658 | "%s: chan%d: imr=0x%x, status=0x%x\n", |
1658 | __func__, i, chan_imr, chan_status); | 1659 | __func__, i, chan_imr, chan_status); |
@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) | |||
1666 | at_xdmac_chan_read(atchan, AT_XDMAC_CDA), | 1667 | at_xdmac_chan_read(atchan, AT_XDMAC_CDA), |
1667 | at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); | 1668 | at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); |
1668 | 1669 | ||
1669 | if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) | 1670 | if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) |
1670 | at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); | 1671 | at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); |
1671 | 1672 | ||
1672 | tasklet_schedule(&atchan->tasklet); | 1673 | tasklet_schedule(&atchan->tasklet); |
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 1a44c8086d77..ae10f5614f95 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c | |||
@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg( | |||
406 | } | 406 | } |
407 | } | 407 | } |
408 | 408 | ||
409 | static int bcm2835_dma_abort(void __iomem *chan_base) | 409 | static int bcm2835_dma_abort(struct bcm2835_chan *c) |
410 | { | 410 | { |
411 | unsigned long cs; | 411 | void __iomem *chan_base = c->chan_base; |
412 | long int timeout = 10000; | 412 | long int timeout = 10000; |
413 | 413 | ||
414 | cs = readl(chan_base + BCM2835_DMA_CS); | 414 | /* |
415 | if (!(cs & BCM2835_DMA_ACTIVE)) | 415 | * A zero control block address means the channel is idle. |
416 | * (The ACTIVE flag in the CS register is not a reliable indicator.) | ||
417 | */ | ||
418 | if (!readl(chan_base + BCM2835_DMA_ADDR)) | ||
416 | return 0; | 419 | return 0; |
417 | 420 | ||
418 | /* Write 0 to the active bit - Pause the DMA */ | 421 | /* Write 0 to the active bit - Pause the DMA */ |
419 | writel(0, chan_base + BCM2835_DMA_CS); | 422 | writel(0, chan_base + BCM2835_DMA_CS); |
420 | 423 | ||
421 | /* Wait for any current AXI transfer to complete */ | 424 | /* Wait for any current AXI transfer to complete */ |
422 | while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { | 425 | while ((readl(chan_base + BCM2835_DMA_CS) & |
426 | BCM2835_DMA_WAITING_FOR_WRITES) && --timeout) | ||
423 | cpu_relax(); | 427 | cpu_relax(); |
424 | cs = readl(chan_base + BCM2835_DMA_CS); | ||
425 | } | ||
426 | 428 | ||
427 | /* We'll un-pause when we set of our next DMA */ | 429 | /* Peripheral might be stuck and fail to signal AXI write responses */ |
428 | if (!timeout) | 430 | if (!timeout) |
429 | return -ETIMEDOUT; | 431 | dev_err(c->vc.chan.device->dev, |
430 | 432 | "failed to complete outstanding writes\n"); | |
431 | if (!(cs & BCM2835_DMA_ACTIVE)) | ||
432 | return 0; | ||
433 | |||
434 | /* Terminate the control block chain */ | ||
435 | writel(0, chan_base + BCM2835_DMA_NEXTCB); | ||
436 | |||
437 | /* Abort the whole DMA */ | ||
438 | writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, | ||
439 | chan_base + BCM2835_DMA_CS); | ||
440 | 433 | ||
434 | writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); | ||
441 | return 0; | 435 | return 0; |
442 | } | 436 | } |
443 | 437 | ||
@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) | |||
476 | 470 | ||
477 | spin_lock_irqsave(&c->vc.lock, flags); | 471 | spin_lock_irqsave(&c->vc.lock, flags); |
478 | 472 | ||
479 | /* Acknowledge interrupt */ | 473 | /* |
480 | writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); | 474 | * Clear the INT flag to receive further interrupts. Keep the channel |
475 | * active in case the descriptor is cyclic or in case the client has | ||
476 | * already terminated the descriptor and issued a new one. (May happen | ||
477 | * if this IRQ handler is threaded.) If the channel is finished, it | ||
478 | * will remain idle despite the ACTIVE flag being set. | ||
479 | */ | ||
480 | writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE, | ||
481 | c->chan_base + BCM2835_DMA_CS); | ||
481 | 482 | ||
482 | d = c->desc; | 483 | d = c->desc; |
483 | 484 | ||
@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) | |||
485 | if (d->cyclic) { | 486 | if (d->cyclic) { |
486 | /* call the cyclic callback */ | 487 | /* call the cyclic callback */ |
487 | vchan_cyclic_callback(&d->vd); | 488 | vchan_cyclic_callback(&d->vd); |
488 | 489 | } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { | |
489 | /* Keep the DMA engine running */ | ||
490 | writel(BCM2835_DMA_ACTIVE, | ||
491 | c->chan_base + BCM2835_DMA_CS); | ||
492 | } else { | ||
493 | vchan_cookie_complete(&c->desc->vd); | 490 | vchan_cookie_complete(&c->desc->vd); |
494 | bcm2835_dma_start_desc(c); | 491 | bcm2835_dma_start_desc(c); |
495 | } | 492 | } |
@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) | |||
779 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | 776 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); |
780 | struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); | 777 | struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); |
781 | unsigned long flags; | 778 | unsigned long flags; |
782 | int timeout = 10000; | ||
783 | LIST_HEAD(head); | 779 | LIST_HEAD(head); |
784 | 780 | ||
785 | spin_lock_irqsave(&c->vc.lock, flags); | 781 | spin_lock_irqsave(&c->vc.lock, flags); |
@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) | |||
789 | list_del_init(&c->node); | 785 | list_del_init(&c->node); |
790 | spin_unlock(&d->lock); | 786 | spin_unlock(&d->lock); |
791 | 787 | ||
792 | /* | 788 | /* stop DMA activity */ |
793 | * Stop DMA activity: we assume the callback will not be called | ||
794 | * after bcm_dma_abort() returns (even if it does, it will see | ||
795 | * c->desc is NULL and exit.) | ||
796 | */ | ||
797 | if (c->desc) { | 789 | if (c->desc) { |
798 | vchan_terminate_vdesc(&c->desc->vd); | 790 | vchan_terminate_vdesc(&c->desc->vd); |
799 | c->desc = NULL; | 791 | c->desc = NULL; |
800 | bcm2835_dma_abort(c->chan_base); | 792 | bcm2835_dma_abort(c); |
801 | |||
802 | /* Wait for stopping */ | ||
803 | while (--timeout) { | ||
804 | if (!(readl(c->chan_base + BCM2835_DMA_CS) & | ||
805 | BCM2835_DMA_ACTIVE)) | ||
806 | break; | ||
807 | |||
808 | cpu_relax(); | ||
809 | } | ||
810 | |||
811 | if (!timeout) | ||
812 | dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); | ||
813 | } | 793 | } |
814 | 794 | ||
815 | vchan_get_all_descriptors(&c->vc, &head); | 795 | vchan_get_all_descriptors(&c->vc, &head); |
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 2eea4ef72915..6511928b4cdf 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -711,11 +711,9 @@ static int dmatest_func(void *data) | |||
711 | srcs[i] = um->addr[i] + src_off; | 711 | srcs[i] = um->addr[i] + src_off; |
712 | ret = dma_mapping_error(dev->dev, um->addr[i]); | 712 | ret = dma_mapping_error(dev->dev, um->addr[i]); |
713 | if (ret) { | 713 | if (ret) { |
714 | dmaengine_unmap_put(um); | ||
715 | result("src mapping error", total_tests, | 714 | result("src mapping error", total_tests, |
716 | src_off, dst_off, len, ret); | 715 | src_off, dst_off, len, ret); |
717 | failed_tests++; | 716 | goto error_unmap_continue; |
718 | continue; | ||
719 | } | 717 | } |
720 | um->to_cnt++; | 718 | um->to_cnt++; |
721 | } | 719 | } |
@@ -730,11 +728,9 @@ static int dmatest_func(void *data) | |||
730 | DMA_BIDIRECTIONAL); | 728 | DMA_BIDIRECTIONAL); |
731 | ret = dma_mapping_error(dev->dev, dsts[i]); | 729 | ret = dma_mapping_error(dev->dev, dsts[i]); |
732 | if (ret) { | 730 | if (ret) { |
733 | dmaengine_unmap_put(um); | ||
734 | result("dst mapping error", total_tests, | 731 | result("dst mapping error", total_tests, |
735 | src_off, dst_off, len, ret); | 732 | src_off, dst_off, len, ret); |
736 | failed_tests++; | 733 | goto error_unmap_continue; |
737 | continue; | ||
738 | } | 734 | } |
739 | um->bidi_cnt++; | 735 | um->bidi_cnt++; |
740 | } | 736 | } |
@@ -762,12 +758,10 @@ static int dmatest_func(void *data) | |||
762 | } | 758 | } |
763 | 759 | ||
764 | if (!tx) { | 760 | if (!tx) { |
765 | dmaengine_unmap_put(um); | ||
766 | result("prep error", total_tests, src_off, | 761 | result("prep error", total_tests, src_off, |
767 | dst_off, len, ret); | 762 | dst_off, len, ret); |
768 | msleep(100); | 763 | msleep(100); |
769 | failed_tests++; | 764 | goto error_unmap_continue; |
770 | continue; | ||
771 | } | 765 | } |
772 | 766 | ||
773 | done->done = false; | 767 | done->done = false; |
@@ -776,12 +770,10 @@ static int dmatest_func(void *data) | |||
776 | cookie = tx->tx_submit(tx); | 770 | cookie = tx->tx_submit(tx); |
777 | 771 | ||
778 | if (dma_submit_error(cookie)) { | 772 | if (dma_submit_error(cookie)) { |
779 | dmaengine_unmap_put(um); | ||
780 | result("submit error", total_tests, src_off, | 773 | result("submit error", total_tests, src_off, |
781 | dst_off, len, ret); | 774 | dst_off, len, ret); |
782 | msleep(100); | 775 | msleep(100); |
783 | failed_tests++; | 776 | goto error_unmap_continue; |
784 | continue; | ||
785 | } | 777 | } |
786 | dma_async_issue_pending(chan); | 778 | dma_async_issue_pending(chan); |
787 | 779 | ||
@@ -790,22 +782,20 @@ static int dmatest_func(void *data) | |||
790 | 782 | ||
791 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 783 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
792 | 784 | ||
793 | dmaengine_unmap_put(um); | ||
794 | |||
795 | if (!done->done) { | 785 | if (!done->done) { |
796 | result("test timed out", total_tests, src_off, dst_off, | 786 | result("test timed out", total_tests, src_off, dst_off, |
797 | len, 0); | 787 | len, 0); |
798 | failed_tests++; | 788 | goto error_unmap_continue; |
799 | continue; | ||
800 | } else if (status != DMA_COMPLETE) { | 789 | } else if (status != DMA_COMPLETE) { |
801 | result(status == DMA_ERROR ? | 790 | result(status == DMA_ERROR ? |
802 | "completion error status" : | 791 | "completion error status" : |
803 | "completion busy status", total_tests, src_off, | 792 | "completion busy status", total_tests, src_off, |
804 | dst_off, len, ret); | 793 | dst_off, len, ret); |
805 | failed_tests++; | 794 | goto error_unmap_continue; |
806 | continue; | ||
807 | } | 795 | } |
808 | 796 | ||
797 | dmaengine_unmap_put(um); | ||
798 | |||
809 | if (params->noverify) { | 799 | if (params->noverify) { |
810 | verbose_result("test passed", total_tests, src_off, | 800 | verbose_result("test passed", total_tests, src_off, |
811 | dst_off, len, 0); | 801 | dst_off, len, 0); |
@@ -846,6 +836,12 @@ static int dmatest_func(void *data) | |||
846 | verbose_result("test passed", total_tests, src_off, | 836 | verbose_result("test passed", total_tests, src_off, |
847 | dst_off, len, 0); | 837 | dst_off, len, 0); |
848 | } | 838 | } |
839 | |||
840 | continue; | ||
841 | |||
842 | error_unmap_continue: | ||
843 | dmaengine_unmap_put(um); | ||
844 | failed_tests++; | ||
849 | } | 845 | } |
850 | ktime = ktime_sub(ktime_get(), ktime); | 846 | ktime = ktime_sub(ktime_get(), ktime); |
851 | ktime = ktime_sub(ktime, comparetime); | 847 | ktime = ktime_sub(ktime, comparetime); |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index c2fff3f6c9ca..4a09af3cd546 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data) | |||
618 | { | 618 | { |
619 | struct imxdma_channel *imxdmac = (void *)data; | 619 | struct imxdma_channel *imxdmac = (void *)data; |
620 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 620 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
621 | struct imxdma_desc *desc; | 621 | struct imxdma_desc *desc, *next_desc; |
622 | unsigned long flags; | 622 | unsigned long flags; |
623 | 623 | ||
624 | spin_lock_irqsave(&imxdma->lock, flags); | 624 | spin_lock_irqsave(&imxdma->lock, flags); |
@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data) | |||
648 | list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); | 648 | list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); |
649 | 649 | ||
650 | if (!list_empty(&imxdmac->ld_queue)) { | 650 | if (!list_empty(&imxdmac->ld_queue)) { |
651 | desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, | 651 | next_desc = list_first_entry(&imxdmac->ld_queue, |
652 | node); | 652 | struct imxdma_desc, node); |
653 | list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); | 653 | list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); |
654 | if (imxdma_xfer_desc(desc) < 0) | 654 | if (imxdma_xfer_desc(next_desc) < 0) |
655 | dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", | 655 | dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", |
656 | __func__, imxdmac->channel); | 656 | __func__, imxdmac->channel); |
657 | } | 657 | } |
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 472c88ae1c0f..92f843eaf1e0 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c | |||
@@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver) | |||
119 | } | 119 | } |
120 | EXPORT_SYMBOL_GPL(scmi_driver_unregister); | 120 | EXPORT_SYMBOL_GPL(scmi_driver_unregister); |
121 | 121 | ||
122 | static void scmi_device_release(struct device *dev) | ||
123 | { | ||
124 | kfree(to_scmi_dev(dev)); | ||
125 | } | ||
126 | |||
122 | struct scmi_device * | 127 | struct scmi_device * |
123 | scmi_device_create(struct device_node *np, struct device *parent, int protocol) | 128 | scmi_device_create(struct device_node *np, struct device *parent, int protocol) |
124 | { | 129 | { |
@@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) | |||
138 | scmi_dev->dev.parent = parent; | 143 | scmi_dev->dev.parent = parent; |
139 | scmi_dev->dev.of_node = np; | 144 | scmi_dev->dev.of_node = np; |
140 | scmi_dev->dev.bus = &scmi_bus_type; | 145 | scmi_dev->dev.bus = &scmi_bus_type; |
146 | scmi_dev->dev.release = scmi_device_release; | ||
141 | dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); | 147 | dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); |
142 | 148 | ||
143 | retval = device_register(&scmi_dev->dev); | 149 | retval = device_register(&scmi_dev->dev); |
@@ -156,9 +162,8 @@ free_mem: | |||
156 | void scmi_device_destroy(struct scmi_device *scmi_dev) | 162 | void scmi_device_destroy(struct scmi_device *scmi_dev) |
157 | { | 163 | { |
158 | scmi_handle_put(scmi_dev->handle); | 164 | scmi_handle_put(scmi_dev->handle); |
159 | device_unregister(&scmi_dev->dev); | ||
160 | ida_simple_remove(&scmi_bus_id, scmi_dev->id); | 165 | ida_simple_remove(&scmi_bus_id, scmi_dev->id); |
161 | kfree(scmi_dev); | 166 | device_unregister(&scmi_dev->dev); |
162 | } | 167 | } |
163 | 168 | ||
164 | void scmi_set_handle(struct scmi_device *scmi_dev) | 169 | void scmi_set_handle(struct scmi_device *scmi_dev) |
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 4c46ff6f2242..55b77c576c42 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c | |||
@@ -592,11 +592,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz, | |||
592 | 592 | ||
593 | early_memunmap(tbl, sizeof(*tbl)); | 593 | early_memunmap(tbl, sizeof(*tbl)); |
594 | } | 594 | } |
595 | return 0; | ||
596 | } | ||
597 | 595 | ||
598 | int __init efi_apply_persistent_mem_reservations(void) | ||
599 | { | ||
600 | if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { | 596 | if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) { |
601 | unsigned long prsv = efi.mem_reserve; | 597 | unsigned long prsv = efi.mem_reserve; |
602 | 598 | ||
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index eee42d5e25ee..c037c6c5d0b7 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c | |||
@@ -75,9 +75,6 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg) | |||
75 | efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; | 75 | efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID; |
76 | efi_status_t status; | 76 | efi_status_t status; |
77 | 77 | ||
78 | if (IS_ENABLED(CONFIG_ARM)) | ||
79 | return; | ||
80 | |||
81 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), | 78 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv), |
82 | (void **)&rsv); | 79 | (void **)&rsv); |
83 | if (status != EFI_SUCCESS) { | 80 | if (status != EFI_SUCCESS) { |
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c index 8903b9ccfc2b..e2abfdb5cee6 100644 --- a/drivers/firmware/efi/runtime-wrappers.c +++ b/drivers/firmware/efi/runtime-wrappers.c | |||
@@ -147,6 +147,13 @@ void efi_call_virt_check_flags(unsigned long flags, const char *call) | |||
147 | static DEFINE_SEMAPHORE(efi_runtime_lock); | 147 | static DEFINE_SEMAPHORE(efi_runtime_lock); |
148 | 148 | ||
149 | /* | 149 | /* |
150 | * Expose the EFI runtime lock to the UV platform | ||
151 | */ | ||
152 | #ifdef CONFIG_X86_UV | ||
153 | extern struct semaphore __efi_uv_runtime_lock __alias(efi_runtime_lock); | ||
154 | #endif | ||
155 | |||
156 | /* | ||
150 | * Calls the appropriate efi_runtime_service() with the appropriate | 157 | * Calls the appropriate efi_runtime_service() with the appropriate |
151 | * arguments. | 158 | * arguments. |
152 | * | 159 | * |
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index a1a09e04fab8..13851b3d1c56 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c | |||
@@ -508,14 +508,11 @@ static int __init s10_init(void) | |||
508 | return -ENODEV; | 508 | return -ENODEV; |
509 | 509 | ||
510 | np = of_find_matching_node(fw_np, s10_of_match); | 510 | np = of_find_matching_node(fw_np, s10_of_match); |
511 | if (!np) { | 511 | if (!np) |
512 | of_node_put(fw_np); | ||
513 | return -ENODEV; | 512 | return -ENODEV; |
514 | } | ||
515 | 513 | ||
516 | of_node_put(np); | 514 | of_node_put(np); |
517 | ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); | 515 | ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); |
518 | of_node_put(fw_np); | ||
519 | if (ret) | 516 | if (ret) |
520 | return ret; | 517 | return ret; |
521 | 518 | ||
diff --git a/drivers/gpio/gpio-mt7621.c b/drivers/gpio/gpio-mt7621.c index 00e954f22bc9..74401e0adb29 100644 --- a/drivers/gpio/gpio-mt7621.c +++ b/drivers/gpio/gpio-mt7621.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #define GPIO_REG_EDGE 0xA0 | 30 | #define GPIO_REG_EDGE 0xA0 |
31 | 31 | ||
32 | struct mtk_gc { | 32 | struct mtk_gc { |
33 | struct irq_chip irq_chip; | ||
33 | struct gpio_chip chip; | 34 | struct gpio_chip chip; |
34 | spinlock_t lock; | 35 | spinlock_t lock; |
35 | int bank; | 36 | int bank; |
@@ -189,13 +190,6 @@ mediatek_gpio_irq_type(struct irq_data *d, unsigned int type) | |||
189 | return 0; | 190 | return 0; |
190 | } | 191 | } |
191 | 192 | ||
192 | static struct irq_chip mediatek_gpio_irq_chip = { | ||
193 | .irq_unmask = mediatek_gpio_irq_unmask, | ||
194 | .irq_mask = mediatek_gpio_irq_mask, | ||
195 | .irq_mask_ack = mediatek_gpio_irq_mask, | ||
196 | .irq_set_type = mediatek_gpio_irq_type, | ||
197 | }; | ||
198 | |||
199 | static int | 193 | static int |
200 | mediatek_gpio_xlate(struct gpio_chip *chip, | 194 | mediatek_gpio_xlate(struct gpio_chip *chip, |
201 | const struct of_phandle_args *spec, u32 *flags) | 195 | const struct of_phandle_args *spec, u32 *flags) |
@@ -254,6 +248,13 @@ mediatek_gpio_bank_probe(struct device *dev, | |||
254 | return ret; | 248 | return ret; |
255 | } | 249 | } |
256 | 250 | ||
251 | rg->irq_chip.name = dev_name(dev); | ||
252 | rg->irq_chip.parent_device = dev; | ||
253 | rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask; | ||
254 | rg->irq_chip.irq_mask = mediatek_gpio_irq_mask; | ||
255 | rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask; | ||
256 | rg->irq_chip.irq_set_type = mediatek_gpio_irq_type; | ||
257 | |||
257 | if (mtk->gpio_irq) { | 258 | if (mtk->gpio_irq) { |
258 | /* | 259 | /* |
259 | * Manually request the irq here instead of passing | 260 | * Manually request the irq here instead of passing |
@@ -270,14 +271,14 @@ mediatek_gpio_bank_probe(struct device *dev, | |||
270 | return ret; | 271 | return ret; |
271 | } | 272 | } |
272 | 273 | ||
273 | ret = gpiochip_irqchip_add(&rg->chip, &mediatek_gpio_irq_chip, | 274 | ret = gpiochip_irqchip_add(&rg->chip, &rg->irq_chip, |
274 | 0, handle_simple_irq, IRQ_TYPE_NONE); | 275 | 0, handle_simple_irq, IRQ_TYPE_NONE); |
275 | if (ret) { | 276 | if (ret) { |
276 | dev_err(dev, "failed to add gpiochip_irqchip\n"); | 277 | dev_err(dev, "failed to add gpiochip_irqchip\n"); |
277 | return ret; | 278 | return ret; |
278 | } | 279 | } |
279 | 280 | ||
280 | gpiochip_set_chained_irqchip(&rg->chip, &mediatek_gpio_irq_chip, | 281 | gpiochip_set_chained_irqchip(&rg->chip, &rg->irq_chip, |
281 | mtk->gpio_irq, NULL); | 282 | mtk->gpio_irq, NULL); |
282 | } | 283 | } |
283 | 284 | ||
@@ -310,7 +311,6 @@ mediatek_gpio_probe(struct platform_device *pdev) | |||
310 | mtk->gpio_irq = irq_of_parse_and_map(np, 0); | 311 | mtk->gpio_irq = irq_of_parse_and_map(np, 0); |
311 | mtk->dev = dev; | 312 | mtk->dev = dev; |
312 | platform_set_drvdata(pdev, mtk); | 313 | platform_set_drvdata(pdev, mtk); |
313 | mediatek_gpio_irq_chip.name = dev_name(dev); | ||
314 | 314 | ||
315 | for (i = 0; i < MTK_BANK_CNT; i++) { | 315 | for (i = 0; i < MTK_BANK_CNT; i++) { |
316 | ret = mediatek_gpio_bank_probe(dev, np, i); | 316 | ret = mediatek_gpio_bank_probe(dev, np, i); |
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c index e9600b556f39..bcc6be4a5cb2 100644 --- a/drivers/gpio/gpio-pxa.c +++ b/drivers/gpio/gpio-pxa.c | |||
@@ -245,6 +245,7 @@ static bool pxa_gpio_has_pinctrl(void) | |||
245 | { | 245 | { |
246 | switch (gpio_type) { | 246 | switch (gpio_type) { |
247 | case PXA3XX_GPIO: | 247 | case PXA3XX_GPIO: |
248 | case MMP2_GPIO: | ||
248 | return false; | 249 | return false; |
249 | 250 | ||
250 | default: | 251 | default: |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index bc62bf41b7e9..5dc349173e4f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
@@ -212,6 +212,7 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
212 | } | 212 | } |
213 | 213 | ||
214 | if (amdgpu_device_is_px(dev)) { | 214 | if (amdgpu_device_is_px(dev)) { |
215 | dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); | ||
215 | pm_runtime_use_autosuspend(dev->dev); | 216 | pm_runtime_use_autosuspend(dev->dev); |
216 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); | 217 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); |
217 | pm_runtime_set_active(dev->dev); | 218 | pm_runtime_set_active(dev->dev); |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 6896dec97fc7..0ed41a9d2d77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, | |||
1686 | effective_mode &= ~S_IWUSR; | 1686 | effective_mode &= ~S_IWUSR; |
1687 | 1687 | ||
1688 | if ((adev->flags & AMD_IS_APU) && | 1688 | if ((adev->flags & AMD_IS_APU) && |
1689 | (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || | 1689 | (attr == &sensor_dev_attr_power1_average.dev_attr.attr || |
1690 | attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || | ||
1690 | attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| | 1691 | attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| |
1691 | attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) | 1692 | attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) |
1692 | return 0; | 1693 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 71913a18d142..a38e0fb4a6fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include "amdgpu_gem.h" | 38 | #include "amdgpu_gem.h" |
39 | #include <drm/amdgpu_drm.h> | 39 | #include <drm/amdgpu_drm.h> |
40 | #include <linux/dma-buf.h> | 40 | #include <linux/dma-buf.h> |
41 | #include <linux/dma-fence-array.h> | ||
41 | 42 | ||
42 | /** | 43 | /** |
43 | * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table | 44 | * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table |
@@ -187,6 +188,48 @@ error: | |||
187 | return ERR_PTR(ret); | 188 | return ERR_PTR(ret); |
188 | } | 189 | } |
189 | 190 | ||
191 | static int | ||
192 | __reservation_object_make_exclusive(struct reservation_object *obj) | ||
193 | { | ||
194 | struct dma_fence **fences; | ||
195 | unsigned int count; | ||
196 | int r; | ||
197 | |||
198 | if (!reservation_object_get_list(obj)) /* no shared fences to convert */ | ||
199 | return 0; | ||
200 | |||
201 | r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); | ||
202 | if (r) | ||
203 | return r; | ||
204 | |||
205 | if (count == 0) { | ||
206 | /* Now that was unexpected. */ | ||
207 | } else if (count == 1) { | ||
208 | reservation_object_add_excl_fence(obj, fences[0]); | ||
209 | dma_fence_put(fences[0]); | ||
210 | kfree(fences); | ||
211 | } else { | ||
212 | struct dma_fence_array *array; | ||
213 | |||
214 | array = dma_fence_array_create(count, fences, | ||
215 | dma_fence_context_alloc(1), 0, | ||
216 | false); | ||
217 | if (!array) | ||
218 | goto err_fences_put; | ||
219 | |||
220 | reservation_object_add_excl_fence(obj, &array->base); | ||
221 | dma_fence_put(&array->base); | ||
222 | } | ||
223 | |||
224 | return 0; | ||
225 | |||
226 | err_fences_put: | ||
227 | while (count--) | ||
228 | dma_fence_put(fences[count]); | ||
229 | kfree(fences); | ||
230 | return -ENOMEM; | ||
231 | } | ||
232 | |||
190 | /** | 233 | /** |
191 | * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation | 234 | * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation |
192 | * @dma_buf: Shared DMA buffer | 235 | * @dma_buf: Shared DMA buffer |
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, | |||
218 | 261 | ||
219 | if (attach->dev->driver != adev->dev->driver) { | 262 | if (attach->dev->driver != adev->dev->driver) { |
220 | /* | 263 | /* |
221 | * Wait for all shared fences to complete before we switch to future | 264 | * We only create shared fences for internal use, but importers |
222 | * use of exclusive fence on this prime shared bo. | 265 | * of the dmabuf rely on exclusive fences for implicitly |
266 | * tracking write hazards. As any of the current fences may | ||
267 | * correspond to a write, we need to convert all existing | ||
268 | * fences on the reservation object into a single exclusive | ||
269 | * fence. | ||
223 | */ | 270 | */ |
224 | r = reservation_object_wait_timeout_rcu(bo->tbo.resv, | 271 | r = __reservation_object_make_exclusive(bo->tbo.resv); |
225 | true, false, | 272 | if (r) |
226 | MAX_SCHEDULE_TIMEOUT); | ||
227 | if (unlikely(r < 0)) { | ||
228 | DRM_DEBUG_PRIME("Fence wait failed: %li\n", r); | ||
229 | goto error_unreserve; | 273 | goto error_unreserve; |
230 | } | ||
231 | } | 274 | } |
232 | 275 | ||
233 | /* pin buffer into GTT */ | 276 | /* pin buffer into GTT */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8fab0d637ee5..3a9b48b227ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | |||
@@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle) | |||
90 | adev->psp.sos_fw = NULL; | 90 | adev->psp.sos_fw = NULL; |
91 | release_firmware(adev->psp.asd_fw); | 91 | release_firmware(adev->psp.asd_fw); |
92 | adev->psp.asd_fw = NULL; | 92 | adev->psp.asd_fw = NULL; |
93 | release_firmware(adev->psp.ta_fw); | 93 | if (adev->psp.ta_fw) { |
94 | adev->psp.ta_fw = NULL; | 94 | release_firmware(adev->psp.ta_fw); |
95 | adev->psp.ta_fw = NULL; | ||
96 | } | ||
95 | return 0; | 97 | return 0; |
96 | } | 98 | } |
97 | 99 | ||
@@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp) | |||
435 | struct ta_xgmi_shared_memory *xgmi_cmd; | 437 | struct ta_xgmi_shared_memory *xgmi_cmd; |
436 | int ret; | 438 | int ret; |
437 | 439 | ||
440 | if (!psp->adev->psp.ta_fw) | ||
441 | return -ENOENT; | ||
442 | |||
438 | if (!psp->xgmi_context.initialized) { | 443 | if (!psp->xgmi_context.initialized) { |
439 | ret = psp_xgmi_init_shared_buf(psp); | 444 | ret = psp_xgmi_init_shared_buf(psp); |
440 | if (ret) | 445 | if (ret) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d2ea5ce2cefb..698bcb8ce61d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -638,12 +638,14 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, | |||
638 | struct ttm_bo_global *glob = adev->mman.bdev.glob; | 638 | struct ttm_bo_global *glob = adev->mman.bdev.glob; |
639 | struct amdgpu_vm_bo_base *bo_base; | 639 | struct amdgpu_vm_bo_base *bo_base; |
640 | 640 | ||
641 | #if 0 | ||
641 | if (vm->bulk_moveable) { | 642 | if (vm->bulk_moveable) { |
642 | spin_lock(&glob->lru_lock); | 643 | spin_lock(&glob->lru_lock); |
643 | ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); | 644 | ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); |
644 | spin_unlock(&glob->lru_lock); | 645 | spin_unlock(&glob->lru_lock); |
645 | return; | 646 | return; |
646 | } | 647 | } |
648 | #endif | ||
647 | 649 | ||
648 | memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); | 650 | memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); |
649 | 651 | ||
@@ -3363,14 +3365,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, | |||
3363 | struct amdgpu_task_info *task_info) | 3365 | struct amdgpu_task_info *task_info) |
3364 | { | 3366 | { |
3365 | struct amdgpu_vm *vm; | 3367 | struct amdgpu_vm *vm; |
3368 | unsigned long flags; | ||
3366 | 3369 | ||
3367 | spin_lock(&adev->vm_manager.pasid_lock); | 3370 | spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); |
3368 | 3371 | ||
3369 | vm = idr_find(&adev->vm_manager.pasid_idr, pasid); | 3372 | vm = idr_find(&adev->vm_manager.pasid_idr, pasid); |
3370 | if (vm) | 3373 | if (vm) |
3371 | *task_info = vm->task_info; | 3374 | *task_info = vm->task_info; |
3372 | 3375 | ||
3373 | spin_unlock(&adev->vm_manager.pasid_lock); | 3376 | spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); |
3374 | } | 3377 | } |
3375 | 3378 | ||
3376 | /** | 3379 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 4cd31a276dcd..186db182f924 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | |||
@@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev, | |||
93 | static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, | 93 | static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, |
94 | bool enable) | 94 | bool enable) |
95 | { | 95 | { |
96 | u32 tmp = 0; | ||
96 | 97 | ||
98 | if (enable) { | ||
99 | tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) | | ||
100 | REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) | | ||
101 | REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0); | ||
102 | |||
103 | WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW, | ||
104 | lower_32_bits(adev->doorbell.base)); | ||
105 | WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH, | ||
106 | upper_32_bits(adev->doorbell.base)); | ||
107 | } | ||
108 | |||
109 | WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp); | ||
97 | } | 110 | } |
98 | 111 | ||
99 | static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, | 112 | static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 0c6e7f9b143f..189fcb004579 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | |||
@@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) | |||
152 | 152 | ||
153 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); | 153 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); |
154 | err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); | 154 | err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); |
155 | if (err) | 155 | if (err) { |
156 | goto out2; | 156 | release_firmware(adev->psp.ta_fw); |
157 | 157 | adev->psp.ta_fw = NULL; | |
158 | err = amdgpu_ucode_validate(adev->psp.ta_fw); | 158 | dev_info(adev->dev, |
159 | if (err) | 159 | "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); |
160 | goto out2; | 160 | } else { |
161 | 161 | err = amdgpu_ucode_validate(adev->psp.ta_fw); | |
162 | ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; | 162 | if (err) |
163 | adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); | 163 | goto out2; |
164 | adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); | 164 | |
165 | adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + | 165 | ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; |
166 | le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); | 166 | adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); |
167 | adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); | ||
168 | adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + | ||
169 | le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); | ||
170 | } | ||
167 | 171 | ||
168 | return 0; | 172 | return 0; |
169 | 173 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 6811a5d05b27..aa2f71cc1eba 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | |||
@@ -128,7 +128,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2_init[] = { | |||
128 | 128 | ||
129 | static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = | 129 | static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = |
130 | { | 130 | { |
131 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831d07), | 131 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CHICKEN_BITS, 0xfe931f07, 0x02831f07), |
132 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), | 132 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_CLK_CTRL, 0xffffffff, 0x3f000100), |
133 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), | 133 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), |
134 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), | 134 | SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), |
@@ -158,7 +158,7 @@ static const struct soc15_reg_golden golden_settings_sdma0_4_2[] = | |||
158 | }; | 158 | }; |
159 | 159 | ||
160 | static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { | 160 | static const struct soc15_reg_golden golden_settings_sdma1_4_2[] = { |
161 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831d07), | 161 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CHICKEN_BITS, 0xfe931f07, 0x02831f07), |
162 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), | 162 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_CLK_CTRL, 0xffffffff, 0x3f000100), |
163 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), | 163 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG, 0x0000773f, 0x00004002), |
164 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), | 164 | SOC15_REG_GOLDEN_VALUE(SDMA1, 0, mmSDMA1_GB_ADDR_CONFIG_READ, 0x0000773f, 0x00004002), |
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 8849b74078d6..9b639974c70c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c | |||
@@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle) | |||
729 | case CHIP_RAVEN: | 729 | case CHIP_RAVEN: |
730 | adev->asic_funcs = &soc15_asic_funcs; | 730 | adev->asic_funcs = &soc15_asic_funcs; |
731 | if (adev->rev_id >= 0x8) | 731 | if (adev->rev_id >= 0x8) |
732 | adev->external_rev_id = adev->rev_id + 0x81; | 732 | adev->external_rev_id = adev->rev_id + 0x79; |
733 | else if (adev->pdev->device == 0x15d8) | 733 | else if (adev->pdev->device == 0x15d8) |
734 | adev->external_rev_id = adev->rev_id + 0x41; | 734 | adev->external_rev_id = adev->rev_id + 0x41; |
735 | else if (adev->rev_id == 1) | ||
736 | adev->external_rev_id = adev->rev_id + 0x20; | ||
735 | else | 737 | else |
736 | adev->external_rev_id = 0x1; | 738 | adev->external_rev_id = adev->rev_id + 0x01; |
737 | 739 | ||
738 | if (adev->rev_id >= 0x8) { | 740 | if (adev->rev_id >= 0x8) { |
739 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | | 741 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 5d85ff341385..2e7c44955f43 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c | |||
@@ -863,7 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, | |||
863 | return 0; | 863 | return 0; |
864 | } | 864 | } |
865 | 865 | ||
866 | #if CONFIG_X86_64 | 866 | #ifdef CONFIG_X86_64 |
867 | static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, | 867 | static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, |
868 | uint32_t *num_entries, | 868 | uint32_t *num_entries, |
869 | struct crat_subtype_iolink *sub_type_hdr) | 869 | struct crat_subtype_iolink *sub_type_hdr) |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f4fa40c387d3..5296b8f3e0ab 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -786,12 +786,13 @@ static int dm_suspend(void *handle) | |||
786 | struct amdgpu_display_manager *dm = &adev->dm; | 786 | struct amdgpu_display_manager *dm = &adev->dm; |
787 | int ret = 0; | 787 | int ret = 0; |
788 | 788 | ||
789 | WARN_ON(adev->dm.cached_state); | ||
790 | adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); | ||
791 | |||
789 | s3_handle_mst(adev->ddev, true); | 792 | s3_handle_mst(adev->ddev, true); |
790 | 793 | ||
791 | amdgpu_dm_irq_suspend(adev); | 794 | amdgpu_dm_irq_suspend(adev); |
792 | 795 | ||
793 | WARN_ON(adev->dm.cached_state); | ||
794 | adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev); | ||
795 | 796 | ||
796 | dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); | 797 | dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); |
797 | 798 | ||
@@ -4082,7 +4083,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, | |||
4082 | } | 4083 | } |
4083 | 4084 | ||
4084 | if (connector_type == DRM_MODE_CONNECTOR_HDMIA || | 4085 | if (connector_type == DRM_MODE_CONNECTOR_HDMIA || |
4085 | connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | 4086 | connector_type == DRM_MODE_CONNECTOR_DisplayPort || |
4087 | connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
4086 | drm_connector_attach_vrr_capable_property( | 4088 | drm_connector_attach_vrr_capable_property( |
4087 | &aconnector->base); | 4089 | &aconnector->base); |
4088 | } | 4090 | } |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 9a7ac58eb18e..ddd75a4d8ba5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | |||
@@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us | |||
671 | return bytes_from_user; | 671 | return bytes_from_user; |
672 | } | 672 | } |
673 | 673 | ||
674 | /* | ||
675 | * Returns the min and max vrr vfreq through the connector's debugfs file. | ||
676 | * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range | ||
677 | */ | ||
678 | static int vrr_range_show(struct seq_file *m, void *data) | ||
679 | { | ||
680 | struct drm_connector *connector = m->private; | ||
681 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | ||
682 | |||
683 | if (connector->status != connector_status_connected) | ||
684 | return -ENODEV; | ||
685 | |||
686 | seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq); | ||
687 | seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq); | ||
688 | |||
689 | return 0; | ||
690 | } | ||
691 | DEFINE_SHOW_ATTRIBUTE(vrr_range); | ||
692 | |||
674 | static const struct file_operations dp_link_settings_debugfs_fops = { | 693 | static const struct file_operations dp_link_settings_debugfs_fops = { |
675 | .owner = THIS_MODULE, | 694 | .owner = THIS_MODULE, |
676 | .read = dp_link_settings_read, | 695 | .read = dp_link_settings_read, |
@@ -697,7 +716,8 @@ static const struct { | |||
697 | } dp_debugfs_entries[] = { | 716 | } dp_debugfs_entries[] = { |
698 | {"link_settings", &dp_link_settings_debugfs_fops}, | 717 | {"link_settings", &dp_link_settings_debugfs_fops}, |
699 | {"phy_settings", &dp_phy_settings_debugfs_fop}, | 718 | {"phy_settings", &dp_phy_settings_debugfs_fop}, |
700 | {"test_pattern", &dp_phy_test_pattern_fops} | 719 | {"test_pattern", &dp_phy_test_pattern_fops}, |
720 | {"vrr_range", &vrr_range_fops} | ||
701 | }; | 721 | }; |
702 | 722 | ||
703 | int connector_debugfs_init(struct amdgpu_dm_connector *connector) | 723 | int connector_debugfs_init(struct amdgpu_dm_connector *connector) |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index afd287f08bc9..7a72ee46f14b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | |||
@@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements( | |||
591 | dc, | 591 | dc, |
592 | context->bw.dce.sclk_khz); | 592 | context->bw.dce.sclk_khz); |
593 | 593 | ||
594 | pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; | 594 | /* |
595 | * As workaround for >4x4K lightup set dcfclock to min_engine_clock value. | ||
596 | * This is not required for less than 5 displays, | ||
597 | * thus don't request decfclk in dc to avoid impact | ||
598 | * on power saving. | ||
599 | * | ||
600 | */ | ||
601 | pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)? | ||
602 | pp_display_cfg->min_engine_clock_khz : 0; | ||
595 | 603 | ||
596 | pp_display_cfg->min_engine_clock_deep_sleep_khz | 604 | pp_display_cfg->min_engine_clock_deep_sleep_khz |
597 | = context->bw.dce.sclk_deep_sleep_khz; | 605 | = context->bw.dce.sclk_deep_sleep_khz; |
@@ -654,6 +662,11 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, | |||
654 | { | 662 | { |
655 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); | 663 | struct dce_clk_mgr *clk_mgr_dce = TO_DCE_CLK_MGR(clk_mgr); |
656 | struct dm_pp_power_level_change_request level_change_req; | 664 | struct dm_pp_power_level_change_request level_change_req; |
665 | int patched_disp_clk = context->bw.dce.dispclk_khz; | ||
666 | |||
667 | /*TODO: W/A for dal3 linux, investigate why this works */ | ||
668 | if (!clk_mgr_dce->dfs_bypass_active) | ||
669 | patched_disp_clk = patched_disp_clk * 115 / 100; | ||
657 | 670 | ||
658 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); | 671 | level_change_req.power_level = dce_get_required_clocks_state(clk_mgr, context); |
659 | /* get max clock state from PPLIB */ | 672 | /* get max clock state from PPLIB */ |
@@ -663,9 +676,9 @@ static void dce11_update_clocks(struct clk_mgr *clk_mgr, | |||
663 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; | 676 | clk_mgr_dce->cur_min_clks_state = level_change_req.power_level; |
664 | } | 677 | } |
665 | 678 | ||
666 | if (should_set_clock(safe_to_lower, context->bw.dce.dispclk_khz, clk_mgr->clks.dispclk_khz)) { | 679 | if (should_set_clock(safe_to_lower, patched_disp_clk, clk_mgr->clks.dispclk_khz)) { |
667 | context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, context->bw.dce.dispclk_khz); | 680 | context->bw.dce.dispclk_khz = dce_set_clock(clk_mgr, patched_disp_clk); |
668 | clk_mgr->clks.dispclk_khz = context->bw.dce.dispclk_khz; | 681 | clk_mgr->clks.dispclk_khz = patched_disp_clk; |
669 | } | 682 | } |
670 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); | 683 | dce11_pplib_apply_display_requirements(clk_mgr->ctx->dc, context); |
671 | } | 684 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h index acd418515346..a6b80fdaa666 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.h | |||
@@ -37,6 +37,10 @@ void dce100_prepare_bandwidth( | |||
37 | struct dc *dc, | 37 | struct dc *dc, |
38 | struct dc_state *context); | 38 | struct dc_state *context); |
39 | 39 | ||
40 | void dce100_optimize_bandwidth( | ||
41 | struct dc *dc, | ||
42 | struct dc_state *context); | ||
43 | |||
40 | bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, | 44 | bool dce100_enable_display_power_gating(struct dc *dc, uint8_t controller_id, |
41 | struct dc_bios *dcb, | 45 | struct dc_bios *dcb, |
42 | enum pipe_gating_control power_gating); | 46 | enum pipe_gating_control power_gating); |
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c index a60a90e68d91..c4543178ba20 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_hw_sequencer.c | |||
@@ -77,6 +77,6 @@ void dce80_hw_sequencer_construct(struct dc *dc) | |||
77 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; | 77 | dc->hwss.enable_display_power_gating = dce100_enable_display_power_gating; |
78 | dc->hwss.pipe_control_lock = dce_pipe_control_lock; | 78 | dc->hwss.pipe_control_lock = dce_pipe_control_lock; |
79 | dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; | 79 | dc->hwss.prepare_bandwidth = dce100_prepare_bandwidth; |
80 | dc->hwss.optimize_bandwidth = dce100_prepare_bandwidth; | 80 | dc->hwss.optimize_bandwidth = dce100_optimize_bandwidth; |
81 | } | 81 | } |
82 | 82 | ||
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index cdd1d6b7b9f2..4e9ea50141bd 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | |||
@@ -790,9 +790,22 @@ bool dce80_validate_bandwidth( | |||
790 | struct dc *dc, | 790 | struct dc *dc, |
791 | struct dc_state *context) | 791 | struct dc_state *context) |
792 | { | 792 | { |
793 | /* TODO implement when needed but for now hardcode max value*/ | 793 | int i; |
794 | context->bw.dce.dispclk_khz = 681000; | 794 | bool at_least_one_pipe = false; |
795 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; | 795 | |
796 | for (i = 0; i < dc->res_pool->pipe_count; i++) { | ||
797 | if (context->res_ctx.pipe_ctx[i].stream) | ||
798 | at_least_one_pipe = true; | ||
799 | } | ||
800 | |||
801 | if (at_least_one_pipe) { | ||
802 | /* TODO implement when needed but for now hardcode max value*/ | ||
803 | context->bw.dce.dispclk_khz = 681000; | ||
804 | context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; | ||
805 | } else { | ||
806 | context->bw.dce.dispclk_khz = 0; | ||
807 | context->bw.dce.yclk_khz = 0; | ||
808 | } | ||
796 | 809 | ||
797 | return true; | 810 | return true; |
798 | } | 811 | } |
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 58a12ddf12f3..41883c981789 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | |||
@@ -2658,8 +2658,8 @@ static void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) | |||
2658 | .mirror = pipe_ctx->plane_state->horizontal_mirror | 2658 | .mirror = pipe_ctx->plane_state->horizontal_mirror |
2659 | }; | 2659 | }; |
2660 | 2660 | ||
2661 | pos_cpy.x -= pipe_ctx->plane_state->dst_rect.x; | 2661 | pos_cpy.x_hotspot += pipe_ctx->plane_state->dst_rect.x; |
2662 | pos_cpy.y -= pipe_ctx->plane_state->dst_rect.y; | 2662 | pos_cpy.y_hotspot += pipe_ctx->plane_state->dst_rect.y; |
2663 | 2663 | ||
2664 | if (pipe_ctx->plane_state->address.type | 2664 | if (pipe_ctx->plane_state->address.type |
2665 | == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) | 2665 | == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index f95c5f50eb0f..5273de3c5b98 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | |||
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, | |||
1033 | break; | 1033 | break; |
1034 | case amd_pp_dpp_clock: | 1034 | case amd_pp_dpp_clock: |
1035 | pclk_vol_table = pinfo->vdd_dep_on_dppclk; | 1035 | pclk_vol_table = pinfo->vdd_dep_on_dppclk; |
1036 | break; | ||
1036 | default: | 1037 | default: |
1037 | return -EINVAL; | 1038 | return -EINVAL; |
1038 | } | 1039 | } |
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 99cba8ea5d82..5df1256618cc 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c | |||
@@ -528,7 +528,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, | |||
528 | 528 | ||
529 | object_count = cl->object_count; | 529 | object_count = cl->object_count; |
530 | 530 | ||
531 | object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32)); | 531 | object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), |
532 | array_size(object_count, sizeof(__u32))); | ||
532 | if (IS_ERR(object_ids)) | 533 | if (IS_ERR(object_ids)) |
533 | return PTR_ERR(object_ids); | 534 | return PTR_ERR(object_ids); |
534 | 535 | ||
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 24a750436559..f91e02c87fd8 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode) | |||
758 | if (mode->hsync) | 758 | if (mode->hsync) |
759 | return mode->hsync; | 759 | return mode->hsync; |
760 | 760 | ||
761 | if (mode->htotal < 0) | 761 | if (mode->htotal <= 0) |
762 | return 0; | 762 | return 0; |
763 | 763 | ||
764 | calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ | 764 | calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 216f52b744a6..c882ea94172c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1824 | return 0; | 1824 | return 0; |
1825 | } | 1825 | } |
1826 | 1826 | ||
1827 | static inline bool | ||
1828 | __vma_matches(struct vm_area_struct *vma, struct file *filp, | ||
1829 | unsigned long addr, unsigned long size) | ||
1830 | { | ||
1831 | if (vma->vm_file != filp) | ||
1832 | return false; | ||
1833 | |||
1834 | return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; | ||
1835 | } | ||
1836 | |||
1827 | /** | 1837 | /** |
1828 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address | 1838 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address |
1829 | * it is mapped to. | 1839 | * it is mapped to. |
@@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1882 | return -EINTR; | 1892 | return -EINTR; |
1883 | } | 1893 | } |
1884 | vma = find_vma(mm, addr); | 1894 | vma = find_vma(mm, addr); |
1885 | if (vma) | 1895 | if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) |
1886 | vma->vm_page_prot = | 1896 | vma->vm_page_prot = |
1887 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); | 1897 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
1888 | else | 1898 | else |
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d6c8f8fdfda5..017fc602a10e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c | |||
@@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event) | |||
594 | * Update the bitmask of enabled events and increment | 594 | * Update the bitmask of enabled events and increment |
595 | * the event reference counter. | 595 | * the event reference counter. |
596 | */ | 596 | */ |
597 | GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); | 597 | BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS); |
598 | GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); | ||
598 | GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); | 599 | GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); |
599 | i915->pmu.enable |= BIT_ULL(bit); | 600 | i915->pmu.enable |= BIT_ULL(bit); |
600 | i915->pmu.enable_count[bit]++; | 601 | i915->pmu.enable_count[bit]++; |
@@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event) | |||
615 | engine = intel_engine_lookup_user(i915, | 616 | engine = intel_engine_lookup_user(i915, |
616 | engine_event_class(event), | 617 | engine_event_class(event), |
617 | engine_event_instance(event)); | 618 | engine_event_instance(event)); |
618 | GEM_BUG_ON(!engine); | ||
619 | engine->pmu.enable |= BIT(sample); | ||
620 | 619 | ||
621 | GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); | 620 | BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != |
621 | I915_ENGINE_SAMPLE_COUNT); | ||
622 | BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != | ||
623 | I915_ENGINE_SAMPLE_COUNT); | ||
624 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); | ||
625 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); | ||
622 | GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); | 626 | GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); |
627 | |||
628 | engine->pmu.enable |= BIT(sample); | ||
623 | engine->pmu.enable_count[sample]++; | 629 | engine->pmu.enable_count[sample]++; |
624 | } | 630 | } |
625 | 631 | ||
@@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event) | |||
649 | engine = intel_engine_lookup_user(i915, | 655 | engine = intel_engine_lookup_user(i915, |
650 | engine_event_class(event), | 656 | engine_event_class(event), |
651 | engine_event_instance(event)); | 657 | engine_event_instance(event)); |
652 | GEM_BUG_ON(!engine); | 658 | |
653 | GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); | 659 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); |
660 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); | ||
654 | GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); | 661 | GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); |
662 | |||
655 | /* | 663 | /* |
656 | * Decrement the reference count and clear the enabled | 664 | * Decrement the reference count and clear the enabled |
657 | * bitmask when the last listener on an event goes away. | 665 | * bitmask when the last listener on an event goes away. |
@@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event) | |||
660 | engine->pmu.enable &= ~BIT(sample); | 668 | engine->pmu.enable &= ~BIT(sample); |
661 | } | 669 | } |
662 | 670 | ||
663 | GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); | 671 | GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); |
664 | GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); | 672 | GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); |
665 | /* | 673 | /* |
666 | * Decrement the reference count and clear the enabled | 674 | * Decrement the reference count and clear the enabled |
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 7f164ca3db12..b3728c5f13e7 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h | |||
@@ -31,6 +31,8 @@ enum { | |||
31 | ((1 << I915_PMU_SAMPLE_BITS) + \ | 31 | ((1 << I915_PMU_SAMPLE_BITS) + \ |
32 | (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) | 32 | (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) |
33 | 33 | ||
34 | #define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1) | ||
35 | |||
34 | struct i915_pmu_sample { | 36 | struct i915_pmu_sample { |
35 | u64 cur; | 37 | u64 cur; |
36 | }; | 38 | }; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0a7d60509ca7..067054cf4a86 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1790,7 +1790,7 @@ enum i915_power_well_id { | |||
1790 | #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 | 1790 | #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 |
1791 | #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 | 1791 | #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 |
1792 | #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 | 1792 | #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 |
1793 | #define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ | 1793 | #define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \ |
1794 | _CNL_PORT_TX_AE_GRP_OFFSET, \ | 1794 | _CNL_PORT_TX_AE_GRP_OFFSET, \ |
1795 | _CNL_PORT_TX_B_GRP_OFFSET, \ | 1795 | _CNL_PORT_TX_B_GRP_OFFSET, \ |
1796 | _CNL_PORT_TX_B_GRP_OFFSET, \ | 1796 | _CNL_PORT_TX_B_GRP_OFFSET, \ |
@@ -1798,7 +1798,7 @@ enum i915_power_well_id { | |||
1798 | _CNL_PORT_TX_AE_GRP_OFFSET, \ | 1798 | _CNL_PORT_TX_AE_GRP_OFFSET, \ |
1799 | _CNL_PORT_TX_F_GRP_OFFSET) + \ | 1799 | _CNL_PORT_TX_F_GRP_OFFSET) + \ |
1800 | 4 * (dw)) | 1800 | 4 * (dw)) |
1801 | #define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ | 1801 | #define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \ |
1802 | _CNL_PORT_TX_AE_LN0_OFFSET, \ | 1802 | _CNL_PORT_TX_AE_LN0_OFFSET, \ |
1803 | _CNL_PORT_TX_B_LN0_OFFSET, \ | 1803 | _CNL_PORT_TX_B_LN0_OFFSET, \ |
1804 | _CNL_PORT_TX_B_LN0_OFFSET, \ | 1804 | _CNL_PORT_TX_B_LN0_OFFSET, \ |
@@ -1834,9 +1834,9 @@ enum i915_power_well_id { | |||
1834 | 1834 | ||
1835 | #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 | 1835 | #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 |
1836 | #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 | 1836 | #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 |
1837 | #define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) | 1837 | #define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port))) |
1838 | #define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) | 1838 | #define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port))) |
1839 | #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ | 1839 | #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ |
1840 | ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ | 1840 | ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ |
1841 | _CNL_PORT_TX_DW4_LN0_AE))) | 1841 | _CNL_PORT_TX_DW4_LN0_AE))) |
1842 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) | 1842 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) |
@@ -1864,8 +1864,12 @@ enum i915_power_well_id { | |||
1864 | #define RTERM_SELECT(x) ((x) << 3) | 1864 | #define RTERM_SELECT(x) ((x) << 3) |
1865 | #define RTERM_SELECT_MASK (0x7 << 3) | 1865 | #define RTERM_SELECT_MASK (0x7 << 3) |
1866 | 1866 | ||
1867 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) | 1867 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port))) |
1868 | #define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) | 1868 | #define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port))) |
1869 | #define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) | ||
1870 | #define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) | ||
1871 | #define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) | ||
1872 | #define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) | ||
1869 | #define N_SCALAR(x) ((x) << 24) | 1873 | #define N_SCALAR(x) ((x) << 24) |
1870 | #define N_SCALAR_MASK (0x7F << 24) | 1874 | #define N_SCALAR_MASK (0x7F << 24) |
1871 | 1875 | ||
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f3e1d6a0b7dd..7edce1b7b348 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { | |||
494 | { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ | 494 | { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ |
495 | }; | 495 | }; |
496 | 496 | ||
497 | struct icl_combo_phy_ddi_buf_trans { | 497 | /* icl_combo_phy_ddi_translations */ |
498 | u32 dw2_swing_select; | 498 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { |
499 | u32 dw2_swing_scalar; | 499 | /* NT mV Trans mV db */ |
500 | u32 dw4_scaling; | 500 | { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
501 | }; | 501 | { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ |
502 | 502 | { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ | |
503 | /* Voltage Swing Programming for VccIO 0.85V for DP */ | 503 | { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ |
504 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { | 504 | { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ |
505 | /* Voltage mV db */ | 505 | { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ |
506 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | 506 | { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ |
507 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | 507 | { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ |
508 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | 508 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ |
509 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | 509 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ |
510 | { 0xB, 0x70, 0x0018 }, /* 600 0.0 */ | ||
511 | { 0xB, 0x70, 0x3015 }, /* 600 3.5 */ | ||
512 | { 0xB, 0x70, 0x6012 }, /* 600 6.0 */ | ||
513 | { 0x5, 0x00, 0x0018 }, /* 800 0.0 */ | ||
514 | { 0x5, 0x00, 0x3015 }, /* 800 3.5 */ | ||
515 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
516 | }; | ||
517 | |||
518 | /* FIXME - After table is updated in Bspec */ | ||
519 | /* Voltage Swing Programming for VccIO 0.85V for eDP */ | ||
520 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = { | ||
521 | /* Voltage mV db */ | ||
522 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | ||
523 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | ||
524 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | ||
525 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | ||
526 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | ||
527 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | ||
528 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | ||
529 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | ||
530 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
531 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
532 | }; | ||
533 | |||
534 | /* Voltage Swing Programming for VccIO 0.95V for DP */ | ||
535 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = { | ||
536 | /* Voltage mV db */ | ||
537 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | ||
538 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | ||
539 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | ||
540 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | ||
541 | { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ | ||
542 | { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ | ||
543 | { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ | ||
544 | { 0x5, 0x76, 0x0018 }, /* 800 0.0 */ | ||
545 | { 0x5, 0x76, 0x3015 }, /* 800 3.5 */ | ||
546 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
547 | }; | 510 | }; |
548 | 511 | ||
549 | /* FIXME - After table is updated in Bspec */ | 512 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { |
550 | /* Voltage Swing Programming for VccIO 0.95V for eDP */ | 513 | /* NT mV Trans mV db */ |
551 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { | 514 | { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ |
552 | /* Voltage mV db */ | 515 | { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ |
553 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | 516 | { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ |
554 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | 517 | { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ |
555 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | 518 | { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ |
556 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | 519 | { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ |
557 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | 520 | { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ |
558 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | 521 | { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ |
559 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | 522 | { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ |
560 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | 523 | { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
561 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
562 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
563 | }; | 524 | }; |
564 | 525 | ||
565 | /* Voltage Swing Programming for VccIO 1.05V for DP */ | 526 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { |
566 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { | 527 | /* NT mV Trans mV db */ |
567 | /* Voltage mV db */ | 528 | { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
568 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | 529 | { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ |
569 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | 530 | { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ |
570 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | 531 | { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ |
571 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | 532 | { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ |
572 | { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ | 533 | { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ |
573 | { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ | 534 | { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ |
574 | { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ | 535 | { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ |
575 | { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ | 536 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ |
576 | { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ | 537 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ |
577 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
578 | }; | 538 | }; |
579 | 539 | ||
580 | /* FIXME - After table is updated in Bspec */ | 540 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { |
581 | /* Voltage Swing Programming for VccIO 1.05V for eDP */ | 541 | /* NT mV Trans mV db */ |
582 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { | 542 | { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ |
583 | /* Voltage mV db */ | 543 | { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ |
584 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | 544 | { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ |
585 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | 545 | { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ |
586 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | 546 | { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ |
587 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | 547 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ |
588 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | 548 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ |
589 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | ||
590 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | ||
591 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | ||
592 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
593 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
594 | }; | 549 | }; |
595 | 550 | ||
596 | struct icl_mg_phy_ddi_buf_trans { | 551 | struct icl_mg_phy_ddi_buf_trans { |
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) | |||
871 | } | 826 | } |
872 | } | 827 | } |
873 | 828 | ||
874 | static const struct icl_combo_phy_ddi_buf_trans * | 829 | static const struct cnl_ddi_buf_trans * |
875 | icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, | 830 | icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, |
876 | int type, int *n_entries) | 831 | int type, int rate, int *n_entries) |
877 | { | 832 | { |
878 | u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK; | 833 | if (type == INTEL_OUTPUT_HDMI) { |
879 | 834 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); | |
880 | if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { | 835 | return icl_combo_phy_ddi_translations_hdmi; |
881 | switch (voltage) { | 836 | } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) { |
882 | case VOLTAGE_INFO_0_85V: | 837 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); |
883 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V); | 838 | return icl_combo_phy_ddi_translations_edp_hbr3; |
884 | return icl_combo_phy_ddi_translations_edp_0_85V; | 839 | } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { |
885 | case VOLTAGE_INFO_0_95V: | 840 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); |
886 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V); | 841 | return icl_combo_phy_ddi_translations_edp_hbr2; |
887 | return icl_combo_phy_ddi_translations_edp_0_95V; | ||
888 | case VOLTAGE_INFO_1_05V: | ||
889 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V); | ||
890 | return icl_combo_phy_ddi_translations_edp_1_05V; | ||
891 | default: | ||
892 | MISSING_CASE(voltage); | ||
893 | return NULL; | ||
894 | } | ||
895 | } else { | ||
896 | switch (voltage) { | ||
897 | case VOLTAGE_INFO_0_85V: | ||
898 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V); | ||
899 | return icl_combo_phy_ddi_translations_dp_hdmi_0_85V; | ||
900 | case VOLTAGE_INFO_0_95V: | ||
901 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V); | ||
902 | return icl_combo_phy_ddi_translations_dp_hdmi_0_95V; | ||
903 | case VOLTAGE_INFO_1_05V: | ||
904 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V); | ||
905 | return icl_combo_phy_ddi_translations_dp_hdmi_1_05V; | ||
906 | default: | ||
907 | MISSING_CASE(voltage); | ||
908 | return NULL; | ||
909 | } | ||
910 | } | 842 | } |
843 | |||
844 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); | ||
845 | return icl_combo_phy_ddi_translations_dp_hbr2; | ||
911 | } | 846 | } |
912 | 847 | ||
913 | static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) | 848 | static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) |
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por | |||
918 | 853 | ||
919 | if (IS_ICELAKE(dev_priv)) { | 854 | if (IS_ICELAKE(dev_priv)) { |
920 | if (intel_port_is_combophy(dev_priv, port)) | 855 | if (intel_port_is_combophy(dev_priv, port)) |
921 | icl_get_combo_buf_trans(dev_priv, port, | 856 | icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, |
922 | INTEL_OUTPUT_HDMI, &n_entries); | 857 | 0, &n_entries); |
923 | else | 858 | else |
924 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); | 859 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); |
925 | default_entry = n_entries - 1; | 860 | default_entry = n_entries - 1; |
@@ -1086,7 +1021,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, | |||
1086 | return DDI_CLK_SEL_TBT_810; | 1021 | return DDI_CLK_SEL_TBT_810; |
1087 | default: | 1022 | default: |
1088 | MISSING_CASE(clock); | 1023 | MISSING_CASE(clock); |
1089 | break; | 1024 | return DDI_CLK_SEL_NONE; |
1090 | } | 1025 | } |
1091 | case DPLL_ID_ICL_MGPLL1: | 1026 | case DPLL_ID_ICL_MGPLL1: |
1092 | case DPLL_ID_ICL_MGPLL2: | 1027 | case DPLL_ID_ICL_MGPLL2: |
@@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
2275 | u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) | 2210 | u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) |
2276 | { | 2211 | { |
2277 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 2212 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2213 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | ||
2278 | enum port port = encoder->port; | 2214 | enum port port = encoder->port; |
2279 | int n_entries; | 2215 | int n_entries; |
2280 | 2216 | ||
2281 | if (IS_ICELAKE(dev_priv)) { | 2217 | if (IS_ICELAKE(dev_priv)) { |
2282 | if (intel_port_is_combophy(dev_priv, port)) | 2218 | if (intel_port_is_combophy(dev_priv, port)) |
2283 | icl_get_combo_buf_trans(dev_priv, port, encoder->type, | 2219 | icl_get_combo_buf_trans(dev_priv, port, encoder->type, |
2284 | &n_entries); | 2220 | intel_dp->link_rate, &n_entries); |
2285 | else | 2221 | else |
2286 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); | 2222 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); |
2287 | } else if (IS_CANNONLAKE(dev_priv)) { | 2223 | } else if (IS_CANNONLAKE(dev_priv)) { |
@@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
2462 | } | 2398 | } |
2463 | 2399 | ||
2464 | static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | 2400 | static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, |
2465 | u32 level, enum port port, int type) | 2401 | u32 level, enum port port, int type, |
2402 | int rate) | ||
2466 | { | 2403 | { |
2467 | const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL; | 2404 | const struct cnl_ddi_buf_trans *ddi_translations = NULL; |
2468 | u32 n_entries, val; | 2405 | u32 n_entries, val; |
2469 | int ln; | 2406 | int ln; |
2470 | 2407 | ||
2471 | ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, | 2408 | ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, |
2472 | &n_entries); | 2409 | rate, &n_entries); |
2473 | if (!ddi_translations) | 2410 | if (!ddi_translations) |
2474 | return; | 2411 | return; |
2475 | 2412 | ||
@@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | |||
2478 | level = n_entries - 1; | 2415 | level = n_entries - 1; |
2479 | } | 2416 | } |
2480 | 2417 | ||
2481 | /* Set PORT_TX_DW5 Rterm Sel to 110b. */ | 2418 | /* Set PORT_TX_DW5 */ |
2482 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | 2419 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); |
2483 | val &= ~RTERM_SELECT_MASK; | 2420 | val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | |
2421 | TAP2_DISABLE | TAP3_DISABLE); | ||
2422 | val |= SCALING_MODE_SEL(0x2); | ||
2484 | val |= RTERM_SELECT(0x6); | 2423 | val |= RTERM_SELECT(0x6); |
2485 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2424 | val |= TAP3_DISABLE; |
2486 | |||
2487 | /* Program PORT_TX_DW5 */ | ||
2488 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | ||
2489 | /* Set DisableTap2 and DisableTap3 if MIPI DSI | ||
2490 | * Clear DisableTap2 and DisableTap3 for all other Ports | ||
2491 | */ | ||
2492 | if (type == INTEL_OUTPUT_DSI) { | ||
2493 | val |= TAP2_DISABLE; | ||
2494 | val |= TAP3_DISABLE; | ||
2495 | } else { | ||
2496 | val &= ~TAP2_DISABLE; | ||
2497 | val &= ~TAP3_DISABLE; | ||
2498 | } | ||
2499 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2425 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); |
2500 | 2426 | ||
2501 | /* Program PORT_TX_DW2 */ | 2427 | /* Program PORT_TX_DW2 */ |
2502 | val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); | 2428 | val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); |
2503 | val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | | 2429 | val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | |
2504 | RCOMP_SCALAR_MASK); | 2430 | RCOMP_SCALAR_MASK); |
2505 | val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select); | 2431 | val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); |
2506 | val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select); | 2432 | val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); |
2507 | /* Program Rcomp scalar for every table entry */ | 2433 | /* Program Rcomp scalar for every table entry */ |
2508 | val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar); | 2434 | val |= RCOMP_SCALAR(0x98); |
2509 | I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); | 2435 | I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); |
2510 | 2436 | ||
2511 | /* Program PORT_TX_DW4 */ | 2437 | /* Program PORT_TX_DW4 */ |
@@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | |||
2514 | val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); | 2440 | val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); |
2515 | val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | | 2441 | val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | |
2516 | CURSOR_COEFF_MASK); | 2442 | CURSOR_COEFF_MASK); |
2517 | val |= ddi_translations[level].dw4_scaling; | 2443 | val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); |
2444 | val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); | ||
2445 | val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); | ||
2518 | I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); | 2446 | I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); |
2519 | } | 2447 | } |
2448 | |||
2449 | /* Program PORT_TX_DW7 */ | ||
2450 | val = I915_READ(ICL_PORT_TX_DW7_LN0(port)); | ||
2451 | val &= ~N_SCALAR_MASK; | ||
2452 | val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); | ||
2453 | I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val); | ||
2520 | } | 2454 | } |
2521 | 2455 | ||
2522 | static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, | 2456 | static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, |
@@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
2581 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2515 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); |
2582 | 2516 | ||
2583 | /* 5. Program swing and de-emphasis */ | 2517 | /* 5. Program swing and de-emphasis */ |
2584 | icl_ddi_combo_vswing_program(dev_priv, level, port, type); | 2518 | icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate); |
2585 | 2519 | ||
2586 | /* 6. Set training enable to trigger update */ | 2520 | /* 6. Set training enable to trigger update */ |
2587 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | 2521 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3da9c0f9e948..248128126422 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, | |||
15415 | } | 15415 | } |
15416 | } | 15416 | } |
15417 | 15417 | ||
15418 | static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) | ||
15419 | { | ||
15420 | struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); | ||
15421 | |||
15422 | /* | ||
15423 | * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram | ||
15424 | * the hardware when a high res displays plugged in. DPLL P | ||
15425 | * divider is zero, and the pipe timings are bonkers. We'll | ||
15426 | * try to disable everything in that case. | ||
15427 | * | ||
15428 | * FIXME would be nice to be able to sanitize this state | ||
15429 | * without several WARNs, but for now let's take the easy | ||
15430 | * road. | ||
15431 | */ | ||
15432 | return IS_GEN6(dev_priv) && | ||
15433 | crtc_state->base.active && | ||
15434 | crtc_state->shared_dpll && | ||
15435 | crtc_state->port_clock == 0; | ||
15436 | } | ||
15437 | |||
15418 | static void intel_sanitize_encoder(struct intel_encoder *encoder) | 15438 | static void intel_sanitize_encoder(struct intel_encoder *encoder) |
15419 | { | 15439 | { |
15420 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 15440 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
15421 | struct intel_connector *connector; | 15441 | struct intel_connector *connector; |
15442 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | ||
15443 | struct intel_crtc_state *crtc_state = crtc ? | ||
15444 | to_intel_crtc_state(crtc->base.state) : NULL; | ||
15422 | 15445 | ||
15423 | /* We need to check both for a crtc link (meaning that the | 15446 | /* We need to check both for a crtc link (meaning that the |
15424 | * encoder is active and trying to read from a pipe) and the | 15447 | * encoder is active and trying to read from a pipe) and the |
15425 | * pipe itself being active. */ | 15448 | * pipe itself being active. */ |
15426 | bool has_active_crtc = encoder->base.crtc && | 15449 | bool has_active_crtc = crtc_state && |
15427 | to_intel_crtc(encoder->base.crtc)->active; | 15450 | crtc_state->base.active; |
15451 | |||
15452 | if (crtc_state && has_bogus_dpll_config(crtc_state)) { | ||
15453 | DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n", | ||
15454 | pipe_name(crtc->pipe)); | ||
15455 | has_active_crtc = false; | ||
15456 | } | ||
15428 | 15457 | ||
15429 | connector = intel_encoder_find_connector(encoder); | 15458 | connector = intel_encoder_find_connector(encoder); |
15430 | if (connector && !has_active_crtc) { | 15459 | if (connector && !has_active_crtc) { |
@@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) | |||
15435 | /* Connector is active, but has no active pipe. This is | 15464 | /* Connector is active, but has no active pipe. This is |
15436 | * fallout from our resume register restoring. Disable | 15465 | * fallout from our resume register restoring. Disable |
15437 | * the encoder manually again. */ | 15466 | * the encoder manually again. */ |
15438 | if (encoder->base.crtc) { | 15467 | if (crtc_state) { |
15439 | struct drm_crtc_state *crtc_state = encoder->base.crtc->state; | 15468 | struct drm_encoder *best_encoder; |
15440 | 15469 | ||
15441 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", | 15470 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", |
15442 | encoder->base.base.id, | 15471 | encoder->base.base.id, |
15443 | encoder->base.name); | 15472 | encoder->base.name); |
15473 | |||
15474 | /* avoid oopsing in case the hooks consult best_encoder */ | ||
15475 | best_encoder = connector->base.state->best_encoder; | ||
15476 | connector->base.state->best_encoder = &encoder->base; | ||
15477 | |||
15444 | if (encoder->disable) | 15478 | if (encoder->disable) |
15445 | encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); | 15479 | encoder->disable(encoder, crtc_state, |
15480 | connector->base.state); | ||
15446 | if (encoder->post_disable) | 15481 | if (encoder->post_disable) |
15447 | encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); | 15482 | encoder->post_disable(encoder, crtc_state, |
15483 | connector->base.state); | ||
15484 | |||
15485 | connector->base.state->best_encoder = best_encoder; | ||
15448 | } | 15486 | } |
15449 | encoder->base.crtc = NULL; | 15487 | encoder->base.crtc = NULL; |
15450 | 15488 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fdd2cbc56fa3..22a74608c6e4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp) | |||
304 | static int icl_max_source_rate(struct intel_dp *intel_dp) | 304 | static int icl_max_source_rate(struct intel_dp *intel_dp) |
305 | { | 305 | { |
306 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 306 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
307 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
307 | enum port port = dig_port->base.port; | 308 | enum port port = dig_port->base.port; |
308 | 309 | ||
309 | if (port == PORT_B) | 310 | if (intel_port_is_combophy(dev_priv, port) && |
311 | !intel_dp_is_edp(intel_dp)) | ||
310 | return 540000; | 312 | return 540000; |
311 | 313 | ||
312 | return 810000; | 314 | return 810000; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f94a04b4ad87..e9ddeaf05a14 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -209,6 +209,16 @@ struct intel_fbdev { | |||
209 | unsigned long vma_flags; | 209 | unsigned long vma_flags; |
210 | async_cookie_t cookie; | 210 | async_cookie_t cookie; |
211 | int preferred_bpp; | 211 | int preferred_bpp; |
212 | |||
213 | /* Whether or not fbdev hpd processing is temporarily suspended */ | ||
214 | bool hpd_suspended : 1; | ||
215 | /* Set when a hotplug was received while HPD processing was | ||
216 | * suspended | ||
217 | */ | ||
218 | bool hpd_waiting : 1; | ||
219 | |||
220 | /* Protects hpd_suspended */ | ||
221 | struct mutex hpd_lock; | ||
212 | }; | 222 | }; |
213 | 223 | ||
214 | struct intel_encoder { | 224 | struct intel_encoder { |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb5bb5b32a60..4ee16b264dbe 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -336,8 +336,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
336 | bool *enabled, int width, int height) | 336 | bool *enabled, int width, int height) |
337 | { | 337 | { |
338 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); | 338 | struct drm_i915_private *dev_priv = to_i915(fb_helper->dev); |
339 | unsigned long conn_configured, conn_seq, mask; | ||
340 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); | 339 | unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG); |
340 | unsigned long conn_configured, conn_seq; | ||
341 | int i, j; | 341 | int i, j; |
342 | bool *save_enabled; | 342 | bool *save_enabled; |
343 | bool fallback = true, ret = true; | 343 | bool fallback = true, ret = true; |
@@ -355,10 +355,9 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, | |||
355 | drm_modeset_backoff(&ctx); | 355 | drm_modeset_backoff(&ctx); |
356 | 356 | ||
357 | memcpy(save_enabled, enabled, count); | 357 | memcpy(save_enabled, enabled, count); |
358 | mask = GENMASK(count - 1, 0); | 358 | conn_seq = GENMASK(count - 1, 0); |
359 | conn_configured = 0; | 359 | conn_configured = 0; |
360 | retry: | 360 | retry: |
361 | conn_seq = conn_configured; | ||
362 | for (i = 0; i < count; i++) { | 361 | for (i = 0; i < count; i++) { |
363 | struct drm_fb_helper_connector *fb_conn; | 362 | struct drm_fb_helper_connector *fb_conn; |
364 | struct drm_connector *connector; | 363 | struct drm_connector *connector; |
@@ -371,7 +370,8 @@ retry: | |||
371 | if (conn_configured & BIT(i)) | 370 | if (conn_configured & BIT(i)) |
372 | continue; | 371 | continue; |
373 | 372 | ||
374 | if (conn_seq == 0 && !connector->has_tile) | 373 | /* First pass, only consider tiled connectors */ |
374 | if (conn_seq == GENMASK(count - 1, 0) && !connector->has_tile) | ||
375 | continue; | 375 | continue; |
376 | 376 | ||
377 | if (connector->status == connector_status_connected) | 377 | if (connector->status == connector_status_connected) |
@@ -475,8 +475,10 @@ retry: | |||
475 | conn_configured |= BIT(i); | 475 | conn_configured |= BIT(i); |
476 | } | 476 | } |
477 | 477 | ||
478 | if ((conn_configured & mask) != mask && conn_configured != conn_seq) | 478 | if (conn_configured != conn_seq) { /* repeat until no more are found */ |
479 | conn_seq = conn_configured; | ||
479 | goto retry; | 480 | goto retry; |
481 | } | ||
480 | 482 | ||
481 | /* | 483 | /* |
482 | * If the BIOS didn't enable everything it could, fall back to have the | 484 | * If the BIOS didn't enable everything it could, fall back to have the |
@@ -679,6 +681,7 @@ int intel_fbdev_init(struct drm_device *dev) | |||
679 | if (ifbdev == NULL) | 681 | if (ifbdev == NULL) |
680 | return -ENOMEM; | 682 | return -ENOMEM; |
681 | 683 | ||
684 | mutex_init(&ifbdev->hpd_lock); | ||
682 | drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); | 685 | drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); |
683 | 686 | ||
684 | if (!intel_fbdev_init_bios(dev, ifbdev)) | 687 | if (!intel_fbdev_init_bios(dev, ifbdev)) |
@@ -752,6 +755,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) | |||
752 | intel_fbdev_destroy(ifbdev); | 755 | intel_fbdev_destroy(ifbdev); |
753 | } | 756 | } |
754 | 757 | ||
758 | /* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD | ||
759 | * processing, fbdev will perform a full connector reprobe if a hotplug event | ||
760 | * was received while HPD was suspended. | ||
761 | */ | ||
762 | static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) | ||
763 | { | ||
764 | bool send_hpd = false; | ||
765 | |||
766 | mutex_lock(&ifbdev->hpd_lock); | ||
767 | ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED; | ||
768 | send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting; | ||
769 | ifbdev->hpd_waiting = false; | ||
770 | mutex_unlock(&ifbdev->hpd_lock); | ||
771 | |||
772 | if (send_hpd) { | ||
773 | DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n"); | ||
774 | drm_fb_helper_hotplug_event(&ifbdev->helper); | ||
775 | } | ||
776 | } | ||
777 | |||
755 | void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) | 778 | void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) |
756 | { | 779 | { |
757 | struct drm_i915_private *dev_priv = to_i915(dev); | 780 | struct drm_i915_private *dev_priv = to_i915(dev); |
@@ -773,6 +796,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous | |||
773 | */ | 796 | */ |
774 | if (state != FBINFO_STATE_RUNNING) | 797 | if (state != FBINFO_STATE_RUNNING) |
775 | flush_work(&dev_priv->fbdev_suspend_work); | 798 | flush_work(&dev_priv->fbdev_suspend_work); |
799 | |||
776 | console_lock(); | 800 | console_lock(); |
777 | } else { | 801 | } else { |
778 | /* | 802 | /* |
@@ -800,17 +824,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous | |||
800 | 824 | ||
801 | drm_fb_helper_set_suspend(&ifbdev->helper, state); | 825 | drm_fb_helper_set_suspend(&ifbdev->helper, state); |
802 | console_unlock(); | 826 | console_unlock(); |
827 | |||
828 | intel_fbdev_hpd_set_suspend(ifbdev, state); | ||
803 | } | 829 | } |
804 | 830 | ||
805 | void intel_fbdev_output_poll_changed(struct drm_device *dev) | 831 | void intel_fbdev_output_poll_changed(struct drm_device *dev) |
806 | { | 832 | { |
807 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; | 833 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; |
834 | bool send_hpd; | ||
808 | 835 | ||
809 | if (!ifbdev) | 836 | if (!ifbdev) |
810 | return; | 837 | return; |
811 | 838 | ||
812 | intel_fbdev_sync(ifbdev); | 839 | intel_fbdev_sync(ifbdev); |
813 | if (ifbdev->vma || ifbdev->helper.deferred_setup) | 840 | |
841 | mutex_lock(&ifbdev->hpd_lock); | ||
842 | send_hpd = !ifbdev->hpd_suspended; | ||
843 | ifbdev->hpd_waiting = true; | ||
844 | mutex_unlock(&ifbdev->hpd_lock); | ||
845 | |||
846 | if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup)) | ||
814 | drm_fb_helper_hotplug_event(&ifbdev->helper); | 847 | drm_fb_helper_hotplug_event(&ifbdev->helper); |
815 | } | 848 | } |
816 | 849 | ||
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b8f106d9ecf8..3ac20153705a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -55,7 +55,12 @@ | |||
55 | struct opregion_header { | 55 | struct opregion_header { |
56 | u8 signature[16]; | 56 | u8 signature[16]; |
57 | u32 size; | 57 | u32 size; |
58 | u32 opregion_ver; | 58 | struct { |
59 | u8 rsvd; | ||
60 | u8 revision; | ||
61 | u8 minor; | ||
62 | u8 major; | ||
63 | } __packed over; | ||
59 | u8 bios_ver[32]; | 64 | u8 bios_ver[32]; |
60 | u8 vbios_ver[16]; | 65 | u8 vbios_ver[16]; |
61 | u8 driver_ver[16]; | 66 | u8 driver_ver[16]; |
@@ -119,7 +124,8 @@ struct opregion_asle { | |||
119 | u64 fdss; | 124 | u64 fdss; |
120 | u32 fdsp; | 125 | u32 fdsp; |
121 | u32 stat; | 126 | u32 stat; |
122 | u64 rvda; /* Physical address of raw vbt data */ | 127 | u64 rvda; /* Physical (2.0) or relative from opregion (2.1+) |
128 | * address of raw VBT data. */ | ||
123 | u32 rvds; /* Size of raw vbt data */ | 129 | u32 rvds; /* Size of raw vbt data */ |
124 | u8 rsvd[58]; | 130 | u8 rsvd[58]; |
125 | } __packed; | 131 | } __packed; |
@@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
925 | opregion->header = base; | 931 | opregion->header = base; |
926 | opregion->lid_state = base + ACPI_CLID; | 932 | opregion->lid_state = base + ACPI_CLID; |
927 | 933 | ||
934 | DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n", | ||
935 | opregion->header->over.major, | ||
936 | opregion->header->over.minor, | ||
937 | opregion->header->over.revision); | ||
938 | |||
928 | mboxes = opregion->header->mboxes; | 939 | mboxes = opregion->header->mboxes; |
929 | if (mboxes & MBOX_ACPI) { | 940 | if (mboxes & MBOX_ACPI) { |
930 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); | 941 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); |
@@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
953 | if (dmi_check_system(intel_no_opregion_vbt)) | 964 | if (dmi_check_system(intel_no_opregion_vbt)) |
954 | goto out; | 965 | goto out; |
955 | 966 | ||
956 | if (opregion->header->opregion_ver >= 2 && opregion->asle && | 967 | if (opregion->header->over.major >= 2 && opregion->asle && |
957 | opregion->asle->rvda && opregion->asle->rvds) { | 968 | opregion->asle->rvda && opregion->asle->rvds) { |
958 | opregion->rvda = memremap(opregion->asle->rvda, | 969 | resource_size_t rvda = opregion->asle->rvda; |
959 | opregion->asle->rvds, | 970 | |
971 | /* | ||
972 | * opregion 2.0: rvda is the physical VBT address. | ||
973 | * | ||
974 | * opregion 2.1+: rvda is unsigned, relative offset from | ||
975 | * opregion base, and should never point within opregion. | ||
976 | */ | ||
977 | if (opregion->header->over.major > 2 || | ||
978 | opregion->header->over.minor >= 1) { | ||
979 | WARN_ON(rvda < OPREGION_SIZE); | ||
980 | |||
981 | rvda += asls; | ||
982 | } | ||
983 | |||
984 | opregion->rvda = memremap(rvda, opregion->asle->rvds, | ||
960 | MEMREMAP_WB); | 985 | MEMREMAP_WB); |
986 | |||
961 | vbt = opregion->rvda; | 987 | vbt = opregion->rvda; |
962 | vbt_size = opregion->asle->rvds; | 988 | vbt_size = opregion->asle->rvds; |
963 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { | 989 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { |
@@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
967 | goto out; | 993 | goto out; |
968 | } else { | 994 | } else { |
969 | DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); | 995 | DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); |
996 | memunmap(opregion->rvda); | ||
997 | opregion->rvda = NULL; | ||
970 | } | 998 | } |
971 | } | 999 | } |
972 | 1000 | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 72edaa7ff411..a1a7cc29fdd1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -415,16 +415,17 @@ struct intel_engine_cs { | |||
415 | /** | 415 | /** |
416 | * @enable_count: Reference count for the enabled samplers. | 416 | * @enable_count: Reference count for the enabled samplers. |
417 | * | 417 | * |
418 | * Index number corresponds to the bit number from @enable. | 418 | * Index number corresponds to @enum drm_i915_pmu_engine_sample. |
419 | */ | 419 | */ |
420 | unsigned int enable_count[I915_PMU_SAMPLE_BITS]; | 420 | unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; |
421 | /** | 421 | /** |
422 | * @sample: Counter values for sampling events. | 422 | * @sample: Counter values for sampling events. |
423 | * | 423 | * |
424 | * Our internal timer stores the current counters in this field. | 424 | * Our internal timer stores the current counters in this field. |
425 | * | ||
426 | * Index number corresponds to @enum drm_i915_pmu_engine_sample. | ||
425 | */ | 427 | */ |
426 | #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) | 428 | struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; |
427 | struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; | ||
428 | } pmu; | 429 | } pmu; |
429 | 430 | ||
430 | /* | 431 | /* |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index d2e003d8f3db..5170a0f5fe7b 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane, | |||
494 | 494 | ||
495 | keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); | 495 | keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); |
496 | 496 | ||
497 | keymsk = key->channel_mask & 0x3ffffff; | 497 | keymsk = key->channel_mask & 0x7ffffff; |
498 | if (alpha < 0xff) | 498 | if (alpha < 0xff) |
499 | keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; | 499 | keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; |
500 | 500 | ||
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 2c5bbe317353..e31e263cf86b 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c | |||
@@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
643 | int bus_format; | 643 | int bus_format; |
644 | 644 | ||
645 | ret = of_property_read_u32(child, "reg", &i); | 645 | ret = of_property_read_u32(child, "reg", &i); |
646 | if (ret || i < 0 || i > 1) | 646 | if (ret || i < 0 || i > 1) { |
647 | return -EINVAL; | 647 | ret = -EINVAL; |
648 | goto free_child; | ||
649 | } | ||
648 | 650 | ||
649 | if (!of_device_is_available(child)) | 651 | if (!of_device_is_available(child)) |
650 | continue; | 652 | continue; |
@@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
657 | channel = &imx_ldb->channel[i]; | 659 | channel = &imx_ldb->channel[i]; |
658 | channel->ldb = imx_ldb; | 660 | channel->ldb = imx_ldb; |
659 | channel->chno = i; | 661 | channel->chno = i; |
660 | channel->child = child; | ||
661 | 662 | ||
662 | /* | 663 | /* |
663 | * The output port is port@4 with an external 4-port mux or | 664 | * The output port is port@4 with an external 4-port mux or |
@@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
667 | imx_ldb->lvds_mux ? 4 : 2, 0, | 668 | imx_ldb->lvds_mux ? 4 : 2, 0, |
668 | &channel->panel, &channel->bridge); | 669 | &channel->panel, &channel->bridge); |
669 | if (ret && ret != -ENODEV) | 670 | if (ret && ret != -ENODEV) |
670 | return ret; | 671 | goto free_child; |
671 | 672 | ||
672 | /* panel ddc only if there is no bridge */ | 673 | /* panel ddc only if there is no bridge */ |
673 | if (!channel->bridge) { | 674 | if (!channel->bridge) { |
674 | ret = imx_ldb_panel_ddc(dev, channel, child); | 675 | ret = imx_ldb_panel_ddc(dev, channel, child); |
675 | if (ret) | 676 | if (ret) |
676 | return ret; | 677 | goto free_child; |
677 | } | 678 | } |
678 | 679 | ||
679 | bus_format = of_get_bus_format(dev, child); | 680 | bus_format = of_get_bus_format(dev, child); |
@@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
689 | if (bus_format < 0) { | 690 | if (bus_format < 0) { |
690 | dev_err(dev, "could not determine data mapping: %d\n", | 691 | dev_err(dev, "could not determine data mapping: %d\n", |
691 | bus_format); | 692 | bus_format); |
692 | return bus_format; | 693 | ret = bus_format; |
694 | goto free_child; | ||
693 | } | 695 | } |
694 | channel->bus_format = bus_format; | 696 | channel->bus_format = bus_format; |
697 | channel->child = child; | ||
695 | 698 | ||
696 | ret = imx_ldb_register(drm, channel); | 699 | ret = imx_ldb_register(drm, channel); |
697 | if (ret) | 700 | if (ret) { |
698 | return ret; | 701 | channel->child = NULL; |
702 | goto free_child; | ||
703 | } | ||
699 | } | 704 | } |
700 | 705 | ||
701 | dev_set_drvdata(dev, imx_ldb); | 706 | dev_set_drvdata(dev, imx_ldb); |
702 | 707 | ||
703 | return 0; | 708 | return 0; |
709 | |||
710 | free_child: | ||
711 | of_node_put(child); | ||
712 | return ret; | ||
704 | } | 713 | } |
705 | 714 | ||
706 | static void imx_ldb_unbind(struct device *dev, struct device *master, | 715 | static void imx_ldb_unbind(struct device *dev, struct device *master, |
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index c390924de93d..21e964f6ab5c 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
@@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, | |||
370 | if (ret) | 370 | if (ret) |
371 | return ret; | 371 | return ret; |
372 | 372 | ||
373 | /* CRTC should be enabled */ | 373 | /* nothing to check when disabling or disabled */ |
374 | if (!crtc_state->enable) | 374 | if (!crtc_state->enable) |
375 | return -EINVAL; | 375 | return 0; |
376 | 376 | ||
377 | switch (plane->type) { | 377 | switch (plane->type) { |
378 | case DRM_PLANE_TYPE_PRIMARY: | 378 | case DRM_PLANE_TYPE_PRIMARY: |
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 00a9c2ab9e6c..64fb788b6647 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c | |||
@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll) | |||
1406 | 1406 | ||
1407 | static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) | 1407 | static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) |
1408 | { | 1408 | { |
1409 | struct dsi_data *dsi = p; | 1409 | struct dsi_data *dsi = s->private; |
1410 | struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; | 1410 | struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; |
1411 | enum dss_clk_source dispc_clk_src, dsi_clk_src; | 1411 | enum dss_clk_source dispc_clk_src, dsi_clk_src; |
1412 | int dsi_module = dsi->module_id; | 1412 | int dsi_module = dsi->module_id; |
@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) | |||
1467 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | 1467 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS |
1468 | static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) | 1468 | static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) |
1469 | { | 1469 | { |
1470 | struct dsi_data *dsi = p; | 1470 | struct dsi_data *dsi = s->private; |
1471 | unsigned long flags; | 1471 | unsigned long flags; |
1472 | struct dsi_irq_stats stats; | 1472 | struct dsi_irq_stats stats; |
1473 | 1473 | ||
@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) | |||
1558 | 1558 | ||
1559 | static int dsi_dump_dsi_regs(struct seq_file *s, void *p) | 1559 | static int dsi_dump_dsi_regs(struct seq_file *s, void *p) |
1560 | { | 1560 | { |
1561 | struct dsi_data *dsi = p; | 1561 | struct dsi_data *dsi = s->private; |
1562 | 1562 | ||
1563 | if (dsi_runtime_get(dsi)) | 1563 | if (dsi_runtime_get(dsi)) |
1564 | return 0; | 1564 | return 0; |
@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev, | |||
4751 | dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; | 4751 | dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; |
4752 | dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; | 4752 | dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; |
4753 | dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; | 4753 | dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; |
4754 | /* | ||
4755 | * HACK: These flags should be handled through the omap_dss_device bus | ||
4756 | * flags, but this will only be possible when the DSI encoder will be | ||
4757 | * converted to the omapdrm-managed encoder model. | ||
4758 | */ | ||
4759 | dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE; | ||
4760 | dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; | ||
4761 | dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW; | ||
4762 | dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH; | ||
4763 | dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE; | ||
4764 | dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; | ||
4754 | 4765 | ||
4755 | dss_mgr_set_timings(&dsi->output, &dsi->vm); | 4766 | dss_mgr_set_timings(&dsi->output, &dsi->vm); |
4756 | 4767 | ||
@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) | |||
5083 | 5094 | ||
5084 | snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); | 5095 | snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); |
5085 | dsi->debugfs.regs = dss_debugfs_create_file(dss, name, | 5096 | dsi->debugfs.regs = dss_debugfs_create_file(dss, name, |
5086 | dsi_dump_dsi_regs, &dsi); | 5097 | dsi_dump_dsi_regs, dsi); |
5087 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | 5098 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS |
5088 | snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); | 5099 | snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); |
5089 | dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, | 5100 | dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, |
5090 | dsi_dump_dsi_irqs, &dsi); | 5101 | dsi_dump_dsi_irqs, dsi); |
5091 | #endif | 5102 | #endif |
5092 | snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); | 5103 | snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); |
5093 | dsi->debugfs.clks = dss_debugfs_create_file(dss, name, | 5104 | dsi->debugfs.clks = dss_debugfs_create_file(dss, name, |
5094 | dsi_dump_dsi_clocks, &dsi); | 5105 | dsi_dump_dsi_clocks, dsi); |
5095 | 5106 | ||
5096 | return 0; | 5107 | return 0; |
5097 | } | 5108 | } |
@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data) | |||
5104 | dss_debugfs_remove_file(dsi->debugfs.irqs); | 5115 | dss_debugfs_remove_file(dsi->debugfs.irqs); |
5105 | dss_debugfs_remove_file(dsi->debugfs.regs); | 5116 | dss_debugfs_remove_file(dsi->debugfs.regs); |
5106 | 5117 | ||
5107 | of_platform_depopulate(dev); | ||
5108 | |||
5109 | WARN_ON(dsi->scp_clk_refcount > 0); | 5118 | WARN_ON(dsi->scp_clk_refcount > 0); |
5110 | 5119 | ||
5111 | dss_pll_unregister(&dsi->pll); | 5120 | dss_pll_unregister(&dsi->pll); |
@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev) | |||
5457 | 5466 | ||
5458 | dsi_uninit_output(dsi); | 5467 | dsi_uninit_output(dsi); |
5459 | 5468 | ||
5469 | of_platform_depopulate(&pdev->dev); | ||
5470 | |||
5460 | pm_runtime_disable(&pdev->dev); | 5471 | pm_runtime_disable(&pdev->dev); |
5461 | 5472 | ||
5462 | if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { | 5473 | if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { |
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index d587779a80b4..a97294ac96d5 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c | |||
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
5676 | u16 data_offset, size; | 5676 | u16 data_offset, size; |
5677 | u8 frev, crev; | 5677 | u8 frev, crev; |
5678 | struct ci_power_info *pi; | 5678 | struct ci_power_info *pi; |
5679 | enum pci_bus_speed speed_cap; | 5679 | enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; |
5680 | struct pci_dev *root = rdev->pdev->bus->self; | 5680 | struct pci_dev *root = rdev->pdev->bus->self; |
5681 | int ret; | 5681 | int ret; |
5682 | 5682 | ||
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
5685 | return -ENOMEM; | 5685 | return -ENOMEM; |
5686 | rdev->pm.dpm.priv = pi; | 5686 | rdev->pm.dpm.priv = pi; |
5687 | 5687 | ||
5688 | speed_cap = pcie_get_speed_cap(root); | 5688 | if (!pci_is_root_bus(rdev->pdev->bus)) |
5689 | speed_cap = pcie_get_speed_cap(root); | ||
5689 | if (speed_cap == PCI_SPEED_UNKNOWN) { | 5690 | if (speed_cap == PCI_SPEED_UNKNOWN) { |
5690 | pi->sys_pcie_mask = 0; | 5691 | pi->sys_pcie_mask = 0; |
5691 | } else { | 5692 | } else { |
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index dec1e081f529..6a8fb6fd183c 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c | |||
@@ -172,6 +172,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) | |||
172 | } | 172 | } |
173 | 173 | ||
174 | if (radeon_is_px(dev)) { | 174 | if (radeon_is_px(dev)) { |
175 | dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NEVER_SKIP); | ||
175 | pm_runtime_use_autosuspend(dev->dev); | 176 | pm_runtime_use_autosuspend(dev->dev); |
176 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); | 177 | pm_runtime_set_autosuspend_delay(dev->dev, 5000); |
177 | pm_runtime_set_active(dev->dev); | 178 | pm_runtime_set_active(dev->dev); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 8fb60b3af015..0a785ef0ab66 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6899 | struct ni_power_info *ni_pi; | 6899 | struct ni_power_info *ni_pi; |
6900 | struct si_power_info *si_pi; | 6900 | struct si_power_info *si_pi; |
6901 | struct atom_clock_dividers dividers; | 6901 | struct atom_clock_dividers dividers; |
6902 | enum pci_bus_speed speed_cap; | 6902 | enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; |
6903 | struct pci_dev *root = rdev->pdev->bus->self; | 6903 | struct pci_dev *root = rdev->pdev->bus->self; |
6904 | int ret; | 6904 | int ret; |
6905 | 6905 | ||
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6911 | eg_pi = &ni_pi->eg; | 6911 | eg_pi = &ni_pi->eg; |
6912 | pi = &eg_pi->rv7xx; | 6912 | pi = &eg_pi->rv7xx; |
6913 | 6913 | ||
6914 | speed_cap = pcie_get_speed_cap(root); | 6914 | if (!pci_is_root_bus(rdev->pdev->bus)) |
6915 | speed_cap = pcie_get_speed_cap(root); | ||
6915 | if (speed_cap == PCI_SPEED_UNKNOWN) { | 6916 | if (speed_cap == PCI_SPEED_UNKNOWN) { |
6916 | si_pi->sys_pcie_mask = 0; | 6917 | si_pi->sys_pcie_mask = 0; |
6917 | } else { | 6918 | } else { |
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 37f93022a106..c0351abf83a3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c | |||
@@ -1,17 +1,8 @@ | |||
1 | //SPDX-License-Identifier: GPL-2.0+ | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | 2 | /* |
3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | 3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd |
4 | * Author: | 4 | * Author: |
5 | * Sandy Huang <hjc@rock-chips.com> | 5 | * Sandy Huang <hjc@rock-chips.com> |
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | 6 | */ |
16 | 7 | ||
17 | #include <drm/drmP.h> | 8 | #include <drm/drmP.h> |
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h index 38b52e63b2b0..27b9635124bc 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.h +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h | |||
@@ -1,17 +1,8 @@ | |||
1 | //SPDX-License-Identifier: GPL-2.0+ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | 2 | /* |
3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | 3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd |
4 | * Author: | 4 | * Author: |
5 | * Sandy Huang <hjc@rock-chips.com> | 5 | * Sandy Huang <hjc@rock-chips.com> |
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | 6 | */ |
16 | 7 | ||
17 | #ifdef CONFIG_ROCKCHIP_RGB | 8 | #ifdef CONFIG_ROCKCHIP_RGB |
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 4463d3826ecb..e2942c9a11a7 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c | |||
@@ -440,13 +440,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) | |||
440 | 440 | ||
441 | while ((entity->dependency = | 441 | while ((entity->dependency = |
442 | sched->ops->dependency(sched_job, entity))) { | 442 | sched->ops->dependency(sched_job, entity))) { |
443 | trace_drm_sched_job_wait_dep(sched_job, entity->dependency); | ||
443 | 444 | ||
444 | if (drm_sched_entity_add_dependency_cb(entity)) { | 445 | if (drm_sched_entity_add_dependency_cb(entity)) |
445 | |||
446 | trace_drm_sched_job_wait_dep(sched_job, | ||
447 | entity->dependency); | ||
448 | return NULL; | 446 | return NULL; |
449 | } | ||
450 | } | 447 | } |
451 | 448 | ||
452 | /* skip jobs from entity that marked guilty */ | 449 | /* skip jobs from entity that marked guilty */ |
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 0420f5c978b9..cf45d0f940f9 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c | |||
@@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, | |||
761 | return PTR_ERR(tcon->sclk0); | 761 | return PTR_ERR(tcon->sclk0); |
762 | } | 762 | } |
763 | } | 763 | } |
764 | clk_prepare_enable(tcon->sclk0); | ||
764 | 765 | ||
765 | if (tcon->quirks->has_channel_1) { | 766 | if (tcon->quirks->has_channel_1) { |
766 | tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); | 767 | tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); |
@@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, | |||
775 | 776 | ||
776 | static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) | 777 | static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) |
777 | { | 778 | { |
779 | clk_disable_unprepare(tcon->sclk0); | ||
778 | clk_disable_unprepare(tcon->clk); | 780 | clk_disable_unprepare(tcon->clk); |
779 | } | 781 | } |
780 | 782 | ||
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c index 9d9e8146db90..d7b409a3c0f8 100644 --- a/drivers/gpu/drm/vkms/vkms_crc.c +++ b/drivers/gpu/drm/vkms/vkms_crc.c | |||
@@ -1,4 +1,5 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | |||
2 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
3 | #include <linux/crc32.h> | 4 | #include <linux/crc32.h> |
4 | #include <drm/drm_atomic.h> | 5 | #include <drm/drm_atomic.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 177bbcb38306..eb56ee893761 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
10 | #include <drm/drm_atomic_helper.h> | 4 | #include <drm/drm_atomic_helper.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 83087877565c..7dcbecb5fac2 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c | |||
@@ -1,9 +1,4 @@ | |||
1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | */ | ||
7 | 2 | ||
8 | /** | 3 | /** |
9 | * DOC: vkms (Virtual Kernel Modesetting) | 4 | * DOC: vkms (Virtual Kernel Modesetting) |
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index e4469cd3d254..81f1cfbeb936 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h | |||
@@ -1,3 +1,5 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
2 | |||
1 | #ifndef _VKMS_DRV_H_ | 3 | #ifndef _VKMS_DRV_H_ |
2 | #define _VKMS_DRV_H_ | 4 | #define _VKMS_DRV_H_ |
3 | 5 | ||
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 80311daed47a..138b0bb325cf 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include <linux/shmem_fs.h> | 3 | #include <linux/shmem_fs.h> |
10 | 4 | ||
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 271a0eb9042c..4173e4f48334 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
10 | #include <drm/drm_crtc_helper.h> | 4 | #include <drm/drm_crtc_helper.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 418817600ad1..0e67d2d42f0c 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
10 | #include <drm/drm_plane_helper.h> | 4 | #include <drm/drm_plane_helper.h> |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 25afb1d594e3..7ef5dcb06104 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -26,6 +26,7 @@ | |||
26 | **************************************************************************/ | 26 | **************************************************************************/ |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/console.h> | 28 | #include <linux/console.h> |
29 | #include <linux/dma-mapping.h> | ||
29 | 30 | ||
30 | #include <drm/drmP.h> | 31 | #include <drm/drmP.h> |
31 | #include "vmwgfx_drv.h" | 32 | #include "vmwgfx_drv.h" |
@@ -34,7 +35,6 @@ | |||
34 | #include <drm/ttm/ttm_placement.h> | 35 | #include <drm/ttm/ttm_placement.h> |
35 | #include <drm/ttm/ttm_bo_driver.h> | 36 | #include <drm/ttm/ttm_bo_driver.h> |
36 | #include <drm/ttm/ttm_module.h> | 37 | #include <drm/ttm/ttm_module.h> |
37 | #include <linux/intel-iommu.h> | ||
38 | 38 | ||
39 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" | 39 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" |
40 | #define VMWGFX_CHIP_SVGAII 0 | 40 | #define VMWGFX_CHIP_SVGAII 0 |
@@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv) | |||
546 | } | 546 | } |
547 | 547 | ||
548 | /** | 548 | /** |
549 | * vmw_assume_iommu - Figure out whether coherent dma-remapping might be | ||
550 | * taking place. | ||
551 | * @dev: Pointer to the struct drm_device. | ||
552 | * | ||
553 | * Return: true if iommu present, false otherwise. | ||
554 | */ | ||
555 | static bool vmw_assume_iommu(struct drm_device *dev) | ||
556 | { | ||
557 | const struct dma_map_ops *ops = get_dma_ops(dev->dev); | ||
558 | |||
559 | return !dma_is_direct(ops) && ops && | ||
560 | ops->map_page != dma_direct_map_page; | ||
561 | } | ||
562 | |||
563 | /** | ||
549 | * vmw_dma_select_mode - Determine how DMA mappings should be set up for this | 564 | * vmw_dma_select_mode - Determine how DMA mappings should be set up for this |
550 | * system. | 565 | * system. |
551 | * | 566 | * |
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) | |||
565 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", | 580 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", |
566 | [vmw_dma_map_populate] = "Keeping DMA mappings.", | 581 | [vmw_dma_map_populate] = "Keeping DMA mappings.", |
567 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; | 582 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; |
568 | #ifdef CONFIG_X86 | ||
569 | const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); | ||
570 | 583 | ||
571 | #ifdef CONFIG_INTEL_IOMMU | 584 | if (vmw_force_coherent) |
572 | if (intel_iommu_enabled) { | 585 | dev_priv->map_mode = vmw_dma_alloc_coherent; |
586 | else if (vmw_assume_iommu(dev_priv->dev)) | ||
573 | dev_priv->map_mode = vmw_dma_map_populate; | 587 | dev_priv->map_mode = vmw_dma_map_populate; |
574 | goto out_fixup; | 588 | else if (!vmw_force_iommu) |
575 | } | ||
576 | #endif | ||
577 | |||
578 | if (!(vmw_force_iommu || vmw_force_coherent)) { | ||
579 | dev_priv->map_mode = vmw_dma_phys; | 589 | dev_priv->map_mode = vmw_dma_phys; |
580 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); | 590 | else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl()) |
581 | return 0; | ||
582 | } | ||
583 | |||
584 | dev_priv->map_mode = vmw_dma_map_populate; | ||
585 | |||
586 | if (dma_ops && dma_ops->sync_single_for_cpu) | ||
587 | dev_priv->map_mode = vmw_dma_alloc_coherent; | 591 | dev_priv->map_mode = vmw_dma_alloc_coherent; |
588 | #ifdef CONFIG_SWIOTLB | 592 | else |
589 | if (swiotlb_nr_tbl() == 0) | ||
590 | dev_priv->map_mode = vmw_dma_map_populate; | 593 | dev_priv->map_mode = vmw_dma_map_populate; |
591 | #endif | ||
592 | 594 | ||
593 | #ifdef CONFIG_INTEL_IOMMU | 595 | if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu) |
594 | out_fixup: | ||
595 | #endif | ||
596 | if (dev_priv->map_mode == vmw_dma_map_populate && | ||
597 | vmw_restrict_iommu) | ||
598 | dev_priv->map_mode = vmw_dma_map_bind; | 596 | dev_priv->map_mode = vmw_dma_map_bind; |
599 | 597 | ||
600 | if (vmw_force_coherent) | 598 | /* No TTM coherent page pool? FIXME: Ask TTM instead! */ |
601 | dev_priv->map_mode = vmw_dma_alloc_coherent; | 599 | if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && |
602 | 600 | (dev_priv->map_mode == vmw_dma_alloc_coherent)) | |
603 | #if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU) | ||
604 | /* | ||
605 | * No coherent page pool | ||
606 | */ | ||
607 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) | ||
608 | return -EINVAL; | 601 | return -EINVAL; |
609 | #endif | ||
610 | |||
611 | #else /* CONFIG_X86 */ | ||
612 | dev_priv->map_mode = vmw_dma_map_populate; | ||
613 | #endif /* CONFIG_X86 */ | ||
614 | 602 | ||
615 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); | 603 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); |
616 | |||
617 | return 0; | 604 | return 0; |
618 | } | 605 | } |
619 | 606 | ||
@@ -625,24 +612,20 @@ out_fixup: | |||
625 | * With 32-bit we can only handle 32 bit PFNs. Optionally set that | 612 | * With 32-bit we can only handle 32 bit PFNs. Optionally set that |
626 | * restriction also for 64-bit systems. | 613 | * restriction also for 64-bit systems. |
627 | */ | 614 | */ |
628 | #ifdef CONFIG_INTEL_IOMMU | ||
629 | static int vmw_dma_masks(struct vmw_private *dev_priv) | 615 | static int vmw_dma_masks(struct vmw_private *dev_priv) |
630 | { | 616 | { |
631 | struct drm_device *dev = dev_priv->dev; | 617 | struct drm_device *dev = dev_priv->dev; |
618 | int ret = 0; | ||
632 | 619 | ||
633 | if (intel_iommu_enabled && | 620 | ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); |
621 | if (dev_priv->map_mode != vmw_dma_phys && | ||
634 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { | 622 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { |
635 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); | 623 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); |
636 | return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); | 624 | return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); |
637 | } | 625 | } |
638 | return 0; | 626 | |
639 | } | 627 | return ret; |
640 | #else | ||
641 | static int vmw_dma_masks(struct vmw_private *dev_priv) | ||
642 | { | ||
643 | return 0; | ||
644 | } | 628 | } |
645 | #endif | ||
646 | 629 | ||
647 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 630 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
648 | { | 631 | { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index f2d13a72c05d..88b8178d4687 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, | |||
3570 | *p_fence = NULL; | 3570 | *p_fence = NULL; |
3571 | } | 3571 | } |
3572 | 3572 | ||
3573 | return 0; | 3573 | return ret; |
3574 | } | 3574 | } |
3575 | 3575 | ||
3576 | /** | 3576 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b351fb5214d3..ed2f67822f45 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev, | |||
1646 | struct drm_connector_state *conn_state; | 1646 | struct drm_connector_state *conn_state; |
1647 | struct vmw_connector_state *vmw_conn_state; | 1647 | struct vmw_connector_state *vmw_conn_state; |
1648 | 1648 | ||
1649 | if (!du->pref_active) { | 1649 | if (!du->pref_active && new_crtc_state->enable) { |
1650 | ret = -EINVAL; | 1650 | ret = -EINVAL; |
1651 | goto clean; | 1651 | goto clean; |
1652 | } | 1652 | } |
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, | |||
2554 | user_fence_rep) | 2554 | user_fence_rep) |
2555 | { | 2555 | { |
2556 | struct vmw_fence_obj *fence = NULL; | 2556 | struct vmw_fence_obj *fence = NULL; |
2557 | uint32_t handle; | 2557 | uint32_t handle = 0; |
2558 | int ret; | 2558 | int ret = 0; |
2559 | 2559 | ||
2560 | if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || | 2560 | if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || |
2561 | out_fence) | 2561 | out_fence) |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 474b00e19697..0a7d4395d427 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = { | |||
898 | .cpmem_ofs = 0x1f000000, | 898 | .cpmem_ofs = 0x1f000000, |
899 | .srm_ofs = 0x1f040000, | 899 | .srm_ofs = 0x1f040000, |
900 | .tpm_ofs = 0x1f060000, | 900 | .tpm_ofs = 0x1f060000, |
901 | .csi0_ofs = 0x1f030000, | 901 | .csi0_ofs = 0x1e030000, |
902 | .csi1_ofs = 0x1f038000, | 902 | .csi1_ofs = 0x1e038000, |
903 | .ic_ofs = 0x1e020000, | 903 | .ic_ofs = 0x1e020000, |
904 | .disp0_ofs = 0x1e040000, | 904 | .disp0_ofs = 0x1e040000, |
905 | .disp1_ofs = 0x1e048000, | 905 | .disp1_ofs = 0x1e048000, |
@@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = { | |||
914 | .cpmem_ofs = 0x07000000, | 914 | .cpmem_ofs = 0x07000000, |
915 | .srm_ofs = 0x07040000, | 915 | .srm_ofs = 0x07040000, |
916 | .tpm_ofs = 0x07060000, | 916 | .tpm_ofs = 0x07060000, |
917 | .csi0_ofs = 0x07030000, | 917 | .csi0_ofs = 0x06030000, |
918 | .csi1_ofs = 0x07038000, | 918 | .csi1_ofs = 0x06038000, |
919 | .ic_ofs = 0x06020000, | 919 | .ic_ofs = 0x06020000, |
920 | .disp0_ofs = 0x06040000, | 920 | .disp0_ofs = 0x06040000, |
921 | .disp1_ofs = 0x06048000, | 921 | .disp1_ofs = 0x06048000, |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index 2f8db9d62551..4a28f3fbb0a2 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
@@ -106,6 +106,7 @@ struct ipu_pre { | |||
106 | void *buffer_virt; | 106 | void *buffer_virt; |
107 | bool in_use; | 107 | bool in_use; |
108 | unsigned int safe_window_end; | 108 | unsigned int safe_window_end; |
109 | unsigned int last_bufaddr; | ||
109 | }; | 110 | }; |
110 | 111 | ||
111 | static DEFINE_MUTEX(ipu_pre_list_mutex); | 112 | static DEFINE_MUTEX(ipu_pre_list_mutex); |
@@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
185 | 186 | ||
186 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); | 187 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); |
187 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 188 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
189 | pre->last_bufaddr = bufaddr; | ||
188 | 190 | ||
189 | val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | | 191 | val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | |
190 | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | | 192 | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | |
@@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) | |||
242 | unsigned short current_yblock; | 244 | unsigned short current_yblock; |
243 | u32 val; | 245 | u32 val; |
244 | 246 | ||
247 | if (bufaddr == pre->last_bufaddr) | ||
248 | return; | ||
249 | |||
245 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 250 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
251 | pre->last_bufaddr = bufaddr; | ||
246 | 252 | ||
247 | do { | 253 | do { |
248 | if (time_after(jiffies, timeout)) { | 254 | if (time_after(jiffies, timeout)) { |
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index c530476edba6..ac9fda1b5a72 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #include <linux/debugfs.h> | 31 | #include <linux/debugfs.h> |
32 | #include <linux/seq_file.h> | 32 | #include <linux/seq_file.h> |
33 | #include <linux/kfifo.h> | ||
33 | #include <linux/sched/signal.h> | 34 | #include <linux/sched/signal.h> |
34 | #include <linux/export.h> | 35 | #include <linux/export.h> |
35 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device); | |||
661 | /* enqueue string to 'events' ring buffer */ | 662 | /* enqueue string to 'events' ring buffer */ |
662 | void hid_debug_event(struct hid_device *hdev, char *buf) | 663 | void hid_debug_event(struct hid_device *hdev, char *buf) |
663 | { | 664 | { |
664 | unsigned i; | ||
665 | struct hid_debug_list *list; | 665 | struct hid_debug_list *list; |
666 | unsigned long flags; | 666 | unsigned long flags; |
667 | 667 | ||
668 | spin_lock_irqsave(&hdev->debug_list_lock, flags); | 668 | spin_lock_irqsave(&hdev->debug_list_lock, flags); |
669 | list_for_each_entry(list, &hdev->debug_list, node) { | 669 | list_for_each_entry(list, &hdev->debug_list, node) |
670 | for (i = 0; buf[i]; i++) | 670 | kfifo_in(&list->hid_debug_fifo, buf, strlen(buf)); |
671 | list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = | ||
672 | buf[i]; | ||
673 | list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; | ||
674 | } | ||
675 | spin_unlock_irqrestore(&hdev->debug_list_lock, flags); | 671 | spin_unlock_irqrestore(&hdev->debug_list_lock, flags); |
676 | 672 | ||
677 | wake_up_interruptible(&hdev->debug_wait); | 673 | wake_up_interruptible(&hdev->debug_wait); |
@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu | |||
722 | hid_debug_event(hdev, buf); | 718 | hid_debug_event(hdev, buf); |
723 | 719 | ||
724 | kfree(buf); | 720 | kfree(buf); |
725 | wake_up_interruptible(&hdev->debug_wait); | 721 | wake_up_interruptible(&hdev->debug_wait); |
726 | |||
727 | } | 722 | } |
728 | EXPORT_SYMBOL_GPL(hid_dump_input); | 723 | EXPORT_SYMBOL_GPL(hid_dump_input); |
729 | 724 | ||
@@ -1083,8 +1078,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) | |||
1083 | goto out; | 1078 | goto out; |
1084 | } | 1079 | } |
1085 | 1080 | ||
1086 | if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) { | 1081 | err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL); |
1087 | err = -ENOMEM; | 1082 | if (err) { |
1088 | kfree(list); | 1083 | kfree(list); |
1089 | goto out; | 1084 | goto out; |
1090 | } | 1085 | } |
@@ -1104,77 +1099,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, | |||
1104 | size_t count, loff_t *ppos) | 1099 | size_t count, loff_t *ppos) |
1105 | { | 1100 | { |
1106 | struct hid_debug_list *list = file->private_data; | 1101 | struct hid_debug_list *list = file->private_data; |
1107 | int ret = 0, len; | 1102 | int ret = 0, copied; |
1108 | DECLARE_WAITQUEUE(wait, current); | 1103 | DECLARE_WAITQUEUE(wait, current); |
1109 | 1104 | ||
1110 | mutex_lock(&list->read_mutex); | 1105 | mutex_lock(&list->read_mutex); |
1111 | while (ret == 0) { | 1106 | if (kfifo_is_empty(&list->hid_debug_fifo)) { |
1112 | if (list->head == list->tail) { | 1107 | add_wait_queue(&list->hdev->debug_wait, &wait); |
1113 | add_wait_queue(&list->hdev->debug_wait, &wait); | 1108 | set_current_state(TASK_INTERRUPTIBLE); |
1114 | set_current_state(TASK_INTERRUPTIBLE); | 1109 | |
1115 | 1110 | while (kfifo_is_empty(&list->hid_debug_fifo)) { | |
1116 | while (list->head == list->tail) { | 1111 | if (file->f_flags & O_NONBLOCK) { |
1117 | if (file->f_flags & O_NONBLOCK) { | 1112 | ret = -EAGAIN; |
1118 | ret = -EAGAIN; | 1113 | break; |
1119 | break; | 1114 | } |
1120 | } | ||
1121 | if (signal_pending(current)) { | ||
1122 | ret = -ERESTARTSYS; | ||
1123 | break; | ||
1124 | } | ||
1125 | 1115 | ||
1126 | if (!list->hdev || !list->hdev->debug) { | 1116 | if (signal_pending(current)) { |
1127 | ret = -EIO; | 1117 | ret = -ERESTARTSYS; |
1128 | set_current_state(TASK_RUNNING); | 1118 | break; |
1129 | goto out; | 1119 | } |
1130 | } | ||
1131 | 1120 | ||
1132 | /* allow O_NONBLOCK from other threads */ | 1121 | /* if list->hdev is NULL we cannot remove_wait_queue(). |
1133 | mutex_unlock(&list->read_mutex); | 1122 | * if list->hdev->debug is 0 then hid_debug_unregister() |
1134 | schedule(); | 1123 | * was already called and list->hdev is being destroyed. |
1135 | mutex_lock(&list->read_mutex); | 1124 | * if we add remove_wait_queue() here we can hit a race. |
1136 | set_current_state(TASK_INTERRUPTIBLE); | 1125 | */ |
1126 | if (!list->hdev || !list->hdev->debug) { | ||
1127 | ret = -EIO; | ||
1128 | set_current_state(TASK_RUNNING); | ||
1129 | goto out; | ||
1137 | } | 1130 | } |
1138 | 1131 | ||
1139 | set_current_state(TASK_RUNNING); | 1132 | /* allow O_NONBLOCK from other threads */ |
1140 | remove_wait_queue(&list->hdev->debug_wait, &wait); | 1133 | mutex_unlock(&list->read_mutex); |
1134 | schedule(); | ||
1135 | mutex_lock(&list->read_mutex); | ||
1136 | set_current_state(TASK_INTERRUPTIBLE); | ||
1141 | } | 1137 | } |
1142 | 1138 | ||
1143 | if (ret) | 1139 | __set_current_state(TASK_RUNNING); |
1144 | goto out; | 1140 | remove_wait_queue(&list->hdev->debug_wait, &wait); |
1145 | 1141 | ||
1146 | /* pass the ringbuffer contents to userspace */ | 1142 | if (ret) |
1147 | copy_rest: | ||
1148 | if (list->tail == list->head) | ||
1149 | goto out; | 1143 | goto out; |
1150 | if (list->tail > list->head) { | ||
1151 | len = list->tail - list->head; | ||
1152 | if (len > count) | ||
1153 | len = count; | ||
1154 | |||
1155 | if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { | ||
1156 | ret = -EFAULT; | ||
1157 | goto out; | ||
1158 | } | ||
1159 | ret += len; | ||
1160 | list->head += len; | ||
1161 | } else { | ||
1162 | len = HID_DEBUG_BUFSIZE - list->head; | ||
1163 | if (len > count) | ||
1164 | len = count; | ||
1165 | |||
1166 | if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { | ||
1167 | ret = -EFAULT; | ||
1168 | goto out; | ||
1169 | } | ||
1170 | list->head = 0; | ||
1171 | ret += len; | ||
1172 | count -= len; | ||
1173 | if (count > 0) | ||
1174 | goto copy_rest; | ||
1175 | } | ||
1176 | |||
1177 | } | 1144 | } |
1145 | |||
1146 | /* pass the fifo content to userspace, locking is not needed with only | ||
1147 | * one concurrent reader and one concurrent writer | ||
1148 | */ | ||
1149 | ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied); | ||
1150 | if (ret) | ||
1151 | goto out; | ||
1152 | ret = copied; | ||
1178 | out: | 1153 | out: |
1179 | mutex_unlock(&list->read_mutex); | 1154 | mutex_unlock(&list->read_mutex); |
1180 | return ret; | 1155 | return ret; |
@@ -1185,7 +1160,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait) | |||
1185 | struct hid_debug_list *list = file->private_data; | 1160 | struct hid_debug_list *list = file->private_data; |
1186 | 1161 | ||
1187 | poll_wait(file, &list->hdev->debug_wait, wait); | 1162 | poll_wait(file, &list->hdev->debug_wait, wait); |
1188 | if (list->head != list->tail) | 1163 | if (!kfifo_is_empty(&list->hid_debug_fifo)) |
1189 | return EPOLLIN | EPOLLRDNORM; | 1164 | return EPOLLIN | EPOLLRDNORM; |
1190 | if (!list->hdev->debug) | 1165 | if (!list->hdev->debug) |
1191 | return EPOLLERR | EPOLLHUP; | 1166 | return EPOLLERR | EPOLLHUP; |
@@ -1200,7 +1175,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file) | |||
1200 | spin_lock_irqsave(&list->hdev->debug_list_lock, flags); | 1175 | spin_lock_irqsave(&list->hdev->debug_list_lock, flags); |
1201 | list_del(&list->node); | 1176 | list_del(&list->node); |
1202 | spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); | 1177 | spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); |
1203 | kfree(list->hid_debug_buf); | 1178 | kfifo_free(&list->hid_debug_fifo); |
1204 | kfree(list); | 1179 | kfree(list); |
1205 | 1180 | ||
1206 | return 0; | 1181 | return 0; |
@@ -1246,4 +1221,3 @@ void hid_debug_exit(void) | |||
1246 | { | 1221 | { |
1247 | debugfs_remove_recursive(hid_debug_root); | 1222 | debugfs_remove_recursive(hid_debug_root); |
1248 | } | 1223 | } |
1249 | |||
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 4adec4ab7d06..59ee01f3d022 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
@@ -3594,7 +3594,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data) | |||
3594 | fan5pin |= cr1b & BIT(5); | 3594 | fan5pin |= cr1b & BIT(5); |
3595 | fan5pin |= creb & BIT(5); | 3595 | fan5pin |= creb & BIT(5); |
3596 | 3596 | ||
3597 | fan6pin = creb & BIT(3); | 3597 | fan6pin = !dsw_en && (cr2d & BIT(1)); |
3598 | fan6pin |= creb & BIT(3); | ||
3598 | 3599 | ||
3599 | pwm5pin |= cr2d & BIT(7); | 3600 | pwm5pin |= cr2d & BIT(7); |
3600 | pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); | 3601 | pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); |
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index ec6e69aa3a8e..d2fbb4bb4a43 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c | |||
@@ -183,6 +183,15 @@ static void bcm2835_i2c_start_transfer(struct bcm2835_i2c_dev *i2c_dev) | |||
183 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); | 183 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, c); |
184 | } | 184 | } |
185 | 185 | ||
186 | static void bcm2835_i2c_finish_transfer(struct bcm2835_i2c_dev *i2c_dev) | ||
187 | { | ||
188 | i2c_dev->curr_msg = NULL; | ||
189 | i2c_dev->num_msgs = 0; | ||
190 | |||
191 | i2c_dev->msg_buf = NULL; | ||
192 | i2c_dev->msg_buf_remaining = 0; | ||
193 | } | ||
194 | |||
186 | /* | 195 | /* |
187 | * Note about I2C_C_CLEAR on error: | 196 | * Note about I2C_C_CLEAR on error: |
188 | * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in | 197 | * The I2C_C_CLEAR on errors will take some time to resolve -- if you were in |
@@ -283,6 +292,9 @@ static int bcm2835_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], | |||
283 | 292 | ||
284 | time_left = wait_for_completion_timeout(&i2c_dev->completion, | 293 | time_left = wait_for_completion_timeout(&i2c_dev->completion, |
285 | adap->timeout); | 294 | adap->timeout); |
295 | |||
296 | bcm2835_i2c_finish_transfer(i2c_dev); | ||
297 | |||
286 | if (!time_left) { | 298 | if (!time_left) { |
287 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, | 299 | bcm2835_i2c_writel(i2c_dev, BCM2835_I2C_C, |
288 | BCM2835_I2C_C_CLEAR); | 300 | BCM2835_I2C_C_CLEAR); |
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index b13605718291..d917cefc5a19 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c | |||
@@ -382,8 +382,10 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) | |||
382 | * Check for the message size against FIFO depth and set the | 382 | * Check for the message size against FIFO depth and set the |
383 | * 'hold bus' bit if it is greater than FIFO depth. | 383 | * 'hold bus' bit if it is greater than FIFO depth. |
384 | */ | 384 | */ |
385 | if (id->recv_count > CDNS_I2C_FIFO_DEPTH) | 385 | if ((id->recv_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) |
386 | ctrl_reg |= CDNS_I2C_CR_HOLD; | 386 | ctrl_reg |= CDNS_I2C_CR_HOLD; |
387 | else | ||
388 | ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; | ||
387 | 389 | ||
388 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); | 390 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); |
389 | 391 | ||
@@ -440,8 +442,11 @@ static void cdns_i2c_msend(struct cdns_i2c *id) | |||
440 | * Check for the message size against FIFO depth and set the | 442 | * Check for the message size against FIFO depth and set the |
441 | * 'hold bus' bit if it is greater than FIFO depth. | 443 | * 'hold bus' bit if it is greater than FIFO depth. |
442 | */ | 444 | */ |
443 | if (id->send_count > CDNS_I2C_FIFO_DEPTH) | 445 | if ((id->send_count > CDNS_I2C_FIFO_DEPTH) || id->bus_hold_flag) |
444 | ctrl_reg |= CDNS_I2C_CR_HOLD; | 446 | ctrl_reg |= CDNS_I2C_CR_HOLD; |
447 | else | ||
448 | ctrl_reg = ctrl_reg & ~CDNS_I2C_CR_HOLD; | ||
449 | |||
445 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); | 450 | cdns_i2c_writereg(ctrl_reg, CDNS_I2C_CR_OFFSET); |
446 | 451 | ||
447 | /* Clear the interrupts in interrupt status register. */ | 452 | /* Clear the interrupts in interrupt status register. */ |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b1086bfb0465..cd9c65f3d404 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -1500,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev) | |||
1500 | return 0; | 1500 | return 0; |
1501 | } | 1501 | } |
1502 | 1502 | ||
1503 | #ifdef CONFIG_PM | 1503 | static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev) |
1504 | static int omap_i2c_runtime_suspend(struct device *dev) | ||
1505 | { | 1504 | { |
1506 | struct omap_i2c_dev *omap = dev_get_drvdata(dev); | 1505 | struct omap_i2c_dev *omap = dev_get_drvdata(dev); |
1507 | 1506 | ||
@@ -1527,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev) | |||
1527 | return 0; | 1526 | return 0; |
1528 | } | 1527 | } |
1529 | 1528 | ||
1530 | static int omap_i2c_runtime_resume(struct device *dev) | 1529 | static int __maybe_unused omap_i2c_runtime_resume(struct device *dev) |
1531 | { | 1530 | { |
1532 | struct omap_i2c_dev *omap = dev_get_drvdata(dev); | 1531 | struct omap_i2c_dev *omap = dev_get_drvdata(dev); |
1533 | 1532 | ||
@@ -1542,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev) | |||
1542 | } | 1541 | } |
1543 | 1542 | ||
1544 | static const struct dev_pm_ops omap_i2c_pm_ops = { | 1543 | static const struct dev_pm_ops omap_i2c_pm_ops = { |
1544 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, | ||
1545 | pm_runtime_force_resume) | ||
1545 | SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, | 1546 | SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, |
1546 | omap_i2c_runtime_resume, NULL) | 1547 | omap_i2c_runtime_resume, NULL) |
1547 | }; | 1548 | }; |
1548 | #define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) | ||
1549 | #else | ||
1550 | #define OMAP_I2C_PM_OPS NULL | ||
1551 | #endif /* CONFIG_PM */ | ||
1552 | 1549 | ||
1553 | static struct platform_driver omap_i2c_driver = { | 1550 | static struct platform_driver omap_i2c_driver = { |
1554 | .probe = omap_i2c_probe, | 1551 | .probe = omap_i2c_probe, |
1555 | .remove = omap_i2c_remove, | 1552 | .remove = omap_i2c_remove, |
1556 | .driver = { | 1553 | .driver = { |
1557 | .name = "omap_i2c", | 1554 | .name = "omap_i2c", |
1558 | .pm = OMAP_I2C_PM_OPS, | 1555 | .pm = &omap_i2c_pm_ops, |
1559 | .of_match_table = of_match_ptr(omap_i2c_of_match), | 1556 | .of_match_table = of_match_ptr(omap_i2c_of_match), |
1560 | }, | 1557 | }, |
1561 | }; | 1558 | }; |
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 031d568b4972..4e339cfd0c54 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c | |||
@@ -27,9 +27,18 @@ | |||
27 | #include <linux/iio/machine.h> | 27 | #include <linux/iio/machine.h> |
28 | #include <linux/iio/driver.h> | 28 | #include <linux/iio/driver.h> |
29 | 29 | ||
30 | #define AXP288_ADC_EN_MASK 0xF1 | 30 | /* |
31 | #define AXP288_ADC_TS_PIN_GPADC 0xF2 | 31 | * This mask enables all ADCs except for the battery temp-sensor (TS), that is |
32 | #define AXP288_ADC_TS_PIN_ON 0xF3 | 32 | * left as-is to avoid breaking charging on devices without a temp-sensor. |
33 | */ | ||
34 | #define AXP288_ADC_EN_MASK 0xF0 | ||
35 | #define AXP288_ADC_TS_ENABLE 0x01 | ||
36 | |||
37 | #define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) | ||
38 | #define AXP288_ADC_TS_CURRENT_OFF (0 << 0) | ||
39 | #define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) | ||
40 | #define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) | ||
41 | #define AXP288_ADC_TS_CURRENT_ON (3 << 0) | ||
33 | 42 | ||
34 | enum axp288_adc_id { | 43 | enum axp288_adc_id { |
35 | AXP288_ADC_TS, | 44 | AXP288_ADC_TS, |
@@ -44,6 +53,7 @@ enum axp288_adc_id { | |||
44 | struct axp288_adc_info { | 53 | struct axp288_adc_info { |
45 | int irq; | 54 | int irq; |
46 | struct regmap *regmap; | 55 | struct regmap *regmap; |
56 | bool ts_enabled; | ||
47 | }; | 57 | }; |
48 | 58 | ||
49 | static const struct iio_chan_spec axp288_adc_channels[] = { | 59 | static const struct iio_chan_spec axp288_adc_channels[] = { |
@@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address, | |||
115 | return IIO_VAL_INT; | 125 | return IIO_VAL_INT; |
116 | } | 126 | } |
117 | 127 | ||
118 | static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, | 128 | /* |
119 | unsigned long address) | 129 | * The current-source used for the battery temp-sensor (TS) is shared |
130 | * with the GPADC. For proper fuel-gauge and charger operation the TS | ||
131 | * current-source needs to be permanently on. But to read the GPADC we | ||
132 | * need to temporary switch the TS current-source to ondemand, so that | ||
133 | * the GPADC can use it, otherwise we will always read an all 0 value. | ||
134 | */ | ||
135 | static int axp288_adc_set_ts(struct axp288_adc_info *info, | ||
136 | unsigned int mode, unsigned long address) | ||
120 | { | 137 | { |
121 | int ret; | 138 | int ret; |
122 | 139 | ||
123 | /* channels other than GPADC do not need to switch TS pin */ | 140 | /* No need to switch the current-source if the TS pin is disabled */ |
141 | if (!info->ts_enabled) | ||
142 | return 0; | ||
143 | |||
144 | /* Channels other than GPADC do not need the current source */ | ||
124 | if (address != AXP288_GP_ADC_H) | 145 | if (address != AXP288_GP_ADC_H) |
125 | return 0; | 146 | return 0; |
126 | 147 | ||
127 | ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); | 148 | ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, |
149 | AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode); | ||
128 | if (ret) | 150 | if (ret) |
129 | return ret; | 151 | return ret; |
130 | 152 | ||
131 | /* When switching to the GPADC pin give things some time to settle */ | 153 | /* When switching to the GPADC pin give things some time to settle */ |
132 | if (mode == AXP288_ADC_TS_PIN_GPADC) | 154 | if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND) |
133 | usleep_range(6000, 10000); | 155 | usleep_range(6000, 10000); |
134 | 156 | ||
135 | return 0; | 157 | return 0; |
@@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, | |||
145 | mutex_lock(&indio_dev->mlock); | 167 | mutex_lock(&indio_dev->mlock); |
146 | switch (mask) { | 168 | switch (mask) { |
147 | case IIO_CHAN_INFO_RAW: | 169 | case IIO_CHAN_INFO_RAW: |
148 | if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, | 170 | if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND, |
149 | chan->address)) { | 171 | chan->address)) { |
150 | dev_err(&indio_dev->dev, "GPADC mode\n"); | 172 | dev_err(&indio_dev->dev, "GPADC mode\n"); |
151 | ret = -EINVAL; | 173 | ret = -EINVAL; |
152 | break; | 174 | break; |
153 | } | 175 | } |
154 | ret = axp288_adc_read_channel(val, chan->address, info->regmap); | 176 | ret = axp288_adc_read_channel(val, chan->address, info->regmap); |
155 | if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, | 177 | if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON, |
156 | chan->address)) | 178 | chan->address)) |
157 | dev_err(&indio_dev->dev, "TS pin restore\n"); | 179 | dev_err(&indio_dev->dev, "TS pin restore\n"); |
158 | break; | 180 | break; |
@@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, | |||
164 | return ret; | 186 | return ret; |
165 | } | 187 | } |
166 | 188 | ||
167 | static int axp288_adc_set_state(struct regmap *regmap) | 189 | static int axp288_adc_initialize(struct axp288_adc_info *info) |
168 | { | 190 | { |
169 | /* ADC should be always enabled for internal FG to function */ | 191 | int ret, adc_enable_val; |
170 | if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) | 192 | |
171 | return -EIO; | 193 | /* |
194 | * Determine if the TS pin is enabled and set the TS current-source | ||
195 | * accordingly. | ||
196 | */ | ||
197 | ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val); | ||
198 | if (ret) | ||
199 | return ret; | ||
200 | |||
201 | if (adc_enable_val & AXP288_ADC_TS_ENABLE) { | ||
202 | info->ts_enabled = true; | ||
203 | ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, | ||
204 | AXP288_ADC_TS_CURRENT_ON_OFF_MASK, | ||
205 | AXP288_ADC_TS_CURRENT_ON); | ||
206 | } else { | ||
207 | info->ts_enabled = false; | ||
208 | ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, | ||
209 | AXP288_ADC_TS_CURRENT_ON_OFF_MASK, | ||
210 | AXP288_ADC_TS_CURRENT_OFF); | ||
211 | } | ||
212 | if (ret) | ||
213 | return ret; | ||
172 | 214 | ||
173 | return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); | 215 | /* Turn on the ADC for all channels except TS, leave TS as is */ |
216 | return regmap_update_bits(info->regmap, AXP20X_ADC_EN1, | ||
217 | AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK); | ||
174 | } | 218 | } |
175 | 219 | ||
176 | static const struct iio_info axp288_adc_iio_info = { | 220 | static const struct iio_info axp288_adc_iio_info = { |
@@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev) | |||
200 | * Set ADC to enabled state at all time, including system suspend. | 244 | * Set ADC to enabled state at all time, including system suspend. |
201 | * otherwise internal fuel gauge functionality may be affected. | 245 | * otherwise internal fuel gauge functionality may be affected. |
202 | */ | 246 | */ |
203 | ret = axp288_adc_set_state(axp20x->regmap); | 247 | ret = axp288_adc_initialize(info); |
204 | if (ret) { | 248 | if (ret) { |
205 | dev_err(&pdev->dev, "unable to enable ADC device\n"); | 249 | dev_err(&pdev->dev, "unable to enable ADC device\n"); |
206 | return ret; | 250 | return ret; |
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 184d686ebd99..8b4568edd5cb 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c | |||
@@ -41,6 +41,7 @@ | |||
41 | 41 | ||
42 | #define ADS8688_VREF_MV 4096 | 42 | #define ADS8688_VREF_MV 4096 |
43 | #define ADS8688_REALBITS 16 | 43 | #define ADS8688_REALBITS 16 |
44 | #define ADS8688_MAX_CHANNELS 8 | ||
44 | 45 | ||
45 | /* | 46 | /* |
46 | * enum ads8688_range - ADS8688 reference voltage range | 47 | * enum ads8688_range - ADS8688 reference voltage range |
@@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p) | |||
385 | { | 386 | { |
386 | struct iio_poll_func *pf = p; | 387 | struct iio_poll_func *pf = p; |
387 | struct iio_dev *indio_dev = pf->indio_dev; | 388 | struct iio_dev *indio_dev = pf->indio_dev; |
388 | u16 buffer[8]; | 389 | u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)]; |
389 | int i, j = 0; | 390 | int i, j = 0; |
390 | 391 | ||
391 | for (i = 0; i < indio_dev->masklength; i++) { | 392 | for (i = 0; i < indio_dev->masklength; i++) { |
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c index a406ad31b096..3a20cb5d9bff 100644 --- a/drivers/iio/chemical/atlas-ph-sensor.c +++ b/drivers/iio/chemical/atlas-ph-sensor.c | |||
@@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev, | |||
444 | case IIO_CHAN_INFO_SCALE: | 444 | case IIO_CHAN_INFO_SCALE: |
445 | switch (chan->type) { | 445 | switch (chan->type) { |
446 | case IIO_TEMP: | 446 | case IIO_TEMP: |
447 | *val = 1; /* 0.01 */ | 447 | *val = 10; |
448 | *val2 = 100; | 448 | return IIO_VAL_INT; |
449 | break; | ||
450 | case IIO_PH: | 449 | case IIO_PH: |
451 | *val = 1; /* 0.001 */ | 450 | *val = 1; /* 0.001 */ |
452 | *val2 = 1000; | 451 | *val2 = 1000; |
@@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev, | |||
477 | int val, int val2, long mask) | 476 | int val, int val2, long mask) |
478 | { | 477 | { |
479 | struct atlas_data *data = iio_priv(indio_dev); | 478 | struct atlas_data *data = iio_priv(indio_dev); |
480 | __be32 reg = cpu_to_be32(val); | 479 | __be32 reg = cpu_to_be32(val / 10); |
481 | 480 | ||
482 | if (val2 != 0 || val < 0 || val > 20000) | 481 | if (val2 != 0 || val < 0 || val > 20000) |
483 | return -EINVAL; | 482 | return -EINVAL; |
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index c13c0ba30f63..d499cd61c0e8 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c | |||
@@ -783,6 +783,7 @@ void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, | |||
783 | static int c4iw_rdev_open(struct c4iw_rdev *rdev) | 783 | static int c4iw_rdev_open(struct c4iw_rdev *rdev) |
784 | { | 784 | { |
785 | int err; | 785 | int err; |
786 | unsigned int factor; | ||
786 | 787 | ||
787 | c4iw_init_dev_ucontext(rdev, &rdev->uctx); | 788 | c4iw_init_dev_ucontext(rdev, &rdev->uctx); |
788 | 789 | ||
@@ -806,8 +807,18 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev) | |||
806 | return -EINVAL; | 807 | return -EINVAL; |
807 | } | 808 | } |
808 | 809 | ||
809 | rdev->qpmask = rdev->lldi.udb_density - 1; | 810 | /* This implementation requires a sge_host_page_size <= PAGE_SIZE. */ |
810 | rdev->cqmask = rdev->lldi.ucq_density - 1; | 811 | if (rdev->lldi.sge_host_page_size > PAGE_SIZE) { |
812 | pr_err("%s: unsupported sge host page size %u\n", | ||
813 | pci_name(rdev->lldi.pdev), | ||
814 | rdev->lldi.sge_host_page_size); | ||
815 | return -EINVAL; | ||
816 | } | ||
817 | |||
818 | factor = PAGE_SIZE / rdev->lldi.sge_host_page_size; | ||
819 | rdev->qpmask = (rdev->lldi.udb_density * factor) - 1; | ||
820 | rdev->cqmask = (rdev->lldi.ucq_density * factor) - 1; | ||
821 | |||
811 | pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n", | 822 | pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u srq size %u\n", |
812 | pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, | 823 | pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start, |
813 | rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), | 824 | rdev->lldi.vr->stag.size, c4iw_num_stags(rdev), |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 31d91538bbf4..694324b37480 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -3032,7 +3032,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
3032 | { | 3032 | { |
3033 | struct srp_target_port *target = host_to_target(scmnd->device->host); | 3033 | struct srp_target_port *target = host_to_target(scmnd->device->host); |
3034 | struct srp_rdma_ch *ch; | 3034 | struct srp_rdma_ch *ch; |
3035 | int i, j; | ||
3036 | u8 status; | 3035 | u8 status; |
3037 | 3036 | ||
3038 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); | 3037 | shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); |
@@ -3044,15 +3043,6 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
3044 | if (status) | 3043 | if (status) |
3045 | return FAILED; | 3044 | return FAILED; |
3046 | 3045 | ||
3047 | for (i = 0; i < target->ch_count; i++) { | ||
3048 | ch = &target->ch[i]; | ||
3049 | for (j = 0; j < target->req_ring_size; ++j) { | ||
3050 | struct srp_request *req = &ch->req_ring[j]; | ||
3051 | |||
3052 | srp_finish_req(ch, req, scmnd->device, DID_RESET << 16); | ||
3053 | } | ||
3054 | } | ||
3055 | |||
3056 | return SUCCESS; | 3046 | return SUCCESS; |
3057 | } | 3047 | } |
3058 | 3048 | ||
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 4713957b0cbb..a878351f1643 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig | |||
@@ -420,7 +420,7 @@ config KEYBOARD_MPR121 | |||
420 | 420 | ||
421 | config KEYBOARD_SNVS_PWRKEY | 421 | config KEYBOARD_SNVS_PWRKEY |
422 | tristate "IMX SNVS Power Key Driver" | 422 | tristate "IMX SNVS Power Key Driver" |
423 | depends on SOC_IMX6SX | 423 | depends on SOC_IMX6SX || SOC_IMX7D |
424 | depends on OF | 424 | depends on OF |
425 | help | 425 | help |
426 | This is the snvs powerkey driver for the Freescale i.MX application | 426 | This is the snvs powerkey driver for the Freescale i.MX application |
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c index 312916f99597..73686c2460ce 100644 --- a/drivers/input/keyboard/cap11xx.c +++ b/drivers/input/keyboard/cap11xx.c | |||
@@ -75,9 +75,7 @@ | |||
75 | struct cap11xx_led { | 75 | struct cap11xx_led { |
76 | struct cap11xx_priv *priv; | 76 | struct cap11xx_priv *priv; |
77 | struct led_classdev cdev; | 77 | struct led_classdev cdev; |
78 | struct work_struct work; | ||
79 | u32 reg; | 78 | u32 reg; |
80 | enum led_brightness new_brightness; | ||
81 | }; | 79 | }; |
82 | #endif | 80 | #endif |
83 | 81 | ||
@@ -233,30 +231,21 @@ static void cap11xx_input_close(struct input_dev *idev) | |||
233 | } | 231 | } |
234 | 232 | ||
235 | #ifdef CONFIG_LEDS_CLASS | 233 | #ifdef CONFIG_LEDS_CLASS |
236 | static void cap11xx_led_work(struct work_struct *work) | 234 | static int cap11xx_led_set(struct led_classdev *cdev, |
235 | enum led_brightness value) | ||
237 | { | 236 | { |
238 | struct cap11xx_led *led = container_of(work, struct cap11xx_led, work); | 237 | struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); |
239 | struct cap11xx_priv *priv = led->priv; | 238 | struct cap11xx_priv *priv = led->priv; |
240 | int value = led->new_brightness; | ||
241 | 239 | ||
242 | /* | 240 | /* |
243 | * All LEDs share the same duty cycle as this is a HW limitation. | 241 | * All LEDs share the same duty cycle as this is a HW |
244 | * Brightness levels per LED are either 0 (OFF) and 1 (ON). | 242 | * limitation. Brightness levels per LED are either |
243 | * 0 (OFF) and 1 (ON). | ||
245 | */ | 244 | */ |
246 | regmap_update_bits(priv->regmap, CAP11XX_REG_LED_OUTPUT_CONTROL, | 245 | return regmap_update_bits(priv->regmap, |
247 | BIT(led->reg), value ? BIT(led->reg) : 0); | 246 | CAP11XX_REG_LED_OUTPUT_CONTROL, |
248 | } | 247 | BIT(led->reg), |
249 | 248 | value ? BIT(led->reg) : 0); | |
250 | static void cap11xx_led_set(struct led_classdev *cdev, | ||
251 | enum led_brightness value) | ||
252 | { | ||
253 | struct cap11xx_led *led = container_of(cdev, struct cap11xx_led, cdev); | ||
254 | |||
255 | if (led->new_brightness == value) | ||
256 | return; | ||
257 | |||
258 | led->new_brightness = value; | ||
259 | schedule_work(&led->work); | ||
260 | } | 249 | } |
261 | 250 | ||
262 | static int cap11xx_init_leds(struct device *dev, | 251 | static int cap11xx_init_leds(struct device *dev, |
@@ -299,7 +288,7 @@ static int cap11xx_init_leds(struct device *dev, | |||
299 | led->cdev.default_trigger = | 288 | led->cdev.default_trigger = |
300 | of_get_property(child, "linux,default-trigger", NULL); | 289 | of_get_property(child, "linux,default-trigger", NULL); |
301 | led->cdev.flags = 0; | 290 | led->cdev.flags = 0; |
302 | led->cdev.brightness_set = cap11xx_led_set; | 291 | led->cdev.brightness_set_blocking = cap11xx_led_set; |
303 | led->cdev.max_brightness = 1; | 292 | led->cdev.max_brightness = 1; |
304 | led->cdev.brightness = LED_OFF; | 293 | led->cdev.brightness = LED_OFF; |
305 | 294 | ||
@@ -312,8 +301,6 @@ static int cap11xx_init_leds(struct device *dev, | |||
312 | led->reg = reg; | 301 | led->reg = reg; |
313 | led->priv = priv; | 302 | led->priv = priv; |
314 | 303 | ||
315 | INIT_WORK(&led->work, cap11xx_led_work); | ||
316 | |||
317 | error = devm_led_classdev_register(dev, &led->cdev); | 304 | error = devm_led_classdev_register(dev, &led->cdev); |
318 | if (error) { | 305 | if (error) { |
319 | of_node_put(child); | 306 | of_node_put(child); |
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c index 403452ef00e6..3d1cb7bf5e35 100644 --- a/drivers/input/keyboard/matrix_keypad.c +++ b/drivers/input/keyboard/matrix_keypad.c | |||
@@ -222,7 +222,7 @@ static void matrix_keypad_stop(struct input_dev *dev) | |||
222 | keypad->stopped = true; | 222 | keypad->stopped = true; |
223 | spin_unlock_irq(&keypad->lock); | 223 | spin_unlock_irq(&keypad->lock); |
224 | 224 | ||
225 | flush_work(&keypad->work.work); | 225 | flush_delayed_work(&keypad->work); |
226 | /* | 226 | /* |
227 | * matrix_keypad_scan() will leave IRQs enabled; | 227 | * matrix_keypad_scan() will leave IRQs enabled; |
228 | * we should disable them now. | 228 | * we should disable them now. |
diff --git a/drivers/input/keyboard/qt2160.c b/drivers/input/keyboard/qt2160.c index 43b86482dda0..d466bc07aebb 100644 --- a/drivers/input/keyboard/qt2160.c +++ b/drivers/input/keyboard/qt2160.c | |||
@@ -58,10 +58,9 @@ static unsigned char qt2160_key2code[] = { | |||
58 | struct qt2160_led { | 58 | struct qt2160_led { |
59 | struct qt2160_data *qt2160; | 59 | struct qt2160_data *qt2160; |
60 | struct led_classdev cdev; | 60 | struct led_classdev cdev; |
61 | struct work_struct work; | ||
62 | char name[32]; | 61 | char name[32]; |
63 | int id; | 62 | int id; |
64 | enum led_brightness new_brightness; | 63 | enum led_brightness brightness; |
65 | }; | 64 | }; |
66 | #endif | 65 | #endif |
67 | 66 | ||
@@ -74,7 +73,6 @@ struct qt2160_data { | |||
74 | u16 key_matrix; | 73 | u16 key_matrix; |
75 | #ifdef CONFIG_LEDS_CLASS | 74 | #ifdef CONFIG_LEDS_CLASS |
76 | struct qt2160_led leds[QT2160_NUM_LEDS_X]; | 75 | struct qt2160_led leds[QT2160_NUM_LEDS_X]; |
77 | struct mutex led_lock; | ||
78 | #endif | 76 | #endif |
79 | }; | 77 | }; |
80 | 78 | ||
@@ -83,46 +81,39 @@ static int qt2160_write(struct i2c_client *client, u8 reg, u8 data); | |||
83 | 81 | ||
84 | #ifdef CONFIG_LEDS_CLASS | 82 | #ifdef CONFIG_LEDS_CLASS |
85 | 83 | ||
86 | static void qt2160_led_work(struct work_struct *work) | 84 | static int qt2160_led_set(struct led_classdev *cdev, |
85 | enum led_brightness value) | ||
87 | { | 86 | { |
88 | struct qt2160_led *led = container_of(work, struct qt2160_led, work); | 87 | struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev); |
89 | struct qt2160_data *qt2160 = led->qt2160; | 88 | struct qt2160_data *qt2160 = led->qt2160; |
90 | struct i2c_client *client = qt2160->client; | 89 | struct i2c_client *client = qt2160->client; |
91 | int value = led->new_brightness; | ||
92 | u32 drive, pwmen; | 90 | u32 drive, pwmen; |
93 | 91 | ||
94 | mutex_lock(&qt2160->led_lock); | 92 | if (value != led->brightness) { |
95 | 93 | drive = qt2160_read(client, QT2160_CMD_DRIVE_X); | |
96 | drive = qt2160_read(client, QT2160_CMD_DRIVE_X); | 94 | pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); |
97 | pwmen = qt2160_read(client, QT2160_CMD_PWMEN_X); | 95 | if (value != LED_OFF) { |
98 | if (value != LED_OFF) { | 96 | drive |= BIT(led->id); |
99 | drive |= (1 << led->id); | 97 | pwmen |= BIT(led->id); |
100 | pwmen |= (1 << led->id); | ||
101 | |||
102 | } else { | ||
103 | drive &= ~(1 << led->id); | ||
104 | pwmen &= ~(1 << led->id); | ||
105 | } | ||
106 | qt2160_write(client, QT2160_CMD_DRIVE_X, drive); | ||
107 | qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen); | ||
108 | 98 | ||
109 | /* | 99 | } else { |
110 | * Changing this register will change the brightness | 100 | drive &= ~BIT(led->id); |
111 | * of every LED in the qt2160. It's a HW limitation. | 101 | pwmen &= ~BIT(led->id); |
112 | */ | 102 | } |
113 | if (value != LED_OFF) | 103 | qt2160_write(client, QT2160_CMD_DRIVE_X, drive); |
114 | qt2160_write(client, QT2160_CMD_PWM_DUTY, value); | 104 | qt2160_write(client, QT2160_CMD_PWMEN_X, pwmen); |
115 | 105 | ||
116 | mutex_unlock(&qt2160->led_lock); | 106 | /* |
117 | } | 107 | * Changing this register will change the brightness |
108 | * of every LED in the qt2160. It's a HW limitation. | ||
109 | */ | ||
110 | if (value != LED_OFF) | ||
111 | qt2160_write(client, QT2160_CMD_PWM_DUTY, value); | ||
118 | 112 | ||
119 | static void qt2160_led_set(struct led_classdev *cdev, | 113 | led->brightness = value; |
120 | enum led_brightness value) | 114 | } |
121 | { | ||
122 | struct qt2160_led *led = container_of(cdev, struct qt2160_led, cdev); | ||
123 | 115 | ||
124 | led->new_brightness = value; | 116 | return 0; |
125 | schedule_work(&led->work); | ||
126 | } | 117 | } |
127 | 118 | ||
128 | #endif /* CONFIG_LEDS_CLASS */ | 119 | #endif /* CONFIG_LEDS_CLASS */ |
@@ -293,20 +284,16 @@ static int qt2160_register_leds(struct qt2160_data *qt2160) | |||
293 | int ret; | 284 | int ret; |
294 | int i; | 285 | int i; |
295 | 286 | ||
296 | mutex_init(&qt2160->led_lock); | ||
297 | |||
298 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) { | 287 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) { |
299 | struct qt2160_led *led = &qt2160->leds[i]; | 288 | struct qt2160_led *led = &qt2160->leds[i]; |
300 | 289 | ||
301 | snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); | 290 | snprintf(led->name, sizeof(led->name), "qt2160:x%d", i); |
302 | led->cdev.name = led->name; | 291 | led->cdev.name = led->name; |
303 | led->cdev.brightness_set = qt2160_led_set; | 292 | led->cdev.brightness_set_blocking = qt2160_led_set; |
304 | led->cdev.brightness = LED_OFF; | 293 | led->cdev.brightness = LED_OFF; |
305 | led->id = i; | 294 | led->id = i; |
306 | led->qt2160 = qt2160; | 295 | led->qt2160 = qt2160; |
307 | 296 | ||
308 | INIT_WORK(&led->work, qt2160_led_work); | ||
309 | |||
310 | ret = led_classdev_register(&client->dev, &led->cdev); | 297 | ret = led_classdev_register(&client->dev, &led->cdev); |
311 | if (ret < 0) | 298 | if (ret < 0) |
312 | return ret; | 299 | return ret; |
@@ -324,10 +311,8 @@ static void qt2160_unregister_leds(struct qt2160_data *qt2160) | |||
324 | { | 311 | { |
325 | int i; | 312 | int i; |
326 | 313 | ||
327 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) { | 314 | for (i = 0; i < QT2160_NUM_LEDS_X; i++) |
328 | led_classdev_unregister(&qt2160->leds[i].cdev); | 315 | led_classdev_unregister(&qt2160->leds[i].cdev); |
329 | cancel_work_sync(&qt2160->leds[i].work); | ||
330 | } | ||
331 | } | 316 | } |
332 | 317 | ||
333 | #else | 318 | #else |
diff --git a/drivers/input/keyboard/st-keyscan.c b/drivers/input/keyboard/st-keyscan.c index babcfb165e4f..3b85631fde91 100644 --- a/drivers/input/keyboard/st-keyscan.c +++ b/drivers/input/keyboard/st-keyscan.c | |||
@@ -153,6 +153,8 @@ static int keyscan_probe(struct platform_device *pdev) | |||
153 | 153 | ||
154 | input_dev->id.bustype = BUS_HOST; | 154 | input_dev->id.bustype = BUS_HOST; |
155 | 155 | ||
156 | keypad_data->input_dev = input_dev; | ||
157 | |||
156 | error = keypad_matrix_key_parse_dt(keypad_data); | 158 | error = keypad_matrix_key_parse_dt(keypad_data); |
157 | if (error) | 159 | if (error) |
158 | return error; | 160 | return error; |
@@ -168,8 +170,6 @@ static int keyscan_probe(struct platform_device *pdev) | |||
168 | 170 | ||
169 | input_set_drvdata(input_dev, keypad_data); | 171 | input_set_drvdata(input_dev, keypad_data); |
170 | 172 | ||
171 | keypad_data->input_dev = input_dev; | ||
172 | |||
173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
174 | keypad_data->base = devm_ioremap_resource(&pdev->dev, res); | 174 | keypad_data->base = devm_ioremap_resource(&pdev->dev, res); |
175 | if (IS_ERR(keypad_data->base)) | 175 | if (IS_ERR(keypad_data->base)) |
diff --git a/drivers/input/misc/apanel.c b/drivers/input/misc/apanel.c index 094bddf56755..c1e66f45d552 100644 --- a/drivers/input/misc/apanel.c +++ b/drivers/input/misc/apanel.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/io.h> | 22 | #include <linux/io.h> |
23 | #include <linux/input-polldev.h> | 23 | #include <linux/input-polldev.h> |
24 | #include <linux/i2c.h> | 24 | #include <linux/i2c.h> |
25 | #include <linux/workqueue.h> | ||
26 | #include <linux/leds.h> | 25 | #include <linux/leds.h> |
27 | 26 | ||
28 | #define APANEL_NAME "Fujitsu Application Panel" | 27 | #define APANEL_NAME "Fujitsu Application Panel" |
@@ -59,8 +58,6 @@ struct apanel { | |||
59 | struct i2c_client *client; | 58 | struct i2c_client *client; |
60 | unsigned short keymap[MAX_PANEL_KEYS]; | 59 | unsigned short keymap[MAX_PANEL_KEYS]; |
61 | u16 nkeys; | 60 | u16 nkeys; |
62 | u16 led_bits; | ||
63 | struct work_struct led_work; | ||
64 | struct led_classdev mail_led; | 61 | struct led_classdev mail_led; |
65 | }; | 62 | }; |
66 | 63 | ||
@@ -109,25 +106,13 @@ static void apanel_poll(struct input_polled_dev *ipdev) | |||
109 | report_key(idev, ap->keymap[i]); | 106 | report_key(idev, ap->keymap[i]); |
110 | } | 107 | } |
111 | 108 | ||
112 | /* Track state changes of LED */ | 109 | static int mail_led_set(struct led_classdev *led, |
113 | static void led_update(struct work_struct *work) | ||
114 | { | ||
115 | struct apanel *ap = container_of(work, struct apanel, led_work); | ||
116 | |||
117 | i2c_smbus_write_word_data(ap->client, 0x10, ap->led_bits); | ||
118 | } | ||
119 | |||
120 | static void mail_led_set(struct led_classdev *led, | ||
121 | enum led_brightness value) | 110 | enum led_brightness value) |
122 | { | 111 | { |
123 | struct apanel *ap = container_of(led, struct apanel, mail_led); | 112 | struct apanel *ap = container_of(led, struct apanel, mail_led); |
113 | u16 led_bits = value != LED_OFF ? 0x8000 : 0x0000; | ||
124 | 114 | ||
125 | if (value != LED_OFF) | 115 | return i2c_smbus_write_word_data(ap->client, 0x10, led_bits); |
126 | ap->led_bits |= 0x8000; | ||
127 | else | ||
128 | ap->led_bits &= ~0x8000; | ||
129 | |||
130 | schedule_work(&ap->led_work); | ||
131 | } | 116 | } |
132 | 117 | ||
133 | static int apanel_remove(struct i2c_client *client) | 118 | static int apanel_remove(struct i2c_client *client) |
@@ -179,7 +164,7 @@ static struct apanel apanel = { | |||
179 | }, | 164 | }, |
180 | .mail_led = { | 165 | .mail_led = { |
181 | .name = "mail:blue", | 166 | .name = "mail:blue", |
182 | .brightness_set = mail_led_set, | 167 | .brightness_set_blocking = mail_led_set, |
183 | }, | 168 | }, |
184 | }; | 169 | }; |
185 | 170 | ||
@@ -235,7 +220,6 @@ static int apanel_probe(struct i2c_client *client, | |||
235 | if (err) | 220 | if (err) |
236 | goto out3; | 221 | goto out3; |
237 | 222 | ||
238 | INIT_WORK(&ap->led_work, led_update); | ||
239 | if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { | 223 | if (device_chip[APANEL_DEV_LED] != CHIP_NONE) { |
240 | err = led_classdev_register(&client->dev, &ap->mail_led); | 224 | err = led_classdev_register(&client->dev, &ap->mail_led); |
241 | if (err) | 225 | if (err) |
diff --git a/drivers/input/misc/bma150.c b/drivers/input/misc/bma150.c index 1efcfdf9f8a8..dd9dd4e40827 100644 --- a/drivers/input/misc/bma150.c +++ b/drivers/input/misc/bma150.c | |||
@@ -481,13 +481,14 @@ static int bma150_register_input_device(struct bma150_data *bma150) | |||
481 | idev->close = bma150_irq_close; | 481 | idev->close = bma150_irq_close; |
482 | input_set_drvdata(idev, bma150); | 482 | input_set_drvdata(idev, bma150); |
483 | 483 | ||
484 | bma150->input = idev; | ||
485 | |||
484 | error = input_register_device(idev); | 486 | error = input_register_device(idev); |
485 | if (error) { | 487 | if (error) { |
486 | input_free_device(idev); | 488 | input_free_device(idev); |
487 | return error; | 489 | return error; |
488 | } | 490 | } |
489 | 491 | ||
490 | bma150->input = idev; | ||
491 | return 0; | 492 | return 0; |
492 | } | 493 | } |
493 | 494 | ||
@@ -510,15 +511,15 @@ static int bma150_register_polled_device(struct bma150_data *bma150) | |||
510 | 511 | ||
511 | bma150_init_input_device(bma150, ipoll_dev->input); | 512 | bma150_init_input_device(bma150, ipoll_dev->input); |
512 | 513 | ||
514 | bma150->input_polled = ipoll_dev; | ||
515 | bma150->input = ipoll_dev->input; | ||
516 | |||
513 | error = input_register_polled_device(ipoll_dev); | 517 | error = input_register_polled_device(ipoll_dev); |
514 | if (error) { | 518 | if (error) { |
515 | input_free_polled_device(ipoll_dev); | 519 | input_free_polled_device(ipoll_dev); |
516 | return error; | 520 | return error; |
517 | } | 521 | } |
518 | 522 | ||
519 | bma150->input_polled = ipoll_dev; | ||
520 | bma150->input = ipoll_dev->input; | ||
521 | |||
522 | return 0; | 523 | return 0; |
523 | } | 524 | } |
524 | 525 | ||
diff --git a/drivers/input/misc/pwm-vibra.c b/drivers/input/misc/pwm-vibra.c index 55da191ae550..dbb6d9e1b947 100644 --- a/drivers/input/misc/pwm-vibra.c +++ b/drivers/input/misc/pwm-vibra.c | |||
@@ -34,6 +34,7 @@ struct pwm_vibrator { | |||
34 | struct work_struct play_work; | 34 | struct work_struct play_work; |
35 | u16 level; | 35 | u16 level; |
36 | u32 direction_duty_cycle; | 36 | u32 direction_duty_cycle; |
37 | bool vcc_on; | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | static int pwm_vibrator_start(struct pwm_vibrator *vibrator) | 40 | static int pwm_vibrator_start(struct pwm_vibrator *vibrator) |
@@ -42,10 +43,13 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) | |||
42 | struct pwm_state state; | 43 | struct pwm_state state; |
43 | int err; | 44 | int err; |
44 | 45 | ||
45 | err = regulator_enable(vibrator->vcc); | 46 | if (!vibrator->vcc_on) { |
46 | if (err) { | 47 | err = regulator_enable(vibrator->vcc); |
47 | dev_err(pdev, "failed to enable regulator: %d", err); | 48 | if (err) { |
48 | return err; | 49 | dev_err(pdev, "failed to enable regulator: %d", err); |
50 | return err; | ||
51 | } | ||
52 | vibrator->vcc_on = true; | ||
49 | } | 53 | } |
50 | 54 | ||
51 | pwm_get_state(vibrator->pwm, &state); | 55 | pwm_get_state(vibrator->pwm, &state); |
@@ -76,11 +80,14 @@ static int pwm_vibrator_start(struct pwm_vibrator *vibrator) | |||
76 | 80 | ||
77 | static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) | 81 | static void pwm_vibrator_stop(struct pwm_vibrator *vibrator) |
78 | { | 82 | { |
79 | regulator_disable(vibrator->vcc); | ||
80 | |||
81 | if (vibrator->pwm_dir) | 83 | if (vibrator->pwm_dir) |
82 | pwm_disable(vibrator->pwm_dir); | 84 | pwm_disable(vibrator->pwm_dir); |
83 | pwm_disable(vibrator->pwm); | 85 | pwm_disable(vibrator->pwm); |
86 | |||
87 | if (vibrator->vcc_on) { | ||
88 | regulator_disable(vibrator->vcc); | ||
89 | vibrator->vcc_on = false; | ||
90 | } | ||
84 | } | 91 | } |
85 | 92 | ||
86 | static void pwm_vibrator_play_work(struct work_struct *work) | 93 | static void pwm_vibrator_play_work(struct work_struct *work) |
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index f322a1768fbb..225ae6980182 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
@@ -1336,7 +1336,6 @@ MODULE_DEVICE_TABLE(i2c, elan_id); | |||
1336 | static const struct acpi_device_id elan_acpi_id[] = { | 1336 | static const struct acpi_device_id elan_acpi_id[] = { |
1337 | { "ELAN0000", 0 }, | 1337 | { "ELAN0000", 0 }, |
1338 | { "ELAN0100", 0 }, | 1338 | { "ELAN0100", 0 }, |
1339 | { "ELAN0501", 0 }, | ||
1340 | { "ELAN0600", 0 }, | 1339 | { "ELAN0600", 0 }, |
1341 | { "ELAN0602", 0 }, | 1340 | { "ELAN0602", 0 }, |
1342 | { "ELAN0605", 0 }, | 1341 | { "ELAN0605", 0 }, |
@@ -1346,6 +1345,7 @@ static const struct acpi_device_id elan_acpi_id[] = { | |||
1346 | { "ELAN060C", 0 }, | 1345 | { "ELAN060C", 0 }, |
1347 | { "ELAN0611", 0 }, | 1346 | { "ELAN0611", 0 }, |
1348 | { "ELAN0612", 0 }, | 1347 | { "ELAN0612", 0 }, |
1348 | { "ELAN0617", 0 }, | ||
1349 | { "ELAN0618", 0 }, | 1349 | { "ELAN0618", 0 }, |
1350 | { "ELAN061C", 0 }, | 1350 | { "ELAN061C", 0 }, |
1351 | { "ELAN061D", 0 }, | 1351 | { "ELAN061D", 0 }, |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index 9fe075c137dc..a7f8b1614559 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -1119,6 +1119,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse, | |||
1119 | * Asus UX31 0x361f00 20, 15, 0e clickpad | 1119 | * Asus UX31 0x361f00 20, 15, 0e clickpad |
1120 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad | 1120 | * Asus UX32VD 0x361f02 00, 15, 0e clickpad |
1121 | * Avatar AVIU-145A2 0x361f00 ? clickpad | 1121 | * Avatar AVIU-145A2 0x361f00 ? clickpad |
1122 | * Fujitsu CELSIUS H760 0x570f02 40, 14, 0c 3 hw buttons (**) | ||
1123 | * Fujitsu CELSIUS H780 0x5d0f02 41, 16, 0d 3 hw buttons (**) | ||
1122 | * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons | 1124 | * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons |
1123 | * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons | 1125 | * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons |
1124 | * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons | 1126 | * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons |
@@ -1171,6 +1173,13 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = { | |||
1171 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), | 1173 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H760"), |
1172 | }, | 1174 | }, |
1173 | }, | 1175 | }, |
1176 | { | ||
1177 | /* Fujitsu H780 also has a middle button */ | ||
1178 | .matches = { | ||
1179 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), | ||
1180 | DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H780"), | ||
1181 | }, | ||
1182 | }, | ||
1174 | #endif | 1183 | #endif |
1175 | { } | 1184 | { } |
1176 | }; | 1185 | }; |
diff --git a/drivers/input/serio/ps2-gpio.c b/drivers/input/serio/ps2-gpio.c index c62cceb97bb1..5e8d8384aa2a 100644 --- a/drivers/input/serio/ps2-gpio.c +++ b/drivers/input/serio/ps2-gpio.c | |||
@@ -76,6 +76,7 @@ static void ps2_gpio_close(struct serio *serio) | |||
76 | { | 76 | { |
77 | struct ps2_gpio_data *drvdata = serio->port_data; | 77 | struct ps2_gpio_data *drvdata = serio->port_data; |
78 | 78 | ||
79 | flush_delayed_work(&drvdata->tx_work); | ||
79 | disable_irq(drvdata->irq); | 80 | disable_irq(drvdata->irq); |
80 | } | 81 | } |
81 | 82 | ||
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 1457f931218e..78188bf7e90d 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -363,7 +363,7 @@ static int dmar_map_gfx = 1; | |||
363 | static int dmar_forcedac; | 363 | static int dmar_forcedac; |
364 | static int intel_iommu_strict; | 364 | static int intel_iommu_strict; |
365 | static int intel_iommu_superpage = 1; | 365 | static int intel_iommu_superpage = 1; |
366 | static int intel_iommu_sm = 1; | 366 | static int intel_iommu_sm; |
367 | static int iommu_identity_mapping; | 367 | static int iommu_identity_mapping; |
368 | 368 | ||
369 | #define IDENTMAP_ALL 1 | 369 | #define IDENTMAP_ALL 1 |
@@ -456,9 +456,9 @@ static int __init intel_iommu_setup(char *str) | |||
456 | } else if (!strncmp(str, "sp_off", 6)) { | 456 | } else if (!strncmp(str, "sp_off", 6)) { |
457 | pr_info("Disable supported super page\n"); | 457 | pr_info("Disable supported super page\n"); |
458 | intel_iommu_superpage = 0; | 458 | intel_iommu_superpage = 0; |
459 | } else if (!strncmp(str, "sm_off", 6)) { | 459 | } else if (!strncmp(str, "sm_on", 5)) { |
460 | pr_info("Intel-IOMMU: disable scalable mode support\n"); | 460 | pr_info("Intel-IOMMU: scalable mode supported\n"); |
461 | intel_iommu_sm = 0; | 461 | intel_iommu_sm = 1; |
462 | } else if (!strncmp(str, "tboot_noforce", 13)) { | 462 | } else if (!strncmp(str, "tboot_noforce", 13)) { |
463 | printk(KERN_INFO | 463 | printk(KERN_INFO |
464 | "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); | 464 | "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 7f2a45445b00..c3aba3fc818d 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -97,9 +97,14 @@ struct its_device; | |||
97 | * The ITS structure - contains most of the infrastructure, with the | 97 | * The ITS structure - contains most of the infrastructure, with the |
98 | * top-level MSI domain, the command queue, the collections, and the | 98 | * top-level MSI domain, the command queue, the collections, and the |
99 | * list of devices writing to it. | 99 | * list of devices writing to it. |
100 | * | ||
101 | * dev_alloc_lock has to be taken for device allocations, while the | ||
102 | * spinlock must be taken to parse data structures such as the device | ||
103 | * list. | ||
100 | */ | 104 | */ |
101 | struct its_node { | 105 | struct its_node { |
102 | raw_spinlock_t lock; | 106 | raw_spinlock_t lock; |
107 | struct mutex dev_alloc_lock; | ||
103 | struct list_head entry; | 108 | struct list_head entry; |
104 | void __iomem *base; | 109 | void __iomem *base; |
105 | phys_addr_t phys_base; | 110 | phys_addr_t phys_base; |
@@ -156,6 +161,7 @@ struct its_device { | |||
156 | void *itt; | 161 | void *itt; |
157 | u32 nr_ites; | 162 | u32 nr_ites; |
158 | u32 device_id; | 163 | u32 device_id; |
164 | bool shared; | ||
159 | }; | 165 | }; |
160 | 166 | ||
161 | static struct { | 167 | static struct { |
@@ -1580,6 +1586,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) | |||
1580 | nr_irqs /= 2; | 1586 | nr_irqs /= 2; |
1581 | } while (nr_irqs > 0); | 1587 | } while (nr_irqs > 0); |
1582 | 1588 | ||
1589 | if (!nr_irqs) | ||
1590 | err = -ENOSPC; | ||
1591 | |||
1583 | if (err) | 1592 | if (err) |
1584 | goto out; | 1593 | goto out; |
1585 | 1594 | ||
@@ -2059,6 +2068,29 @@ static int __init allocate_lpi_tables(void) | |||
2059 | return 0; | 2068 | return 0; |
2060 | } | 2069 | } |
2061 | 2070 | ||
2071 | static u64 its_clear_vpend_valid(void __iomem *vlpi_base) | ||
2072 | { | ||
2073 | u32 count = 1000000; /* 1s! */ | ||
2074 | bool clean; | ||
2075 | u64 val; | ||
2076 | |||
2077 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | ||
2078 | val &= ~GICR_VPENDBASER_Valid; | ||
2079 | gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); | ||
2080 | |||
2081 | do { | ||
2082 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | ||
2083 | clean = !(val & GICR_VPENDBASER_Dirty); | ||
2084 | if (!clean) { | ||
2085 | count--; | ||
2086 | cpu_relax(); | ||
2087 | udelay(1); | ||
2088 | } | ||
2089 | } while (!clean && count); | ||
2090 | |||
2091 | return val; | ||
2092 | } | ||
2093 | |||
2062 | static void its_cpu_init_lpis(void) | 2094 | static void its_cpu_init_lpis(void) |
2063 | { | 2095 | { |
2064 | void __iomem *rbase = gic_data_rdist_rd_base(); | 2096 | void __iomem *rbase = gic_data_rdist_rd_base(); |
@@ -2144,6 +2176,30 @@ static void its_cpu_init_lpis(void) | |||
2144 | val |= GICR_CTLR_ENABLE_LPIS; | 2176 | val |= GICR_CTLR_ENABLE_LPIS; |
2145 | writel_relaxed(val, rbase + GICR_CTLR); | 2177 | writel_relaxed(val, rbase + GICR_CTLR); |
2146 | 2178 | ||
2179 | if (gic_rdists->has_vlpis) { | ||
2180 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); | ||
2181 | |||
2182 | /* | ||
2183 | * It's possible for CPU to receive VLPIs before it is | ||
2184 | * sheduled as a vPE, especially for the first CPU, and the | ||
2185 | * VLPI with INTID larger than 2^(IDbits+1) will be considered | ||
2186 | * as out of range and dropped by GIC. | ||
2187 | * So we initialize IDbits to known value to avoid VLPI drop. | ||
2188 | */ | ||
2189 | val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; | ||
2190 | pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", | ||
2191 | smp_processor_id(), val); | ||
2192 | gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); | ||
2193 | |||
2194 | /* | ||
2195 | * Also clear Valid bit of GICR_VPENDBASER, in case some | ||
2196 | * ancient programming gets left in and has possibility of | ||
2197 | * corrupting memory. | ||
2198 | */ | ||
2199 | val = its_clear_vpend_valid(vlpi_base); | ||
2200 | WARN_ON(val & GICR_VPENDBASER_Dirty); | ||
2201 | } | ||
2202 | |||
2147 | /* Make sure the GIC has seen the above */ | 2203 | /* Make sure the GIC has seen the above */ |
2148 | dsb(sy); | 2204 | dsb(sy); |
2149 | out: | 2205 | out: |
@@ -2422,6 +2478,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | |||
2422 | struct its_device *its_dev; | 2478 | struct its_device *its_dev; |
2423 | struct msi_domain_info *msi_info; | 2479 | struct msi_domain_info *msi_info; |
2424 | u32 dev_id; | 2480 | u32 dev_id; |
2481 | int err = 0; | ||
2425 | 2482 | ||
2426 | /* | 2483 | /* |
2427 | * We ignore "dev" entierely, and rely on the dev_id that has | 2484 | * We ignore "dev" entierely, and rely on the dev_id that has |
@@ -2444,6 +2501,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | |||
2444 | return -EINVAL; | 2501 | return -EINVAL; |
2445 | } | 2502 | } |
2446 | 2503 | ||
2504 | mutex_lock(&its->dev_alloc_lock); | ||
2447 | its_dev = its_find_device(its, dev_id); | 2505 | its_dev = its_find_device(its, dev_id); |
2448 | if (its_dev) { | 2506 | if (its_dev) { |
2449 | /* | 2507 | /* |
@@ -2451,18 +2509,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | |||
2451 | * another alias (PCI bridge of some sort). No need to | 2509 | * another alias (PCI bridge of some sort). No need to |
2452 | * create the device. | 2510 | * create the device. |
2453 | */ | 2511 | */ |
2512 | its_dev->shared = true; | ||
2454 | pr_debug("Reusing ITT for devID %x\n", dev_id); | 2513 | pr_debug("Reusing ITT for devID %x\n", dev_id); |
2455 | goto out; | 2514 | goto out; |
2456 | } | 2515 | } |
2457 | 2516 | ||
2458 | its_dev = its_create_device(its, dev_id, nvec, true); | 2517 | its_dev = its_create_device(its, dev_id, nvec, true); |
2459 | if (!its_dev) | 2518 | if (!its_dev) { |
2460 | return -ENOMEM; | 2519 | err = -ENOMEM; |
2520 | goto out; | ||
2521 | } | ||
2461 | 2522 | ||
2462 | pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); | 2523 | pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); |
2463 | out: | 2524 | out: |
2525 | mutex_unlock(&its->dev_alloc_lock); | ||
2464 | info->scratchpad[0].ptr = its_dev; | 2526 | info->scratchpad[0].ptr = its_dev; |
2465 | return 0; | 2527 | return err; |
2466 | } | 2528 | } |
2467 | 2529 | ||
2468 | static struct msi_domain_ops its_msi_domain_ops = { | 2530 | static struct msi_domain_ops its_msi_domain_ops = { |
@@ -2566,6 +2628,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, | |||
2566 | { | 2628 | { |
2567 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | 2629 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); |
2568 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | 2630 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
2631 | struct its_node *its = its_dev->its; | ||
2569 | int i; | 2632 | int i; |
2570 | 2633 | ||
2571 | for (i = 0; i < nr_irqs; i++) { | 2634 | for (i = 0; i < nr_irqs; i++) { |
@@ -2580,8 +2643,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, | |||
2580 | irq_domain_reset_irq_data(data); | 2643 | irq_domain_reset_irq_data(data); |
2581 | } | 2644 | } |
2582 | 2645 | ||
2583 | /* If all interrupts have been freed, start mopping the floor */ | 2646 | mutex_lock(&its->dev_alloc_lock); |
2584 | if (bitmap_empty(its_dev->event_map.lpi_map, | 2647 | |
2648 | /* | ||
2649 | * If all interrupts have been freed, start mopping the | ||
2650 | * floor. This is conditionned on the device not being shared. | ||
2651 | */ | ||
2652 | if (!its_dev->shared && | ||
2653 | bitmap_empty(its_dev->event_map.lpi_map, | ||
2585 | its_dev->event_map.nr_lpis)) { | 2654 | its_dev->event_map.nr_lpis)) { |
2586 | its_lpi_free(its_dev->event_map.lpi_map, | 2655 | its_lpi_free(its_dev->event_map.lpi_map, |
2587 | its_dev->event_map.lpi_base, | 2656 | its_dev->event_map.lpi_base, |
@@ -2593,6 +2662,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, | |||
2593 | its_free_device(its_dev); | 2662 | its_free_device(its_dev); |
2594 | } | 2663 | } |
2595 | 2664 | ||
2665 | mutex_unlock(&its->dev_alloc_lock); | ||
2666 | |||
2596 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | 2667 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); |
2597 | } | 2668 | } |
2598 | 2669 | ||
@@ -2755,26 +2826,11 @@ static void its_vpe_schedule(struct its_vpe *vpe) | |||
2755 | static void its_vpe_deschedule(struct its_vpe *vpe) | 2826 | static void its_vpe_deschedule(struct its_vpe *vpe) |
2756 | { | 2827 | { |
2757 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); | 2828 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
2758 | u32 count = 1000000; /* 1s! */ | ||
2759 | bool clean; | ||
2760 | u64 val; | 2829 | u64 val; |
2761 | 2830 | ||
2762 | /* We're being scheduled out */ | 2831 | val = its_clear_vpend_valid(vlpi_base); |
2763 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | ||
2764 | val &= ~GICR_VPENDBASER_Valid; | ||
2765 | gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); | ||
2766 | |||
2767 | do { | ||
2768 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | ||
2769 | clean = !(val & GICR_VPENDBASER_Dirty); | ||
2770 | if (!clean) { | ||
2771 | count--; | ||
2772 | cpu_relax(); | ||
2773 | udelay(1); | ||
2774 | } | ||
2775 | } while (!clean && count); | ||
2776 | 2832 | ||
2777 | if (unlikely(!clean && !count)) { | 2833 | if (unlikely(val & GICR_VPENDBASER_Dirty)) { |
2778 | pr_err_ratelimited("ITS virtual pending table not cleaning\n"); | 2834 | pr_err_ratelimited("ITS virtual pending table not cleaning\n"); |
2779 | vpe->idai = false; | 2835 | vpe->idai = false; |
2780 | vpe->pending_last = true; | 2836 | vpe->pending_last = true; |
@@ -3517,6 +3573,7 @@ static int __init its_probe_one(struct resource *res, | |||
3517 | } | 3573 | } |
3518 | 3574 | ||
3519 | raw_spin_lock_init(&its->lock); | 3575 | raw_spin_lock_init(&its->lock); |
3576 | mutex_init(&its->dev_alloc_lock); | ||
3520 | INIT_LIST_HEAD(&its->entry); | 3577 | INIT_LIST_HEAD(&its->entry); |
3521 | INIT_LIST_HEAD(&its->its_device_list); | 3578 | INIT_LIST_HEAD(&its->its_device_list); |
3522 | typer = gic_read_typer(its_base + GITS_TYPER); | 3579 | typer = gic_read_typer(its_base + GITS_TYPER); |
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 25f32e1d7764..3496b61a312a 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c | |||
@@ -34,6 +34,9 @@ | |||
34 | #define SEL_INT_PENDING (1 << 6) | 34 | #define SEL_INT_PENDING (1 << 6) |
35 | #define SEL_INT_NUM_MASK 0x3f | 35 | #define SEL_INT_NUM_MASK 0x3f |
36 | 36 | ||
37 | #define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5) | ||
38 | #define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6) | ||
39 | |||
37 | struct icu_chip_data { | 40 | struct icu_chip_data { |
38 | int nr_irqs; | 41 | int nr_irqs; |
39 | unsigned int virq_base; | 42 | unsigned int virq_base; |
@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = { | |||
190 | static const struct mmp_intc_conf mmp2_conf = { | 193 | static const struct mmp_intc_conf mmp2_conf = { |
191 | .conf_enable = 0x20, | 194 | .conf_enable = 0x20, |
192 | .conf_disable = 0x0, | 195 | .conf_disable = 0x0, |
193 | .conf_mask = 0x7f, | 196 | .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | |
197 | MMP2_ICU_INT_ROUTE_PJ4_FIQ, | ||
194 | }; | 198 | }; |
195 | 199 | ||
196 | static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) | 200 | static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) |
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 211ed6cffd10..578978711887 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c | |||
@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t) | |||
170 | spin_lock_irqsave(&timer->dev->lock, flags); | 170 | spin_lock_irqsave(&timer->dev->lock, flags); |
171 | if (timer->id >= 0) | 171 | if (timer->id >= 0) |
172 | list_move_tail(&timer->list, &timer->dev->expired); | 172 | list_move_tail(&timer->list, &timer->dev->expired); |
173 | spin_unlock_irqrestore(&timer->dev->lock, flags); | ||
174 | wake_up_interruptible(&timer->dev->wait); | 173 | wake_up_interruptible(&timer->dev->wait); |
174 | spin_unlock_irqrestore(&timer->dev->lock, flags); | ||
175 | } | 175 | } |
176 | 176 | ||
177 | static int | 177 | static int |
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c index d713271ebf7c..a64116586b4c 100644 --- a/drivers/mailbox/bcm-flexrm-mailbox.c +++ b/drivers/mailbox/bcm-flexrm-mailbox.c | |||
@@ -1396,9 +1396,9 @@ static void flexrm_shutdown(struct mbox_chan *chan) | |||
1396 | 1396 | ||
1397 | /* Clear ring flush state */ | 1397 | /* Clear ring flush state */ |
1398 | timeout = 1000; /* timeout of 1s */ | 1398 | timeout = 1000; /* timeout of 1s */ |
1399 | writel_relaxed(0x0, ring + RING_CONTROL); | 1399 | writel_relaxed(0x0, ring->regs + RING_CONTROL); |
1400 | do { | 1400 | do { |
1401 | if (!(readl_relaxed(ring + RING_FLUSH_DONE) & | 1401 | if (!(readl_relaxed(ring->regs + RING_FLUSH_DONE) & |
1402 | FLUSH_DONE_MASK)) | 1402 | FLUSH_DONE_MASK)) |
1403 | break; | 1403 | break; |
1404 | mdelay(1); | 1404 | mdelay(1); |
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c index c6a7d4582dc6..38d9df3fb199 100644 --- a/drivers/mailbox/mailbox.c +++ b/drivers/mailbox/mailbox.c | |||
@@ -310,6 +310,7 @@ int mbox_flush(struct mbox_chan *chan, unsigned long timeout) | |||
310 | 310 | ||
311 | return ret; | 311 | return ret; |
312 | } | 312 | } |
313 | EXPORT_SYMBOL_GPL(mbox_flush); | ||
313 | 314 | ||
314 | /** | 315 | /** |
315 | * mbox_request_channel - Request a mailbox channel. | 316 | * mbox_request_channel - Request a mailbox channel. |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 47d4e0d30bf0..dd538e6b2748 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) | |||
932 | if (IS_ERR(bip)) | 932 | if (IS_ERR(bip)) |
933 | return PTR_ERR(bip); | 933 | return PTR_ERR(bip); |
934 | 934 | ||
935 | tag_len = io->cc->on_disk_tag_size * bio_sectors(bio); | 935 | tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); |
936 | 936 | ||
937 | bip->bip_iter.bi_size = tag_len; | 937 | bip->bip_iter.bi_size = tag_len; |
938 | bip->bip_iter.bi_sector = io->cc->start + io->sector; | 938 | bip->bip_iter.bi_sector = io->cc->start + io->sector; |
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 4eb5f8c56535..a20531e5f3b4 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c | |||
@@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) | |||
131 | static void rq_completed(struct mapped_device *md) | 131 | static void rq_completed(struct mapped_device *md) |
132 | { | 132 | { |
133 | /* nudge anyone waiting on suspend queue */ | 133 | /* nudge anyone waiting on suspend queue */ |
134 | if (unlikely(waitqueue_active(&md->wait))) | 134 | if (unlikely(wq_has_sleeper(&md->wait))) |
135 | wake_up(&md->wait); | 135 | wake_up(&md->wait); |
136 | 136 | ||
137 | /* | 137 | /* |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ca8af21bf644..e83b63608262 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -257,6 +257,7 @@ struct pool { | |||
257 | 257 | ||
258 | spinlock_t lock; | 258 | spinlock_t lock; |
259 | struct bio_list deferred_flush_bios; | 259 | struct bio_list deferred_flush_bios; |
260 | struct bio_list deferred_flush_completions; | ||
260 | struct list_head prepared_mappings; | 261 | struct list_head prepared_mappings; |
261 | struct list_head prepared_discards; | 262 | struct list_head prepared_discards; |
262 | struct list_head prepared_discards_pt2; | 263 | struct list_head prepared_discards_pt2; |
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) | |||
956 | mempool_free(m, &m->tc->pool->mapping_pool); | 957 | mempool_free(m, &m->tc->pool->mapping_pool); |
957 | } | 958 | } |
958 | 959 | ||
960 | static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) | ||
961 | { | ||
962 | struct pool *pool = tc->pool; | ||
963 | unsigned long flags; | ||
964 | |||
965 | /* | ||
966 | * If the bio has the REQ_FUA flag set we must commit the metadata | ||
967 | * before signaling its completion. | ||
968 | */ | ||
969 | if (!bio_triggers_commit(tc, bio)) { | ||
970 | bio_endio(bio); | ||
971 | return; | ||
972 | } | ||
973 | |||
974 | /* | ||
975 | * Complete bio with an error if earlier I/O caused changes to the | ||
976 | * metadata that can't be committed, e.g, due to I/O errors on the | ||
977 | * metadata device. | ||
978 | */ | ||
979 | if (dm_thin_aborted_changes(tc->td)) { | ||
980 | bio_io_error(bio); | ||
981 | return; | ||
982 | } | ||
983 | |||
984 | /* | ||
985 | * Batch together any bios that trigger commits and then issue a | ||
986 | * single commit for them in process_deferred_bios(). | ||
987 | */ | ||
988 | spin_lock_irqsave(&pool->lock, flags); | ||
989 | bio_list_add(&pool->deferred_flush_completions, bio); | ||
990 | spin_unlock_irqrestore(&pool->lock, flags); | ||
991 | } | ||
992 | |||
959 | static void process_prepared_mapping(struct dm_thin_new_mapping *m) | 993 | static void process_prepared_mapping(struct dm_thin_new_mapping *m) |
960 | { | 994 | { |
961 | struct thin_c *tc = m->tc; | 995 | struct thin_c *tc = m->tc; |
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
988 | */ | 1022 | */ |
989 | if (bio) { | 1023 | if (bio) { |
990 | inc_remap_and_issue_cell(tc, m->cell, m->data_block); | 1024 | inc_remap_and_issue_cell(tc, m->cell, m->data_block); |
991 | bio_endio(bio); | 1025 | complete_overwrite_bio(tc, bio); |
992 | } else { | 1026 | } else { |
993 | inc_all_io_entry(tc->pool, m->cell->holder); | 1027 | inc_all_io_entry(tc->pool, m->cell->holder); |
994 | remap_and_issue(tc, m->cell->holder, m->data_block); | 1028 | remap_and_issue(tc, m->cell->holder, m->data_block); |
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool) | |||
2317 | { | 2351 | { |
2318 | unsigned long flags; | 2352 | unsigned long flags; |
2319 | struct bio *bio; | 2353 | struct bio *bio; |
2320 | struct bio_list bios; | 2354 | struct bio_list bios, bio_completions; |
2321 | struct thin_c *tc; | 2355 | struct thin_c *tc; |
2322 | 2356 | ||
2323 | tc = get_first_thin(pool); | 2357 | tc = get_first_thin(pool); |
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool) | |||
2328 | } | 2362 | } |
2329 | 2363 | ||
2330 | /* | 2364 | /* |
2331 | * If there are any deferred flush bios, we must commit | 2365 | * If there are any deferred flush bios, we must commit the metadata |
2332 | * the metadata before issuing them. | 2366 | * before issuing them or signaling their completion. |
2333 | */ | 2367 | */ |
2334 | bio_list_init(&bios); | 2368 | bio_list_init(&bios); |
2369 | bio_list_init(&bio_completions); | ||
2370 | |||
2335 | spin_lock_irqsave(&pool->lock, flags); | 2371 | spin_lock_irqsave(&pool->lock, flags); |
2336 | bio_list_merge(&bios, &pool->deferred_flush_bios); | 2372 | bio_list_merge(&bios, &pool->deferred_flush_bios); |
2337 | bio_list_init(&pool->deferred_flush_bios); | 2373 | bio_list_init(&pool->deferred_flush_bios); |
2374 | |||
2375 | bio_list_merge(&bio_completions, &pool->deferred_flush_completions); | ||
2376 | bio_list_init(&pool->deferred_flush_completions); | ||
2338 | spin_unlock_irqrestore(&pool->lock, flags); | 2377 | spin_unlock_irqrestore(&pool->lock, flags); |
2339 | 2378 | ||
2340 | if (bio_list_empty(&bios) && | 2379 | if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && |
2341 | !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) | 2380 | !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) |
2342 | return; | 2381 | return; |
2343 | 2382 | ||
2344 | if (commit(pool)) { | 2383 | if (commit(pool)) { |
2384 | bio_list_merge(&bios, &bio_completions); | ||
2385 | |||
2345 | while ((bio = bio_list_pop(&bios))) | 2386 | while ((bio = bio_list_pop(&bios))) |
2346 | bio_io_error(bio); | 2387 | bio_io_error(bio); |
2347 | return; | 2388 | return; |
2348 | } | 2389 | } |
2349 | pool->last_commit_jiffies = jiffies; | 2390 | pool->last_commit_jiffies = jiffies; |
2350 | 2391 | ||
2392 | while ((bio = bio_list_pop(&bio_completions))) | ||
2393 | bio_endio(bio); | ||
2394 | |||
2351 | while ((bio = bio_list_pop(&bios))) | 2395 | while ((bio = bio_list_pop(&bios))) |
2352 | generic_make_request(bio); | 2396 | generic_make_request(bio); |
2353 | } | 2397 | } |
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, | |||
2954 | INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); | 2998 | INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); |
2955 | spin_lock_init(&pool->lock); | 2999 | spin_lock_init(&pool->lock); |
2956 | bio_list_init(&pool->deferred_flush_bios); | 3000 | bio_list_init(&pool->deferred_flush_bios); |
3001 | bio_list_init(&pool->deferred_flush_completions); | ||
2957 | INIT_LIST_HEAD(&pool->prepared_mappings); | 3002 | INIT_LIST_HEAD(&pool->prepared_mappings); |
2958 | INIT_LIST_HEAD(&pool->prepared_discards); | 3003 | INIT_LIST_HEAD(&pool->prepared_discards); |
2959 | INIT_LIST_HEAD(&pool->prepared_discards_pt2); | 3004 | INIT_LIST_HEAD(&pool->prepared_discards_pt2); |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2b53c3841b53..515e6af9bed2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io) | |||
699 | true, duration, &io->stats_aux); | 699 | true, duration, &io->stats_aux); |
700 | 700 | ||
701 | /* nudge anyone waiting on suspend queue */ | 701 | /* nudge anyone waiting on suspend queue */ |
702 | if (unlikely(waitqueue_active(&md->wait))) | 702 | if (unlikely(wq_has_sleeper(&md->wait))) |
703 | wake_up(&md->wait); | 703 | wake_up(&md->wait); |
704 | } | 704 | } |
705 | 705 | ||
@@ -1336,7 +1336,11 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, | |||
1336 | return r; | 1336 | return r; |
1337 | } | 1337 | } |
1338 | 1338 | ||
1339 | bio_trim(clone, sector - clone->bi_iter.bi_sector, len); | 1339 | bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); |
1340 | clone->bi_iter.bi_size = to_bytes(len); | ||
1341 | |||
1342 | if (bio_integrity(bio)) | ||
1343 | bio_integrity_trim(clone); | ||
1340 | 1344 | ||
1341 | return 0; | 1345 | return 0; |
1342 | } | 1346 | } |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1d54109071cc..fa47249fa3e4 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -1863,6 +1863,20 @@ static void end_sync_read(struct bio *bio) | |||
1863 | reschedule_retry(r1_bio); | 1863 | reschedule_retry(r1_bio); |
1864 | } | 1864 | } |
1865 | 1865 | ||
1866 | static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio) | ||
1867 | { | ||
1868 | sector_t sync_blocks = 0; | ||
1869 | sector_t s = r1_bio->sector; | ||
1870 | long sectors_to_go = r1_bio->sectors; | ||
1871 | |||
1872 | /* make sure these bits don't get cleared. */ | ||
1873 | do { | ||
1874 | md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); | ||
1875 | s += sync_blocks; | ||
1876 | sectors_to_go -= sync_blocks; | ||
1877 | } while (sectors_to_go > 0); | ||
1878 | } | ||
1879 | |||
1866 | static void end_sync_write(struct bio *bio) | 1880 | static void end_sync_write(struct bio *bio) |
1867 | { | 1881 | { |
1868 | int uptodate = !bio->bi_status; | 1882 | int uptodate = !bio->bi_status; |
@@ -1874,15 +1888,7 @@ static void end_sync_write(struct bio *bio) | |||
1874 | struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; | 1888 | struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev; |
1875 | 1889 | ||
1876 | if (!uptodate) { | 1890 | if (!uptodate) { |
1877 | sector_t sync_blocks = 0; | 1891 | abort_sync_write(mddev, r1_bio); |
1878 | sector_t s = r1_bio->sector; | ||
1879 | long sectors_to_go = r1_bio->sectors; | ||
1880 | /* make sure these bits doesn't get cleared. */ | ||
1881 | do { | ||
1882 | md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1); | ||
1883 | s += sync_blocks; | ||
1884 | sectors_to_go -= sync_blocks; | ||
1885 | } while (sectors_to_go > 0); | ||
1886 | set_bit(WriteErrorSeen, &rdev->flags); | 1892 | set_bit(WriteErrorSeen, &rdev->flags); |
1887 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) | 1893 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) |
1888 | set_bit(MD_RECOVERY_NEEDED, & | 1894 | set_bit(MD_RECOVERY_NEEDED, & |
@@ -2172,8 +2178,10 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio) | |||
2172 | (i == r1_bio->read_disk || | 2178 | (i == r1_bio->read_disk || |
2173 | !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) | 2179 | !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) |
2174 | continue; | 2180 | continue; |
2175 | if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) | 2181 | if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) { |
2182 | abort_sync_write(mddev, r1_bio); | ||
2176 | continue; | 2183 | continue; |
2184 | } | ||
2177 | 2185 | ||
2178 | bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); | 2186 | bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); |
2179 | if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) | 2187 | if (test_bit(FailFast, &conf->mirrors[i].rdev->flags)) |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 1fc8ea0f519b..ca4c9cc218a2 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
@@ -401,8 +401,11 @@ static void mei_io_list_flush_cl(struct list_head *head, | |||
401 | struct mei_cl_cb *cb, *next; | 401 | struct mei_cl_cb *cb, *next; |
402 | 402 | ||
403 | list_for_each_entry_safe(cb, next, head, list) { | 403 | list_for_each_entry_safe(cb, next, head, list) { |
404 | if (cl == cb->cl) | 404 | if (cl == cb->cl) { |
405 | list_del_init(&cb->list); | 405 | list_del_init(&cb->list); |
406 | if (cb->fop_type == MEI_FOP_READ) | ||
407 | mei_io_cb_free(cb); | ||
408 | } | ||
406 | } | 409 | } |
407 | } | 410 | } |
408 | 411 | ||
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 23739a60517f..bb1ee9834a02 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
@@ -139,6 +139,8 @@ | |||
139 | #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ | 139 | #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ |
140 | #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ | 140 | #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ |
141 | 141 | ||
142 | #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ | ||
143 | |||
142 | /* | 144 | /* |
143 | * MEI HW Section | 145 | * MEI HW Section |
144 | */ | 146 | */ |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index e89497f858ae..3ab946ad3257 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
105 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, | 105 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, |
106 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, | 106 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, |
107 | 107 | ||
108 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, | ||
109 | |||
108 | /* required last entry */ | 110 | /* required last entry */ |
109 | {0, } | 111 | {0, } |
110 | }; | 112 | }; |
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 2bfa3a903bf9..744757f541be 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c | |||
@@ -47,7 +47,8 @@ | |||
47 | * @dc: Virtio device control | 47 | * @dc: Virtio device control |
48 | * @vpdev: VOP device which is the parent for this virtio device | 48 | * @vpdev: VOP device which is the parent for this virtio device |
49 | * @vr: Buffer for accessing the VRING | 49 | * @vr: Buffer for accessing the VRING |
50 | * @used: Buffer for used | 50 | * @used_virt: Virtual address of used ring |
51 | * @used: DMA address of used ring | ||
51 | * @used_size: Size of the used buffer | 52 | * @used_size: Size of the used buffer |
52 | * @reset_done: Track whether VOP reset is complete | 53 | * @reset_done: Track whether VOP reset is complete |
53 | * @virtio_cookie: Cookie returned upon requesting a interrupt | 54 | * @virtio_cookie: Cookie returned upon requesting a interrupt |
@@ -61,6 +62,7 @@ struct _vop_vdev { | |||
61 | struct mic_device_ctrl __iomem *dc; | 62 | struct mic_device_ctrl __iomem *dc; |
62 | struct vop_device *vpdev; | 63 | struct vop_device *vpdev; |
63 | void __iomem *vr[VOP_MAX_VRINGS]; | 64 | void __iomem *vr[VOP_MAX_VRINGS]; |
65 | void *used_virt[VOP_MAX_VRINGS]; | ||
64 | dma_addr_t used[VOP_MAX_VRINGS]; | 66 | dma_addr_t used[VOP_MAX_VRINGS]; |
65 | int used_size[VOP_MAX_VRINGS]; | 67 | int used_size[VOP_MAX_VRINGS]; |
66 | struct completion reset_done; | 68 | struct completion reset_done; |
@@ -260,12 +262,12 @@ static bool vop_notify(struct virtqueue *vq) | |||
260 | static void vop_del_vq(struct virtqueue *vq, int n) | 262 | static void vop_del_vq(struct virtqueue *vq, int n) |
261 | { | 263 | { |
262 | struct _vop_vdev *vdev = to_vopvdev(vq->vdev); | 264 | struct _vop_vdev *vdev = to_vopvdev(vq->vdev); |
263 | struct vring *vr = (struct vring *)(vq + 1); | ||
264 | struct vop_device *vpdev = vdev->vpdev; | 265 | struct vop_device *vpdev = vdev->vpdev; |
265 | 266 | ||
266 | dma_unmap_single(&vpdev->dev, vdev->used[n], | 267 | dma_unmap_single(&vpdev->dev, vdev->used[n], |
267 | vdev->used_size[n], DMA_BIDIRECTIONAL); | 268 | vdev->used_size[n], DMA_BIDIRECTIONAL); |
268 | free_pages((unsigned long)vr->used, get_order(vdev->used_size[n])); | 269 | free_pages((unsigned long)vdev->used_virt[n], |
270 | get_order(vdev->used_size[n])); | ||
269 | vring_del_virtqueue(vq); | 271 | vring_del_virtqueue(vq); |
270 | vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); | 272 | vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); |
271 | vdev->vr[n] = NULL; | 273 | vdev->vr[n] = NULL; |
@@ -283,6 +285,26 @@ static void vop_del_vqs(struct virtio_device *dev) | |||
283 | vop_del_vq(vq, idx++); | 285 | vop_del_vq(vq, idx++); |
284 | } | 286 | } |
285 | 287 | ||
288 | static struct virtqueue *vop_new_virtqueue(unsigned int index, | ||
289 | unsigned int num, | ||
290 | struct virtio_device *vdev, | ||
291 | bool context, | ||
292 | void *pages, | ||
293 | bool (*notify)(struct virtqueue *vq), | ||
294 | void (*callback)(struct virtqueue *vq), | ||
295 | const char *name, | ||
296 | void *used) | ||
297 | { | ||
298 | bool weak_barriers = false; | ||
299 | struct vring vring; | ||
300 | |||
301 | vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN); | ||
302 | vring.used = used; | ||
303 | |||
304 | return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context, | ||
305 | notify, callback, name); | ||
306 | } | ||
307 | |||
286 | /* | 308 | /* |
287 | * This routine will assign vring's allocated in host/io memory. Code in | 309 | * This routine will assign vring's allocated in host/io memory. Code in |
288 | * virtio_ring.c however continues to access this io memory as if it were local | 310 | * virtio_ring.c however continues to access this io memory as if it were local |
@@ -302,7 +324,6 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, | |||
302 | struct _mic_vring_info __iomem *info; | 324 | struct _mic_vring_info __iomem *info; |
303 | void *used; | 325 | void *used; |
304 | int vr_size, _vr_size, err, magic; | 326 | int vr_size, _vr_size, err, magic; |
305 | struct vring *vr; | ||
306 | u8 type = ioread8(&vdev->desc->type); | 327 | u8 type = ioread8(&vdev->desc->type); |
307 | 328 | ||
308 | if (index >= ioread8(&vdev->desc->num_vq)) | 329 | if (index >= ioread8(&vdev->desc->num_vq)) |
@@ -322,17 +343,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, | |||
322 | return ERR_PTR(-ENOMEM); | 343 | return ERR_PTR(-ENOMEM); |
323 | vdev->vr[index] = va; | 344 | vdev->vr[index] = va; |
324 | memset_io(va, 0x0, _vr_size); | 345 | memset_io(va, 0x0, _vr_size); |
325 | vq = vring_new_virtqueue( | 346 | |
326 | index, | ||
327 | le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN, | ||
328 | dev, | ||
329 | false, | ||
330 | ctx, | ||
331 | (void __force *)va, vop_notify, callback, name); | ||
332 | if (!vq) { | ||
333 | err = -ENOMEM; | ||
334 | goto unmap; | ||
335 | } | ||
336 | info = va + _vr_size; | 347 | info = va + _vr_size; |
337 | magic = ioread32(&info->magic); | 348 | magic = ioread32(&info->magic); |
338 | 349 | ||
@@ -341,18 +352,27 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, | |||
341 | goto unmap; | 352 | goto unmap; |
342 | } | 353 | } |
343 | 354 | ||
344 | /* Allocate and reassign used ring now */ | ||
345 | vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + | 355 | vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + |
346 | sizeof(struct vring_used_elem) * | 356 | sizeof(struct vring_used_elem) * |
347 | le16_to_cpu(config.num)); | 357 | le16_to_cpu(config.num)); |
348 | used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | 358 | used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
349 | get_order(vdev->used_size[index])); | 359 | get_order(vdev->used_size[index])); |
360 | vdev->used_virt[index] = used; | ||
350 | if (!used) { | 361 | if (!used) { |
351 | err = -ENOMEM; | 362 | err = -ENOMEM; |
352 | dev_err(_vop_dev(vdev), "%s %d err %d\n", | 363 | dev_err(_vop_dev(vdev), "%s %d err %d\n", |
353 | __func__, __LINE__, err); | 364 | __func__, __LINE__, err); |
354 | goto del_vq; | 365 | goto unmap; |
366 | } | ||
367 | |||
368 | vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx, | ||
369 | (void __force *)va, vop_notify, callback, | ||
370 | name, used); | ||
371 | if (!vq) { | ||
372 | err = -ENOMEM; | ||
373 | goto free_used; | ||
355 | } | 374 | } |
375 | |||
356 | vdev->used[index] = dma_map_single(&vpdev->dev, used, | 376 | vdev->used[index] = dma_map_single(&vpdev->dev, used, |
357 | vdev->used_size[index], | 377 | vdev->used_size[index], |
358 | DMA_BIDIRECTIONAL); | 378 | DMA_BIDIRECTIONAL); |
@@ -360,26 +380,17 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, | |||
360 | err = -ENOMEM; | 380 | err = -ENOMEM; |
361 | dev_err(_vop_dev(vdev), "%s %d err %d\n", | 381 | dev_err(_vop_dev(vdev), "%s %d err %d\n", |
362 | __func__, __LINE__, err); | 382 | __func__, __LINE__, err); |
363 | goto free_used; | 383 | goto del_vq; |
364 | } | 384 | } |
365 | writeq(vdev->used[index], &vqconfig->used_address); | 385 | writeq(vdev->used[index], &vqconfig->used_address); |
366 | /* | ||
367 | * To reassign the used ring here we are directly accessing | ||
368 | * struct vring_virtqueue which is a private data structure | ||
369 | * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in | ||
370 | * vring_new_virtqueue() would ensure that | ||
371 | * (&vq->vring == (struct vring *) (&vq->vq + 1)); | ||
372 | */ | ||
373 | vr = (struct vring *)(vq + 1); | ||
374 | vr->used = used; | ||
375 | 386 | ||
376 | vq->priv = vdev; | 387 | vq->priv = vdev; |
377 | return vq; | 388 | return vq; |
389 | del_vq: | ||
390 | vring_del_virtqueue(vq); | ||
378 | free_used: | 391 | free_used: |
379 | free_pages((unsigned long)used, | 392 | free_pages((unsigned long)used, |
380 | get_order(vdev->used_size[index])); | 393 | get_order(vdev->used_size[index])); |
381 | del_vq: | ||
382 | vring_del_virtqueue(vq); | ||
383 | unmap: | 394 | unmap: |
384 | vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); | 395 | vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); |
385 | return ERR_PTR(err); | 396 | return ERR_PTR(err); |
@@ -581,6 +592,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, | |||
581 | int ret = -1; | 592 | int ret = -1; |
582 | 593 | ||
583 | if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { | 594 | if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { |
595 | struct device *dev = get_device(&vdev->vdev.dev); | ||
596 | |||
584 | dev_dbg(&vpdev->dev, | 597 | dev_dbg(&vpdev->dev, |
585 | "%s %d config_change %d type %d vdev %p\n", | 598 | "%s %d config_change %d type %d vdev %p\n", |
586 | __func__, __LINE__, | 599 | __func__, __LINE__, |
@@ -592,7 +605,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, | |||
592 | iowrite8(-1, &dc->h2c_vdev_db); | 605 | iowrite8(-1, &dc->h2c_vdev_db); |
593 | if (status & VIRTIO_CONFIG_S_DRIVER_OK) | 606 | if (status & VIRTIO_CONFIG_S_DRIVER_OK) |
594 | wait_for_completion(&vdev->reset_done); | 607 | wait_for_completion(&vdev->reset_done); |
595 | put_device(&vdev->vdev.dev); | 608 | put_device(dev); |
596 | iowrite8(1, &dc->guest_ack); | 609 | iowrite8(1, &dc->guest_ack); |
597 | dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", | 610 | dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", |
598 | __func__, __LINE__, ioread8(&dc->guest_ack)); | 611 | __func__, __LINE__, ioread8(&dc->guest_ack)); |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index aef1185f383d..14f3fdb8c6bb 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
@@ -2112,7 +2112,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq) | |||
2112 | if (waiting) | 2112 | if (waiting) |
2113 | wake_up(&mq->wait); | 2113 | wake_up(&mq->wait); |
2114 | else | 2114 | else |
2115 | kblockd_schedule_work(&mq->complete_work); | 2115 | queue_work(mq->card->complete_wq, &mq->complete_work); |
2116 | 2116 | ||
2117 | return; | 2117 | return; |
2118 | } | 2118 | } |
@@ -2924,6 +2924,13 @@ static int mmc_blk_probe(struct mmc_card *card) | |||
2924 | 2924 | ||
2925 | mmc_fixup_device(card, mmc_blk_fixups); | 2925 | mmc_fixup_device(card, mmc_blk_fixups); |
2926 | 2926 | ||
2927 | card->complete_wq = alloc_workqueue("mmc_complete", | ||
2928 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); | ||
2929 | if (unlikely(!card->complete_wq)) { | ||
2930 | pr_err("Failed to create mmc completion workqueue"); | ||
2931 | return -ENOMEM; | ||
2932 | } | ||
2933 | |||
2927 | md = mmc_blk_alloc(card); | 2934 | md = mmc_blk_alloc(card); |
2928 | if (IS_ERR(md)) | 2935 | if (IS_ERR(md)) |
2929 | return PTR_ERR(md); | 2936 | return PTR_ERR(md); |
@@ -2987,6 +2994,7 @@ static void mmc_blk_remove(struct mmc_card *card) | |||
2987 | pm_runtime_put_noidle(&card->dev); | 2994 | pm_runtime_put_noidle(&card->dev); |
2988 | mmc_blk_remove_req(md); | 2995 | mmc_blk_remove_req(md); |
2989 | dev_set_drvdata(&card->dev, NULL); | 2996 | dev_set_drvdata(&card->dev, NULL); |
2997 | destroy_workqueue(card->complete_wq); | ||
2990 | } | 2998 | } |
2991 | 2999 | ||
2992 | static int _mmc_blk_suspend(struct mmc_card *card) | 3000 | static int _mmc_blk_suspend(struct mmc_card *card) |
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index f19ec60bcbdc..2eba507790e4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c | |||
@@ -1338,7 +1338,8 @@ static int meson_mmc_probe(struct platform_device *pdev) | |||
1338 | host->regs + SD_EMMC_IRQ_EN); | 1338 | host->regs + SD_EMMC_IRQ_EN); |
1339 | 1339 | ||
1340 | ret = request_threaded_irq(host->irq, meson_mmc_irq, | 1340 | ret = request_threaded_irq(host->irq, meson_mmc_irq, |
1341 | meson_mmc_irq_thread, IRQF_SHARED, NULL, host); | 1341 | meson_mmc_irq_thread, IRQF_SHARED, |
1342 | dev_name(&pdev->dev), host); | ||
1342 | if (ret) | 1343 | if (ret) |
1343 | goto err_init_clk; | 1344 | goto err_init_clk; |
1344 | 1345 | ||
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 279e326e397e..70fadc976795 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
@@ -1399,13 +1399,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev) | |||
1399 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | | 1399 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | |
1400 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; | 1400 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; |
1401 | 1401 | ||
1402 | if (host->cfg->clk_delays || host->use_new_timings) | 1402 | /* |
1403 | * Some H5 devices do not have signal traces precise enough to | ||
1404 | * use HS DDR mode for their eMMC chips. | ||
1405 | * | ||
1406 | * We still enable HS DDR modes for all the other controller | ||
1407 | * variants that support them. | ||
1408 | */ | ||
1409 | if ((host->cfg->clk_delays || host->use_new_timings) && | ||
1410 | !of_device_is_compatible(pdev->dev.of_node, | ||
1411 | "allwinner,sun50i-h5-emmc")) | ||
1403 | mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; | 1412 | mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; |
1404 | 1413 | ||
1405 | ret = mmc_of_parse(mmc); | 1414 | ret = mmc_of_parse(mmc); |
1406 | if (ret) | 1415 | if (ret) |
1407 | goto error_free_dma; | 1416 | goto error_free_dma; |
1408 | 1417 | ||
1418 | /* | ||
1419 | * If we don't support delay chains in the SoC, we can't use any | ||
1420 | * of the higher speed modes. Mask them out in case the device | ||
1421 | * tree specifies the properties for them, which gets added to | ||
1422 | * the caps by mmc_of_parse() above. | ||
1423 | */ | ||
1424 | if (!(host->cfg->clk_delays || host->use_new_timings)) { | ||
1425 | mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | | ||
1426 | MMC_CAP_1_2V_DDR | MMC_CAP_UHS); | ||
1427 | mmc->caps2 &= ~MMC_CAP2_HS200; | ||
1428 | } | ||
1429 | |||
1430 | /* TODO: This driver doesn't support HS400 mode yet */ | ||
1431 | mmc->caps2 &= ~MMC_CAP2_HS400; | ||
1432 | |||
1409 | ret = sunxi_mmc_init_host(host); | 1433 | ret = sunxi_mmc_init_host(host); |
1410 | if (ret) | 1434 | if (ret) |
1411 | goto error_free_dma; | 1435 | goto error_free_dma; |
diff --git a/drivers/mtd/devices/powernv_flash.c b/drivers/mtd/devices/powernv_flash.c index 22f753e555ac..83f88b8b5d9f 100644 --- a/drivers/mtd/devices/powernv_flash.c +++ b/drivers/mtd/devices/powernv_flash.c | |||
@@ -212,7 +212,7 @@ static int powernv_flash_set_driver_info(struct device *dev, | |||
212 | * Going to have to check what details I need to set and how to | 212 | * Going to have to check what details I need to set and how to |
213 | * get them | 213 | * get them |
214 | */ | 214 | */ |
215 | mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFn", dev->of_node); | 215 | mtd->name = devm_kasprintf(dev, GFP_KERNEL, "%pOFP", dev->of_node); |
216 | mtd->type = MTD_NORFLASH; | 216 | mtd->type = MTD_NORFLASH; |
217 | mtd->flags = MTD_WRITEABLE; | 217 | mtd->flags = MTD_WRITEABLE; |
218 | mtd->size = size; | 218 | mtd->size = size; |
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 999b705769a8..3ef01baef9b6 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c | |||
@@ -507,6 +507,7 @@ static int mtd_nvmem_add(struct mtd_info *mtd) | |||
507 | { | 507 | { |
508 | struct nvmem_config config = {}; | 508 | struct nvmem_config config = {}; |
509 | 509 | ||
510 | config.id = -1; | ||
510 | config.dev = &mtd->dev; | 511 | config.dev = &mtd->dev; |
511 | config.name = mtd->name; | 512 | config.name = mtd->name; |
512 | config.owner = THIS_MODULE; | 513 | config.owner = THIS_MODULE; |
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 60104e1079c5..37f174ccbcec 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
@@ -480,6 +480,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, | |||
480 | /* let's register it anyway to preserve ordering */ | 480 | /* let's register it anyway to preserve ordering */ |
481 | slave->offset = 0; | 481 | slave->offset = 0; |
482 | slave->mtd.size = 0; | 482 | slave->mtd.size = 0; |
483 | |||
484 | /* Initialize ->erasesize to make add_mtd_device() happy. */ | ||
485 | slave->mtd.erasesize = parent->erasesize; | ||
486 | |||
483 | printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", | 487 | printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", |
484 | part->name); | 488 | part->name); |
485 | goto out_register; | 489 | goto out_register; |
@@ -632,7 +636,6 @@ err_remove_part: | |||
632 | mutex_unlock(&mtd_partitions_mutex); | 636 | mutex_unlock(&mtd_partitions_mutex); |
633 | 637 | ||
634 | free_partition(new); | 638 | free_partition(new); |
635 | pr_info("%s:%i\n", __func__, __LINE__); | ||
636 | 639 | ||
637 | return ret; | 640 | return ret; |
638 | } | 641 | } |
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c index bd4cfac6b5aa..a4768df5083f 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c | |||
@@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this) | |||
155 | 155 | ||
156 | /* | 156 | /* |
157 | * Reset BCH here, too. We got failures otherwise :( | 157 | * Reset BCH here, too. We got failures otherwise :( |
158 | * See later BCH reset for explanation of MX23 handling | 158 | * See later BCH reset for explanation of MX23 and MX28 handling |
159 | */ | 159 | */ |
160 | ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); | 160 | ret = gpmi_reset_block(r->bch_regs, |
161 | GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); | ||
161 | if (ret) | 162 | if (ret) |
162 | goto err_out; | 163 | goto err_out; |
163 | 164 | ||
@@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this) | |||
263 | /* | 264 | /* |
264 | * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this | 265 | * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this |
265 | * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. | 266 | * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. |
266 | * On the other hand, the MX28 needs the reset, because one case has been | 267 | * and MX28. |
267 | * seen where the BCH produced ECC errors constantly after 10000 | ||
268 | * consecutive reboots. The latter case has not been seen on the MX23 | ||
269 | * yet, still we don't know if it could happen there as well. | ||
270 | */ | 268 | */ |
271 | ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); | 269 | ret = gpmi_reset_block(r->bch_regs, |
270 | GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); | ||
272 | if (ret) | 271 | if (ret) |
273 | goto err_out; | 272 | goto err_out; |
274 | 273 | ||
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index cca4b24d2ffa..839494ac457c 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c | |||
@@ -410,6 +410,7 @@ static int nand_check_wp(struct nand_chip *chip) | |||
410 | 410 | ||
411 | /** | 411 | /** |
412 | * nand_fill_oob - [INTERN] Transfer client buffer to oob | 412 | * nand_fill_oob - [INTERN] Transfer client buffer to oob |
413 | * @chip: NAND chip object | ||
413 | * @oob: oob data buffer | 414 | * @oob: oob data buffer |
414 | * @len: oob data write length | 415 | * @len: oob data write length |
415 | * @ops: oob ops structure | 416 | * @ops: oob ops structure |
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index 1b722fe9213c..19a2b563acdf 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c | |||
@@ -158,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td) | |||
158 | 158 | ||
159 | /** | 159 | /** |
160 | * read_bbt - [GENERIC] Read the bad block table starting from page | 160 | * read_bbt - [GENERIC] Read the bad block table starting from page |
161 | * @chip: NAND chip object | 161 | * @this: NAND chip object |
162 | * @buf: temporary buffer | 162 | * @buf: temporary buffer |
163 | * @page: the starting page | 163 | * @page: the starting page |
164 | * @num: the number of bbt descriptors to read | 164 | * @num: the number of bbt descriptors to read |
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 479c2f2cf17f..fa87ae28cdfe 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c | |||
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, | |||
304 | struct nand_device *nand = spinand_to_nand(spinand); | 304 | struct nand_device *nand = spinand_to_nand(spinand); |
305 | struct mtd_info *mtd = nanddev_to_mtd(nand); | 305 | struct mtd_info *mtd = nanddev_to_mtd(nand); |
306 | struct nand_page_io_req adjreq = *req; | 306 | struct nand_page_io_req adjreq = *req; |
307 | unsigned int nbytes = 0; | 307 | void *buf = spinand->databuf; |
308 | void *buf = NULL; | 308 | unsigned int nbytes; |
309 | u16 column = 0; | 309 | u16 column = 0; |
310 | int ret; | 310 | int ret; |
311 | 311 | ||
312 | memset(spinand->databuf, 0xff, | 312 | /* |
313 | nanddev_page_size(nand) + | 313 | * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset |
314 | nanddev_per_page_oobsize(nand)); | 314 | * the cache content to 0xFF (depends on vendor implementation), so we |
315 | * must fill the page cache entirely even if we only want to program | ||
316 | * the data portion of the page, otherwise we might corrupt the BBM or | ||
317 | * user data previously programmed in OOB area. | ||
318 | */ | ||
319 | nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); | ||
320 | memset(spinand->databuf, 0xff, nbytes); | ||
321 | adjreq.dataoffs = 0; | ||
322 | adjreq.datalen = nanddev_page_size(nand); | ||
323 | adjreq.databuf.out = spinand->databuf; | ||
324 | adjreq.ooblen = nanddev_per_page_oobsize(nand); | ||
325 | adjreq.ooboffs = 0; | ||
326 | adjreq.oobbuf.out = spinand->oobbuf; | ||
315 | 327 | ||
316 | if (req->datalen) { | 328 | if (req->datalen) |
317 | memcpy(spinand->databuf + req->dataoffs, req->databuf.out, | 329 | memcpy(spinand->databuf + req->dataoffs, req->databuf.out, |
318 | req->datalen); | 330 | req->datalen); |
319 | adjreq.dataoffs = 0; | ||
320 | adjreq.datalen = nanddev_page_size(nand); | ||
321 | adjreq.databuf.out = spinand->databuf; | ||
322 | nbytes = adjreq.datalen; | ||
323 | buf = spinand->databuf; | ||
324 | } | ||
325 | 331 | ||
326 | if (req->ooblen) { | 332 | if (req->ooblen) { |
327 | if (req->mode == MTD_OPS_AUTO_OOB) | 333 | if (req->mode == MTD_OPS_AUTO_OOB) |
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, | |||
332 | else | 338 | else |
333 | memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, | 339 | memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, |
334 | req->ooblen); | 340 | req->ooblen); |
335 | |||
336 | adjreq.ooblen = nanddev_per_page_oobsize(nand); | ||
337 | adjreq.ooboffs = 0; | ||
338 | nbytes += nanddev_per_page_oobsize(nand); | ||
339 | if (!buf) { | ||
340 | buf = spinand->oobbuf; | ||
341 | column = nanddev_page_size(nand); | ||
342 | } | ||
343 | } | 341 | } |
344 | 342 | ||
345 | spinand_cache_op_adjust_colum(spinand, &adjreq, &column); | 343 | spinand_cache_op_adjust_colum(spinand, &adjreq, &column); |
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, | |||
370 | 368 | ||
371 | /* | 369 | /* |
372 | * We need to use the RANDOM LOAD CACHE operation if there's | 370 | * We need to use the RANDOM LOAD CACHE operation if there's |
373 | * more than one iteration, because the LOAD operation resets | 371 | * more than one iteration, because the LOAD operation might |
374 | * the cache to 0xff. | 372 | * reset the cache to 0xff. |
375 | */ | 373 | */ |
376 | if (nbytes) { | 374 | if (nbytes) { |
377 | column = op.addr.val; | 375 | column = op.addr.val; |
@@ -1018,11 +1016,11 @@ static int spinand_init(struct spinand_device *spinand) | |||
1018 | for (i = 0; i < nand->memorg.ntargets; i++) { | 1016 | for (i = 0; i < nand->memorg.ntargets; i++) { |
1019 | ret = spinand_select_target(spinand, i); | 1017 | ret = spinand_select_target(spinand, i); |
1020 | if (ret) | 1018 | if (ret) |
1021 | goto err_free_bufs; | 1019 | goto err_manuf_cleanup; |
1022 | 1020 | ||
1023 | ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); | 1021 | ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); |
1024 | if (ret) | 1022 | if (ret) |
1025 | goto err_free_bufs; | 1023 | goto err_manuf_cleanup; |
1026 | } | 1024 | } |
1027 | 1025 | ||
1028 | ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); | 1026 | ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index edb1c023a753..21bf8ac78380 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -197,9 +197,9 @@ config VXLAN | |||
197 | 197 | ||
198 | config GENEVE | 198 | config GENEVE |
199 | tristate "Generic Network Virtualization Encapsulation" | 199 | tristate "Generic Network Virtualization Encapsulation" |
200 | depends on INET && NET_UDP_TUNNEL | 200 | depends on INET |
201 | depends on IPV6 || !IPV6 | 201 | depends on IPV6 || !IPV6 |
202 | select NET_IP_TUNNEL | 202 | select NET_UDP_TUNNEL |
203 | select GRO_CELLS | 203 | select GRO_CELLS |
204 | ---help--- | 204 | ---help--- |
205 | This allows one to create geneve virtual interfaces that provide | 205 | This allows one to create geneve virtual interfaces that provide |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 485462d3087f..537c90c8eb0a 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -1183,29 +1183,22 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
1183 | } | 1183 | } |
1184 | } | 1184 | } |
1185 | 1185 | ||
1186 | /* Link-local multicast packets should be passed to the | 1186 | /* |
1187 | * stack on the link they arrive as well as pass them to the | 1187 | * For packets determined by bond_should_deliver_exact_match() call to |
1188 | * bond-master device. These packets are mostly usable when | 1188 | * be suppressed we want to make an exception for link-local packets. |
1189 | * stack receives it with the link on which they arrive | 1189 | * This is necessary for e.g. LLDP daemons to be able to monitor |
1190 | * (e.g. LLDP) they also must be available on master. Some of | 1190 | * inactive slave links without being forced to bind to them |
1191 | * the use cases include (but are not limited to): LLDP agents | 1191 | * explicitly. |
1192 | * that must be able to operate both on enslaved interfaces as | 1192 | * |
1193 | * well as on bonds themselves; linux bridges that must be able | 1193 | * At the same time, packets that are passed to the bonding master |
1194 | * to process/pass BPDUs from attached bonds when any kind of | 1194 | * (including link-local ones) can have their originating interface |
1195 | * STP version is enabled on the network. | 1195 | * determined via PACKET_ORIGDEV socket option. |
1196 | */ | 1196 | */ |
1197 | if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) { | 1197 | if (bond_should_deliver_exact_match(skb, slave, bond)) { |
1198 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | 1198 | if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) |
1199 | 1199 | return RX_HANDLER_PASS; | |
1200 | if (nskb) { | ||
1201 | nskb->dev = bond->dev; | ||
1202 | nskb->queue_mapping = 0; | ||
1203 | netif_rx(nskb); | ||
1204 | } | ||
1205 | return RX_HANDLER_PASS; | ||
1206 | } | ||
1207 | if (bond_should_deliver_exact_match(skb, slave, bond)) | ||
1208 | return RX_HANDLER_EXACT; | 1200 | return RX_HANDLER_EXACT; |
1201 | } | ||
1209 | 1202 | ||
1210 | skb->dev = bond->dev; | 1203 | skb->dev = bond->dev; |
1211 | 1204 | ||
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 0e4bbdcc614f..c76892ac4e69 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c | |||
@@ -344,7 +344,8 @@ static void b53_set_forwarding(struct b53_device *dev, int enable) | |||
344 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); | 344 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_CTRL, mgmt); |
345 | } | 345 | } |
346 | 346 | ||
347 | static void b53_enable_vlan(struct b53_device *dev, bool enable) | 347 | static void b53_enable_vlan(struct b53_device *dev, bool enable, |
348 | bool enable_filtering) | ||
348 | { | 349 | { |
349 | u8 mgmt, vc0, vc1, vc4 = 0, vc5; | 350 | u8 mgmt, vc0, vc1, vc4 = 0, vc5; |
350 | 351 | ||
@@ -369,8 +370,13 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) | |||
369 | vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; | 370 | vc0 |= VC0_VLAN_EN | VC0_VID_CHK_EN | VC0_VID_HASH_VID; |
370 | vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; | 371 | vc1 |= VC1_RX_MCST_UNTAG_EN | VC1_RX_MCST_FWD_EN; |
371 | vc4 &= ~VC4_ING_VID_CHECK_MASK; | 372 | vc4 &= ~VC4_ING_VID_CHECK_MASK; |
372 | vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; | 373 | if (enable_filtering) { |
373 | vc5 |= VC5_DROP_VTABLE_MISS; | 374 | vc4 |= VC4_ING_VID_VIO_DROP << VC4_ING_VID_CHECK_S; |
375 | vc5 |= VC5_DROP_VTABLE_MISS; | ||
376 | } else { | ||
377 | vc4 |= VC4_ING_VID_VIO_FWD << VC4_ING_VID_CHECK_S; | ||
378 | vc5 &= ~VC5_DROP_VTABLE_MISS; | ||
379 | } | ||
374 | 380 | ||
375 | if (is5325(dev)) | 381 | if (is5325(dev)) |
376 | vc0 &= ~VC0_RESERVED_1; | 382 | vc0 &= ~VC0_RESERVED_1; |
@@ -420,6 +426,9 @@ static void b53_enable_vlan(struct b53_device *dev, bool enable) | |||
420 | } | 426 | } |
421 | 427 | ||
422 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); | 428 | b53_write8(dev, B53_CTRL_PAGE, B53_SWITCH_MODE, mgmt); |
429 | |||
430 | dev->vlan_enabled = enable; | ||
431 | dev->vlan_filtering_enabled = enable_filtering; | ||
423 | } | 432 | } |
424 | 433 | ||
425 | static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) | 434 | static int b53_set_jumbo(struct b53_device *dev, bool enable, bool allow_10_100) |
@@ -632,25 +641,35 @@ static void b53_enable_mib(struct b53_device *dev) | |||
632 | b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); | 641 | b53_write8(dev, B53_MGMT_PAGE, B53_GLOBAL_CONFIG, gc); |
633 | } | 642 | } |
634 | 643 | ||
644 | static u16 b53_default_pvid(struct b53_device *dev) | ||
645 | { | ||
646 | if (is5325(dev) || is5365(dev)) | ||
647 | return 1; | ||
648 | else | ||
649 | return 0; | ||
650 | } | ||
651 | |||
635 | int b53_configure_vlan(struct dsa_switch *ds) | 652 | int b53_configure_vlan(struct dsa_switch *ds) |
636 | { | 653 | { |
637 | struct b53_device *dev = ds->priv; | 654 | struct b53_device *dev = ds->priv; |
638 | struct b53_vlan vl = { 0 }; | 655 | struct b53_vlan vl = { 0 }; |
639 | int i; | 656 | int i, def_vid; |
657 | |||
658 | def_vid = b53_default_pvid(dev); | ||
640 | 659 | ||
641 | /* clear all vlan entries */ | 660 | /* clear all vlan entries */ |
642 | if (is5325(dev) || is5365(dev)) { | 661 | if (is5325(dev) || is5365(dev)) { |
643 | for (i = 1; i < dev->num_vlans; i++) | 662 | for (i = def_vid; i < dev->num_vlans; i++) |
644 | b53_set_vlan_entry(dev, i, &vl); | 663 | b53_set_vlan_entry(dev, i, &vl); |
645 | } else { | 664 | } else { |
646 | b53_do_vlan_op(dev, VTA_CMD_CLEAR); | 665 | b53_do_vlan_op(dev, VTA_CMD_CLEAR); |
647 | } | 666 | } |
648 | 667 | ||
649 | b53_enable_vlan(dev, false); | 668 | b53_enable_vlan(dev, false, dev->vlan_filtering_enabled); |
650 | 669 | ||
651 | b53_for_each_port(dev, i) | 670 | b53_for_each_port(dev, i) |
652 | b53_write16(dev, B53_VLAN_PAGE, | 671 | b53_write16(dev, B53_VLAN_PAGE, |
653 | B53_VLAN_PORT_DEF_TAG(i), 1); | 672 | B53_VLAN_PORT_DEF_TAG(i), def_vid); |
654 | 673 | ||
655 | if (!is5325(dev) && !is5365(dev)) | 674 | if (!is5325(dev) && !is5365(dev)) |
656 | b53_set_jumbo(dev, dev->enable_jumbo, false); | 675 | b53_set_jumbo(dev, dev->enable_jumbo, false); |
@@ -1255,6 +1274,46 @@ EXPORT_SYMBOL(b53_phylink_mac_link_up); | |||
1255 | 1274 | ||
1256 | int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) | 1275 | int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering) |
1257 | { | 1276 | { |
1277 | struct b53_device *dev = ds->priv; | ||
1278 | struct net_device *bridge_dev; | ||
1279 | unsigned int i; | ||
1280 | u16 pvid, new_pvid; | ||
1281 | |||
1282 | /* Handle the case were multiple bridges span the same switch device | ||
1283 | * and one of them has a different setting than what is being requested | ||
1284 | * which would be breaking filtering semantics for any of the other | ||
1285 | * bridge devices. | ||
1286 | */ | ||
1287 | b53_for_each_port(dev, i) { | ||
1288 | bridge_dev = dsa_to_port(ds, i)->bridge_dev; | ||
1289 | if (bridge_dev && | ||
1290 | bridge_dev != dsa_to_port(ds, port)->bridge_dev && | ||
1291 | br_vlan_enabled(bridge_dev) != vlan_filtering) { | ||
1292 | netdev_err(bridge_dev, | ||
1293 | "VLAN filtering is global to the switch!\n"); | ||
1294 | return -EINVAL; | ||
1295 | } | ||
1296 | } | ||
1297 | |||
1298 | b53_read16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), &pvid); | ||
1299 | new_pvid = pvid; | ||
1300 | if (dev->vlan_filtering_enabled && !vlan_filtering) { | ||
1301 | /* Filtering is currently enabled, use the default PVID since | ||
1302 | * the bridge does not expect tagging anymore | ||
1303 | */ | ||
1304 | dev->ports[port].pvid = pvid; | ||
1305 | new_pvid = b53_default_pvid(dev); | ||
1306 | } else if (!dev->vlan_filtering_enabled && vlan_filtering) { | ||
1307 | /* Filtering is currently disabled, restore the previous PVID */ | ||
1308 | new_pvid = dev->ports[port].pvid; | ||
1309 | } | ||
1310 | |||
1311 | if (pvid != new_pvid) | ||
1312 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), | ||
1313 | new_pvid); | ||
1314 | |||
1315 | b53_enable_vlan(dev, dev->vlan_enabled, vlan_filtering); | ||
1316 | |||
1258 | return 0; | 1317 | return 0; |
1259 | } | 1318 | } |
1260 | EXPORT_SYMBOL(b53_vlan_filtering); | 1319 | EXPORT_SYMBOL(b53_vlan_filtering); |
@@ -1270,7 +1329,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, | |||
1270 | if (vlan->vid_end > dev->num_vlans) | 1329 | if (vlan->vid_end > dev->num_vlans) |
1271 | return -ERANGE; | 1330 | return -ERANGE; |
1272 | 1331 | ||
1273 | b53_enable_vlan(dev, true); | 1332 | b53_enable_vlan(dev, true, dev->vlan_filtering_enabled); |
1274 | 1333 | ||
1275 | return 0; | 1334 | return 0; |
1276 | } | 1335 | } |
@@ -1300,7 +1359,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port, | |||
1300 | b53_fast_age_vlan(dev, vid); | 1359 | b53_fast_age_vlan(dev, vid); |
1301 | } | 1360 | } |
1302 | 1361 | ||
1303 | if (pvid) { | 1362 | if (pvid && !dsa_is_cpu_port(ds, port)) { |
1304 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), | 1363 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), |
1305 | vlan->vid_end); | 1364 | vlan->vid_end); |
1306 | b53_fast_age_vlan(dev, vid); | 1365 | b53_fast_age_vlan(dev, vid); |
@@ -1326,12 +1385,8 @@ int b53_vlan_del(struct dsa_switch *ds, int port, | |||
1326 | 1385 | ||
1327 | vl->members &= ~BIT(port); | 1386 | vl->members &= ~BIT(port); |
1328 | 1387 | ||
1329 | if (pvid == vid) { | 1388 | if (pvid == vid) |
1330 | if (is5325(dev) || is5365(dev)) | 1389 | pvid = b53_default_pvid(dev); |
1331 | pvid = 1; | ||
1332 | else | ||
1333 | pvid = 0; | ||
1334 | } | ||
1335 | 1390 | ||
1336 | if (untagged && !dsa_is_cpu_port(ds, port)) | 1391 | if (untagged && !dsa_is_cpu_port(ds, port)) |
1337 | vl->untag &= ~(BIT(port)); | 1392 | vl->untag &= ~(BIT(port)); |
@@ -1644,10 +1699,7 @@ void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br) | |||
1644 | b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); | 1699 | b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan); |
1645 | dev->ports[port].vlan_ctl_mask = pvlan; | 1700 | dev->ports[port].vlan_ctl_mask = pvlan; |
1646 | 1701 | ||
1647 | if (is5325(dev) || is5365(dev)) | 1702 | pvid = b53_default_pvid(dev); |
1648 | pvid = 1; | ||
1649 | else | ||
1650 | pvid = 0; | ||
1651 | 1703 | ||
1652 | /* Make this port join all VLANs without VLAN entries */ | 1704 | /* Make this port join all VLANs without VLAN entries */ |
1653 | if (is58xx(dev)) { | 1705 | if (is58xx(dev)) { |
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h index ec796482792d..4dc7ee38b258 100644 --- a/drivers/net/dsa/b53/b53_priv.h +++ b/drivers/net/dsa/b53/b53_priv.h | |||
@@ -91,6 +91,7 @@ enum { | |||
91 | struct b53_port { | 91 | struct b53_port { |
92 | u16 vlan_ctl_mask; | 92 | u16 vlan_ctl_mask; |
93 | struct ethtool_eee eee; | 93 | struct ethtool_eee eee; |
94 | u16 pvid; | ||
94 | }; | 95 | }; |
95 | 96 | ||
96 | struct b53_vlan { | 97 | struct b53_vlan { |
@@ -137,6 +138,8 @@ struct b53_device { | |||
137 | 138 | ||
138 | unsigned int num_vlans; | 139 | unsigned int num_vlans; |
139 | struct b53_vlan *vlans; | 140 | struct b53_vlan *vlans; |
141 | bool vlan_enabled; | ||
142 | bool vlan_filtering_enabled; | ||
140 | unsigned int num_ports; | 143 | unsigned int num_ports; |
141 | struct b53_port *ports; | 144 | struct b53_port *ports; |
142 | }; | 145 | }; |
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 90f514252987..d9c56a779c08 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c | |||
@@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev) | |||
511 | /* Clear all pending interrupts */ | 511 | /* Clear all pending interrupts */ |
512 | writel(0xffffffff, priv->regs + B53_SRAB_INTR); | 512 | writel(0xffffffff, priv->regs + B53_SRAB_INTR); |
513 | 513 | ||
514 | if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) | ||
515 | return; | ||
516 | |||
517 | for (i = 0; i < B53_N_PORTS; i++) { | 514 | for (i = 0; i < B53_N_PORTS; i++) { |
518 | port = &priv->port_intrs[i]; | 515 | port = &priv->port_intrs[i]; |
519 | 516 | ||
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 361fbde76654..14138d423cf1 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c | |||
@@ -690,7 +690,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) | |||
690 | * port, the other ones have already been disabled during | 690 | * port, the other ones have already been disabled during |
691 | * bcm_sf2_sw_setup | 691 | * bcm_sf2_sw_setup |
692 | */ | 692 | */ |
693 | for (port = 0; port < DSA_MAX_PORTS; port++) { | 693 | for (port = 0; port < ds->num_ports; port++) { |
694 | if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) | 694 | if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) |
695 | bcm_sf2_port_disable(ds, port, NULL); | 695 | bcm_sf2_port_disable(ds, port, NULL); |
696 | } | 696 | } |
@@ -726,10 +726,11 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port, | |||
726 | { | 726 | { |
727 | struct net_device *p = ds->ports[port].cpu_dp->master; | 727 | struct net_device *p = ds->ports[port].cpu_dp->master; |
728 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); | 728 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); |
729 | struct ethtool_wolinfo pwol; | 729 | struct ethtool_wolinfo pwol = { }; |
730 | 730 | ||
731 | /* Get the parent device WoL settings */ | 731 | /* Get the parent device WoL settings */ |
732 | p->ethtool_ops->get_wol(p, &pwol); | 732 | if (p->ethtool_ops->get_wol) |
733 | p->ethtool_ops->get_wol(p, &pwol); | ||
733 | 734 | ||
734 | /* Advertise the parent device supported settings */ | 735 | /* Advertise the parent device supported settings */ |
735 | wol->supported = pwol.supported; | 736 | wol->supported = pwol.supported; |
@@ -750,9 +751,10 @@ static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port, | |||
750 | struct net_device *p = ds->ports[port].cpu_dp->master; | 751 | struct net_device *p = ds->ports[port].cpu_dp->master; |
751 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); | 752 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); |
752 | s8 cpu_port = ds->ports[port].cpu_dp->index; | 753 | s8 cpu_port = ds->ports[port].cpu_dp->index; |
753 | struct ethtool_wolinfo pwol; | 754 | struct ethtool_wolinfo pwol = { }; |
754 | 755 | ||
755 | p->ethtool_ops->get_wol(p, &pwol); | 756 | if (p->ethtool_ops->get_wol) |
757 | p->ethtool_ops->get_wol(p, &pwol); | ||
756 | if (wol->wolopts & ~pwol.supported) | 758 | if (wol->wolopts & ~pwol.supported) |
757 | return -EINVAL; | 759 | return -EINVAL; |
758 | 760 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8dca2c949e73..12fd7ce3f1ff 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) | |||
261 | unsigned int sub_irq; | 261 | unsigned int sub_irq; |
262 | unsigned int n; | 262 | unsigned int n; |
263 | u16 reg; | 263 | u16 reg; |
264 | u16 ctl1; | ||
264 | int err; | 265 | int err; |
265 | 266 | ||
266 | mutex_lock(&chip->reg_lock); | 267 | mutex_lock(&chip->reg_lock); |
@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) | |||
270 | if (err) | 271 | if (err) |
271 | goto out; | 272 | goto out; |
272 | 273 | ||
273 | for (n = 0; n < chip->g1_irq.nirqs; ++n) { | 274 | do { |
274 | if (reg & (1 << n)) { | 275 | for (n = 0; n < chip->g1_irq.nirqs; ++n) { |
275 | sub_irq = irq_find_mapping(chip->g1_irq.domain, n); | 276 | if (reg & (1 << n)) { |
276 | handle_nested_irq(sub_irq); | 277 | sub_irq = irq_find_mapping(chip->g1_irq.domain, |
277 | ++nhandled; | 278 | n); |
279 | handle_nested_irq(sub_irq); | ||
280 | ++nhandled; | ||
281 | } | ||
278 | } | 282 | } |
279 | } | 283 | |
284 | mutex_lock(&chip->reg_lock); | ||
285 | err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1); | ||
286 | if (err) | ||
287 | goto unlock; | ||
288 | err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, ®); | ||
289 | unlock: | ||
290 | mutex_unlock(&chip->reg_lock); | ||
291 | if (err) | ||
292 | goto out; | ||
293 | ctl1 &= GENMASK(chip->g1_irq.nirqs, 0); | ||
294 | } while (reg & ctl1); | ||
295 | |||
280 | out: | 296 | out: |
281 | return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); | 297 | return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); |
282 | } | 298 | } |
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 5200e4bdce93..ea243840ee0f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c | |||
@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
314 | { | 314 | { |
315 | struct mv88e6xxx_chip *chip = dev_id; | 315 | struct mv88e6xxx_chip *chip = dev_id; |
316 | struct mv88e6xxx_atu_entry entry; | 316 | struct mv88e6xxx_atu_entry entry; |
317 | int spid; | ||
317 | int err; | 318 | int err; |
318 | u16 val; | 319 | u16 val; |
319 | 320 | ||
@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
336 | if (err) | 337 | if (err) |
337 | goto out; | 338 | goto out; |
338 | 339 | ||
340 | spid = entry.state; | ||
341 | |||
339 | if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { | 342 | if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { |
340 | dev_err_ratelimited(chip->dev, | 343 | dev_err_ratelimited(chip->dev, |
341 | "ATU age out violation for %pM\n", | 344 | "ATU age out violation for %pM\n", |
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
344 | 347 | ||
345 | if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { | 348 | if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { |
346 | dev_err_ratelimited(chip->dev, | 349 | dev_err_ratelimited(chip->dev, |
347 | "ATU member violation for %pM portvec %x\n", | 350 | "ATU member violation for %pM portvec %x spid %d\n", |
348 | entry.mac, entry.portvec); | 351 | entry.mac, entry.portvec, spid); |
349 | chip->ports[entry.portvec].atu_member_violation++; | 352 | chip->ports[spid].atu_member_violation++; |
350 | } | 353 | } |
351 | 354 | ||
352 | if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { | 355 | if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { |
353 | dev_err_ratelimited(chip->dev, | 356 | dev_err_ratelimited(chip->dev, |
354 | "ATU miss violation for %pM portvec %x\n", | 357 | "ATU miss violation for %pM portvec %x spid %d\n", |
355 | entry.mac, entry.portvec); | 358 | entry.mac, entry.portvec, spid); |
356 | chip->ports[entry.portvec].atu_miss_violation++; | 359 | chip->ports[spid].atu_miss_violation++; |
357 | } | 360 | } |
358 | 361 | ||
359 | if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { | 362 | if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { |
360 | dev_err_ratelimited(chip->dev, | 363 | dev_err_ratelimited(chip->dev, |
361 | "ATU full violation for %pM portvec %x\n", | 364 | "ATU full violation for %pM portvec %x spid %d\n", |
362 | entry.mac, entry.portvec); | 365 | entry.mac, entry.portvec, spid); |
363 | chip->ports[entry.portvec].atu_full_violation++; | 366 | chip->ports[spid].atu_full_violation++; |
364 | } | 367 | } |
365 | mutex_unlock(&chip->reg_lock); | 368 | mutex_unlock(&chip->reg_lock); |
366 | 369 | ||
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index a70bb1bb90e7..a6eacf2099c3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |||
@@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter) | |||
2663 | goto err_device_destroy; | 2663 | goto err_device_destroy; |
2664 | } | 2664 | } |
2665 | 2665 | ||
2666 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); | ||
2667 | /* Make sure we don't have a race with AENQ Links state handler */ | ||
2668 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) | ||
2669 | netif_carrier_on(adapter->netdev); | ||
2670 | |||
2671 | rc = ena_enable_msix_and_set_admin_interrupts(adapter, | 2666 | rc = ena_enable_msix_and_set_admin_interrupts(adapter, |
2672 | adapter->num_queues); | 2667 | adapter->num_queues); |
2673 | if (rc) { | 2668 | if (rc) { |
@@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter) | |||
2684 | } | 2679 | } |
2685 | 2680 | ||
2686 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); | 2681 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
2682 | |||
2683 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); | ||
2684 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) | ||
2685 | netif_carrier_on(adapter->netdev); | ||
2686 | |||
2687 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); | 2687 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
2688 | dev_err(&pdev->dev, | 2688 | dev_err(&pdev->dev, |
2689 | "Device reset completed successfully, Driver info: %s\n", | 2689 | "Device reset completed successfully, Driver info: %s\n", |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index dc8b6173d8d8..63870072cbbd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h | |||
@@ -45,7 +45,7 @@ | |||
45 | 45 | ||
46 | #define DRV_MODULE_VER_MAJOR 2 | 46 | #define DRV_MODULE_VER_MAJOR 2 |
47 | #define DRV_MODULE_VER_MINOR 0 | 47 | #define DRV_MODULE_VER_MINOR 0 |
48 | #define DRV_MODULE_VER_SUBMINOR 2 | 48 | #define DRV_MODULE_VER_SUBMINOR 3 |
49 | 49 | ||
50 | #define DRV_MODULE_NAME "ena" | 50 | #define DRV_MODULE_NAME "ena" |
51 | #ifndef DRV_MODULE_VERSION | 51 | #ifndef DRV_MODULE_VERSION |
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index bb41becb6609..31ff1e0d1baa 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c | |||
@@ -1335,13 +1335,11 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1335 | { | 1335 | { |
1336 | struct net_device *netdev; | 1336 | struct net_device *netdev; |
1337 | struct atl2_adapter *adapter; | 1337 | struct atl2_adapter *adapter; |
1338 | static int cards_found; | 1338 | static int cards_found = 0; |
1339 | unsigned long mmio_start; | 1339 | unsigned long mmio_start; |
1340 | int mmio_len; | 1340 | int mmio_len; |
1341 | int err; | 1341 | int err; |
1342 | 1342 | ||
1343 | cards_found = 0; | ||
1344 | |||
1345 | err = pci_enable_device(pdev); | 1343 | err = pci_enable_device(pdev); |
1346 | if (err) | 1344 | if (err) |
1347 | return err; | 1345 | return err; |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f9521d0274b7..bc3ac369cbe3 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -134,6 +134,10 @@ static void bcm_sysport_set_rx_csum(struct net_device *dev, | |||
134 | 134 | ||
135 | priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); | 135 | priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM); |
136 | reg = rxchk_readl(priv, RXCHK_CONTROL); | 136 | reg = rxchk_readl(priv, RXCHK_CONTROL); |
137 | /* Clear L2 header checks, which would prevent BPDUs | ||
138 | * from being received. | ||
139 | */ | ||
140 | reg &= ~RXCHK_L2_HDR_DIS; | ||
137 | if (priv->rx_chk_en) | 141 | if (priv->rx_chk_en) |
138 | reg |= RXCHK_EN; | 142 | reg |= RXCHK_EN; |
139 | else | 143 | else |
@@ -520,7 +524,6 @@ static void bcm_sysport_get_wol(struct net_device *dev, | |||
520 | struct ethtool_wolinfo *wol) | 524 | struct ethtool_wolinfo *wol) |
521 | { | 525 | { |
522 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 526 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
523 | u32 reg; | ||
524 | 527 | ||
525 | wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; | 528 | wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; |
526 | wol->wolopts = priv->wolopts; | 529 | wol->wolopts = priv->wolopts; |
@@ -528,11 +531,7 @@ static void bcm_sysport_get_wol(struct net_device *dev, | |||
528 | if (!(priv->wolopts & WAKE_MAGICSECURE)) | 531 | if (!(priv->wolopts & WAKE_MAGICSECURE)) |
529 | return; | 532 | return; |
530 | 533 | ||
531 | /* Return the programmed SecureOn password */ | 534 | memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); |
532 | reg = umac_readl(priv, UMAC_PSW_MS); | ||
533 | put_unaligned_be16(reg, &wol->sopass[0]); | ||
534 | reg = umac_readl(priv, UMAC_PSW_LS); | ||
535 | put_unaligned_be32(reg, &wol->sopass[2]); | ||
536 | } | 535 | } |
537 | 536 | ||
538 | static int bcm_sysport_set_wol(struct net_device *dev, | 537 | static int bcm_sysport_set_wol(struct net_device *dev, |
@@ -548,13 +547,8 @@ static int bcm_sysport_set_wol(struct net_device *dev, | |||
548 | if (wol->wolopts & ~supported) | 547 | if (wol->wolopts & ~supported) |
549 | return -EINVAL; | 548 | return -EINVAL; |
550 | 549 | ||
551 | /* Program the SecureOn password */ | 550 | if (wol->wolopts & WAKE_MAGICSECURE) |
552 | if (wol->wolopts & WAKE_MAGICSECURE) { | 551 | memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); |
553 | umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), | ||
554 | UMAC_PSW_MS); | ||
555 | umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), | ||
556 | UMAC_PSW_LS); | ||
557 | } | ||
558 | 552 | ||
559 | /* Flag the device and relevant IRQ as wakeup capable */ | 553 | /* Flag the device and relevant IRQ as wakeup capable */ |
560 | if (wol->wolopts) { | 554 | if (wol->wolopts) { |
@@ -2649,13 +2643,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) | |||
2649 | unsigned int index, i = 0; | 2643 | unsigned int index, i = 0; |
2650 | u32 reg; | 2644 | u32 reg; |
2651 | 2645 | ||
2652 | /* Password has already been programmed */ | ||
2653 | reg = umac_readl(priv, UMAC_MPD_CTRL); | 2646 | reg = umac_readl(priv, UMAC_MPD_CTRL); |
2654 | if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) | 2647 | if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) |
2655 | reg |= MPD_EN; | 2648 | reg |= MPD_EN; |
2656 | reg &= ~PSW_EN; | 2649 | reg &= ~PSW_EN; |
2657 | if (priv->wolopts & WAKE_MAGICSECURE) | 2650 | if (priv->wolopts & WAKE_MAGICSECURE) { |
2651 | /* Program the SecureOn password */ | ||
2652 | umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), | ||
2653 | UMAC_PSW_MS); | ||
2654 | umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), | ||
2655 | UMAC_PSW_LS); | ||
2658 | reg |= PSW_EN; | 2656 | reg |= PSW_EN; |
2657 | } | ||
2659 | umac_writel(priv, reg, UMAC_MPD_CTRL); | 2658 | umac_writel(priv, reg, UMAC_MPD_CTRL); |
2660 | 2659 | ||
2661 | if (priv->wolopts & WAKE_FILTER) { | 2660 | if (priv->wolopts & WAKE_FILTER) { |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 0887e6356649..0b192fea9c5d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #define __BCM_SYSPORT_H | 12 | #define __BCM_SYSPORT_H |
13 | 13 | ||
14 | #include <linux/bitmap.h> | 14 | #include <linux/bitmap.h> |
15 | #include <linux/ethtool.h> | ||
15 | #include <linux/if_vlan.h> | 16 | #include <linux/if_vlan.h> |
16 | #include <linux/net_dim.h> | 17 | #include <linux/net_dim.h> |
17 | 18 | ||
@@ -778,6 +779,7 @@ struct bcm_sysport_priv { | |||
778 | unsigned int crc_fwd:1; | 779 | unsigned int crc_fwd:1; |
779 | u16 rev; | 780 | u16 rev; |
780 | u32 wolopts; | 781 | u32 wolopts; |
782 | u8 sopass[SOPASS_MAX]; | ||
781 | unsigned int wol_irq_disabled:1; | 783 | unsigned int wol_irq_disabled:1; |
782 | 784 | ||
783 | /* MIB related fields */ | 785 | /* MIB related fields */ |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a512871176b..d95730c6e0f2 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -3903,7 +3903,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, | |||
3903 | if (len) | 3903 | if (len) |
3904 | break; | 3904 | break; |
3905 | /* on first few passes, just barely sleep */ | 3905 | /* on first few passes, just barely sleep */ |
3906 | if (i < DFLT_HWRM_CMD_TIMEOUT) | 3906 | if (i < HWRM_SHORT_TIMEOUT_COUNTER) |
3907 | usleep_range(HWRM_SHORT_MIN_TIMEOUT, | 3907 | usleep_range(HWRM_SHORT_MIN_TIMEOUT, |
3908 | HWRM_SHORT_MAX_TIMEOUT); | 3908 | HWRM_SHORT_MAX_TIMEOUT); |
3909 | else | 3909 | else |
@@ -3926,7 +3926,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, | |||
3926 | dma_rmb(); | 3926 | dma_rmb(); |
3927 | if (*valid) | 3927 | if (*valid) |
3928 | break; | 3928 | break; |
3929 | udelay(1); | 3929 | usleep_range(1, 5); |
3930 | } | 3930 | } |
3931 | 3931 | ||
3932 | if (j >= HWRM_VALID_BIT_DELAY_USEC) { | 3932 | if (j >= HWRM_VALID_BIT_DELAY_USEC) { |
@@ -4973,12 +4973,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) | |||
4973 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; | 4973 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
4974 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; | 4974 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
4975 | u32 map_idx = ring->map_idx; | 4975 | u32 map_idx = ring->map_idx; |
4976 | unsigned int vector; | ||
4976 | 4977 | ||
4978 | vector = bp->irq_tbl[map_idx].vector; | ||
4979 | disable_irq_nosync(vector); | ||
4977 | rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); | 4980 | rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); |
4978 | if (rc) | 4981 | if (rc) { |
4982 | enable_irq(vector); | ||
4979 | goto err_out; | 4983 | goto err_out; |
4984 | } | ||
4980 | bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); | 4985 | bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); |
4981 | bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); | 4986 | bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); |
4987 | enable_irq(vector); | ||
4982 | bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; | 4988 | bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; |
4983 | 4989 | ||
4984 | if (!i) { | 4990 | if (!i) { |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index a451796deefe..2fb653e0048d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
@@ -582,7 +582,7 @@ struct nqe_cn { | |||
582 | (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ | 582 | (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ |
583 | ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) | 583 | ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) |
584 | 584 | ||
585 | #define HWRM_VALID_BIT_DELAY_USEC 20 | 585 | #define HWRM_VALID_BIT_DELAY_USEC 150 |
586 | 586 | ||
587 | #define BNXT_HWRM_CHNL_CHIMP 0 | 587 | #define BNXT_HWRM_CHNL_CHIMP 0 |
588 | #define BNXT_HWRM_CHNL_KONG 1 | 588 | #define BNXT_HWRM_CHNL_KONG 1 |
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 5db9f4158e62..134ae2862efa 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c | |||
@@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, | |||
1288 | * for transmits, we just free buffers. | 1288 | * for transmits, we just free buffers. |
1289 | */ | 1289 | */ |
1290 | 1290 | ||
1291 | dev_kfree_skb_irq(sb); | 1291 | dev_consume_skb_irq(sb); |
1292 | 1292 | ||
1293 | /* | 1293 | /* |
1294 | * .. and advance to the next buffer. | 1294 | * .. and advance to the next buffer. |
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 5f03199a3acf..05f4a3b21e29 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig | |||
@@ -54,7 +54,6 @@ config CAVIUM_PTP | |||
54 | tristate "Cavium PTP coprocessor as PTP clock" | 54 | tristate "Cavium PTP coprocessor as PTP clock" |
55 | depends on 64BIT && PCI | 55 | depends on 64BIT && PCI |
56 | imply PTP_1588_CLOCK | 56 | imply PTP_1588_CLOCK |
57 | default y | ||
58 | ---help--- | 57 | ---help--- |
59 | This driver adds support for the Precision Time Protocol Clocks and | 58 | This driver adds support for the Precision Time Protocol Clocks and |
60 | Timestamping coprocessor (PTP) found on Cavium processors. | 59 | Timestamping coprocessor (PTP) found on Cavium processors. |
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index f4d81765221e..62636c1ed141 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h | |||
@@ -271,7 +271,7 @@ struct xcast_addr_list { | |||
271 | }; | 271 | }; |
272 | 272 | ||
273 | struct nicvf_work { | 273 | struct nicvf_work { |
274 | struct delayed_work work; | 274 | struct work_struct work; |
275 | u8 mode; | 275 | u8 mode; |
276 | struct xcast_addr_list *mc; | 276 | struct xcast_addr_list *mc; |
277 | }; | 277 | }; |
@@ -327,7 +327,11 @@ struct nicvf { | |||
327 | struct nicvf_work rx_mode_work; | 327 | struct nicvf_work rx_mode_work; |
328 | /* spinlock to protect workqueue arguments from concurrent access */ | 328 | /* spinlock to protect workqueue arguments from concurrent access */ |
329 | spinlock_t rx_mode_wq_lock; | 329 | spinlock_t rx_mode_wq_lock; |
330 | 330 | /* workqueue for handling kernel ndo_set_rx_mode() calls */ | |
331 | struct workqueue_struct *nicvf_rx_mode_wq; | ||
332 | /* mutex to protect VF's mailbox contents from concurrent access */ | ||
333 | struct mutex rx_mode_mtx; | ||
334 | struct delayed_work link_change_work; | ||
331 | /* PTP timestamp */ | 335 | /* PTP timestamp */ |
332 | struct cavium_ptp *ptp_clock; | 336 | struct cavium_ptp *ptp_clock; |
333 | /* Inbound timestamping is on */ | 337 | /* Inbound timestamping is on */ |
@@ -575,10 +579,8 @@ struct set_ptp { | |||
575 | 579 | ||
576 | struct xcast { | 580 | struct xcast { |
577 | u8 msg; | 581 | u8 msg; |
578 | union { | 582 | u8 mode; |
579 | u8 mode; | 583 | u64 mac:48; |
580 | u64 mac; | ||
581 | } data; | ||
582 | }; | 584 | }; |
583 | 585 | ||
584 | /* 128 bit shared memory between PF and each VF */ | 586 | /* 128 bit shared memory between PF and each VF */ |
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 6c8dcb65ff03..c90252829ed3 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c | |||
@@ -57,14 +57,8 @@ struct nicpf { | |||
57 | #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) | 57 | #define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF) |
58 | #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) | 58 | #define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF) |
59 | u8 *vf_lmac_map; | 59 | u8 *vf_lmac_map; |
60 | struct delayed_work dwork; | ||
61 | struct workqueue_struct *check_link; | ||
62 | u8 *link; | ||
63 | u8 *duplex; | ||
64 | u32 *speed; | ||
65 | u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; | 60 | u16 cpi_base[MAX_NUM_VFS_SUPPORTED]; |
66 | u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; | 61 | u16 rssi_base[MAX_NUM_VFS_SUPPORTED]; |
67 | bool mbx_lock[MAX_NUM_VFS_SUPPORTED]; | ||
68 | 62 | ||
69 | /* MSI-X */ | 63 | /* MSI-X */ |
70 | u8 num_vec; | 64 | u8 num_vec; |
@@ -929,6 +923,35 @@ static void nic_config_timestamp(struct nicpf *nic, int vf, struct set_ptp *ptp) | |||
929 | nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val); | 923 | nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (pkind_idx << 3), pkind_val); |
930 | } | 924 | } |
931 | 925 | ||
926 | /* Get BGX LMAC link status and update corresponding VF | ||
927 | * if there is a change, valid only if internal L2 switch | ||
928 | * is not present otherwise VF link is always treated as up | ||
929 | */ | ||
930 | static void nic_link_status_get(struct nicpf *nic, u8 vf) | ||
931 | { | ||
932 | union nic_mbx mbx = {}; | ||
933 | struct bgx_link_status link; | ||
934 | u8 bgx, lmac; | ||
935 | |||
936 | mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; | ||
937 | |||
938 | /* Get BGX, LMAC indices for the VF */ | ||
939 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
940 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
941 | |||
942 | /* Get interface link status */ | ||
943 | bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); | ||
944 | |||
945 | /* Send a mbox message to VF with current link status */ | ||
946 | mbx.link_status.link_up = link.link_up; | ||
947 | mbx.link_status.duplex = link.duplex; | ||
948 | mbx.link_status.speed = link.speed; | ||
949 | mbx.link_status.mac_type = link.mac_type; | ||
950 | |||
951 | /* reply with link status */ | ||
952 | nic_send_msg_to_vf(nic, vf, &mbx); | ||
953 | } | ||
954 | |||
932 | /* Interrupt handler to handle mailbox messages from VFs */ | 955 | /* Interrupt handler to handle mailbox messages from VFs */ |
933 | static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | 956 | static void nic_handle_mbx_intr(struct nicpf *nic, int vf) |
934 | { | 957 | { |
@@ -941,8 +964,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
941 | int i; | 964 | int i; |
942 | int ret = 0; | 965 | int ret = 0; |
943 | 966 | ||
944 | nic->mbx_lock[vf] = true; | ||
945 | |||
946 | mbx_addr = nic_get_mbx_addr(vf); | 967 | mbx_addr = nic_get_mbx_addr(vf); |
947 | mbx_data = (u64 *)&mbx; | 968 | mbx_data = (u64 *)&mbx; |
948 | 969 | ||
@@ -957,12 +978,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
957 | switch (mbx.msg.msg) { | 978 | switch (mbx.msg.msg) { |
958 | case NIC_MBOX_MSG_READY: | 979 | case NIC_MBOX_MSG_READY: |
959 | nic_mbx_send_ready(nic, vf); | 980 | nic_mbx_send_ready(nic, vf); |
960 | if (vf < nic->num_vf_en) { | 981 | return; |
961 | nic->link[vf] = 0; | ||
962 | nic->duplex[vf] = 0; | ||
963 | nic->speed[vf] = 0; | ||
964 | } | ||
965 | goto unlock; | ||
966 | case NIC_MBOX_MSG_QS_CFG: | 982 | case NIC_MBOX_MSG_QS_CFG: |
967 | reg_addr = NIC_PF_QSET_0_127_CFG | | 983 | reg_addr = NIC_PF_QSET_0_127_CFG | |
968 | (mbx.qs.num << NIC_QS_ID_SHIFT); | 984 | (mbx.qs.num << NIC_QS_ID_SHIFT); |
@@ -1031,7 +1047,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
1031 | break; | 1047 | break; |
1032 | case NIC_MBOX_MSG_RSS_SIZE: | 1048 | case NIC_MBOX_MSG_RSS_SIZE: |
1033 | nic_send_rss_size(nic, vf); | 1049 | nic_send_rss_size(nic, vf); |
1034 | goto unlock; | 1050 | return; |
1035 | case NIC_MBOX_MSG_RSS_CFG: | 1051 | case NIC_MBOX_MSG_RSS_CFG: |
1036 | case NIC_MBOX_MSG_RSS_CFG_CONT: | 1052 | case NIC_MBOX_MSG_RSS_CFG_CONT: |
1037 | nic_config_rss(nic, &mbx.rss_cfg); | 1053 | nic_config_rss(nic, &mbx.rss_cfg); |
@@ -1039,7 +1055,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
1039 | case NIC_MBOX_MSG_CFG_DONE: | 1055 | case NIC_MBOX_MSG_CFG_DONE: |
1040 | /* Last message of VF config msg sequence */ | 1056 | /* Last message of VF config msg sequence */ |
1041 | nic_enable_vf(nic, vf, true); | 1057 | nic_enable_vf(nic, vf, true); |
1042 | goto unlock; | 1058 | break; |
1043 | case NIC_MBOX_MSG_SHUTDOWN: | 1059 | case NIC_MBOX_MSG_SHUTDOWN: |
1044 | /* First msg in VF teardown sequence */ | 1060 | /* First msg in VF teardown sequence */ |
1045 | if (vf >= nic->num_vf_en) | 1061 | if (vf >= nic->num_vf_en) |
@@ -1049,19 +1065,19 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
1049 | break; | 1065 | break; |
1050 | case NIC_MBOX_MSG_ALLOC_SQS: | 1066 | case NIC_MBOX_MSG_ALLOC_SQS: |
1051 | nic_alloc_sqs(nic, &mbx.sqs_alloc); | 1067 | nic_alloc_sqs(nic, &mbx.sqs_alloc); |
1052 | goto unlock; | 1068 | return; |
1053 | case NIC_MBOX_MSG_NICVF_PTR: | 1069 | case NIC_MBOX_MSG_NICVF_PTR: |
1054 | nic->nicvf[vf] = mbx.nicvf.nicvf; | 1070 | nic->nicvf[vf] = mbx.nicvf.nicvf; |
1055 | break; | 1071 | break; |
1056 | case NIC_MBOX_MSG_PNICVF_PTR: | 1072 | case NIC_MBOX_MSG_PNICVF_PTR: |
1057 | nic_send_pnicvf(nic, vf); | 1073 | nic_send_pnicvf(nic, vf); |
1058 | goto unlock; | 1074 | return; |
1059 | case NIC_MBOX_MSG_SNICVF_PTR: | 1075 | case NIC_MBOX_MSG_SNICVF_PTR: |
1060 | nic_send_snicvf(nic, &mbx.nicvf); | 1076 | nic_send_snicvf(nic, &mbx.nicvf); |
1061 | goto unlock; | 1077 | return; |
1062 | case NIC_MBOX_MSG_BGX_STATS: | 1078 | case NIC_MBOX_MSG_BGX_STATS: |
1063 | nic_get_bgx_stats(nic, &mbx.bgx_stats); | 1079 | nic_get_bgx_stats(nic, &mbx.bgx_stats); |
1064 | goto unlock; | 1080 | return; |
1065 | case NIC_MBOX_MSG_LOOPBACK: | 1081 | case NIC_MBOX_MSG_LOOPBACK: |
1066 | ret = nic_config_loopback(nic, &mbx.lbk); | 1082 | ret = nic_config_loopback(nic, &mbx.lbk); |
1067 | break; | 1083 | break; |
@@ -1070,7 +1086,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
1070 | break; | 1086 | break; |
1071 | case NIC_MBOX_MSG_PFC: | 1087 | case NIC_MBOX_MSG_PFC: |
1072 | nic_pause_frame(nic, vf, &mbx.pfc); | 1088 | nic_pause_frame(nic, vf, &mbx.pfc); |
1073 | goto unlock; | 1089 | return; |
1074 | case NIC_MBOX_MSG_PTP_CFG: | 1090 | case NIC_MBOX_MSG_PTP_CFG: |
1075 | nic_config_timestamp(nic, vf, &mbx.ptp); | 1091 | nic_config_timestamp(nic, vf, &mbx.ptp); |
1076 | break; | 1092 | break; |
@@ -1094,7 +1110,7 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
1094 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | 1110 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); |
1095 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | 1111 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); |
1096 | bgx_set_dmac_cam_filter(nic->node, bgx, lmac, | 1112 | bgx_set_dmac_cam_filter(nic->node, bgx, lmac, |
1097 | mbx.xcast.data.mac, | 1113 | mbx.xcast.mac, |
1098 | vf < NIC_VF_PER_MBX_REG ? vf : | 1114 | vf < NIC_VF_PER_MBX_REG ? vf : |
1099 | vf - NIC_VF_PER_MBX_REG); | 1115 | vf - NIC_VF_PER_MBX_REG); |
1100 | break; | 1116 | break; |
@@ -1106,8 +1122,15 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
1106 | } | 1122 | } |
1107 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | 1123 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); |
1108 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | 1124 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); |
1109 | bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.data.mode); | 1125 | bgx_set_xcast_mode(nic->node, bgx, lmac, mbx.xcast.mode); |
1110 | break; | 1126 | break; |
1127 | case NIC_MBOX_MSG_BGX_LINK_CHANGE: | ||
1128 | if (vf >= nic->num_vf_en) { | ||
1129 | ret = -1; /* NACK */ | ||
1130 | break; | ||
1131 | } | ||
1132 | nic_link_status_get(nic, vf); | ||
1133 | return; | ||
1111 | default: | 1134 | default: |
1112 | dev_err(&nic->pdev->dev, | 1135 | dev_err(&nic->pdev->dev, |
1113 | "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); | 1136 | "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg); |
@@ -1121,8 +1144,6 @@ static void nic_handle_mbx_intr(struct nicpf *nic, int vf) | |||
1121 | mbx.msg.msg, vf); | 1144 | mbx.msg.msg, vf); |
1122 | nic_mbx_send_nack(nic, vf); | 1145 | nic_mbx_send_nack(nic, vf); |
1123 | } | 1146 | } |
1124 | unlock: | ||
1125 | nic->mbx_lock[vf] = false; | ||
1126 | } | 1147 | } |
1127 | 1148 | ||
1128 | static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) | 1149 | static irqreturn_t nic_mbx_intr_handler(int irq, void *nic_irq) |
@@ -1270,52 +1291,6 @@ static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic) | |||
1270 | return 0; | 1291 | return 0; |
1271 | } | 1292 | } |
1272 | 1293 | ||
1273 | /* Poll for BGX LMAC link status and update corresponding VF | ||
1274 | * if there is a change, valid only if internal L2 switch | ||
1275 | * is not present otherwise VF link is always treated as up | ||
1276 | */ | ||
1277 | static void nic_poll_for_link(struct work_struct *work) | ||
1278 | { | ||
1279 | union nic_mbx mbx = {}; | ||
1280 | struct nicpf *nic; | ||
1281 | struct bgx_link_status link; | ||
1282 | u8 vf, bgx, lmac; | ||
1283 | |||
1284 | nic = container_of(work, struct nicpf, dwork.work); | ||
1285 | |||
1286 | mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; | ||
1287 | |||
1288 | for (vf = 0; vf < nic->num_vf_en; vf++) { | ||
1289 | /* Poll only if VF is UP */ | ||
1290 | if (!nic->vf_enabled[vf]) | ||
1291 | continue; | ||
1292 | |||
1293 | /* Get BGX, LMAC indices for the VF */ | ||
1294 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
1295 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); | ||
1296 | /* Get interface link status */ | ||
1297 | bgx_get_lmac_link_state(nic->node, bgx, lmac, &link); | ||
1298 | |||
1299 | /* Inform VF only if link status changed */ | ||
1300 | if (nic->link[vf] == link.link_up) | ||
1301 | continue; | ||
1302 | |||
1303 | if (!nic->mbx_lock[vf]) { | ||
1304 | nic->link[vf] = link.link_up; | ||
1305 | nic->duplex[vf] = link.duplex; | ||
1306 | nic->speed[vf] = link.speed; | ||
1307 | |||
1308 | /* Send a mbox message to VF with current link status */ | ||
1309 | mbx.link_status.link_up = link.link_up; | ||
1310 | mbx.link_status.duplex = link.duplex; | ||
1311 | mbx.link_status.speed = link.speed; | ||
1312 | mbx.link_status.mac_type = link.mac_type; | ||
1313 | nic_send_msg_to_vf(nic, vf, &mbx); | ||
1314 | } | ||
1315 | } | ||
1316 | queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2); | ||
1317 | } | ||
1318 | |||
1319 | static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | 1294 | static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
1320 | { | 1295 | { |
1321 | struct device *dev = &pdev->dev; | 1296 | struct device *dev = &pdev->dev; |
@@ -1384,18 +1359,6 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1384 | if (!nic->vf_lmac_map) | 1359 | if (!nic->vf_lmac_map) |
1385 | goto err_release_regions; | 1360 | goto err_release_regions; |
1386 | 1361 | ||
1387 | nic->link = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); | ||
1388 | if (!nic->link) | ||
1389 | goto err_release_regions; | ||
1390 | |||
1391 | nic->duplex = devm_kmalloc_array(dev, max_lmac, sizeof(u8), GFP_KERNEL); | ||
1392 | if (!nic->duplex) | ||
1393 | goto err_release_regions; | ||
1394 | |||
1395 | nic->speed = devm_kmalloc_array(dev, max_lmac, sizeof(u32), GFP_KERNEL); | ||
1396 | if (!nic->speed) | ||
1397 | goto err_release_regions; | ||
1398 | |||
1399 | /* Initialize hardware */ | 1362 | /* Initialize hardware */ |
1400 | nic_init_hw(nic); | 1363 | nic_init_hw(nic); |
1401 | 1364 | ||
@@ -1411,22 +1374,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1411 | if (err) | 1374 | if (err) |
1412 | goto err_unregister_interrupts; | 1375 | goto err_unregister_interrupts; |
1413 | 1376 | ||
1414 | /* Register a physical link status poll fn() */ | ||
1415 | nic->check_link = alloc_workqueue("check_link_status", | ||
1416 | WQ_UNBOUND | WQ_MEM_RECLAIM, 1); | ||
1417 | if (!nic->check_link) { | ||
1418 | err = -ENOMEM; | ||
1419 | goto err_disable_sriov; | ||
1420 | } | ||
1421 | |||
1422 | INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link); | ||
1423 | queue_delayed_work(nic->check_link, &nic->dwork, 0); | ||
1424 | |||
1425 | return 0; | 1377 | return 0; |
1426 | 1378 | ||
1427 | err_disable_sriov: | ||
1428 | if (nic->flags & NIC_SRIOV_ENABLED) | ||
1429 | pci_disable_sriov(pdev); | ||
1430 | err_unregister_interrupts: | 1379 | err_unregister_interrupts: |
1431 | nic_unregister_interrupts(nic); | 1380 | nic_unregister_interrupts(nic); |
1432 | err_release_regions: | 1381 | err_release_regions: |
@@ -1447,12 +1396,6 @@ static void nic_remove(struct pci_dev *pdev) | |||
1447 | if (nic->flags & NIC_SRIOV_ENABLED) | 1396 | if (nic->flags & NIC_SRIOV_ENABLED) |
1448 | pci_disable_sriov(pdev); | 1397 | pci_disable_sriov(pdev); |
1449 | 1398 | ||
1450 | if (nic->check_link) { | ||
1451 | /* Destroy work Queue */ | ||
1452 | cancel_delayed_work_sync(&nic->dwork); | ||
1453 | destroy_workqueue(nic->check_link); | ||
1454 | } | ||
1455 | |||
1456 | nic_unregister_interrupts(nic); | 1399 | nic_unregister_interrupts(nic); |
1457 | pci_release_regions(pdev); | 1400 | pci_release_regions(pdev); |
1458 | 1401 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 88f8a8fa93cd..503cfadff4ac 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
@@ -68,9 +68,6 @@ module_param(cpi_alg, int, 0444); | |||
68 | MODULE_PARM_DESC(cpi_alg, | 68 | MODULE_PARM_DESC(cpi_alg, |
69 | "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); | 69 | "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)"); |
70 | 70 | ||
71 | /* workqueue for handling kernel ndo_set_rx_mode() calls */ | ||
72 | static struct workqueue_struct *nicvf_rx_mode_wq; | ||
73 | |||
74 | static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) | 71 | static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) |
75 | { | 72 | { |
76 | if (nic->sqs_mode) | 73 | if (nic->sqs_mode) |
@@ -127,6 +124,9 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) | |||
127 | { | 124 | { |
128 | int timeout = NIC_MBOX_MSG_TIMEOUT; | 125 | int timeout = NIC_MBOX_MSG_TIMEOUT; |
129 | int sleep = 10; | 126 | int sleep = 10; |
127 | int ret = 0; | ||
128 | |||
129 | mutex_lock(&nic->rx_mode_mtx); | ||
130 | 130 | ||
131 | nic->pf_acked = false; | 131 | nic->pf_acked = false; |
132 | nic->pf_nacked = false; | 132 | nic->pf_nacked = false; |
@@ -139,7 +139,8 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) | |||
139 | netdev_err(nic->netdev, | 139 | netdev_err(nic->netdev, |
140 | "PF NACK to mbox msg 0x%02x from VF%d\n", | 140 | "PF NACK to mbox msg 0x%02x from VF%d\n", |
141 | (mbx->msg.msg & 0xFF), nic->vf_id); | 141 | (mbx->msg.msg & 0xFF), nic->vf_id); |
142 | return -EINVAL; | 142 | ret = -EINVAL; |
143 | break; | ||
143 | } | 144 | } |
144 | msleep(sleep); | 145 | msleep(sleep); |
145 | if (nic->pf_acked) | 146 | if (nic->pf_acked) |
@@ -149,10 +150,12 @@ int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx) | |||
149 | netdev_err(nic->netdev, | 150 | netdev_err(nic->netdev, |
150 | "PF didn't ACK to mbox msg 0x%02x from VF%d\n", | 151 | "PF didn't ACK to mbox msg 0x%02x from VF%d\n", |
151 | (mbx->msg.msg & 0xFF), nic->vf_id); | 152 | (mbx->msg.msg & 0xFF), nic->vf_id); |
152 | return -EBUSY; | 153 | ret = -EBUSY; |
154 | break; | ||
153 | } | 155 | } |
154 | } | 156 | } |
155 | return 0; | 157 | mutex_unlock(&nic->rx_mode_mtx); |
158 | return ret; | ||
156 | } | 159 | } |
157 | 160 | ||
158 | /* Checks if VF is able to comminicate with PF | 161 | /* Checks if VF is able to comminicate with PF |
@@ -172,6 +175,17 @@ static int nicvf_check_pf_ready(struct nicvf *nic) | |||
172 | return 1; | 175 | return 1; |
173 | } | 176 | } |
174 | 177 | ||
178 | static void nicvf_send_cfg_done(struct nicvf *nic) | ||
179 | { | ||
180 | union nic_mbx mbx = {}; | ||
181 | |||
182 | mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; | ||
183 | if (nicvf_send_msg_to_pf(nic, &mbx)) { | ||
184 | netdev_err(nic->netdev, | ||
185 | "PF didn't respond to CFG DONE msg\n"); | ||
186 | } | ||
187 | } | ||
188 | |||
175 | static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) | 189 | static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx) |
176 | { | 190 | { |
177 | if (bgx->rx) | 191 | if (bgx->rx) |
@@ -228,21 +242,24 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic) | |||
228 | break; | 242 | break; |
229 | case NIC_MBOX_MSG_BGX_LINK_CHANGE: | 243 | case NIC_MBOX_MSG_BGX_LINK_CHANGE: |
230 | nic->pf_acked = true; | 244 | nic->pf_acked = true; |
231 | nic->link_up = mbx.link_status.link_up; | 245 | if (nic->link_up != mbx.link_status.link_up) { |
232 | nic->duplex = mbx.link_status.duplex; | 246 | nic->link_up = mbx.link_status.link_up; |
233 | nic->speed = mbx.link_status.speed; | 247 | nic->duplex = mbx.link_status.duplex; |
234 | nic->mac_type = mbx.link_status.mac_type; | 248 | nic->speed = mbx.link_status.speed; |
235 | if (nic->link_up) { | 249 | nic->mac_type = mbx.link_status.mac_type; |
236 | netdev_info(nic->netdev, "Link is Up %d Mbps %s duplex\n", | 250 | if (nic->link_up) { |
237 | nic->speed, | 251 | netdev_info(nic->netdev, |
238 | nic->duplex == DUPLEX_FULL ? | 252 | "Link is Up %d Mbps %s duplex\n", |
239 | "Full" : "Half"); | 253 | nic->speed, |
240 | netif_carrier_on(nic->netdev); | 254 | nic->duplex == DUPLEX_FULL ? |
241 | netif_tx_start_all_queues(nic->netdev); | 255 | "Full" : "Half"); |
242 | } else { | 256 | netif_carrier_on(nic->netdev); |
243 | netdev_info(nic->netdev, "Link is Down\n"); | 257 | netif_tx_start_all_queues(nic->netdev); |
244 | netif_carrier_off(nic->netdev); | 258 | } else { |
245 | netif_tx_stop_all_queues(nic->netdev); | 259 | netdev_info(nic->netdev, "Link is Down\n"); |
260 | netif_carrier_off(nic->netdev); | ||
261 | netif_tx_stop_all_queues(nic->netdev); | ||
262 | } | ||
246 | } | 263 | } |
247 | break; | 264 | break; |
248 | case NIC_MBOX_MSG_ALLOC_SQS: | 265 | case NIC_MBOX_MSG_ALLOC_SQS: |
@@ -1311,6 +1328,11 @@ int nicvf_stop(struct net_device *netdev) | |||
1311 | struct nicvf_cq_poll *cq_poll = NULL; | 1328 | struct nicvf_cq_poll *cq_poll = NULL; |
1312 | union nic_mbx mbx = {}; | 1329 | union nic_mbx mbx = {}; |
1313 | 1330 | ||
1331 | cancel_delayed_work_sync(&nic->link_change_work); | ||
1332 | |||
1333 | /* wait till all queued set_rx_mode tasks completes */ | ||
1334 | drain_workqueue(nic->nicvf_rx_mode_wq); | ||
1335 | |||
1314 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; | 1336 | mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; |
1315 | nicvf_send_msg_to_pf(nic, &mbx); | 1337 | nicvf_send_msg_to_pf(nic, &mbx); |
1316 | 1338 | ||
@@ -1410,13 +1432,27 @@ static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) | |||
1410 | return nicvf_send_msg_to_pf(nic, &mbx); | 1432 | return nicvf_send_msg_to_pf(nic, &mbx); |
1411 | } | 1433 | } |
1412 | 1434 | ||
1435 | static void nicvf_link_status_check_task(struct work_struct *work_arg) | ||
1436 | { | ||
1437 | struct nicvf *nic = container_of(work_arg, | ||
1438 | struct nicvf, | ||
1439 | link_change_work.work); | ||
1440 | union nic_mbx mbx = {}; | ||
1441 | mbx.msg.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE; | ||
1442 | nicvf_send_msg_to_pf(nic, &mbx); | ||
1443 | queue_delayed_work(nic->nicvf_rx_mode_wq, | ||
1444 | &nic->link_change_work, 2 * HZ); | ||
1445 | } | ||
1446 | |||
1413 | int nicvf_open(struct net_device *netdev) | 1447 | int nicvf_open(struct net_device *netdev) |
1414 | { | 1448 | { |
1415 | int cpu, err, qidx; | 1449 | int cpu, err, qidx; |
1416 | struct nicvf *nic = netdev_priv(netdev); | 1450 | struct nicvf *nic = netdev_priv(netdev); |
1417 | struct queue_set *qs = nic->qs; | 1451 | struct queue_set *qs = nic->qs; |
1418 | struct nicvf_cq_poll *cq_poll = NULL; | 1452 | struct nicvf_cq_poll *cq_poll = NULL; |
1419 | union nic_mbx mbx = {}; | 1453 | |
1454 | /* wait till all queued set_rx_mode tasks completes if any */ | ||
1455 | drain_workqueue(nic->nicvf_rx_mode_wq); | ||
1420 | 1456 | ||
1421 | netif_carrier_off(netdev); | 1457 | netif_carrier_off(netdev); |
1422 | 1458 | ||
@@ -1512,8 +1548,12 @@ int nicvf_open(struct net_device *netdev) | |||
1512 | nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); | 1548 | nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); |
1513 | 1549 | ||
1514 | /* Send VF config done msg to PF */ | 1550 | /* Send VF config done msg to PF */ |
1515 | mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; | 1551 | nicvf_send_cfg_done(nic); |
1516 | nicvf_write_to_mbx(nic, &mbx); | 1552 | |
1553 | INIT_DELAYED_WORK(&nic->link_change_work, | ||
1554 | nicvf_link_status_check_task); | ||
1555 | queue_delayed_work(nic->nicvf_rx_mode_wq, | ||
1556 | &nic->link_change_work, 0); | ||
1517 | 1557 | ||
1518 | return 0; | 1558 | return 0; |
1519 | cleanup: | 1559 | cleanup: |
@@ -1941,15 +1981,17 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, | |||
1941 | 1981 | ||
1942 | /* flush DMAC filters and reset RX mode */ | 1982 | /* flush DMAC filters and reset RX mode */ |
1943 | mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; | 1983 | mbx.xcast.msg = NIC_MBOX_MSG_RESET_XCAST; |
1944 | nicvf_send_msg_to_pf(nic, &mbx); | 1984 | if (nicvf_send_msg_to_pf(nic, &mbx) < 0) |
1985 | goto free_mc; | ||
1945 | 1986 | ||
1946 | if (mode & BGX_XCAST_MCAST_FILTER) { | 1987 | if (mode & BGX_XCAST_MCAST_FILTER) { |
1947 | /* once enabling filtering, we need to signal to PF to add | 1988 | /* once enabling filtering, we need to signal to PF to add |
1948 | * its' own LMAC to the filter to accept packets for it. | 1989 | * its' own LMAC to the filter to accept packets for it. |
1949 | */ | 1990 | */ |
1950 | mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; | 1991 | mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; |
1951 | mbx.xcast.data.mac = 0; | 1992 | mbx.xcast.mac = 0; |
1952 | nicvf_send_msg_to_pf(nic, &mbx); | 1993 | if (nicvf_send_msg_to_pf(nic, &mbx) < 0) |
1994 | goto free_mc; | ||
1953 | } | 1995 | } |
1954 | 1996 | ||
1955 | /* check if we have any specific MACs to be added to PF DMAC filter */ | 1997 | /* check if we have any specific MACs to be added to PF DMAC filter */ |
@@ -1957,23 +1999,25 @@ static void __nicvf_set_rx_mode_task(u8 mode, struct xcast_addr_list *mc_addrs, | |||
1957 | /* now go through kernel list of MACs and add them one by one */ | 1999 | /* now go through kernel list of MACs and add them one by one */ |
1958 | for (idx = 0; idx < mc_addrs->count; idx++) { | 2000 | for (idx = 0; idx < mc_addrs->count; idx++) { |
1959 | mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; | 2001 | mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST; |
1960 | mbx.xcast.data.mac = mc_addrs->mc[idx]; | 2002 | mbx.xcast.mac = mc_addrs->mc[idx]; |
1961 | nicvf_send_msg_to_pf(nic, &mbx); | 2003 | if (nicvf_send_msg_to_pf(nic, &mbx) < 0) |
2004 | goto free_mc; | ||
1962 | } | 2005 | } |
1963 | kfree(mc_addrs); | ||
1964 | } | 2006 | } |
1965 | 2007 | ||
1966 | /* and finally set rx mode for PF accordingly */ | 2008 | /* and finally set rx mode for PF accordingly */ |
1967 | mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; | 2009 | mbx.xcast.msg = NIC_MBOX_MSG_SET_XCAST; |
1968 | mbx.xcast.data.mode = mode; | 2010 | mbx.xcast.mode = mode; |
1969 | 2011 | ||
1970 | nicvf_send_msg_to_pf(nic, &mbx); | 2012 | nicvf_send_msg_to_pf(nic, &mbx); |
2013 | free_mc: | ||
2014 | kfree(mc_addrs); | ||
1971 | } | 2015 | } |
1972 | 2016 | ||
1973 | static void nicvf_set_rx_mode_task(struct work_struct *work_arg) | 2017 | static void nicvf_set_rx_mode_task(struct work_struct *work_arg) |
1974 | { | 2018 | { |
1975 | struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, | 2019 | struct nicvf_work *vf_work = container_of(work_arg, struct nicvf_work, |
1976 | work.work); | 2020 | work); |
1977 | struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); | 2021 | struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work); |
1978 | u8 mode; | 2022 | u8 mode; |
1979 | struct xcast_addr_list *mc; | 2023 | struct xcast_addr_list *mc; |
@@ -2030,7 +2074,7 @@ static void nicvf_set_rx_mode(struct net_device *netdev) | |||
2030 | kfree(nic->rx_mode_work.mc); | 2074 | kfree(nic->rx_mode_work.mc); |
2031 | nic->rx_mode_work.mc = mc_list; | 2075 | nic->rx_mode_work.mc = mc_list; |
2032 | nic->rx_mode_work.mode = mode; | 2076 | nic->rx_mode_work.mode = mode; |
2033 | queue_delayed_work(nicvf_rx_mode_wq, &nic->rx_mode_work.work, 0); | 2077 | queue_work(nic->nicvf_rx_mode_wq, &nic->rx_mode_work.work); |
2034 | spin_unlock(&nic->rx_mode_wq_lock); | 2078 | spin_unlock(&nic->rx_mode_wq_lock); |
2035 | } | 2079 | } |
2036 | 2080 | ||
@@ -2187,8 +2231,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2187 | 2231 | ||
2188 | INIT_WORK(&nic->reset_task, nicvf_reset_task); | 2232 | INIT_WORK(&nic->reset_task, nicvf_reset_task); |
2189 | 2233 | ||
2190 | INIT_DELAYED_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); | 2234 | nic->nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_rx_mode_wq_VF%d", |
2235 | WQ_MEM_RECLAIM, | ||
2236 | nic->vf_id); | ||
2237 | INIT_WORK(&nic->rx_mode_work.work, nicvf_set_rx_mode_task); | ||
2191 | spin_lock_init(&nic->rx_mode_wq_lock); | 2238 | spin_lock_init(&nic->rx_mode_wq_lock); |
2239 | mutex_init(&nic->rx_mode_mtx); | ||
2192 | 2240 | ||
2193 | err = register_netdev(netdev); | 2241 | err = register_netdev(netdev); |
2194 | if (err) { | 2242 | if (err) { |
@@ -2228,13 +2276,15 @@ static void nicvf_remove(struct pci_dev *pdev) | |||
2228 | nic = netdev_priv(netdev); | 2276 | nic = netdev_priv(netdev); |
2229 | pnetdev = nic->pnicvf->netdev; | 2277 | pnetdev = nic->pnicvf->netdev; |
2230 | 2278 | ||
2231 | cancel_delayed_work_sync(&nic->rx_mode_work.work); | ||
2232 | |||
2233 | /* Check if this Qset is assigned to different VF. | 2279 | /* Check if this Qset is assigned to different VF. |
2234 | * If yes, clean primary and all secondary Qsets. | 2280 | * If yes, clean primary and all secondary Qsets. |
2235 | */ | 2281 | */ |
2236 | if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) | 2282 | if (pnetdev && (pnetdev->reg_state == NETREG_REGISTERED)) |
2237 | unregister_netdev(pnetdev); | 2283 | unregister_netdev(pnetdev); |
2284 | if (nic->nicvf_rx_mode_wq) { | ||
2285 | destroy_workqueue(nic->nicvf_rx_mode_wq); | ||
2286 | nic->nicvf_rx_mode_wq = NULL; | ||
2287 | } | ||
2238 | nicvf_unregister_interrupts(nic); | 2288 | nicvf_unregister_interrupts(nic); |
2239 | pci_set_drvdata(pdev, NULL); | 2289 | pci_set_drvdata(pdev, NULL); |
2240 | if (nic->drv_stats) | 2290 | if (nic->drv_stats) |
@@ -2261,17 +2311,11 @@ static struct pci_driver nicvf_driver = { | |||
2261 | static int __init nicvf_init_module(void) | 2311 | static int __init nicvf_init_module(void) |
2262 | { | 2312 | { |
2263 | pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); | 2313 | pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION); |
2264 | nicvf_rx_mode_wq = alloc_ordered_workqueue("nicvf_generic", | ||
2265 | WQ_MEM_RECLAIM); | ||
2266 | return pci_register_driver(&nicvf_driver); | 2314 | return pci_register_driver(&nicvf_driver); |
2267 | } | 2315 | } |
2268 | 2316 | ||
2269 | static void __exit nicvf_cleanup_module(void) | 2317 | static void __exit nicvf_cleanup_module(void) |
2270 | { | 2318 | { |
2271 | if (nicvf_rx_mode_wq) { | ||
2272 | destroy_workqueue(nicvf_rx_mode_wq); | ||
2273 | nicvf_rx_mode_wq = NULL; | ||
2274 | } | ||
2275 | pci_unregister_driver(&nicvf_driver); | 2319 | pci_unregister_driver(&nicvf_driver); |
2276 | } | 2320 | } |
2277 | 2321 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index e337da6ba2a4..673c57b8023f 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | |||
@@ -1217,7 +1217,7 @@ static void bgx_init_hw(struct bgx *bgx) | |||
1217 | 1217 | ||
1218 | /* Disable MAC steering (NCSI traffic) */ | 1218 | /* Disable MAC steering (NCSI traffic) */ |
1219 | for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) | 1219 | for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++) |
1220 | bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00); | 1220 | bgx_reg_write(bgx, 0, BGX_CMR_RX_STEERING + (i * 8), 0x00); |
1221 | } | 1221 | } |
1222 | 1222 | ||
1223 | static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) | 1223 | static u8 bgx_get_lane2sds_cfg(struct bgx *bgx, struct lmac *lmac) |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index cbdd20b9ee6f..5cbc54e9eb19 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h | |||
@@ -60,7 +60,7 @@ | |||
60 | #define RX_DMACX_CAM_EN BIT_ULL(48) | 60 | #define RX_DMACX_CAM_EN BIT_ULL(48) |
61 | #define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49) | 61 | #define RX_DMACX_CAM_LMACID(x) (((u64)x) << 49) |
62 | #define RX_DMAC_COUNT 32 | 62 | #define RX_DMAC_COUNT 32 |
63 | #define BGX_CMR_RX_STREERING 0x300 | 63 | #define BGX_CMR_RX_STEERING 0x300 |
64 | #define RX_TRAFFIC_STEER_RULE_COUNT 8 | 64 | #define RX_TRAFFIC_STEER_RULE_COUNT 8 |
65 | #define BGX_CMR_CHAN_MSK_AND 0x450 | 65 | #define BGX_CMR_CHAN_MSK_AND 0x450 |
66 | #define BGX_CMR_BIST_STATUS 0x460 | 66 | #define BGX_CMR_BIST_STATUS 0x460 |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index c041f44324db..b3654598a2d5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | |||
@@ -660,6 +660,7 @@ static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) | |||
660 | lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; | 660 | lld->cclk_ps = 1000000000 / adap->params.vpd.cclk; |
661 | lld->udb_density = 1 << adap->params.sge.eq_qpp; | 661 | lld->udb_density = 1 << adap->params.sge.eq_qpp; |
662 | lld->ucq_density = 1 << adap->params.sge.iq_qpp; | 662 | lld->ucq_density = 1 << adap->params.sge.iq_qpp; |
663 | lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10); | ||
663 | lld->filt_mode = adap->params.tp.vlan_pri_map; | 664 | lld->filt_mode = adap->params.tp.vlan_pri_map; |
664 | /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ | 665 | /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */ |
665 | for (i = 0; i < NCHAN; i++) | 666 | for (i = 0; i < NCHAN; i++) |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h index 5fa9a2d5fc4b..21da34a4ca24 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | |||
@@ -336,6 +336,7 @@ struct cxgb4_lld_info { | |||
336 | unsigned int cclk_ps; /* Core clock period in psec */ | 336 | unsigned int cclk_ps; /* Core clock period in psec */ |
337 | unsigned short udb_density; /* # of user DB/page */ | 337 | unsigned short udb_density; /* # of user DB/page */ |
338 | unsigned short ucq_density; /* # of user CQs/page */ | 338 | unsigned short ucq_density; /* # of user CQs/page */ |
339 | unsigned int sge_host_page_size; /* SGE host page size */ | ||
339 | unsigned short filt_mode; /* filter optional components */ | 340 | unsigned short filt_mode; /* filter optional components */ |
340 | unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */ | 341 | unsigned short tx_modq[NCHAN]; /* maps each tx channel to a */ |
341 | /* scheduler queue */ | 342 | /* scheduler queue */ |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 60641e202534..9a7f70db20c7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, | |||
1434 | * csum is correct or is zero. | 1434 | * csum is correct or is zero. |
1435 | */ | 1435 | */ |
1436 | if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && | 1436 | if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && |
1437 | tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { | 1437 | tcp_udp_csum_ok && outer_csum_ok && |
1438 | (ipv4_csum_ok || ipv6)) { | ||
1438 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1439 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1439 | skb->csum_level = encap; | 1440 | skb->csum_level = encap; |
1440 | } | 1441 | } |
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 13430f75496c..f1a2da15dd0a 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c | |||
@@ -585,7 +585,7 @@ static void de_tx (struct de_private *de) | |||
585 | netif_dbg(de, tx_done, de->dev, | 585 | netif_dbg(de, tx_done, de->dev, |
586 | "tx done, slot %d\n", tx_tail); | 586 | "tx done, slot %d\n", tx_tail); |
587 | } | 587 | } |
588 | dev_kfree_skb_irq(skb); | 588 | dev_consume_skb_irq(skb); |
589 | } | 589 | } |
590 | 590 | ||
591 | next: | 591 | next: |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2370dc204202..697c2427f2b7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) | |||
2098 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ | 2098 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
2099 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ | 2099 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ |
2100 | defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) | 2100 | defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) |
2101 | static __u32 fec_enet_register_version = 2; | ||
2101 | static u32 fec_enet_register_offset[] = { | 2102 | static u32 fec_enet_register_offset[] = { |
2102 | FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, | 2103 | FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, |
2103 | FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, | 2104 | FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, |
@@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = { | |||
2128 | IEEE_R_FDXFC, IEEE_R_OCTETS_OK | 2129 | IEEE_R_FDXFC, IEEE_R_OCTETS_OK |
2129 | }; | 2130 | }; |
2130 | #else | 2131 | #else |
2132 | static __u32 fec_enet_register_version = 1; | ||
2131 | static u32 fec_enet_register_offset[] = { | 2133 | static u32 fec_enet_register_offset[] = { |
2132 | FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, | 2134 | FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, |
2133 | FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, | 2135 | FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, |
@@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev, | |||
2149 | u32 *buf = (u32 *)regbuf; | 2151 | u32 *buf = (u32 *)regbuf; |
2150 | u32 i, off; | 2152 | u32 i, off; |
2151 | 2153 | ||
2154 | regs->version = fec_enet_register_version; | ||
2155 | |||
2152 | memset(buf, 0, regs->len); | 2156 | memset(buf, 0, regs->len); |
2153 | 2157 | ||
2154 | for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { | 2158 | for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { |
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index b90bab72efdb..c1968b3ecec8 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c | |||
@@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) | |||
369 | dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, | 369 | dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, |
370 | DMA_TO_DEVICE); | 370 | DMA_TO_DEVICE); |
371 | 371 | ||
372 | dev_kfree_skb_irq(skb); | 372 | dev_consume_skb_irq(skb); |
373 | } | 373 | } |
374 | spin_unlock(&priv->lock); | 374 | spin_unlock(&priv->lock); |
375 | 375 | ||
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index c3d539e209ed..eb3e65e8868f 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c | |||
@@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) | |||
1879 | u16 i, j; | 1879 | u16 i, j; |
1880 | u8 __iomem *bd; | 1880 | u8 __iomem *bd; |
1881 | 1881 | ||
1882 | netdev_reset_queue(ugeth->ndev); | ||
1883 | |||
1882 | ug_info = ugeth->ug_info; | 1884 | ug_info = ugeth->ug_info; |
1883 | uf_info = &ug_info->uf_info; | 1885 | uf_info = &ug_info->uf_info; |
1884 | 1886 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 3b9e74be5fbd..ac55db065f16 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
@@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) | |||
3081 | dsaf_dev = dev_get_drvdata(&pdev->dev); | 3081 | dsaf_dev = dev_get_drvdata(&pdev->dev); |
3082 | if (!dsaf_dev) { | 3082 | if (!dsaf_dev) { |
3083 | dev_err(&pdev->dev, "dsaf_dev is NULL\n"); | 3083 | dev_err(&pdev->dev, "dsaf_dev is NULL\n"); |
3084 | put_device(&pdev->dev); | ||
3084 | return -ENODEV; | 3085 | return -ENODEV; |
3085 | } | 3086 | } |
3086 | 3087 | ||
@@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) | |||
3088 | if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { | 3089 | if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { |
3089 | dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", | 3090 | dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", |
3090 | dsaf_dev->ae_dev.name); | 3091 | dsaf_dev->ae_dev.name); |
3092 | put_device(&pdev->dev); | ||
3091 | return -ENODEV; | 3093 | return -ENODEV; |
3092 | } | 3094 | } |
3093 | 3095 | ||
@@ -3126,6 +3128,9 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) | |||
3126 | dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); | 3128 | dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1); |
3127 | dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); | 3129 | dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit); |
3128 | } | 3130 | } |
3131 | |||
3132 | put_device(&pdev->dev); | ||
3133 | |||
3129 | return 0; | 3134 | return 0; |
3130 | } | 3135 | } |
3131 | EXPORT_SYMBOL(hns_dsaf_roce_reset); | 3136 | EXPORT_SYMBOL(hns_dsaf_roce_reset); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f52e2c46e6a7..e4ff531db14a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c | |||
@@ -3289,8 +3289,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) | |||
3289 | i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : | 3289 | i40e_alloc_rx_buffers_zc(ring, I40E_DESC_UNUSED(ring)) : |
3290 | !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); | 3290 | !i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring)); |
3291 | if (!ok) { | 3291 | if (!ok) { |
3292 | /* Log this in case the user has forgotten to give the kernel | ||
3293 | * any buffers, even later in the application. | ||
3294 | */ | ||
3292 | dev_info(&vsi->back->pdev->dev, | 3295 | dev_info(&vsi->back->pdev->dev, |
3293 | "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n", | 3296 | "Failed to allocate some buffers on %sRx ring %d (pf_q %d)\n", |
3294 | ring->xsk_umem ? "UMEM enabled " : "", | 3297 | ring->xsk_umem ? "UMEM enabled " : "", |
3295 | ring->queue_index, pf_q); | 3298 | ring->queue_index, pf_q); |
3296 | } | 3299 | } |
@@ -6725,8 +6728,13 @@ void i40e_down(struct i40e_vsi *vsi) | |||
6725 | 6728 | ||
6726 | for (i = 0; i < vsi->num_queue_pairs; i++) { | 6729 | for (i = 0; i < vsi->num_queue_pairs; i++) { |
6727 | i40e_clean_tx_ring(vsi->tx_rings[i]); | 6730 | i40e_clean_tx_ring(vsi->tx_rings[i]); |
6728 | if (i40e_enabled_xdp_vsi(vsi)) | 6731 | if (i40e_enabled_xdp_vsi(vsi)) { |
6732 | /* Make sure that in-progress ndo_xdp_xmit | ||
6733 | * calls are completed. | ||
6734 | */ | ||
6735 | synchronize_rcu(); | ||
6729 | i40e_clean_tx_ring(vsi->xdp_rings[i]); | 6736 | i40e_clean_tx_ring(vsi->xdp_rings[i]); |
6737 | } | ||
6730 | i40e_clean_rx_ring(vsi->rx_rings[i]); | 6738 | i40e_clean_rx_ring(vsi->rx_rings[i]); |
6731 | } | 6739 | } |
6732 | 6740 | ||
@@ -11895,6 +11903,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, | |||
11895 | if (old_prog) | 11903 | if (old_prog) |
11896 | bpf_prog_put(old_prog); | 11904 | bpf_prog_put(old_prog); |
11897 | 11905 | ||
11906 | /* Kick start the NAPI context if there is an AF_XDP socket open | ||
11907 | * on that queue id. This so that receiving will start. | ||
11908 | */ | ||
11909 | if (need_reset && prog) | ||
11910 | for (i = 0; i < vsi->num_queue_pairs; i++) | ||
11911 | if (vsi->xdp_rings[i]->xsk_umem) | ||
11912 | (void)i40e_xsk_async_xmit(vsi->netdev, i); | ||
11913 | |||
11898 | return 0; | 11914 | return 0; |
11899 | } | 11915 | } |
11900 | 11916 | ||
@@ -11955,8 +11971,13 @@ static void i40e_queue_pair_reset_stats(struct i40e_vsi *vsi, int queue_pair) | |||
11955 | static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) | 11971 | static void i40e_queue_pair_clean_rings(struct i40e_vsi *vsi, int queue_pair) |
11956 | { | 11972 | { |
11957 | i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); | 11973 | i40e_clean_tx_ring(vsi->tx_rings[queue_pair]); |
11958 | if (i40e_enabled_xdp_vsi(vsi)) | 11974 | if (i40e_enabled_xdp_vsi(vsi)) { |
11975 | /* Make sure that in-progress ndo_xdp_xmit calls are | ||
11976 | * completed. | ||
11977 | */ | ||
11978 | synchronize_rcu(); | ||
11959 | i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); | 11979 | i40e_clean_tx_ring(vsi->xdp_rings[queue_pair]); |
11980 | } | ||
11960 | i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); | 11981 | i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); |
11961 | } | 11982 | } |
11962 | 11983 | ||
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index a7e14e98889f..6c97667d20ef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -3709,6 +3709,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | |||
3709 | struct i40e_netdev_priv *np = netdev_priv(dev); | 3709 | struct i40e_netdev_priv *np = netdev_priv(dev); |
3710 | unsigned int queue_index = smp_processor_id(); | 3710 | unsigned int queue_index = smp_processor_id(); |
3711 | struct i40e_vsi *vsi = np->vsi; | 3711 | struct i40e_vsi *vsi = np->vsi; |
3712 | struct i40e_pf *pf = vsi->back; | ||
3712 | struct i40e_ring *xdp_ring; | 3713 | struct i40e_ring *xdp_ring; |
3713 | int drops = 0; | 3714 | int drops = 0; |
3714 | int i; | 3715 | int i; |
@@ -3716,7 +3717,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | |||
3716 | if (test_bit(__I40E_VSI_DOWN, vsi->state)) | 3717 | if (test_bit(__I40E_VSI_DOWN, vsi->state)) |
3717 | return -ENETDOWN; | 3718 | return -ENETDOWN; |
3718 | 3719 | ||
3719 | if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs) | 3720 | if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs || |
3721 | test_bit(__I40E_CONFIG_BUSY, pf->state)) | ||
3720 | return -ENXIO; | 3722 | return -ENXIO; |
3721 | 3723 | ||
3722 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) | 3724 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 870cf654e436..3827f16e6923 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c | |||
@@ -183,6 +183,11 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem, | |||
183 | err = i40e_queue_pair_enable(vsi, qid); | 183 | err = i40e_queue_pair_enable(vsi, qid); |
184 | if (err) | 184 | if (err) |
185 | return err; | 185 | return err; |
186 | |||
187 | /* Kick start the NAPI context so that receiving will start */ | ||
188 | err = i40e_xsk_async_xmit(vsi->netdev, qid); | ||
189 | if (err) | ||
190 | return err; | ||
186 | } | 191 | } |
187 | 192 | ||
188 | return 0; | 193 | return 0; |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index daff8183534b..cb35d8202572 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -3953,8 +3953,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) | |||
3953 | else | 3953 | else |
3954 | mrqc = IXGBE_MRQC_VMDQRSS64EN; | 3954 | mrqc = IXGBE_MRQC_VMDQRSS64EN; |
3955 | 3955 | ||
3956 | /* Enable L3/L4 for Tx Switched packets */ | 3956 | /* Enable L3/L4 for Tx Switched packets only for X550, |
3957 | mrqc |= IXGBE_MRQC_L3L4TXSWEN; | 3957 | * older devices do not support this feature |
3958 | */ | ||
3959 | if (hw->mac.type >= ixgbe_mac_X550) | ||
3960 | mrqc |= IXGBE_MRQC_L3L4TXSWEN; | ||
3958 | } else { | 3961 | } else { |
3959 | if (tcs > 4) | 3962 | if (tcs > 4) |
3960 | mrqc = IXGBE_MRQC_RTRSS8TCEN; | 3963 | mrqc = IXGBE_MRQC_RTRSS8TCEN; |
@@ -10225,6 +10228,7 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) | |||
10225 | int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; | 10228 | int i, frame_size = dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; |
10226 | struct ixgbe_adapter *adapter = netdev_priv(dev); | 10229 | struct ixgbe_adapter *adapter = netdev_priv(dev); |
10227 | struct bpf_prog *old_prog; | 10230 | struct bpf_prog *old_prog; |
10231 | bool need_reset; | ||
10228 | 10232 | ||
10229 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) | 10233 | if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) |
10230 | return -EINVAL; | 10234 | return -EINVAL; |
@@ -10247,9 +10251,10 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) | |||
10247 | return -ENOMEM; | 10251 | return -ENOMEM; |
10248 | 10252 | ||
10249 | old_prog = xchg(&adapter->xdp_prog, prog); | 10253 | old_prog = xchg(&adapter->xdp_prog, prog); |
10254 | need_reset = (!!prog != !!old_prog); | ||
10250 | 10255 | ||
10251 | /* If transitioning XDP modes reconfigure rings */ | 10256 | /* If transitioning XDP modes reconfigure rings */ |
10252 | if (!!prog != !!old_prog) { | 10257 | if (need_reset) { |
10253 | int err = ixgbe_setup_tc(dev, adapter->hw_tcs); | 10258 | int err = ixgbe_setup_tc(dev, adapter->hw_tcs); |
10254 | 10259 | ||
10255 | if (err) { | 10260 | if (err) { |
@@ -10265,6 +10270,14 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog) | |||
10265 | if (old_prog) | 10270 | if (old_prog) |
10266 | bpf_prog_put(old_prog); | 10271 | bpf_prog_put(old_prog); |
10267 | 10272 | ||
10273 | /* Kick start the NAPI context if there is an AF_XDP socket open | ||
10274 | * on that queue id. This so that receiving will start. | ||
10275 | */ | ||
10276 | if (need_reset && prog) | ||
10277 | for (i = 0; i < adapter->num_rx_queues; i++) | ||
10278 | if (adapter->xdp_ring[i]->xsk_umem) | ||
10279 | (void)ixgbe_xsk_async_xmit(adapter->netdev, i); | ||
10280 | |||
10268 | return 0; | 10281 | return 0; |
10269 | } | 10282 | } |
10270 | 10283 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index 65c3e2c979d4..36a8879536a4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | |||
@@ -144,11 +144,19 @@ static int ixgbe_xsk_umem_enable(struct ixgbe_adapter *adapter, | |||
144 | ixgbe_txrx_ring_disable(adapter, qid); | 144 | ixgbe_txrx_ring_disable(adapter, qid); |
145 | 145 | ||
146 | err = ixgbe_add_xsk_umem(adapter, umem, qid); | 146 | err = ixgbe_add_xsk_umem(adapter, umem, qid); |
147 | if (err) | ||
148 | return err; | ||
147 | 149 | ||
148 | if (if_running) | 150 | if (if_running) { |
149 | ixgbe_txrx_ring_enable(adapter, qid); | 151 | ixgbe_txrx_ring_enable(adapter, qid); |
150 | 152 | ||
151 | return err; | 153 | /* Kick start the NAPI context so that receiving will start */ |
154 | err = ixgbe_xsk_async_xmit(adapter->netdev, qid); | ||
155 | if (err) | ||
156 | return err; | ||
157 | } | ||
158 | |||
159 | return 0; | ||
152 | } | 160 | } |
153 | 161 | ||
154 | static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) | 162 | static int ixgbe_xsk_umem_disable(struct ixgbe_adapter *adapter, u16 qid) |
@@ -634,7 +642,8 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) | |||
634 | dma_addr_t dma; | 642 | dma_addr_t dma; |
635 | 643 | ||
636 | while (budget-- > 0) { | 644 | while (budget-- > 0) { |
637 | if (unlikely(!ixgbe_desc_unused(xdp_ring))) { | 645 | if (unlikely(!ixgbe_desc_unused(xdp_ring)) || |
646 | !netif_carrier_ok(xdp_ring->netdev)) { | ||
638 | work_done = false; | 647 | work_done = false; |
639 | break; | 648 | break; |
640 | } | 649 | } |
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 2f427271a793..292a668ce88e 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -2879,7 +2879,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
2879 | 2879 | ||
2880 | ret = mv643xx_eth_shared_of_probe(pdev); | 2880 | ret = mv643xx_eth_shared_of_probe(pdev); |
2881 | if (ret) | 2881 | if (ret) |
2882 | return ret; | 2882 | goto err_put_clk; |
2883 | pd = dev_get_platdata(&pdev->dev); | 2883 | pd = dev_get_platdata(&pdev->dev); |
2884 | 2884 | ||
2885 | msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? | 2885 | msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? |
@@ -2887,6 +2887,11 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev) | |||
2887 | infer_hw_params(msp); | 2887 | infer_hw_params(msp); |
2888 | 2888 | ||
2889 | return 0; | 2889 | return 0; |
2890 | |||
2891 | err_put_clk: | ||
2892 | if (!IS_ERR(msp->clk)) | ||
2893 | clk_disable_unprepare(msp->clk); | ||
2894 | return ret; | ||
2890 | } | 2895 | } |
2891 | 2896 | ||
2892 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) | 2897 | static int mv643xx_eth_shared_remove(struct platform_device *pdev) |
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 9d4568eb2297..8433fb9c3eee 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -2146,7 +2146,7 @@ err_drop_frame: | |||
2146 | if (unlikely(!skb)) | 2146 | if (unlikely(!skb)) |
2147 | goto err_drop_frame_ret_pool; | 2147 | goto err_drop_frame_ret_pool; |
2148 | 2148 | ||
2149 | dma_sync_single_range_for_cpu(dev->dev.parent, | 2149 | dma_sync_single_range_for_cpu(&pp->bm_priv->pdev->dev, |
2150 | rx_desc->buf_phys_addr, | 2150 | rx_desc->buf_phys_addr, |
2151 | MVNETA_MH_SIZE + NET_SKB_PAD, | 2151 | MVNETA_MH_SIZE + NET_SKB_PAD, |
2152 | rx_bytes, | 2152 | rx_bytes, |
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 04fd1f135011..654ac534b10e 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
152 | memset(p, 0, regs->len); | 152 | memset(p, 0, regs->len); |
153 | memcpy_fromio(p, io, B3_RAM_ADDR); | 153 | memcpy_fromio(p, io, B3_RAM_ADDR); |
154 | 154 | ||
155 | memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, | 155 | if (regs->len > B3_RI_WTO_R1) { |
156 | regs->len - B3_RI_WTO_R1); | 156 | memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, |
157 | regs->len - B3_RI_WTO_R1); | ||
158 | } | ||
157 | } | 159 | } |
158 | 160 | ||
159 | /* Wake on Lan only supported on Yukon chips with rev 1 or above */ | 161 | /* Wake on Lan only supported on Yukon chips with rev 1 or above */ |
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index f3a5fa84860f..57727fe1501e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -5073,7 +5073,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
5073 | INIT_WORK(&hw->restart_work, sky2_restart); | 5073 | INIT_WORK(&hw->restart_work, sky2_restart); |
5074 | 5074 | ||
5075 | pci_set_drvdata(pdev, hw); | 5075 | pci_set_drvdata(pdev, hw); |
5076 | pdev->d3_delay = 200; | 5076 | pdev->d3_delay = 300; |
5077 | 5077 | ||
5078 | return 0; | 5078 | return 0; |
5079 | 5079 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 6b88881b8e35..c1438ae52a11 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -3360,7 +3360,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, | |||
3360 | dev->addr_len = ETH_ALEN; | 3360 | dev->addr_len = ETH_ALEN; |
3361 | mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); | 3361 | mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]); |
3362 | if (!is_valid_ether_addr(dev->dev_addr)) { | 3362 | if (!is_valid_ether_addr(dev->dev_addr)) { |
3363 | en_err(priv, "Port: %d, invalid mac burned: %pM, quiting\n", | 3363 | en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n", |
3364 | priv->port, dev->dev_addr); | 3364 | priv->port, dev->dev_addr); |
3365 | err = -EINVAL; | 3365 | err = -EINVAL; |
3366 | goto out; | 3366 | goto out; |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9a0881cb7f51..6c01314e87b0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, | |||
617 | } | 617 | } |
618 | #endif | 618 | #endif |
619 | 619 | ||
620 | #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) | ||
621 | |||
620 | /* We reach this function only after checking that any of | 622 | /* We reach this function only after checking that any of |
621 | * the (IPv4 | IPv6) bits are set in cqe->status. | 623 | * the (IPv4 | IPv6) bits are set in cqe->status. |
622 | */ | 624 | */ |
@@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, | |||
624 | netdev_features_t dev_features) | 626 | netdev_features_t dev_features) |
625 | { | 627 | { |
626 | __wsum hw_checksum = 0; | 628 | __wsum hw_checksum = 0; |
629 | void *hdr; | ||
630 | |||
631 | /* CQE csum doesn't cover padding octets in short ethernet | ||
632 | * frames. And the pad field is appended prior to calculating | ||
633 | * and appending the FCS field. | ||
634 | * | ||
635 | * Detecting these padded frames requires to verify and parse | ||
636 | * IP headers, so we simply force all those small frames to skip | ||
637 | * checksum complete. | ||
638 | */ | ||
639 | if (short_frame(skb->len)) | ||
640 | return -EINVAL; | ||
627 | 641 | ||
628 | void *hdr = (u8 *)va + sizeof(struct ethhdr); | 642 | hdr = (u8 *)va + sizeof(struct ethhdr); |
629 | |||
630 | hw_checksum = csum_unfold((__force __sum16)cqe->checksum); | 643 | hw_checksum = csum_unfold((__force __sum16)cqe->checksum); |
631 | 644 | ||
632 | if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && | 645 | if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && |
@@ -819,6 +832,11 @@ xdp_drop_no_cnt: | |||
819 | skb_record_rx_queue(skb, cq_ring); | 832 | skb_record_rx_queue(skb, cq_ring); |
820 | 833 | ||
821 | if (likely(dev->features & NETIF_F_RXCSUM)) { | 834 | if (likely(dev->features & NETIF_F_RXCSUM)) { |
835 | /* TODO: For IP non TCP/UDP packets when csum complete is | ||
836 | * not an option (not supported or any other reason) we can | ||
837 | * actually check cqe IPOK status bit and report | ||
838 | * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE | ||
839 | */ | ||
822 | if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | | 840 | if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | |
823 | MLX4_CQE_STATUS_UDP)) && | 841 | MLX4_CQE_STATUS_UDP)) && |
824 | (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && | 842 | (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3e0fa8a8077b..e267ff93e8a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -1583,6 +1583,24 @@ no_trig: | |||
1583 | spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); | 1583 | spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); |
1584 | } | 1584 | } |
1585 | 1585 | ||
1586 | void mlx5_cmd_flush(struct mlx5_core_dev *dev) | ||
1587 | { | ||
1588 | struct mlx5_cmd *cmd = &dev->cmd; | ||
1589 | int i; | ||
1590 | |||
1591 | for (i = 0; i < cmd->max_reg_cmds; i++) | ||
1592 | while (down_trylock(&cmd->sem)) | ||
1593 | mlx5_cmd_trigger_completions(dev); | ||
1594 | |||
1595 | while (down_trylock(&cmd->pages_sem)) | ||
1596 | mlx5_cmd_trigger_completions(dev); | ||
1597 | |||
1598 | /* Unlock cmdif */ | ||
1599 | up(&cmd->pages_sem); | ||
1600 | for (i = 0; i < cmd->max_reg_cmds; i++) | ||
1601 | up(&cmd->sem); | ||
1602 | } | ||
1603 | |||
1586 | static int status_to_err(u8 status) | 1604 | static int status_to_err(u8 status) |
1587 | { | 1605 | { |
1588 | return status ? -1 : 0; /* TBD more meaningful codes */ | 1606 | return status ? -1 : 0; /* TBD more meaningful codes */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8fa8fdd30b85..448a92561567 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -657,6 +657,7 @@ struct mlx5e_channel_stats { | |||
657 | enum { | 657 | enum { |
658 | MLX5E_STATE_OPENED, | 658 | MLX5E_STATE_OPENED, |
659 | MLX5E_STATE_DESTROYING, | 659 | MLX5E_STATE_DESTROYING, |
660 | MLX5E_STATE_XDP_TX_ENABLED, | ||
660 | }; | 661 | }; |
661 | 662 | ||
662 | struct mlx5e_rqt { | 663 | struct mlx5e_rqt { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 046948ead152..f3c7ab6faea5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | |||
@@ -256,6 +256,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, | |||
256 | e->m_neigh.family = n->ops->family; | 256 | e->m_neigh.family = n->ops->family; |
257 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); | 257 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); |
258 | e->out_dev = out_dev; | 258 | e->out_dev = out_dev; |
259 | e->route_dev = route_dev; | ||
259 | 260 | ||
260 | /* It's important to add the neigh to the hash table before checking | 261 | /* It's important to add the neigh to the hash table before checking |
261 | * the neigh validity state. So if we'll get a notification, in case the | 262 | * the neigh validity state. So if we'll get a notification, in case the |
@@ -369,6 +370,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, | |||
369 | e->m_neigh.family = n->ops->family; | 370 | e->m_neigh.family = n->ops->family; |
370 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); | 371 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); |
371 | e->out_dev = out_dev; | 372 | e->out_dev = out_dev; |
373 | e->route_dev = route_dev; | ||
372 | 374 | ||
373 | /* It's importent to add the neigh to the hash table before checking | 375 | /* It's importent to add the neigh to the hash table before checking |
374 | * the neigh validity state. So if we'll get a notification, in case the | 376 | * the neigh validity state. So if we'll get a notification, in case the |
@@ -612,16 +614,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, | |||
612 | struct mlx5_flow_spec *spec, | 614 | struct mlx5_flow_spec *spec, |
613 | struct tc_cls_flower_offload *f, | 615 | struct tc_cls_flower_offload *f, |
614 | void *headers_c, | 616 | void *headers_c, |
615 | void *headers_v) | 617 | void *headers_v, u8 *match_level) |
616 | { | 618 | { |
617 | int tunnel_type; | 619 | int tunnel_type; |
618 | int err = 0; | 620 | int err = 0; |
619 | 621 | ||
620 | tunnel_type = mlx5e_tc_tun_get_type(filter_dev); | 622 | tunnel_type = mlx5e_tc_tun_get_type(filter_dev); |
621 | if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { | 623 | if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { |
624 | *match_level = MLX5_MATCH_L4; | ||
622 | err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, | 625 | err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, |
623 | headers_c, headers_v); | 626 | headers_c, headers_v); |
624 | } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { | 627 | } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { |
628 | *match_level = MLX5_MATCH_L3; | ||
625 | err = mlx5e_tc_tun_parse_gretap(priv, spec, f, | 629 | err = mlx5e_tc_tun_parse_gretap(priv, spec, f, |
626 | headers_c, headers_v); | 630 | headers_c, headers_v); |
627 | } else { | 631 | } else { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 706ce7bf15e7..b63f15de899d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | |||
@@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, | |||
39 | struct mlx5_flow_spec *spec, | 39 | struct mlx5_flow_spec *spec, |
40 | struct tc_cls_flower_offload *f, | 40 | struct tc_cls_flower_offload *f, |
41 | void *headers_c, | 41 | void *headers_c, |
42 | void *headers_v); | 42 | void *headers_v, u8 *match_level); |
43 | 43 | ||
44 | #endif //__MLX5_EN_TC_TUNNEL_H__ | 44 | #endif //__MLX5_EN_TC_TUNNEL_H__ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 3740177eed09..03b2a9f9c589 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | |||
@@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | |||
365 | int sq_num; | 365 | int sq_num; |
366 | int i; | 366 | int i; |
367 | 367 | ||
368 | if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) | 368 | /* this flag is sufficient, no need to test internal sq state */ |
369 | if (unlikely(!mlx5e_xdp_tx_is_enabled(priv))) | ||
369 | return -ENETDOWN; | 370 | return -ENETDOWN; |
370 | 371 | ||
371 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) | 372 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
@@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | |||
378 | 379 | ||
379 | sq = &priv->channels.c[sq_num]->xdpsq; | 380 | sq = &priv->channels.c[sq_num]->xdpsq; |
380 | 381 | ||
381 | if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) | ||
382 | return -ENETDOWN; | ||
383 | |||
384 | for (i = 0; i < n; i++) { | 382 | for (i = 0; i < n; i++) { |
385 | struct xdp_frame *xdpf = frames[i]; | 383 | struct xdp_frame *xdpf = frames[i]; |
386 | struct mlx5e_xdp_info xdpi; | 384 | struct mlx5e_xdp_info xdpi; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 3a67cb3cd179..ee27a7c8cd87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | |||
@@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq); | |||
50 | int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | 50 | int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, |
51 | u32 flags); | 51 | u32 flags); |
52 | 52 | ||
53 | static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) | ||
54 | { | ||
55 | set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); | ||
56 | } | ||
57 | |||
58 | static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) | ||
59 | { | ||
60 | clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); | ||
61 | /* let other device's napi(s) see our new state */ | ||
62 | synchronize_rcu(); | ||
63 | } | ||
64 | |||
65 | static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) | ||
66 | { | ||
67 | return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); | ||
68 | } | ||
69 | |||
53 | static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) | 70 | static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) |
54 | { | 71 | { |
55 | if (sq->doorbell_cseg) { | 72 | if (sq->doorbell_cseg) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3bbccead2f63..47233b9a4f81 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -354,9 +354,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, | |||
354 | 354 | ||
355 | new_channels.params = priv->channels.params; | 355 | new_channels.params = priv->channels.params; |
356 | new_channels.params.num_channels = count; | 356 | new_channels.params.num_channels = count; |
357 | if (!netif_is_rxfh_configured(priv->netdev)) | ||
358 | mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, | ||
359 | MLX5E_INDIR_RQT_SIZE, count); | ||
360 | 357 | ||
361 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { | 358 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { |
362 | priv->channels.params = new_channels.params; | 359 | priv->channels.params = new_channels.params; |
@@ -372,6 +369,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, | |||
372 | if (arfs_enabled) | 369 | if (arfs_enabled) |
373 | mlx5e_arfs_disable(priv); | 370 | mlx5e_arfs_disable(priv); |
374 | 371 | ||
372 | if (!netif_is_rxfh_configured(priv->netdev)) | ||
373 | mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, | ||
374 | MLX5E_INDIR_RQT_SIZE, count); | ||
375 | |||
375 | /* Switch to new channels, set new parameters and close old ones */ | 376 | /* Switch to new channels, set new parameters and close old ones */ |
376 | mlx5e_switch_priv_channels(priv, &new_channels, NULL); | 377 | mlx5e_switch_priv_channels(priv, &new_channels, NULL); |
377 | 378 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 01819e5c9975..93e50ccd44c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -2938,6 +2938,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) | |||
2938 | 2938 | ||
2939 | mlx5e_build_tx2sq_maps(priv); | 2939 | mlx5e_build_tx2sq_maps(priv); |
2940 | mlx5e_activate_channels(&priv->channels); | 2940 | mlx5e_activate_channels(&priv->channels); |
2941 | mlx5e_xdp_tx_enable(priv); | ||
2941 | netif_tx_start_all_queues(priv->netdev); | 2942 | netif_tx_start_all_queues(priv->netdev); |
2942 | 2943 | ||
2943 | if (mlx5e_is_vport_rep(priv)) | 2944 | if (mlx5e_is_vport_rep(priv)) |
@@ -2959,6 +2960,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) | |||
2959 | */ | 2960 | */ |
2960 | netif_tx_stop_all_queues(priv->netdev); | 2961 | netif_tx_stop_all_queues(priv->netdev); |
2961 | netif_tx_disable(priv->netdev); | 2962 | netif_tx_disable(priv->netdev); |
2963 | mlx5e_xdp_tx_disable(priv); | ||
2962 | mlx5e_deactivate_channels(&priv->channels); | 2964 | mlx5e_deactivate_channels(&priv->channels); |
2963 | } | 2965 | } |
2964 | 2966 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index f2573c2d2b5c..ef9e472daffb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -596,6 +596,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, | |||
596 | if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { | 596 | if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { |
597 | ether_addr_copy(e->h_dest, ha); | 597 | ether_addr_copy(e->h_dest, ha); |
598 | ether_addr_copy(eth->h_dest, ha); | 598 | ether_addr_copy(eth->h_dest, ha); |
599 | /* Update the encap source mac, in case that we delete | ||
600 | * the flows when encap source mac changed. | ||
601 | */ | ||
602 | ether_addr_copy(eth->h_source, e->route_dev->dev_addr); | ||
599 | 603 | ||
600 | mlx5e_tc_encap_flows_add(priv, e); | 604 | mlx5e_tc_encap_flows_add(priv, e); |
601 | } | 605 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index edd722824697..36eafc877e6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | |||
@@ -148,6 +148,7 @@ struct mlx5e_encap_entry { | |||
148 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ | 148 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ |
149 | 149 | ||
150 | struct net_device *out_dev; | 150 | struct net_device *out_dev; |
151 | struct net_device *route_dev; | ||
151 | int tunnel_type; | 152 | int tunnel_type; |
152 | int tunnel_hlen; | 153 | int tunnel_hlen; |
153 | int reformat_type; | 154 | int reformat_type; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index cae6c6d48984..b5c1b039375a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -128,6 +128,7 @@ struct mlx5e_tc_flow_parse_attr { | |||
128 | struct net_device *filter_dev; | 128 | struct net_device *filter_dev; |
129 | struct mlx5_flow_spec spec; | 129 | struct mlx5_flow_spec spec; |
130 | int num_mod_hdr_actions; | 130 | int num_mod_hdr_actions; |
131 | int max_mod_hdr_actions; | ||
131 | void *mod_hdr_actions; | 132 | void *mod_hdr_actions; |
132 | int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; | 133 | int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; |
133 | }; | 134 | }; |
@@ -1302,7 +1303,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, | |||
1302 | static int parse_tunnel_attr(struct mlx5e_priv *priv, | 1303 | static int parse_tunnel_attr(struct mlx5e_priv *priv, |
1303 | struct mlx5_flow_spec *spec, | 1304 | struct mlx5_flow_spec *spec, |
1304 | struct tc_cls_flower_offload *f, | 1305 | struct tc_cls_flower_offload *f, |
1305 | struct net_device *filter_dev) | 1306 | struct net_device *filter_dev, u8 *match_level) |
1306 | { | 1307 | { |
1307 | struct netlink_ext_ack *extack = f->common.extack; | 1308 | struct netlink_ext_ack *extack = f->common.extack; |
1308 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, | 1309 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, |
@@ -1317,7 +1318,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, | |||
1317 | int err = 0; | 1318 | int err = 0; |
1318 | 1319 | ||
1319 | err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, | 1320 | err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, |
1320 | headers_c, headers_v); | 1321 | headers_c, headers_v, match_level); |
1321 | if (err) { | 1322 | if (err) { |
1322 | NL_SET_ERR_MSG_MOD(extack, | 1323 | NL_SET_ERR_MSG_MOD(extack, |
1323 | "failed to parse tunnel attributes"); | 1324 | "failed to parse tunnel attributes"); |
@@ -1426,7 +1427,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
1426 | struct mlx5_flow_spec *spec, | 1427 | struct mlx5_flow_spec *spec, |
1427 | struct tc_cls_flower_offload *f, | 1428 | struct tc_cls_flower_offload *f, |
1428 | struct net_device *filter_dev, | 1429 | struct net_device *filter_dev, |
1429 | u8 *match_level) | 1430 | u8 *match_level, u8 *tunnel_match_level) |
1430 | { | 1431 | { |
1431 | struct netlink_ext_ack *extack = f->common.extack; | 1432 | struct netlink_ext_ack *extack = f->common.extack; |
1432 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, | 1433 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, |
@@ -1477,7 +1478,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
1477 | switch (key->addr_type) { | 1478 | switch (key->addr_type) { |
1478 | case FLOW_DISSECTOR_KEY_IPV4_ADDRS: | 1479 | case FLOW_DISSECTOR_KEY_IPV4_ADDRS: |
1479 | case FLOW_DISSECTOR_KEY_IPV6_ADDRS: | 1480 | case FLOW_DISSECTOR_KEY_IPV6_ADDRS: |
1480 | if (parse_tunnel_attr(priv, spec, f, filter_dev)) | 1481 | if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) |
1481 | return -EOPNOTSUPP; | 1482 | return -EOPNOTSUPP; |
1482 | break; | 1483 | break; |
1483 | default: | 1484 | default: |
@@ -1826,11 +1827,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv, | |||
1826 | struct mlx5_core_dev *dev = priv->mdev; | 1827 | struct mlx5_core_dev *dev = priv->mdev; |
1827 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 1828 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
1828 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | 1829 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
1830 | u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; | ||
1829 | struct mlx5_eswitch_rep *rep; | 1831 | struct mlx5_eswitch_rep *rep; |
1830 | u8 match_level; | ||
1831 | int err; | 1832 | int err; |
1832 | 1833 | ||
1833 | err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level); | 1834 | err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); |
1834 | 1835 | ||
1835 | if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { | 1836 | if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { |
1836 | rep = rpriv->rep; | 1837 | rep = rpriv->rep; |
@@ -1846,10 +1847,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv, | |||
1846 | } | 1847 | } |
1847 | } | 1848 | } |
1848 | 1849 | ||
1849 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) | 1850 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { |
1850 | flow->esw_attr->match_level = match_level; | 1851 | flow->esw_attr->match_level = match_level; |
1851 | else | 1852 | flow->esw_attr->tunnel_match_level = tunnel_match_level; |
1853 | } else { | ||
1852 | flow->nic_attr->match_level = match_level; | 1854 | flow->nic_attr->match_level = match_level; |
1855 | } | ||
1853 | 1856 | ||
1854 | return err; | 1857 | return err; |
1855 | } | 1858 | } |
@@ -1934,9 +1937,9 @@ static struct mlx5_fields fields[] = { | |||
1934 | OFFLOAD(UDP_DPORT, 2, udp.dest, 0), | 1937 | OFFLOAD(UDP_DPORT, 2, udp.dest, 0), |
1935 | }; | 1938 | }; |
1936 | 1939 | ||
1937 | /* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at | 1940 | /* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at |
1938 | * max from the SW pedit action. On success, it says how many HW actions were | 1941 | * max from the SW pedit action. On success, attr->num_mod_hdr_actions |
1939 | * actually parsed. | 1942 | * says how many HW actions were actually parsed. |
1940 | */ | 1943 | */ |
1941 | static int offload_pedit_fields(struct pedit_headers *masks, | 1944 | static int offload_pedit_fields(struct pedit_headers *masks, |
1942 | struct pedit_headers *vals, | 1945 | struct pedit_headers *vals, |
@@ -1960,9 +1963,11 @@ static int offload_pedit_fields(struct pedit_headers *masks, | |||
1960 | add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; | 1963 | add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; |
1961 | 1964 | ||
1962 | action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); | 1965 | action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); |
1963 | action = parse_attr->mod_hdr_actions; | 1966 | action = parse_attr->mod_hdr_actions + |
1964 | max_actions = parse_attr->num_mod_hdr_actions; | 1967 | parse_attr->num_mod_hdr_actions * action_size; |
1965 | nactions = 0; | 1968 | |
1969 | max_actions = parse_attr->max_mod_hdr_actions; | ||
1970 | nactions = parse_attr->num_mod_hdr_actions; | ||
1966 | 1971 | ||
1967 | for (i = 0; i < ARRAY_SIZE(fields); i++) { | 1972 | for (i = 0; i < ARRAY_SIZE(fields); i++) { |
1968 | f = &fields[i]; | 1973 | f = &fields[i]; |
@@ -2073,7 +2078,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, | |||
2073 | if (!parse_attr->mod_hdr_actions) | 2078 | if (!parse_attr->mod_hdr_actions) |
2074 | return -ENOMEM; | 2079 | return -ENOMEM; |
2075 | 2080 | ||
2076 | parse_attr->num_mod_hdr_actions = max_actions; | 2081 | parse_attr->max_mod_hdr_actions = max_actions; |
2077 | return 0; | 2082 | return 0; |
2078 | } | 2083 | } |
2079 | 2084 | ||
@@ -2119,9 +2124,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, | |||
2119 | goto out_err; | 2124 | goto out_err; |
2120 | } | 2125 | } |
2121 | 2126 | ||
2122 | err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); | 2127 | if (!parse_attr->mod_hdr_actions) { |
2123 | if (err) | 2128 | err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); |
2124 | goto out_err; | 2129 | if (err) |
2130 | goto out_err; | ||
2131 | } | ||
2125 | 2132 | ||
2126 | err = offload_pedit_fields(masks, vals, parse_attr, extack); | 2133 | err = offload_pedit_fields(masks, vals, parse_attr, extack); |
2127 | if (err < 0) | 2134 | if (err < 0) |
@@ -2179,6 +2186,7 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, | |||
2179 | 2186 | ||
2180 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | 2187 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, |
2181 | struct tcf_exts *exts, | 2188 | struct tcf_exts *exts, |
2189 | u32 actions, | ||
2182 | struct netlink_ext_ack *extack) | 2190 | struct netlink_ext_ack *extack) |
2183 | { | 2191 | { |
2184 | const struct tc_action *a; | 2192 | const struct tc_action *a; |
@@ -2188,7 +2196,11 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
2188 | u16 ethertype; | 2196 | u16 ethertype; |
2189 | int nkeys, i; | 2197 | int nkeys, i; |
2190 | 2198 | ||
2191 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); | 2199 | if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) |
2200 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers); | ||
2201 | else | ||
2202 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); | ||
2203 | |||
2192 | ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); | 2204 | ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); |
2193 | 2205 | ||
2194 | /* for non-IP we only re-write MACs, so we're okay */ | 2206 | /* for non-IP we only re-write MACs, so we're okay */ |
@@ -2245,7 +2257,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv, | |||
2245 | 2257 | ||
2246 | if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) | 2258 | if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) |
2247 | return modify_header_match_supported(&parse_attr->spec, exts, | 2259 | return modify_header_match_supported(&parse_attr->spec, exts, |
2248 | extack); | 2260 | actions, extack); |
2249 | 2261 | ||
2250 | return true; | 2262 | return true; |
2251 | } | 2263 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 598ad7e4d5c9..0e55cd1f2e98 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
387 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); | 387 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); |
388 | contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); | 388 | contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); |
389 | if (unlikely(contig_wqebbs_room < num_wqebbs)) { | 389 | if (unlikely(contig_wqebbs_room < num_wqebbs)) { |
390 | #ifdef CONFIG_MLX5_EN_IPSEC | ||
391 | struct mlx5_wqe_eth_seg cur_eth = wqe->eth; | ||
392 | #endif | ||
390 | mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); | 393 | mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); |
391 | mlx5e_sq_fetch_wqe(sq, &wqe, &pi); | 394 | mlx5e_sq_fetch_wqe(sq, &wqe, &pi); |
395 | #ifdef CONFIG_MLX5_EN_IPSEC | ||
396 | wqe->eth = cur_eth; | ||
397 | #endif | ||
392 | } | 398 | } |
393 | 399 | ||
394 | /* fill wqe */ | 400 | /* fill wqe */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 9c89eea9b2c3..748ff178a1d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | |||
@@ -312,6 +312,7 @@ struct mlx5_esw_flow_attr { | |||
312 | } dests[MLX5_MAX_FLOW_FWD_VPORTS]; | 312 | } dests[MLX5_MAX_FLOW_FWD_VPORTS]; |
313 | u32 mod_hdr_id; | 313 | u32 mod_hdr_id; |
314 | u8 match_level; | 314 | u8 match_level; |
315 | u8 tunnel_match_level; | ||
315 | struct mlx5_fc *counter; | 316 | struct mlx5_fc *counter; |
316 | u32 chain; | 317 | u32 chain; |
317 | u16 prio; | 318 | u16 prio; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 53065b6ae593..d4e6fe5b9300 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -160,14 +160,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, | |||
160 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, | 160 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, |
161 | source_eswitch_owner_vhca_id); | 161 | source_eswitch_owner_vhca_id); |
162 | 162 | ||
163 | if (attr->match_level == MLX5_MATCH_NONE) | 163 | spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; |
164 | spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; | 164 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { |
165 | else | 165 | if (attr->tunnel_match_level != MLX5_MATCH_NONE) |
166 | spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | | 166 | spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; |
167 | MLX5_MATCH_MISC_PARAMETERS; | 167 | if (attr->match_level != MLX5_MATCH_NONE) |
168 | 168 | spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; | |
169 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) | 169 | } else if (attr->match_level != MLX5_MATCH_NONE) { |
170 | spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; | 170 | spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; |
171 | } | ||
171 | 172 | ||
172 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) | 173 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) |
173 | flow_act.modify_id = attr->mod_hdr_id; | 174 | flow_act.modify_id = attr->mod_hdr_id; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index fbc42b7252a9..503035469d2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c | |||
@@ -211,11 +211,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data | |||
211 | enum port_module_event_status_type module_status; | 211 | enum port_module_event_status_type module_status; |
212 | enum port_module_event_error_type error_type; | 212 | enum port_module_event_error_type error_type; |
213 | struct mlx5_eqe_port_module *module_event_eqe; | 213 | struct mlx5_eqe_port_module *module_event_eqe; |
214 | const char *status_str, *error_str; | 214 | const char *status_str; |
215 | u8 module_num; | 215 | u8 module_num; |
216 | 216 | ||
217 | module_event_eqe = &eqe->data.port_module; | 217 | module_event_eqe = &eqe->data.port_module; |
218 | module_num = module_event_eqe->module; | ||
219 | module_status = module_event_eqe->module_status & | 218 | module_status = module_event_eqe->module_status & |
220 | PORT_MODULE_EVENT_MODULE_STATUS_MASK; | 219 | PORT_MODULE_EVENT_MODULE_STATUS_MASK; |
221 | error_type = module_event_eqe->error_type & | 220 | error_type = module_event_eqe->error_type & |
@@ -223,25 +222,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data | |||
223 | 222 | ||
224 | if (module_status < MLX5_MODULE_STATUS_NUM) | 223 | if (module_status < MLX5_MODULE_STATUS_NUM) |
225 | events->pme_stats.status_counters[module_status]++; | 224 | events->pme_stats.status_counters[module_status]++; |
226 | status_str = mlx5_pme_status_to_string(module_status); | ||
227 | 225 | ||
228 | if (module_status == MLX5_MODULE_STATUS_ERROR) { | 226 | if (module_status == MLX5_MODULE_STATUS_ERROR) |
229 | if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) | 227 | if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) |
230 | events->pme_stats.error_counters[error_type]++; | 228 | events->pme_stats.error_counters[error_type]++; |
231 | error_str = mlx5_pme_error_to_string(error_type); | ||
232 | } | ||
233 | 229 | ||
234 | if (!printk_ratelimit()) | 230 | if (!printk_ratelimit()) |
235 | return NOTIFY_OK; | 231 | return NOTIFY_OK; |
236 | 232 | ||
237 | if (module_status == MLX5_MODULE_STATUS_ERROR) | 233 | module_num = module_event_eqe->module; |
234 | status_str = mlx5_pme_status_to_string(module_status); | ||
235 | if (module_status == MLX5_MODULE_STATUS_ERROR) { | ||
236 | const char *error_str = mlx5_pme_error_to_string(error_type); | ||
237 | |||
238 | mlx5_core_err(events->dev, | 238 | mlx5_core_err(events->dev, |
239 | "Port module event[error]: module %u, %s, %s\n", | 239 | "Port module event[error]: module %u, %s, %s\n", |
240 | module_num, status_str, error_str); | 240 | module_num, status_str, error_str); |
241 | else | 241 | } else { |
242 | mlx5_core_info(events->dev, | 242 | mlx5_core_info(events->dev, |
243 | "Port module event: module %u, %s\n", | 243 | "Port module event: module %u, %s\n", |
244 | module_num, status_str); | 244 | module_num, status_str); |
245 | } | ||
245 | 246 | ||
246 | return NOTIFY_OK; | 247 | return NOTIFY_OK; |
247 | } | 248 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 196c07383082..cb9fa3430c53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) | |||
103 | mlx5_core_err(dev, "start\n"); | 103 | mlx5_core_err(dev, "start\n"); |
104 | if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { | 104 | if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { |
105 | dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; | 105 | dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; |
106 | mlx5_cmd_trigger_completions(dev); | 106 | mlx5_cmd_flush(dev); |
107 | } | 107 | } |
108 | 108 | ||
109 | mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); | 109 | mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 5300b0b6d836..4fdac020b795 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | |||
@@ -126,6 +126,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, | |||
126 | struct ptp_system_timestamp *sts); | 126 | struct ptp_system_timestamp *sts); |
127 | 127 | ||
128 | void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); | 128 | void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); |
129 | void mlx5_cmd_flush(struct mlx5_core_dev *dev); | ||
129 | int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); | 130 | int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); |
130 | void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); | 131 | void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); |
131 | 132 | ||
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 32519c93df17..b65e274b02e9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -862,8 +862,9 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, | |||
862 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { | 862 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { |
863 | bool configure = false; | 863 | bool configure = false; |
864 | bool pfc = false; | 864 | bool pfc = false; |
865 | u16 thres_cells; | ||
866 | u16 delay_cells; | ||
865 | bool lossy; | 867 | bool lossy; |
866 | u16 thres; | ||
867 | 868 | ||
868 | for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { | 869 | for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) { |
869 | if (prio_tc[j] == i) { | 870 | if (prio_tc[j] == i) { |
@@ -877,10 +878,11 @@ int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu, | |||
877 | continue; | 878 | continue; |
878 | 879 | ||
879 | lossy = !(pfc || pause_en); | 880 | lossy = !(pfc || pause_en); |
880 | thres = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); | 881 | thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu); |
881 | delay = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, pfc, | 882 | delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay, |
882 | pause_en); | 883 | pfc, pause_en); |
883 | mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres + delay, thres, lossy); | 884 | mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells, |
885 | thres_cells, lossy); | ||
884 | } | 886 | } |
885 | 887 | ||
886 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); | 888 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl); |
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index e23ca90289f7..0a868c829b90 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c | |||
@@ -1291,15 +1291,10 @@ wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, | |||
1291 | 1291 | ||
1292 | static int | 1292 | static int |
1293 | wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, | 1293 | wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, |
1294 | enum alu_op alu_op, bool skip) | 1294 | enum alu_op alu_op) |
1295 | { | 1295 | { |
1296 | const struct bpf_insn *insn = &meta->insn; | 1296 | const struct bpf_insn *insn = &meta->insn; |
1297 | 1297 | ||
1298 | if (skip) { | ||
1299 | meta->skip = true; | ||
1300 | return 0; | ||
1301 | } | ||
1302 | |||
1303 | wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); | 1298 | wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm); |
1304 | wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); | 1299 | wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0); |
1305 | 1300 | ||
@@ -2309,7 +2304,7 @@ static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | |||
2309 | 2304 | ||
2310 | static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | 2305 | static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) |
2311 | { | 2306 | { |
2312 | return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm); | 2307 | return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR); |
2313 | } | 2308 | } |
2314 | 2309 | ||
2315 | static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | 2310 | static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) |
@@ -2319,7 +2314,7 @@ static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | |||
2319 | 2314 | ||
2320 | static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | 2315 | static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) |
2321 | { | 2316 | { |
2322 | return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm); | 2317 | return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND); |
2323 | } | 2318 | } |
2324 | 2319 | ||
2325 | static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | 2320 | static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) |
@@ -2329,7 +2324,7 @@ static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | |||
2329 | 2324 | ||
2330 | static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | 2325 | static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) |
2331 | { | 2326 | { |
2332 | return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm); | 2327 | return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR); |
2333 | } | 2328 | } |
2334 | 2329 | ||
2335 | static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | 2330 | static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) |
@@ -2339,7 +2334,7 @@ static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | |||
2339 | 2334 | ||
2340 | static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | 2335 | static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) |
2341 | { | 2336 | { |
2342 | return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm); | 2337 | return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD); |
2343 | } | 2338 | } |
2344 | 2339 | ||
2345 | static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | 2340 | static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) |
@@ -2349,7 +2344,7 @@ static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | |||
2349 | 2344 | ||
2350 | static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | 2345 | static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) |
2351 | { | 2346 | { |
2352 | return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm); | 2347 | return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB); |
2353 | } | 2348 | } |
2354 | 2349 | ||
2355 | static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) | 2350 | static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) |
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 24a90163775e..2d8a77cc156b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h | |||
@@ -53,7 +53,7 @@ | |||
53 | extern const struct qed_common_ops qed_common_ops_pass; | 53 | extern const struct qed_common_ops qed_common_ops_pass; |
54 | 54 | ||
55 | #define QED_MAJOR_VERSION 8 | 55 | #define QED_MAJOR_VERSION 8 |
56 | #define QED_MINOR_VERSION 33 | 56 | #define QED_MINOR_VERSION 37 |
57 | #define QED_REVISION_VERSION 0 | 57 | #define QED_REVISION_VERSION 0 |
58 | #define QED_ENGINEERING_VERSION 20 | 58 | #define QED_ENGINEERING_VERSION 20 |
59 | 59 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index beb8e5d6401a..ded556b7bab5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c | |||
@@ -1688,6 +1688,15 @@ qed_iwarp_parse_rx_pkt(struct qed_hwfn *p_hwfn, | |||
1688 | 1688 | ||
1689 | eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); | 1689 | eth_hlen = ETH_HLEN + (vlan_valid ? sizeof(u32) : 0); |
1690 | 1690 | ||
1691 | if (!ether_addr_equal(ethh->h_dest, | ||
1692 | p_hwfn->p_rdma_info->iwarp.mac_addr)) { | ||
1693 | DP_VERBOSE(p_hwfn, | ||
1694 | QED_MSG_RDMA, | ||
1695 | "Got unexpected mac %pM instead of %pM\n", | ||
1696 | ethh->h_dest, p_hwfn->p_rdma_info->iwarp.mac_addr); | ||
1697 | return -EINVAL; | ||
1698 | } | ||
1699 | |||
1691 | ether_addr_copy(remote_mac_addr, ethh->h_source); | 1700 | ether_addr_copy(remote_mac_addr, ethh->h_source); |
1692 | ether_addr_copy(local_mac_addr, ethh->h_dest); | 1701 | ether_addr_copy(local_mac_addr, ethh->h_dest); |
1693 | 1702 | ||
@@ -2605,7 +2614,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2605 | struct qed_iwarp_info *iwarp_info; | 2614 | struct qed_iwarp_info *iwarp_info; |
2606 | struct qed_ll2_acquire_data data; | 2615 | struct qed_ll2_acquire_data data; |
2607 | struct qed_ll2_cbs cbs; | 2616 | struct qed_ll2_cbs cbs; |
2608 | u32 mpa_buff_size; | 2617 | u32 buff_size; |
2609 | u16 n_ooo_bufs; | 2618 | u16 n_ooo_bufs; |
2610 | int rc = 0; | 2619 | int rc = 0; |
2611 | int i; | 2620 | int i; |
@@ -2632,7 +2641,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2632 | 2641 | ||
2633 | memset(&data, 0, sizeof(data)); | 2642 | memset(&data, 0, sizeof(data)); |
2634 | data.input.conn_type = QED_LL2_TYPE_IWARP; | 2643 | data.input.conn_type = QED_LL2_TYPE_IWARP; |
2635 | data.input.mtu = QED_IWARP_MAX_SYN_PKT_SIZE; | 2644 | data.input.mtu = params->max_mtu; |
2636 | data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; | 2645 | data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE; |
2637 | data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; | 2646 | data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE; |
2638 | data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ | 2647 | data.input.tx_max_bds_per_packet = 1; /* will never be fragmented */ |
@@ -2654,9 +2663,10 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2654 | goto err; | 2663 | goto err; |
2655 | } | 2664 | } |
2656 | 2665 | ||
2666 | buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); | ||
2657 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, | 2667 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, |
2658 | QED_IWARP_LL2_SYN_RX_SIZE, | 2668 | QED_IWARP_LL2_SYN_RX_SIZE, |
2659 | QED_IWARP_MAX_SYN_PKT_SIZE, | 2669 | buff_size, |
2660 | iwarp_info->ll2_syn_handle); | 2670 | iwarp_info->ll2_syn_handle); |
2661 | if (rc) | 2671 | if (rc) |
2662 | goto err; | 2672 | goto err; |
@@ -2710,10 +2720,9 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2710 | if (rc) | 2720 | if (rc) |
2711 | goto err; | 2721 | goto err; |
2712 | 2722 | ||
2713 | mpa_buff_size = QED_IWARP_MAX_BUF_SIZE(params->max_mtu); | ||
2714 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, | 2723 | rc = qed_iwarp_ll2_alloc_buffers(p_hwfn, |
2715 | data.input.rx_num_desc, | 2724 | data.input.rx_num_desc, |
2716 | mpa_buff_size, | 2725 | buff_size, |
2717 | iwarp_info->ll2_mpa_handle); | 2726 | iwarp_info->ll2_mpa_handle); |
2718 | if (rc) | 2727 | if (rc) |
2719 | goto err; | 2728 | goto err; |
@@ -2726,7 +2735,7 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn, | |||
2726 | 2735 | ||
2727 | iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; | 2736 | iwarp_info->max_num_partial_fpdus = (u16)p_hwfn->p_rdma_info->num_qps; |
2728 | 2737 | ||
2729 | iwarp_info->mpa_intermediate_buf = kzalloc(mpa_buff_size, GFP_KERNEL); | 2738 | iwarp_info->mpa_intermediate_buf = kzalloc(buff_size, GFP_KERNEL); |
2730 | if (!iwarp_info->mpa_intermediate_buf) | 2739 | if (!iwarp_info->mpa_intermediate_buf) |
2731 | goto err; | 2740 | goto err; |
2732 | 2741 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h index b8f612d00241..7ac959038324 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.h | |||
@@ -46,7 +46,6 @@ enum qed_iwarp_qp_state qed_roce2iwarp_state(enum qed_roce_qp_state state); | |||
46 | 46 | ||
47 | #define QED_IWARP_LL2_SYN_TX_SIZE (128) | 47 | #define QED_IWARP_LL2_SYN_TX_SIZE (128) |
48 | #define QED_IWARP_LL2_SYN_RX_SIZE (256) | 48 | #define QED_IWARP_LL2_SYN_RX_SIZE (256) |
49 | #define QED_IWARP_MAX_SYN_PKT_SIZE (128) | ||
50 | 49 | ||
51 | #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) | 50 | #define QED_IWARP_LL2_OOO_DEF_TX_SIZE (256) |
52 | #define QED_IWARP_MAX_OOO (16) | 51 | #define QED_IWARP_MAX_OOO (16) |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index e68ca83ae915..58be1c4c6668 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c | |||
@@ -2216,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, | |||
2216 | u16 num_queues = 0; | 2216 | u16 num_queues = 0; |
2217 | 2217 | ||
2218 | /* Since the feature controls only queue-zones, | 2218 | /* Since the feature controls only queue-zones, |
2219 | * make sure we have the contexts [rx, tx, xdp] to | 2219 | * make sure we have the contexts [rx, xdp, tcs] to |
2220 | * match. | 2220 | * match. |
2221 | */ | 2221 | */ |
2222 | for_each_hwfn(cdev, i) { | 2222 | for_each_hwfn(cdev, i) { |
@@ -2226,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, | |||
2226 | u16 cids; | 2226 | u16 cids; |
2227 | 2227 | ||
2228 | cids = hwfn->pf_params.eth_pf_params.num_cons; | 2228 | cids = hwfn->pf_params.eth_pf_params.num_cons; |
2229 | num_queues += min_t(u16, l2_queues, cids / 3); | 2229 | cids /= (2 + info->num_tc); |
2230 | num_queues += min_t(u16, l2_queues, cids); | ||
2230 | } | 2231 | } |
2231 | 2232 | ||
2232 | /* queues might theoretically be >256, but interrupts' | 2233 | /* queues might theoretically be >256, but interrupts' |
@@ -2870,7 +2871,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) | |||
2870 | p_hwfn = p_cid->p_owner; | 2871 | p_hwfn = p_cid->p_owner; |
2871 | rc = qed_get_queue_coalesce(p_hwfn, coal, handle); | 2872 | rc = qed_get_queue_coalesce(p_hwfn, coal, handle); |
2872 | if (rc) | 2873 | if (rc) |
2873 | DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); | 2874 | DP_VERBOSE(cdev, QED_MSG_DEBUG, |
2875 | "Unable to read queue coalescing\n"); | ||
2874 | 2876 | ||
2875 | return rc; | 2877 | return rc; |
2876 | } | 2878 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 4179c9013fc6..96ab77ae6af5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h | |||
@@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn); | |||
382 | * @param p_hwfn | 382 | * @param p_hwfn |
383 | */ | 383 | */ |
384 | void qed_consq_free(struct qed_hwfn *p_hwfn); | 384 | void qed_consq_free(struct qed_hwfn *p_hwfn); |
385 | int qed_spq_pend_post(struct qed_hwfn *p_hwfn); | ||
385 | 386 | ||
386 | /** | 387 | /** |
387 | * @file | 388 | * @file |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 888274fa208b..5a495fda9e9d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | |||
@@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn) | |||
604 | 604 | ||
605 | p_ent->ramrod.pf_update.update_mf_vlan_flag = true; | 605 | p_ent->ramrod.pf_update.update_mf_vlan_flag = true; |
606 | p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); | 606 | p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); |
607 | if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) | ||
608 | p_ent->ramrod.pf_update.mf_vlan |= | ||
609 | cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); | ||
607 | 610 | ||
608 | return qed_spq_post(p_hwfn, p_ent, NULL); | 611 | return qed_spq_post(p_hwfn, p_ent, NULL); |
609 | } | 612 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index eb88bbc6b193..ba64ff9bedbd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c | |||
@@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) | |||
397 | 397 | ||
398 | qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); | 398 | qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); |
399 | 399 | ||
400 | /* Attempt to post pending requests */ | ||
401 | spin_lock_bh(&p_hwfn->p_spq->lock); | ||
402 | rc = qed_spq_pend_post(p_hwfn); | ||
403 | spin_unlock_bh(&p_hwfn->p_spq->lock); | ||
404 | |||
400 | return rc; | 405 | return rc; |
401 | } | 406 | } |
402 | 407 | ||
@@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, | |||
767 | return 0; | 772 | return 0; |
768 | } | 773 | } |
769 | 774 | ||
770 | static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) | 775 | int qed_spq_pend_post(struct qed_hwfn *p_hwfn) |
771 | { | 776 | { |
772 | struct qed_spq *p_spq = p_hwfn->p_spq; | 777 | struct qed_spq *p_spq = p_hwfn->p_spq; |
773 | struct qed_spq_entry *p_ent = NULL; | 778 | struct qed_spq_entry *p_ent = NULL; |
@@ -905,7 +910,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, | |||
905 | struct qed_spq_entry *p_ent = NULL; | 910 | struct qed_spq_entry *p_ent = NULL; |
906 | struct qed_spq_entry *tmp; | 911 | struct qed_spq_entry *tmp; |
907 | struct qed_spq_entry *found = NULL; | 912 | struct qed_spq_entry *found = NULL; |
908 | int rc; | ||
909 | 913 | ||
910 | if (!p_hwfn) | 914 | if (!p_hwfn) |
911 | return -EINVAL; | 915 | return -EINVAL; |
@@ -963,12 +967,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, | |||
963 | */ | 967 | */ |
964 | qed_spq_return_entry(p_hwfn, found); | 968 | qed_spq_return_entry(p_hwfn, found); |
965 | 969 | ||
966 | /* Attempt to post pending requests */ | 970 | return 0; |
967 | spin_lock_bh(&p_spq->lock); | ||
968 | rc = qed_spq_pend_post(p_hwfn); | ||
969 | spin_unlock_bh(&p_spq->lock); | ||
970 | |||
971 | return rc; | ||
972 | } | 971 | } |
973 | 972 | ||
974 | int qed_consq_alloc(struct qed_hwfn *p_hwfn) | 973 | int qed_consq_alloc(struct qed_hwfn *p_hwfn) |
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 613249d1e967..730997b13747 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h | |||
@@ -56,7 +56,7 @@ | |||
56 | #include <net/tc_act/tc_gact.h> | 56 | #include <net/tc_act/tc_gact.h> |
57 | 57 | ||
58 | #define QEDE_MAJOR_VERSION 8 | 58 | #define QEDE_MAJOR_VERSION 8 |
59 | #define QEDE_MINOR_VERSION 33 | 59 | #define QEDE_MINOR_VERSION 37 |
60 | #define QEDE_REVISION_VERSION 0 | 60 | #define QEDE_REVISION_VERSION 0 |
61 | #define QEDE_ENGINEERING_VERSION 20 | 61 | #define QEDE_ENGINEERING_VERSION 20 |
62 | #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ | 62 | #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ |
@@ -494,6 +494,9 @@ struct qede_reload_args { | |||
494 | 494 | ||
495 | /* Datapath functions definition */ | 495 | /* Datapath functions definition */ |
496 | netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); | 496 | netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); |
497 | u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, | ||
498 | struct net_device *sb_dev, | ||
499 | select_queue_fallback_t fallback); | ||
497 | netdev_features_t qede_features_check(struct sk_buff *skb, | 500 | netdev_features_t qede_features_check(struct sk_buff *skb, |
498 | struct net_device *dev, | 501 | struct net_device *dev, |
499 | netdev_features_t features); | 502 | netdev_features_t features); |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index bdf816fe5a16..31b046e24565 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c | |||
@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1695 | return NETDEV_TX_OK; | 1695 | return NETDEV_TX_OK; |
1696 | } | 1696 | } |
1697 | 1697 | ||
1698 | u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, | ||
1699 | struct net_device *sb_dev, | ||
1700 | select_queue_fallback_t fallback) | ||
1701 | { | ||
1702 | struct qede_dev *edev = netdev_priv(dev); | ||
1703 | int total_txq; | ||
1704 | |||
1705 | total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; | ||
1706 | |||
1707 | return QEDE_TSS_COUNT(edev) ? | ||
1708 | fallback(dev, skb, NULL) % total_txq : 0; | ||
1709 | } | ||
1710 | |||
1698 | /* 8B udp header + 8B base tunnel header + 32B option length */ | 1711 | /* 8B udp header + 8B base tunnel header + 32B option length */ |
1699 | #define QEDE_MAX_TUN_HDR_LEN 48 | 1712 | #define QEDE_MAX_TUN_HDR_LEN 48 |
1700 | 1713 | ||
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 5a74fcbdbc2b..9790f26d17c4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = { | |||
631 | .ndo_open = qede_open, | 631 | .ndo_open = qede_open, |
632 | .ndo_stop = qede_close, | 632 | .ndo_stop = qede_close, |
633 | .ndo_start_xmit = qede_start_xmit, | 633 | .ndo_start_xmit = qede_start_xmit, |
634 | .ndo_select_queue = qede_select_queue, | ||
634 | .ndo_set_rx_mode = qede_set_rx_mode, | 635 | .ndo_set_rx_mode = qede_set_rx_mode, |
635 | .ndo_set_mac_address = qede_set_mac_addr, | 636 | .ndo_set_mac_address = qede_set_mac_addr, |
636 | .ndo_validate_addr = eth_validate_addr, | 637 | .ndo_validate_addr = eth_validate_addr, |
@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = { | |||
666 | .ndo_open = qede_open, | 667 | .ndo_open = qede_open, |
667 | .ndo_stop = qede_close, | 668 | .ndo_stop = qede_close, |
668 | .ndo_start_xmit = qede_start_xmit, | 669 | .ndo_start_xmit = qede_start_xmit, |
670 | .ndo_select_queue = qede_select_queue, | ||
669 | .ndo_set_rx_mode = qede_set_rx_mode, | 671 | .ndo_set_rx_mode = qede_set_rx_mode, |
670 | .ndo_set_mac_address = qede_set_mac_addr, | 672 | .ndo_set_mac_address = qede_set_mac_addr, |
671 | .ndo_validate_addr = eth_validate_addr, | 673 | .ndo_validate_addr = eth_validate_addr, |
@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { | |||
684 | .ndo_open = qede_open, | 686 | .ndo_open = qede_open, |
685 | .ndo_stop = qede_close, | 687 | .ndo_stop = qede_close, |
686 | .ndo_start_xmit = qede_start_xmit, | 688 | .ndo_start_xmit = qede_start_xmit, |
689 | .ndo_select_queue = qede_select_queue, | ||
687 | .ndo_set_rx_mode = qede_set_rx_mode, | 690 | .ndo_set_rx_mode = qede_set_rx_mode, |
688 | .ndo_set_mac_address = qede_set_mac_addr, | 691 | .ndo_set_mac_address = qede_set_mac_addr, |
689 | .ndo_validate_addr = eth_validate_addr, | 692 | .ndo_validate_addr = eth_validate_addr, |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index abb94c543aa2..6e36b88ca7c9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -1286,11 +1286,13 @@ static u16 rtl_get_events(struct rtl8169_private *tp) | |||
1286 | static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) | 1286 | static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) |
1287 | { | 1287 | { |
1288 | RTL_W16(tp, IntrStatus, bits); | 1288 | RTL_W16(tp, IntrStatus, bits); |
1289 | mmiowb(); | ||
1289 | } | 1290 | } |
1290 | 1291 | ||
1291 | static void rtl_irq_disable(struct rtl8169_private *tp) | 1292 | static void rtl_irq_disable(struct rtl8169_private *tp) |
1292 | { | 1293 | { |
1293 | RTL_W16(tp, IntrMask, 0); | 1294 | RTL_W16(tp, IntrMask, 0); |
1295 | mmiowb(); | ||
1294 | } | 1296 | } |
1295 | 1297 | ||
1296 | #define RTL_EVENT_NAPI_RX (RxOK | RxErr) | 1298 | #define RTL_EVENT_NAPI_RX (RxOK | RxErr) |
@@ -6072,7 +6074,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
6072 | struct device *d = tp_to_dev(tp); | 6074 | struct device *d = tp_to_dev(tp); |
6073 | dma_addr_t mapping; | 6075 | dma_addr_t mapping; |
6074 | u32 opts[2], len; | 6076 | u32 opts[2], len; |
6075 | bool stop_queue; | ||
6076 | int frags; | 6077 | int frags; |
6077 | 6078 | ||
6078 | if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { | 6079 | if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { |
@@ -6114,6 +6115,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
6114 | 6115 | ||
6115 | txd->opts2 = cpu_to_le32(opts[1]); | 6116 | txd->opts2 = cpu_to_le32(opts[1]); |
6116 | 6117 | ||
6118 | netdev_sent_queue(dev, skb->len); | ||
6119 | |||
6117 | skb_tx_timestamp(skb); | 6120 | skb_tx_timestamp(skb); |
6118 | 6121 | ||
6119 | /* Force memory writes to complete before releasing descriptor */ | 6122 | /* Force memory writes to complete before releasing descriptor */ |
@@ -6126,14 +6129,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
6126 | 6129 | ||
6127 | tp->cur_tx += frags + 1; | 6130 | tp->cur_tx += frags + 1; |
6128 | 6131 | ||
6129 | stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); | 6132 | RTL_W8(tp, TxPoll, NPQ); |
6130 | if (unlikely(stop_queue)) | ||
6131 | netif_stop_queue(dev); | ||
6132 | 6133 | ||
6133 | if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) | 6134 | mmiowb(); |
6134 | RTL_W8(tp, TxPoll, NPQ); | ||
6135 | 6135 | ||
6136 | if (unlikely(stop_queue)) { | 6136 | if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { |
6137 | /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must | ||
6138 | * not miss a ring update when it notices a stopped queue. | ||
6139 | */ | ||
6140 | smp_wmb(); | ||
6141 | netif_stop_queue(dev); | ||
6137 | /* Sync with rtl_tx: | 6142 | /* Sync with rtl_tx: |
6138 | * - publish queue status and cur_tx ring index (write barrier) | 6143 | * - publish queue status and cur_tx ring index (write barrier) |
6139 | * - refresh dirty_tx ring index (read barrier). | 6144 | * - refresh dirty_tx ring index (read barrier). |
@@ -6483,7 +6488,9 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) | |||
6483 | 6488 | ||
6484 | if (work_done < budget) { | 6489 | if (work_done < budget) { |
6485 | napi_complete_done(napi, work_done); | 6490 | napi_complete_done(napi, work_done); |
6491 | |||
6486 | rtl_irq_enable(tp); | 6492 | rtl_irq_enable(tp); |
6493 | mmiowb(); | ||
6487 | } | 6494 | } |
6488 | 6495 | ||
6489 | return work_done; | 6496 | return work_done; |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 2f2bda68d861..c08034154a9a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -6115,7 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, | |||
6115 | static int efx_ef10_mtd_probe(struct efx_nic *efx) | 6115 | static int efx_ef10_mtd_probe(struct efx_nic *efx) |
6116 | { | 6116 | { |
6117 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); | 6117 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); |
6118 | DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT); | 6118 | DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; |
6119 | struct efx_mcdi_mtd_partition *parts; | 6119 | struct efx_mcdi_mtd_partition *parts; |
6120 | size_t outlen, n_parts_total, i, n_parts; | 6120 | size_t outlen, n_parts_total, i, n_parts; |
6121 | unsigned int type; | 6121 | unsigned int type; |
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 15c62c160953..be47d864f8b9 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c | |||
@@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep) | |||
1037 | skb = ep->tx_skbuff[entry]; | 1037 | skb = ep->tx_skbuff[entry]; |
1038 | pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, | 1038 | pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, |
1039 | skb->len, PCI_DMA_TODEVICE); | 1039 | skb->len, PCI_DMA_TODEVICE); |
1040 | dev_kfree_skb_irq(skb); | 1040 | dev_consume_skb_irq(skb); |
1041 | ep->tx_skbuff[entry] = NULL; | 1041 | ep->tx_skbuff[entry] = NULL; |
1042 | } | 1042 | } |
1043 | 1043 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index 20299f6f65fc..736e29635b77 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
@@ -241,15 +241,18 @@ static inline void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts) | |||
241 | static int dwmac4_rx_check_timestamp(void *desc) | 241 | static int dwmac4_rx_check_timestamp(void *desc) |
242 | { | 242 | { |
243 | struct dma_desc *p = (struct dma_desc *)desc; | 243 | struct dma_desc *p = (struct dma_desc *)desc; |
244 | unsigned int rdes0 = le32_to_cpu(p->des0); | ||
245 | unsigned int rdes1 = le32_to_cpu(p->des1); | ||
246 | unsigned int rdes3 = le32_to_cpu(p->des3); | ||
244 | u32 own, ctxt; | 247 | u32 own, ctxt; |
245 | int ret = 1; | 248 | int ret = 1; |
246 | 249 | ||
247 | own = p->des3 & RDES3_OWN; | 250 | own = rdes3 & RDES3_OWN; |
248 | ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) | 251 | ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR) |
249 | >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); | 252 | >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); |
250 | 253 | ||
251 | if (likely(!own && ctxt)) { | 254 | if (likely(!own && ctxt)) { |
252 | if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) | 255 | if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff)) |
253 | /* Corrupted value */ | 256 | /* Corrupted value */ |
254 | ret = -EINVAL; | 257 | ret = -EINVAL; |
255 | else | 258 | else |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d1f61c25d82b..3c749c327cbd 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | |||
@@ -696,33 +696,38 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, | |||
696 | struct ethtool_eee *edata) | 696 | struct ethtool_eee *edata) |
697 | { | 697 | { |
698 | struct stmmac_priv *priv = netdev_priv(dev); | 698 | struct stmmac_priv *priv = netdev_priv(dev); |
699 | int ret; | ||
699 | 700 | ||
700 | priv->eee_enabled = edata->eee_enabled; | 701 | if (!edata->eee_enabled) { |
701 | |||
702 | if (!priv->eee_enabled) | ||
703 | stmmac_disable_eee_mode(priv); | 702 | stmmac_disable_eee_mode(priv); |
704 | else { | 703 | } else { |
705 | /* We are asking for enabling the EEE but it is safe | 704 | /* We are asking for enabling the EEE but it is safe |
706 | * to verify all by invoking the eee_init function. | 705 | * to verify all by invoking the eee_init function. |
707 | * In case of failure it will return an error. | 706 | * In case of failure it will return an error. |
708 | */ | 707 | */ |
709 | priv->eee_enabled = stmmac_eee_init(priv); | 708 | edata->eee_enabled = stmmac_eee_init(priv); |
710 | if (!priv->eee_enabled) | 709 | if (!edata->eee_enabled) |
711 | return -EOPNOTSUPP; | 710 | return -EOPNOTSUPP; |
712 | |||
713 | /* Do not change tx_lpi_timer in case of failure */ | ||
714 | priv->tx_lpi_timer = edata->tx_lpi_timer; | ||
715 | } | 711 | } |
716 | 712 | ||
717 | return phy_ethtool_set_eee(dev->phydev, edata); | 713 | ret = phy_ethtool_set_eee(dev->phydev, edata); |
714 | if (ret) | ||
715 | return ret; | ||
716 | |||
717 | priv->eee_enabled = edata->eee_enabled; | ||
718 | priv->tx_lpi_timer = edata->tx_lpi_timer; | ||
719 | return 0; | ||
718 | } | 720 | } |
719 | 721 | ||
720 | static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) | 722 | static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) |
721 | { | 723 | { |
722 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); | 724 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); |
723 | 725 | ||
724 | if (!clk) | 726 | if (!clk) { |
725 | return 0; | 727 | clk = priv->plat->clk_ref_rate; |
728 | if (!clk) | ||
729 | return 0; | ||
730 | } | ||
726 | 731 | ||
727 | return (usec * (clk / 1000000)) / 256; | 732 | return (usec * (clk / 1000000)) / 256; |
728 | } | 733 | } |
@@ -731,8 +736,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) | |||
731 | { | 736 | { |
732 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); | 737 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); |
733 | 738 | ||
734 | if (!clk) | 739 | if (!clk) { |
735 | return 0; | 740 | clk = priv->plat->clk_ref_rate; |
741 | if (!clk) | ||
742 | return 0; | ||
743 | } | ||
736 | 744 | ||
737 | return (riwt * 256) / (clk / 1000000); | 745 | return (riwt * 256) / (clk / 1000000); |
738 | } | 746 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 5afba69981cf..685d20472358 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -3023,10 +3023,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3023 | 3023 | ||
3024 | tx_q = &priv->tx_queue[queue]; | 3024 | tx_q = &priv->tx_queue[queue]; |
3025 | 3025 | ||
3026 | if (priv->tx_path_in_lpi_mode) | ||
3027 | stmmac_disable_eee_mode(priv); | ||
3028 | |||
3026 | /* Manage oversized TCP frames for GMAC4 device */ | 3029 | /* Manage oversized TCP frames for GMAC4 device */ |
3027 | if (skb_is_gso(skb) && priv->tso) { | 3030 | if (skb_is_gso(skb) && priv->tso) { |
3028 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) | 3031 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { |
3032 | /* | ||
3033 | * There is no way to determine the number of TSO | ||
3034 | * capable Queues. Let's use always the Queue 0 | ||
3035 | * because if TSO is supported then at least this | ||
3036 | * one will be capable. | ||
3037 | */ | ||
3038 | skb_set_queue_mapping(skb, 0); | ||
3039 | |||
3029 | return stmmac_tso_xmit(skb, dev); | 3040 | return stmmac_tso_xmit(skb, dev); |
3041 | } | ||
3030 | } | 3042 | } |
3031 | 3043 | ||
3032 | if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { | 3044 | if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { |
@@ -3041,9 +3053,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3041 | return NETDEV_TX_BUSY; | 3053 | return NETDEV_TX_BUSY; |
3042 | } | 3054 | } |
3043 | 3055 | ||
3044 | if (priv->tx_path_in_lpi_mode) | ||
3045 | stmmac_disable_eee_mode(priv); | ||
3046 | |||
3047 | entry = tx_q->cur_tx; | 3056 | entry = tx_q->cur_tx; |
3048 | first_entry = entry; | 3057 | first_entry = entry; |
3049 | WARN_ON(tx_q->tx_skbuff[first_entry]); | 3058 | WARN_ON(tx_q->tx_skbuff[first_entry]); |
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 7ec4eb74fe21..6fc05c106afc 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c | |||
@@ -1898,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) | |||
1898 | cp->net_stats[ring].tx_packets++; | 1898 | cp->net_stats[ring].tx_packets++; |
1899 | cp->net_stats[ring].tx_bytes += skb->len; | 1899 | cp->net_stats[ring].tx_bytes += skb->len; |
1900 | spin_unlock(&cp->stat_lock[ring]); | 1900 | spin_unlock(&cp->stat_lock[ring]); |
1901 | dev_kfree_skb_irq(skb); | 1901 | dev_consume_skb_irq(skb); |
1902 | } | 1902 | } |
1903 | cp->tx_old[ring] = entry; | 1903 | cp->tx_old[ring] = entry; |
1904 | 1904 | ||
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 720b7ac77f3b..e9b757b03b56 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c | |||
@@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp) | |||
781 | 781 | ||
782 | DTX(("skb(%p) ", skb)); | 782 | DTX(("skb(%p) ", skb)); |
783 | bp->tx_skbs[elem] = NULL; | 783 | bp->tx_skbs[elem] = NULL; |
784 | dev_kfree_skb_irq(skb); | 784 | dev_consume_skb_irq(skb); |
785 | 785 | ||
786 | elem = NEXT_TX(elem); | 786 | elem = NEXT_TX(elem); |
787 | } | 787 | } |
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index ff641cf30a4e..d007dfeba5c3 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c | |||
@@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp) | |||
1962 | this = &txbase[elem]; | 1962 | this = &txbase[elem]; |
1963 | } | 1963 | } |
1964 | 1964 | ||
1965 | dev_kfree_skb_irq(skb); | 1965 | dev_consume_skb_irq(skb); |
1966 | dev->stats.tx_packets++; | 1966 | dev->stats.tx_packets++; |
1967 | } | 1967 | } |
1968 | hp->tx_old = elem; | 1968 | hp->tx_old = elem; |
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index dc966ddb6d81..b24c11187017 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c | |||
@@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv) | |||
1739 | tx_level -= db->rptr->len; /* '-' koz len is negative */ | 1739 | tx_level -= db->rptr->len; /* '-' koz len is negative */ |
1740 | 1740 | ||
1741 | /* now should come skb pointer - free it */ | 1741 | /* now should come skb pointer - free it */ |
1742 | dev_kfree_skb_irq(db->rptr->addr.skb); | 1742 | dev_consume_skb_irq(db->rptr->addr.skb); |
1743 | bdx_tx_db_inc_rptr(db); | 1743 | bdx_tx_db_inc_rptr(db); |
1744 | } | 1744 | } |
1745 | 1745 | ||
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 1f612268c998..d847f672a705 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c | |||
@@ -259,7 +259,7 @@ static int netcp_module_probe(struct netcp_device *netcp_device, | |||
259 | const char *name; | 259 | const char *name; |
260 | char node_name[32]; | 260 | char node_name[32]; |
261 | 261 | ||
262 | if (of_property_read_string(node, "label", &name) < 0) { | 262 | if (of_property_read_string(child, "label", &name) < 0) { |
263 | snprintf(node_name, sizeof(node_name), "%pOFn", child); | 263 | snprintf(node_name, sizeof(node_name), "%pOFn", child); |
264 | name = node_name; | 264 | name = node_name; |
265 | } | 265 | } |
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 82412691ee66..27f6cf140845 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
@@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, | |||
1740 | dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], | 1740 | dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], |
1741 | le16_to_cpu(pktlen), DMA_TO_DEVICE); | 1741 | le16_to_cpu(pktlen), DMA_TO_DEVICE); |
1742 | } | 1742 | } |
1743 | dev_kfree_skb_irq(skb); | 1743 | dev_consume_skb_irq(skb); |
1744 | tdinfo->skb = NULL; | 1744 | tdinfo->skb = NULL; |
1745 | } | 1745 | } |
1746 | 1746 | ||
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 38ac8ef41f5f..56b7791911bf 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c | |||
@@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp) | |||
3512 | bp->descr_block_virt->xmt_data[comp].long_1, | 3512 | bp->descr_block_virt->xmt_data[comp].long_1, |
3513 | p_xmt_drv_descr->p_skb->len, | 3513 | p_xmt_drv_descr->p_skb->len, |
3514 | DMA_TO_DEVICE); | 3514 | DMA_TO_DEVICE); |
3515 | dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); | 3515 | dev_consume_skb_irq(p_xmt_drv_descr->p_skb); |
3516 | 3516 | ||
3517 | /* | 3517 | /* |
3518 | * Move to start of next packet by updating completion index | 3518 | * Move to start of next packet by updating completion index |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 58bbba8582b0..3377ac66a347 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -1512,9 +1512,13 @@ static void geneve_link_config(struct net_device *dev, | |||
1512 | } | 1512 | } |
1513 | #if IS_ENABLED(CONFIG_IPV6) | 1513 | #if IS_ENABLED(CONFIG_IPV6) |
1514 | case AF_INET6: { | 1514 | case AF_INET6: { |
1515 | struct rt6_info *rt = rt6_lookup(geneve->net, | 1515 | struct rt6_info *rt; |
1516 | &info->key.u.ipv6.dst, NULL, 0, | 1516 | |
1517 | NULL, 0); | 1517 | if (!__in6_dev_get(dev)) |
1518 | break; | ||
1519 | |||
1520 | rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0, | ||
1521 | NULL, 0); | ||
1518 | 1522 | ||
1519 | if (rt && rt->dst.dev) | 1523 | if (rt && rt->dst.dev) |
1520 | ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; | 1524 | ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; |
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 44de81e5f140..c589f5ae75bb 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c | |||
@@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context) | |||
905 | } | 905 | } |
906 | break; | 906 | break; |
907 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): | 907 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): |
908 | /* rx is starting */ | 908 | /* rx is starting */ |
909 | dev_dbg(printdev(lp), "RX is starting\n"); | 909 | dev_dbg(printdev(lp), "RX is starting\n"); |
910 | mcr20a_handle_rx(lp); | 910 | mcr20a_handle_rx(lp); |
911 | break; | 911 | break; |
912 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): | 912 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): |
913 | if (lp->is_tx) { | 913 | if (lp->is_tx) { |
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 19bdde60680c..07e41c42bcf5 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -100,12 +100,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval, | |||
100 | err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); | 100 | err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); |
101 | if (!err) { | 101 | if (!err) { |
102 | mdev->l3mdev_ops = &ipvl_l3mdev_ops; | 102 | mdev->l3mdev_ops = &ipvl_l3mdev_ops; |
103 | mdev->priv_flags |= IFF_L3MDEV_MASTER; | 103 | mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER; |
104 | } else | 104 | } else |
105 | goto fail; | 105 | goto fail; |
106 | } else if (port->mode == IPVLAN_MODE_L3S) { | 106 | } else if (port->mode == IPVLAN_MODE_L3S) { |
107 | /* Old mode was L3S */ | 107 | /* Old mode was L3S */ |
108 | mdev->priv_flags &= ~IFF_L3MDEV_MASTER; | 108 | mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; |
109 | ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); | 109 | ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); |
110 | mdev->l3mdev_ops = NULL; | 110 | mdev->l3mdev_ops = NULL; |
111 | } | 111 | } |
@@ -167,7 +167,7 @@ static void ipvlan_port_destroy(struct net_device *dev) | |||
167 | struct sk_buff *skb; | 167 | struct sk_buff *skb; |
168 | 168 | ||
169 | if (port->mode == IPVLAN_MODE_L3S) { | 169 | if (port->mode == IPVLAN_MODE_L3S) { |
170 | dev->priv_flags &= ~IFF_L3MDEV_MASTER; | 170 | dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; |
171 | ipvlan_unregister_nf_hook(dev_net(dev)); | 171 | ipvlan_unregister_nf_hook(dev_net(dev)); |
172 | dev->l3mdev_ops = NULL; | 172 | dev->l3mdev_ops = NULL; |
173 | } | 173 | } |
@@ -499,6 +499,8 @@ static int ipvlan_nl_changelink(struct net_device *dev, | |||
499 | 499 | ||
500 | if (!data) | 500 | if (!data) |
501 | return 0; | 501 | return 0; |
502 | if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN)) | ||
503 | return -EPERM; | ||
502 | 504 | ||
503 | if (data[IFLA_IPVLAN_MODE]) { | 505 | if (data[IFLA_IPVLAN_MODE]) { |
504 | u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); | 506 | u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); |
@@ -601,6 +603,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, | |||
601 | struct ipvl_dev *tmp = netdev_priv(phy_dev); | 603 | struct ipvl_dev *tmp = netdev_priv(phy_dev); |
602 | 604 | ||
603 | phy_dev = tmp->phy_dev; | 605 | phy_dev = tmp->phy_dev; |
606 | if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN)) | ||
607 | return -EPERM; | ||
604 | } else if (!netif_is_ipvlan_port(phy_dev)) { | 608 | } else if (!netif_is_ipvlan_port(phy_dev)) { |
605 | /* Exit early if the underlying link is invalid or busy */ | 609 | /* Exit early if the underlying link is invalid or busy */ |
606 | if (phy_dev->type != ARPHRD_ETHER || | 610 | if (phy_dev->type != ARPHRD_ETHER || |
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 18b41bc345ab..6e8807212aa3 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -898,14 +898,14 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
898 | struct phy_txts *phy_txts) | 898 | struct phy_txts *phy_txts) |
899 | { | 899 | { |
900 | struct skb_shared_hwtstamps shhwtstamps; | 900 | struct skb_shared_hwtstamps shhwtstamps; |
901 | struct dp83640_skb_info *skb_info; | ||
901 | struct sk_buff *skb; | 902 | struct sk_buff *skb; |
902 | u64 ns; | ||
903 | u8 overflow; | 903 | u8 overflow; |
904 | u64 ns; | ||
904 | 905 | ||
905 | /* We must already have the skb that triggered this. */ | 906 | /* We must already have the skb that triggered this. */ |
906 | 907 | again: | |
907 | skb = skb_dequeue(&dp83640->tx_queue); | 908 | skb = skb_dequeue(&dp83640->tx_queue); |
908 | |||
909 | if (!skb) { | 909 | if (!skb) { |
910 | pr_debug("have timestamp but tx_queue empty\n"); | 910 | pr_debug("have timestamp but tx_queue empty\n"); |
911 | return; | 911 | return; |
@@ -920,6 +920,11 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
920 | } | 920 | } |
921 | return; | 921 | return; |
922 | } | 922 | } |
923 | skb_info = (struct dp83640_skb_info *)skb->cb; | ||
924 | if (time_after(jiffies, skb_info->tmo)) { | ||
925 | kfree_skb(skb); | ||
926 | goto again; | ||
927 | } | ||
923 | 928 | ||
924 | ns = phy2txts(phy_txts); | 929 | ns = phy2txts(phy_txts); |
925 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | 930 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
@@ -1472,6 +1477,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, | |||
1472 | static void dp83640_txtstamp(struct phy_device *phydev, | 1477 | static void dp83640_txtstamp(struct phy_device *phydev, |
1473 | struct sk_buff *skb, int type) | 1478 | struct sk_buff *skb, int type) |
1474 | { | 1479 | { |
1480 | struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb; | ||
1475 | struct dp83640_private *dp83640 = phydev->priv; | 1481 | struct dp83640_private *dp83640 = phydev->priv; |
1476 | 1482 | ||
1477 | switch (dp83640->hwts_tx_en) { | 1483 | switch (dp83640->hwts_tx_en) { |
@@ -1484,6 +1490,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, | |||
1484 | /* fall through */ | 1490 | /* fall through */ |
1485 | case HWTSTAMP_TX_ON: | 1491 | case HWTSTAMP_TX_ON: |
1486 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | 1492 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
1493 | skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; | ||
1487 | skb_queue_tail(&dp83640->tx_queue, skb); | 1494 | skb_queue_tail(&dp83640->tx_queue, skb); |
1488 | break; | 1495 | break; |
1489 | 1496 | ||
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 2e12f982534f..abb7876a8776 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev) | |||
847 | 847 | ||
848 | /* SGMII-to-Copper mode initialization */ | 848 | /* SGMII-to-Copper mode initialization */ |
849 | if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { | 849 | if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { |
850 | |||
851 | /* Select page 18 */ | 850 | /* Select page 18 */ |
852 | err = marvell_set_page(phydev, 18); | 851 | err = marvell_set_page(phydev, 18); |
853 | if (err < 0) | 852 | if (err < 0) |
@@ -870,21 +869,6 @@ static int m88e1510_config_init(struct phy_device *phydev) | |||
870 | err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); | 869 | err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); |
871 | if (err < 0) | 870 | if (err < 0) |
872 | return err; | 871 | return err; |
873 | |||
874 | /* There appears to be a bug in the 88e1512 when used in | ||
875 | * SGMII to copper mode, where the AN advertisement register | ||
876 | * clears the pause bits each time a negotiation occurs. | ||
877 | * This means we can never be truely sure what was advertised, | ||
878 | * so disable Pause support. | ||
879 | */ | ||
880 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, | ||
881 | phydev->supported); | ||
882 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, | ||
883 | phydev->supported); | ||
884 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, | ||
885 | phydev->advertising); | ||
886 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, | ||
887 | phydev->advertising); | ||
888 | } | 872 | } |
889 | 873 | ||
890 | return m88e1318_config_init(phydev); | 874 | return m88e1318_config_init(phydev); |
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 82ab6ed3b74e..6bac602094bd 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c | |||
@@ -26,6 +26,8 @@ | |||
26 | #include <linux/marvell_phy.h> | 26 | #include <linux/marvell_phy.h> |
27 | #include <linux/phy.h> | 27 | #include <linux/phy.h> |
28 | 28 | ||
29 | #define MDIO_AN_10GBT_CTRL_ADV_NBT_MASK 0x01e0 | ||
30 | |||
29 | enum { | 31 | enum { |
30 | MV_PCS_BASE_T = 0x0000, | 32 | MV_PCS_BASE_T = 0x0000, |
31 | MV_PCS_BASE_R = 0x1000, | 33 | MV_PCS_BASE_R = 0x1000, |
@@ -386,8 +388,10 @@ static int mv3310_config_aneg(struct phy_device *phydev) | |||
386 | else | 388 | else |
387 | reg = 0; | 389 | reg = 0; |
388 | 390 | ||
391 | /* Make sure we clear unsupported 2.5G/5G advertising */ | ||
389 | ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, | 392 | ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, |
390 | MDIO_AN_10GBT_CTRL_ADV10G, reg); | 393 | MDIO_AN_10GBT_CTRL_ADV10G | |
394 | MDIO_AN_10GBT_CTRL_ADV_NBT_MASK, reg); | ||
391 | if (ret < 0) | 395 | if (ret < 0) |
392 | return ret; | 396 | return ret; |
393 | if (ret > 0) | 397 | if (ret > 0) |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 66b9cfe692fc..7368616286ae 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -379,7 +379,6 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) | |||
379 | err = device_register(&bus->dev); | 379 | err = device_register(&bus->dev); |
380 | if (err) { | 380 | if (err) { |
381 | pr_err("mii_bus %s failed to register\n", bus->id); | 381 | pr_err("mii_bus %s failed to register\n", bus->id); |
382 | put_device(&bus->dev); | ||
383 | return -EINVAL; | 382 | return -EINVAL; |
384 | } | 383 | } |
385 | 384 | ||
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 189cd2048c3a..c5675df5fc6f 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -553,7 +553,7 @@ int phy_start_aneg(struct phy_device *phydev) | |||
553 | if (err < 0) | 553 | if (err < 0) |
554 | goto out_unlock; | 554 | goto out_unlock; |
555 | 555 | ||
556 | if (__phy_is_started(phydev)) { | 556 | if (phy_is_started(phydev)) { |
557 | if (phydev->autoneg == AUTONEG_ENABLE) { | 557 | if (phydev->autoneg == AUTONEG_ENABLE) { |
558 | err = phy_check_link_status(phydev); | 558 | err = phy_check_link_status(phydev); |
559 | } else { | 559 | } else { |
@@ -709,7 +709,7 @@ void phy_stop_machine(struct phy_device *phydev) | |||
709 | cancel_delayed_work_sync(&phydev->state_queue); | 709 | cancel_delayed_work_sync(&phydev->state_queue); |
710 | 710 | ||
711 | mutex_lock(&phydev->lock); | 711 | mutex_lock(&phydev->lock); |
712 | if (__phy_is_started(phydev)) | 712 | if (phy_is_started(phydev)) |
713 | phydev->state = PHY_UP; | 713 | phydev->state = PHY_UP; |
714 | mutex_unlock(&phydev->lock); | 714 | mutex_unlock(&phydev->lock); |
715 | } | 715 | } |
@@ -762,9 +762,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) | |||
762 | { | 762 | { |
763 | struct phy_device *phydev = phy_dat; | 763 | struct phy_device *phydev = phy_dat; |
764 | 764 | ||
765 | if (!phy_is_started(phydev)) | ||
766 | return IRQ_NONE; /* It can't be ours. */ | ||
767 | |||
768 | if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) | 765 | if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) |
769 | return IRQ_NONE; | 766 | return IRQ_NONE; |
770 | 767 | ||
@@ -842,15 +839,14 @@ EXPORT_SYMBOL(phy_stop_interrupts); | |||
842 | */ | 839 | */ |
843 | void phy_stop(struct phy_device *phydev) | 840 | void phy_stop(struct phy_device *phydev) |
844 | { | 841 | { |
845 | mutex_lock(&phydev->lock); | 842 | if (!phy_is_started(phydev)) { |
846 | |||
847 | if (!__phy_is_started(phydev)) { | ||
848 | WARN(1, "called from state %s\n", | 843 | WARN(1, "called from state %s\n", |
849 | phy_state_to_str(phydev->state)); | 844 | phy_state_to_str(phydev->state)); |
850 | mutex_unlock(&phydev->lock); | ||
851 | return; | 845 | return; |
852 | } | 846 | } |
853 | 847 | ||
848 | mutex_lock(&phydev->lock); | ||
849 | |||
854 | if (phy_interrupt_is_valid(phydev)) | 850 | if (phy_interrupt_is_valid(phydev)) |
855 | phy_disable_interrupts(phydev); | 851 | phy_disable_interrupts(phydev); |
856 | 852 | ||
@@ -989,8 +985,10 @@ void phy_state_machine(struct work_struct *work) | |||
989 | * state machine would be pointless and possibly error prone when | 985 | * state machine would be pointless and possibly error prone when |
990 | * called from phy_disconnect() synchronously. | 986 | * called from phy_disconnect() synchronously. |
991 | */ | 987 | */ |
988 | mutex_lock(&phydev->lock); | ||
992 | if (phy_polling_mode(phydev) && phy_is_started(phydev)) | 989 | if (phy_polling_mode(phydev) && phy_is_started(phydev)) |
993 | phy_queue_state_machine(phydev, PHY_STATE_TIME); | 990 | phy_queue_state_machine(phydev, PHY_STATE_TIME); |
991 | mutex_unlock(&phydev->lock); | ||
994 | } | 992 | } |
995 | 993 | ||
996 | /** | 994 | /** |
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index e7becc7379d7..938803237d7f 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c | |||
@@ -474,6 +474,17 @@ static void phylink_run_resolve(struct phylink *pl) | |||
474 | queue_work(system_power_efficient_wq, &pl->resolve); | 474 | queue_work(system_power_efficient_wq, &pl->resolve); |
475 | } | 475 | } |
476 | 476 | ||
477 | static void phylink_run_resolve_and_disable(struct phylink *pl, int bit) | ||
478 | { | ||
479 | unsigned long state = pl->phylink_disable_state; | ||
480 | |||
481 | set_bit(bit, &pl->phylink_disable_state); | ||
482 | if (state == 0) { | ||
483 | queue_work(system_power_efficient_wq, &pl->resolve); | ||
484 | flush_work(&pl->resolve); | ||
485 | } | ||
486 | } | ||
487 | |||
477 | static void phylink_fixed_poll(struct timer_list *t) | 488 | static void phylink_fixed_poll(struct timer_list *t) |
478 | { | 489 | { |
479 | struct phylink *pl = container_of(t, struct phylink, link_poll); | 490 | struct phylink *pl = container_of(t, struct phylink, link_poll); |
@@ -924,9 +935,7 @@ void phylink_stop(struct phylink *pl) | |||
924 | if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) | 935 | if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) |
925 | del_timer_sync(&pl->link_poll); | 936 | del_timer_sync(&pl->link_poll); |
926 | 937 | ||
927 | set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); | 938 | phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED); |
928 | queue_work(system_power_efficient_wq, &pl->resolve); | ||
929 | flush_work(&pl->resolve); | ||
930 | } | 939 | } |
931 | EXPORT_SYMBOL_GPL(phylink_stop); | 940 | EXPORT_SYMBOL_GPL(phylink_stop); |
932 | 941 | ||
@@ -1632,9 +1641,7 @@ static void phylink_sfp_link_down(void *upstream) | |||
1632 | 1641 | ||
1633 | ASSERT_RTNL(); | 1642 | ASSERT_RTNL(); |
1634 | 1643 | ||
1635 | set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); | 1644 | phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK); |
1636 | queue_work(system_power_efficient_wq, &pl->resolve); | ||
1637 | flush_work(&pl->resolve); | ||
1638 | } | 1645 | } |
1639 | 1646 | ||
1640 | static void phylink_sfp_link_up(void *upstream) | 1647 | static void phylink_sfp_link_up(void *upstream) |
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index c6010fb1aa0f..cb4a23041a94 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c | |||
@@ -282,6 +282,13 @@ static struct phy_driver realtek_drvs[] = { | |||
282 | .name = "RTL8366RB Gigabit Ethernet", | 282 | .name = "RTL8366RB Gigabit Ethernet", |
283 | .features = PHY_GBIT_FEATURES, | 283 | .features = PHY_GBIT_FEATURES, |
284 | .config_init = &rtl8366rb_config_init, | 284 | .config_init = &rtl8366rb_config_init, |
285 | /* These interrupts are handled by the irq controller | ||
286 | * embedded inside the RTL8366RB, they get unmasked when the | ||
287 | * irq is requested and ACKed by reading the status register, | ||
288 | * which is done by the irqchip code. | ||
289 | */ | ||
290 | .ack_interrupt = genphy_no_ack_interrupt, | ||
291 | .config_intr = genphy_no_config_intr, | ||
285 | .suspend = genphy_suspend, | 292 | .suspend = genphy_suspend, |
286 | .resume = genphy_resume, | 293 | .resume = genphy_resume, |
287 | }, | 294 | }, |
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index ad9db652874d..fef701bfad62 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c | |||
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus) | |||
347 | return ret; | 347 | return ret; |
348 | } | 348 | } |
349 | } | 349 | } |
350 | bus->socket_ops->attach(bus->sfp); | ||
350 | if (bus->started) | 351 | if (bus->started) |
351 | bus->socket_ops->start(bus->sfp); | 352 | bus->socket_ops->start(bus->sfp); |
352 | bus->netdev->sfp_bus = bus; | 353 | bus->netdev->sfp_bus = bus; |
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) | |||
362 | if (bus->registered) { | 363 | if (bus->registered) { |
363 | if (bus->started) | 364 | if (bus->started) |
364 | bus->socket_ops->stop(bus->sfp); | 365 | bus->socket_ops->stop(bus->sfp); |
366 | bus->socket_ops->detach(bus->sfp); | ||
365 | if (bus->phydev && ops && ops->disconnect_phy) | 367 | if (bus->phydev && ops && ops->disconnect_phy) |
366 | ops->disconnect_phy(bus->upstream); | 368 | ops->disconnect_phy(bus->upstream); |
367 | } | 369 | } |
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index fd8bb998ae52..68c8fbf099f8 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c | |||
@@ -184,6 +184,7 @@ struct sfp { | |||
184 | 184 | ||
185 | struct gpio_desc *gpio[GPIO_MAX]; | 185 | struct gpio_desc *gpio[GPIO_MAX]; |
186 | 186 | ||
187 | bool attached; | ||
187 | unsigned int state; | 188 | unsigned int state; |
188 | struct delayed_work poll; | 189 | struct delayed_work poll; |
189 | struct delayed_work timeout; | 190 | struct delayed_work timeout; |
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) | |||
1475 | */ | 1476 | */ |
1476 | switch (sfp->sm_mod_state) { | 1477 | switch (sfp->sm_mod_state) { |
1477 | default: | 1478 | default: |
1478 | if (event == SFP_E_INSERT) { | 1479 | if (event == SFP_E_INSERT && sfp->attached) { |
1479 | sfp_module_tx_disable(sfp); | 1480 | sfp_module_tx_disable(sfp); |
1480 | sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); | 1481 | sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); |
1481 | } | 1482 | } |
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) | |||
1607 | mutex_unlock(&sfp->sm_mutex); | 1608 | mutex_unlock(&sfp->sm_mutex); |
1608 | } | 1609 | } |
1609 | 1610 | ||
1611 | static void sfp_attach(struct sfp *sfp) | ||
1612 | { | ||
1613 | sfp->attached = true; | ||
1614 | if (sfp->state & SFP_F_PRESENT) | ||
1615 | sfp_sm_event(sfp, SFP_E_INSERT); | ||
1616 | } | ||
1617 | |||
1618 | static void sfp_detach(struct sfp *sfp) | ||
1619 | { | ||
1620 | sfp->attached = false; | ||
1621 | sfp_sm_event(sfp, SFP_E_REMOVE); | ||
1622 | } | ||
1623 | |||
1610 | static void sfp_start(struct sfp *sfp) | 1624 | static void sfp_start(struct sfp *sfp) |
1611 | { | 1625 | { |
1612 | sfp_sm_event(sfp, SFP_E_DEV_UP); | 1626 | sfp_sm_event(sfp, SFP_E_DEV_UP); |
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, | |||
1667 | } | 1681 | } |
1668 | 1682 | ||
1669 | static const struct sfp_socket_ops sfp_module_ops = { | 1683 | static const struct sfp_socket_ops sfp_module_ops = { |
1684 | .attach = sfp_attach, | ||
1685 | .detach = sfp_detach, | ||
1670 | .start = sfp_start, | 1686 | .start = sfp_start, |
1671 | .stop = sfp_stop, | 1687 | .stop = sfp_stop, |
1672 | .module_info = sfp_module_info, | 1688 | .module_info = sfp_module_info, |
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev) | |||
1834 | dev_info(sfp->dev, "Host maximum power %u.%uW\n", | 1850 | dev_info(sfp->dev, "Host maximum power %u.%uW\n", |
1835 | sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); | 1851 | sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); |
1836 | 1852 | ||
1837 | sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); | ||
1838 | if (!sfp->sfp_bus) | ||
1839 | return -ENOMEM; | ||
1840 | |||
1841 | /* Get the initial state, and always signal TX disable, | 1853 | /* Get the initial state, and always signal TX disable, |
1842 | * since the network interface will not be up. | 1854 | * since the network interface will not be up. |
1843 | */ | 1855 | */ |
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev) | |||
1848 | sfp->state |= SFP_F_RATE_SELECT; | 1860 | sfp->state |= SFP_F_RATE_SELECT; |
1849 | sfp_set_state(sfp, sfp->state); | 1861 | sfp_set_state(sfp, sfp->state); |
1850 | sfp_module_tx_disable(sfp); | 1862 | sfp_module_tx_disable(sfp); |
1851 | rtnl_lock(); | ||
1852 | if (sfp->state & SFP_F_PRESENT) | ||
1853 | sfp_sm_event(sfp, SFP_E_INSERT); | ||
1854 | rtnl_unlock(); | ||
1855 | 1863 | ||
1856 | for (i = 0; i < GPIO_MAX; i++) { | 1864 | for (i = 0; i < GPIO_MAX; i++) { |
1857 | if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) | 1865 | if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) |
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev) | |||
1884 | dev_warn(sfp->dev, | 1892 | dev_warn(sfp->dev, |
1885 | "No tx_disable pin: SFP modules will always be emitting.\n"); | 1893 | "No tx_disable pin: SFP modules will always be emitting.\n"); |
1886 | 1894 | ||
1895 | sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); | ||
1896 | if (!sfp->sfp_bus) | ||
1897 | return -ENOMEM; | ||
1898 | |||
1887 | return 0; | 1899 | return 0; |
1888 | } | 1900 | } |
1889 | 1901 | ||
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index 31b0acf337e2..64f54b0bbd8c 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h | |||
@@ -7,6 +7,8 @@ | |||
7 | struct sfp; | 7 | struct sfp; |
8 | 8 | ||
9 | struct sfp_socket_ops { | 9 | struct sfp_socket_ops { |
10 | void (*attach)(struct sfp *sfp); | ||
11 | void (*detach)(struct sfp *sfp); | ||
10 | void (*start)(struct sfp *sfp); | 12 | void (*start)(struct sfp *sfp); |
11 | void (*stop)(struct sfp *sfp); | 13 | void (*stop)(struct sfp *sfp); |
12 | int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); | 14 | int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); |
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 74a8782313cf..bd6084e315de 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c | |||
@@ -44,7 +44,10 @@ static int xgmiitorgmii_read_status(struct phy_device *phydev) | |||
44 | u16 val = 0; | 44 | u16 val = 0; |
45 | int err; | 45 | int err; |
46 | 46 | ||
47 | err = priv->phy_drv->read_status(phydev); | 47 | if (priv->phy_drv->read_status) |
48 | err = priv->phy_drv->read_status(phydev); | ||
49 | else | ||
50 | err = genphy_read_status(phydev); | ||
48 | if (err < 0) | 51 | if (err < 0) |
49 | return err; | 52 | return err; |
50 | 53 | ||
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index afd9d25d1992..6ce3f666d142 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team, | |||
256 | } | 256 | } |
257 | } | 257 | } |
258 | 258 | ||
259 | static bool __team_option_inst_tmp_find(const struct list_head *opts, | ||
260 | const struct team_option_inst *needle) | ||
261 | { | ||
262 | struct team_option_inst *opt_inst; | ||
263 | |||
264 | list_for_each_entry(opt_inst, opts, tmp_list) | ||
265 | if (opt_inst == needle) | ||
266 | return true; | ||
267 | return false; | ||
268 | } | ||
269 | |||
270 | static int __team_options_register(struct team *team, | 259 | static int __team_options_register(struct team *team, |
271 | const struct team_option *option, | 260 | const struct team_option *option, |
272 | size_t option_count) | 261 | size_t option_count) |
@@ -1267,7 +1256,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, | |||
1267 | list_add_tail_rcu(&port->list, &team->port_list); | 1256 | list_add_tail_rcu(&port->list, &team->port_list); |
1268 | team_port_enable(team, port); | 1257 | team_port_enable(team, port); |
1269 | __team_compute_features(team); | 1258 | __team_compute_features(team); |
1270 | __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); | 1259 | __team_port_change_port_added(port, !!netif_oper_up(port_dev)); |
1271 | __team_options_change_check(team); | 1260 | __team_options_change_check(team); |
1272 | 1261 | ||
1273 | netdev_info(dev, "Port device %s added\n", portname); | 1262 | netdev_info(dev, "Port device %s added\n", portname); |
@@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
2460 | int err = 0; | 2449 | int err = 0; |
2461 | int i; | 2450 | int i; |
2462 | struct nlattr *nl_option; | 2451 | struct nlattr *nl_option; |
2463 | LIST_HEAD(opt_inst_list); | ||
2464 | 2452 | ||
2465 | rtnl_lock(); | 2453 | rtnl_lock(); |
2466 | 2454 | ||
@@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
2480 | struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; | 2468 | struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; |
2481 | struct nlattr *attr; | 2469 | struct nlattr *attr; |
2482 | struct nlattr *attr_data; | 2470 | struct nlattr *attr_data; |
2471 | LIST_HEAD(opt_inst_list); | ||
2483 | enum team_option_type opt_type; | 2472 | enum team_option_type opt_type; |
2484 | int opt_port_ifindex = 0; /* != 0 for per-port options */ | 2473 | int opt_port_ifindex = 0; /* != 0 for per-port options */ |
2485 | u32 opt_array_index = 0; | 2474 | u32 opt_array_index = 0; |
@@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
2584 | if (err) | 2573 | if (err) |
2585 | goto team_put; | 2574 | goto team_put; |
2586 | opt_inst->changed = true; | 2575 | opt_inst->changed = true; |
2587 | |||
2588 | /* dumb/evil user-space can send us duplicate opt, | ||
2589 | * keep only the last one | ||
2590 | */ | ||
2591 | if (__team_option_inst_tmp_find(&opt_inst_list, | ||
2592 | opt_inst)) | ||
2593 | continue; | ||
2594 | |||
2595 | list_add(&opt_inst->tmp_list, &opt_inst_list); | 2576 | list_add(&opt_inst->tmp_list, &opt_inst_list); |
2596 | } | 2577 | } |
2597 | if (!opt_found) { | 2578 | if (!opt_found) { |
2598 | err = -ENOENT; | 2579 | err = -ENOENT; |
2599 | goto team_put; | 2580 | goto team_put; |
2600 | } | 2581 | } |
2601 | } | ||
2602 | 2582 | ||
2603 | err = team_nl_send_event_options_get(team, &opt_inst_list); | 2583 | err = team_nl_send_event_options_get(team, &opt_inst_list); |
2584 | if (err) | ||
2585 | break; | ||
2586 | } | ||
2604 | 2587 | ||
2605 | team_put: | 2588 | team_put: |
2606 | team_nl_team_put(team); | 2589 | team_nl_team_put(team); |
@@ -2932,7 +2915,7 @@ static int team_device_event(struct notifier_block *unused, | |||
2932 | 2915 | ||
2933 | switch (event) { | 2916 | switch (event) { |
2934 | case NETDEV_UP: | 2917 | case NETDEV_UP: |
2935 | if (netif_carrier_ok(dev)) | 2918 | if (netif_oper_up(dev)) |
2936 | team_port_change_check(port, true); | 2919 | team_port_change_check(port, true); |
2937 | break; | 2920 | break; |
2938 | case NETDEV_DOWN: | 2921 | case NETDEV_DOWN: |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 18656c4094b3..fed298c0cb39 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
866 | if (rtnl_dereference(tun->xdp_prog)) | 866 | if (rtnl_dereference(tun->xdp_prog)) |
867 | sock_set_flag(&tfile->sk, SOCK_XDP); | 867 | sock_set_flag(&tfile->sk, SOCK_XDP); |
868 | 868 | ||
869 | tun_set_real_num_queues(tun); | ||
870 | |||
871 | /* device is allowed to go away first, so no need to hold extra | 869 | /* device is allowed to go away first, so no need to hold extra |
872 | * refcnt. | 870 | * refcnt. |
873 | */ | 871 | */ |
@@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
879 | rcu_assign_pointer(tfile->tun, tun); | 877 | rcu_assign_pointer(tfile->tun, tun); |
880 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); | 878 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); |
881 | tun->numqueues++; | 879 | tun->numqueues++; |
880 | tun_set_real_num_queues(tun); | ||
882 | out: | 881 | out: |
883 | return err; | 882 | return err; |
884 | } | 883 | } |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 735ad838e2ba..18af2f8eee96 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -1201,8 +1201,8 @@ static const struct usb_device_id products[] = { | |||
1201 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ | 1201 | {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ |
1202 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ | 1202 | {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ |
1203 | {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ | 1203 | {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ |
1204 | {QMI_FIXED_INTF(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354 */ | 1204 | {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 8)}, /* Sierra Wireless MC7304/MC7354, WP76xx */ |
1205 | {QMI_FIXED_INTF(0x1199, 0x68c0, 10)}, /* Sierra Wireless MC7304/MC7354 */ | 1205 | {QMI_QUIRK_SET_DTR(0x1199, 0x68c0, 10)},/* Sierra Wireless MC7304/MC7354 */ |
1206 | {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ | 1206 | {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ |
1207 | {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ | 1207 | {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ |
1208 | {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ | 1208 | {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 60dd1ec1665f..86c8c64fbb0f 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -557,6 +557,7 @@ enum spd_duplex { | |||
557 | /* MAC PASSTHRU */ | 557 | /* MAC PASSTHRU */ |
558 | #define AD_MASK 0xfee0 | 558 | #define AD_MASK 0xfee0 |
559 | #define BND_MASK 0x0004 | 559 | #define BND_MASK 0x0004 |
560 | #define BD_MASK 0x0001 | ||
560 | #define EFUSE 0xcfdb | 561 | #define EFUSE 0xcfdb |
561 | #define PASS_THRU_MASK 0x1 | 562 | #define PASS_THRU_MASK 0x1 |
562 | 563 | ||
@@ -1176,9 +1177,9 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) | |||
1176 | return -ENODEV; | 1177 | return -ENODEV; |
1177 | } | 1178 | } |
1178 | } else { | 1179 | } else { |
1179 | /* test for RTL8153-BND */ | 1180 | /* test for RTL8153-BND and RTL8153-BD */ |
1180 | ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); | 1181 | ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_MISC_1); |
1181 | if ((ocp_data & BND_MASK) == 0) { | 1182 | if ((ocp_data & BND_MASK) == 0 && (ocp_data & BD_MASK) == 0) { |
1182 | netif_dbg(tp, probe, tp->netdev, | 1183 | netif_dbg(tp, probe, tp->netdev, |
1183 | "Invalid variant for MAC pass through\n"); | 1184 | "Invalid variant for MAC pass through\n"); |
1184 | return -ENODEV; | 1185 | return -ENODEV; |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8fadd8eaf601..4cfceb789eea 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644); | |||
57 | #define VIRTIO_XDP_TX BIT(0) | 57 | #define VIRTIO_XDP_TX BIT(0) |
58 | #define VIRTIO_XDP_REDIR BIT(1) | 58 | #define VIRTIO_XDP_REDIR BIT(1) |
59 | 59 | ||
60 | #define VIRTIO_XDP_FLAG BIT(0) | ||
61 | |||
60 | /* RX packet size EWMA. The average packet size is used to determine the packet | 62 | /* RX packet size EWMA. The average packet size is used to determine the packet |
61 | * buffer size when refilling RX rings. As the entire RX ring may be refilled | 63 | * buffer size when refilling RX rings. As the entire RX ring may be refilled |
62 | * at once, the weight is chosen so that the EWMA will be insensitive to short- | 64 | * at once, the weight is chosen so that the EWMA will be insensitive to short- |
@@ -252,6 +254,21 @@ struct padded_vnet_hdr { | |||
252 | char padding[4]; | 254 | char padding[4]; |
253 | }; | 255 | }; |
254 | 256 | ||
257 | static bool is_xdp_frame(void *ptr) | ||
258 | { | ||
259 | return (unsigned long)ptr & VIRTIO_XDP_FLAG; | ||
260 | } | ||
261 | |||
262 | static void *xdp_to_ptr(struct xdp_frame *ptr) | ||
263 | { | ||
264 | return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG); | ||
265 | } | ||
266 | |||
267 | static struct xdp_frame *ptr_to_xdp(void *ptr) | ||
268 | { | ||
269 | return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); | ||
270 | } | ||
271 | |||
255 | /* Converting between virtqueue no. and kernel tx/rx queue no. | 272 | /* Converting between virtqueue no. and kernel tx/rx queue no. |
256 | * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq | 273 | * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq |
257 | */ | 274 | */ |
@@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, | |||
462 | 479 | ||
463 | sg_init_one(sq->sg, xdpf->data, xdpf->len); | 480 | sg_init_one(sq->sg, xdpf->data, xdpf->len); |
464 | 481 | ||
465 | err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); | 482 | err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), |
483 | GFP_ATOMIC); | ||
466 | if (unlikely(err)) | 484 | if (unlikely(err)) |
467 | return -ENOSPC; /* Caller handle free/refcnt */ | 485 | return -ENOSPC; /* Caller handle free/refcnt */ |
468 | 486 | ||
@@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev, | |||
482 | { | 500 | { |
483 | struct virtnet_info *vi = netdev_priv(dev); | 501 | struct virtnet_info *vi = netdev_priv(dev); |
484 | struct receive_queue *rq = vi->rq; | 502 | struct receive_queue *rq = vi->rq; |
485 | struct xdp_frame *xdpf_sent; | ||
486 | struct bpf_prog *xdp_prog; | 503 | struct bpf_prog *xdp_prog; |
487 | struct send_queue *sq; | 504 | struct send_queue *sq; |
488 | unsigned int len; | 505 | unsigned int len; |
506 | int packets = 0; | ||
507 | int bytes = 0; | ||
489 | int drops = 0; | 508 | int drops = 0; |
490 | int kicks = 0; | 509 | int kicks = 0; |
491 | int ret, err; | 510 | int ret, err; |
511 | void *ptr; | ||
492 | int i; | 512 | int i; |
493 | 513 | ||
494 | sq = virtnet_xdp_sq(vi); | ||
495 | |||
496 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { | ||
497 | ret = -EINVAL; | ||
498 | drops = n; | ||
499 | goto out; | ||
500 | } | ||
501 | |||
502 | /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this | 514 | /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this |
503 | * indicate XDP resources have been successfully allocated. | 515 | * indicate XDP resources have been successfully allocated. |
504 | */ | 516 | */ |
505 | xdp_prog = rcu_dereference(rq->xdp_prog); | 517 | xdp_prog = rcu_dereference(rq->xdp_prog); |
506 | if (!xdp_prog) { | 518 | if (!xdp_prog) |
507 | ret = -ENXIO; | 519 | return -ENXIO; |
520 | |||
521 | sq = virtnet_xdp_sq(vi); | ||
522 | |||
523 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { | ||
524 | ret = -EINVAL; | ||
508 | drops = n; | 525 | drops = n; |
509 | goto out; | 526 | goto out; |
510 | } | 527 | } |
511 | 528 | ||
512 | /* Free up any pending old buffers before queueing new ones. */ | 529 | /* Free up any pending old buffers before queueing new ones. */ |
513 | while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) | 530 | while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
514 | xdp_return_frame(xdpf_sent); | 531 | if (likely(is_xdp_frame(ptr))) { |
532 | struct xdp_frame *frame = ptr_to_xdp(ptr); | ||
533 | |||
534 | bytes += frame->len; | ||
535 | xdp_return_frame(frame); | ||
536 | } else { | ||
537 | struct sk_buff *skb = ptr; | ||
538 | |||
539 | bytes += skb->len; | ||
540 | napi_consume_skb(skb, false); | ||
541 | } | ||
542 | packets++; | ||
543 | } | ||
515 | 544 | ||
516 | for (i = 0; i < n; i++) { | 545 | for (i = 0; i < n; i++) { |
517 | struct xdp_frame *xdpf = frames[i]; | 546 | struct xdp_frame *xdpf = frames[i]; |
@@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev, | |||
530 | } | 559 | } |
531 | out: | 560 | out: |
532 | u64_stats_update_begin(&sq->stats.syncp); | 561 | u64_stats_update_begin(&sq->stats.syncp); |
562 | sq->stats.bytes += bytes; | ||
563 | sq->stats.packets += packets; | ||
533 | sq->stats.xdp_tx += n; | 564 | sq->stats.xdp_tx += n; |
534 | sq->stats.xdp_tx_drops += drops; | 565 | sq->stats.xdp_tx_drops += drops; |
535 | sq->stats.kicks += kicks; | 566 | sq->stats.kicks += kicks; |
@@ -1332,18 +1363,26 @@ static int virtnet_receive(struct receive_queue *rq, int budget, | |||
1332 | 1363 | ||
1333 | static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) | 1364 | static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) |
1334 | { | 1365 | { |
1335 | struct sk_buff *skb; | ||
1336 | unsigned int len; | 1366 | unsigned int len; |
1337 | unsigned int packets = 0; | 1367 | unsigned int packets = 0; |
1338 | unsigned int bytes = 0; | 1368 | unsigned int bytes = 0; |
1369 | void *ptr; | ||
1339 | 1370 | ||
1340 | while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { | 1371 | while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
1341 | pr_debug("Sent skb %p\n", skb); | 1372 | if (likely(!is_xdp_frame(ptr))) { |
1373 | struct sk_buff *skb = ptr; | ||
1342 | 1374 | ||
1343 | bytes += skb->len; | 1375 | pr_debug("Sent skb %p\n", skb); |
1344 | packets++; | 1376 | |
1377 | bytes += skb->len; | ||
1378 | napi_consume_skb(skb, in_napi); | ||
1379 | } else { | ||
1380 | struct xdp_frame *frame = ptr_to_xdp(ptr); | ||
1345 | 1381 | ||
1346 | napi_consume_skb(skb, in_napi); | 1382 | bytes += frame->len; |
1383 | xdp_return_frame(frame); | ||
1384 | } | ||
1385 | packets++; | ||
1347 | } | 1386 | } |
1348 | 1387 | ||
1349 | /* Avoid overhead when no packets have been processed | 1388 | /* Avoid overhead when no packets have been processed |
@@ -1358,6 +1397,16 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) | |||
1358 | u64_stats_update_end(&sq->stats.syncp); | 1397 | u64_stats_update_end(&sq->stats.syncp); |
1359 | } | 1398 | } |
1360 | 1399 | ||
1400 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) | ||
1401 | { | ||
1402 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) | ||
1403 | return false; | ||
1404 | else if (q < vi->curr_queue_pairs) | ||
1405 | return true; | ||
1406 | else | ||
1407 | return false; | ||
1408 | } | ||
1409 | |||
1361 | static void virtnet_poll_cleantx(struct receive_queue *rq) | 1410 | static void virtnet_poll_cleantx(struct receive_queue *rq) |
1362 | { | 1411 | { |
1363 | struct virtnet_info *vi = rq->vq->vdev->priv; | 1412 | struct virtnet_info *vi = rq->vq->vdev->priv; |
@@ -1365,7 +1414,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) | |||
1365 | struct send_queue *sq = &vi->sq[index]; | 1414 | struct send_queue *sq = &vi->sq[index]; |
1366 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); | 1415 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); |
1367 | 1416 | ||
1368 | if (!sq->napi.weight) | 1417 | if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) |
1369 | return; | 1418 | return; |
1370 | 1419 | ||
1371 | if (__netif_tx_trylock(txq)) { | 1420 | if (__netif_tx_trylock(txq)) { |
@@ -1442,8 +1491,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) | |||
1442 | { | 1491 | { |
1443 | struct send_queue *sq = container_of(napi, struct send_queue, napi); | 1492 | struct send_queue *sq = container_of(napi, struct send_queue, napi); |
1444 | struct virtnet_info *vi = sq->vq->vdev->priv; | 1493 | struct virtnet_info *vi = sq->vq->vdev->priv; |
1445 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); | 1494 | unsigned int index = vq2txq(sq->vq); |
1495 | struct netdev_queue *txq; | ||
1446 | 1496 | ||
1497 | if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { | ||
1498 | /* We don't need to enable cb for XDP */ | ||
1499 | napi_complete_done(napi, 0); | ||
1500 | return 0; | ||
1501 | } | ||
1502 | |||
1503 | txq = netdev_get_tx_queue(vi->dev, index); | ||
1447 | __netif_tx_lock(txq, raw_smp_processor_id()); | 1504 | __netif_tx_lock(txq, raw_smp_processor_id()); |
1448 | free_old_xmit_skbs(sq, true); | 1505 | free_old_xmit_skbs(sq, true); |
1449 | __netif_tx_unlock(txq); | 1506 | __netif_tx_unlock(txq); |
@@ -2395,6 +2452,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
2395 | return -ENOMEM; | 2452 | return -ENOMEM; |
2396 | } | 2453 | } |
2397 | 2454 | ||
2455 | old_prog = rtnl_dereference(vi->rq[0].xdp_prog); | ||
2456 | if (!prog && !old_prog) | ||
2457 | return 0; | ||
2458 | |||
2398 | if (prog) { | 2459 | if (prog) { |
2399 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); | 2460 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); |
2400 | if (IS_ERR(prog)) | 2461 | if (IS_ERR(prog)) |
@@ -2402,36 +2463,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
2402 | } | 2463 | } |
2403 | 2464 | ||
2404 | /* Make sure NAPI is not using any XDP TX queues for RX. */ | 2465 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
2405 | if (netif_running(dev)) | 2466 | if (netif_running(dev)) { |
2406 | for (i = 0; i < vi->max_queue_pairs; i++) | 2467 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2407 | napi_disable(&vi->rq[i].napi); | 2468 | napi_disable(&vi->rq[i].napi); |
2469 | virtnet_napi_tx_disable(&vi->sq[i].napi); | ||
2470 | } | ||
2471 | } | ||
2472 | |||
2473 | if (!prog) { | ||
2474 | for (i = 0; i < vi->max_queue_pairs; i++) { | ||
2475 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); | ||
2476 | if (i == 0) | ||
2477 | virtnet_restore_guest_offloads(vi); | ||
2478 | } | ||
2479 | synchronize_net(); | ||
2480 | } | ||
2408 | 2481 | ||
2409 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); | ||
2410 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); | 2482 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
2411 | if (err) | 2483 | if (err) |
2412 | goto err; | 2484 | goto err; |
2485 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); | ||
2413 | vi->xdp_queue_pairs = xdp_qp; | 2486 | vi->xdp_queue_pairs = xdp_qp; |
2414 | 2487 | ||
2415 | for (i = 0; i < vi->max_queue_pairs; i++) { | 2488 | if (prog) { |
2416 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); | 2489 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2417 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); | 2490 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); |
2418 | if (i == 0) { | 2491 | if (i == 0 && !old_prog) |
2419 | if (!old_prog) | ||
2420 | virtnet_clear_guest_offloads(vi); | 2492 | virtnet_clear_guest_offloads(vi); |
2421 | if (!prog) | ||
2422 | virtnet_restore_guest_offloads(vi); | ||
2423 | } | 2493 | } |
2494 | } | ||
2495 | |||
2496 | for (i = 0; i < vi->max_queue_pairs; i++) { | ||
2424 | if (old_prog) | 2497 | if (old_prog) |
2425 | bpf_prog_put(old_prog); | 2498 | bpf_prog_put(old_prog); |
2426 | if (netif_running(dev)) | 2499 | if (netif_running(dev)) { |
2427 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | 2500 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
2501 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, | ||
2502 | &vi->sq[i].napi); | ||
2503 | } | ||
2428 | } | 2504 | } |
2429 | 2505 | ||
2430 | return 0; | 2506 | return 0; |
2431 | 2507 | ||
2432 | err: | 2508 | err: |
2433 | for (i = 0; i < vi->max_queue_pairs; i++) | 2509 | if (!prog) { |
2434 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | 2510 | virtnet_clear_guest_offloads(vi); |
2511 | for (i = 0; i < vi->max_queue_pairs; i++) | ||
2512 | rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); | ||
2513 | } | ||
2514 | |||
2515 | if (netif_running(dev)) { | ||
2516 | for (i = 0; i < vi->max_queue_pairs; i++) { | ||
2517 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | ||
2518 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, | ||
2519 | &vi->sq[i].napi); | ||
2520 | } | ||
2521 | } | ||
2435 | if (prog) | 2522 | if (prog) |
2436 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); | 2523 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); |
2437 | return err; | 2524 | return err; |
@@ -2613,16 +2700,6 @@ static void free_receive_page_frags(struct virtnet_info *vi) | |||
2613 | put_page(vi->rq[i].alloc_frag.page); | 2700 | put_page(vi->rq[i].alloc_frag.page); |
2614 | } | 2701 | } |
2615 | 2702 | ||
2616 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) | ||
2617 | { | ||
2618 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) | ||
2619 | return false; | ||
2620 | else if (q < vi->curr_queue_pairs) | ||
2621 | return true; | ||
2622 | else | ||
2623 | return false; | ||
2624 | } | ||
2625 | |||
2626 | static void free_unused_bufs(struct virtnet_info *vi) | 2703 | static void free_unused_bufs(struct virtnet_info *vi) |
2627 | { | 2704 | { |
2628 | void *buf; | 2705 | void *buf; |
@@ -2631,10 +2708,10 @@ static void free_unused_bufs(struct virtnet_info *vi) | |||
2631 | for (i = 0; i < vi->max_queue_pairs; i++) { | 2708 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2632 | struct virtqueue *vq = vi->sq[i].vq; | 2709 | struct virtqueue *vq = vi->sq[i].vq; |
2633 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { | 2710 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
2634 | if (!is_xdp_raw_buffer_queue(vi, i)) | 2711 | if (!is_xdp_frame(buf)) |
2635 | dev_kfree_skb(buf); | 2712 | dev_kfree_skb(buf); |
2636 | else | 2713 | else |
2637 | put_page(virt_to_head_page(buf)); | 2714 | xdp_return_frame(ptr_to_xdp(buf)); |
2638 | } | 2715 | } |
2639 | } | 2716 | } |
2640 | 2717 | ||
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 95909e262ba4..7c1430ed0244 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c | |||
@@ -1273,6 +1273,9 @@ static void vrf_setup(struct net_device *dev) | |||
1273 | 1273 | ||
1274 | /* default to no qdisc; user can add if desired */ | 1274 | /* default to no qdisc; user can add if desired */ |
1275 | dev->priv_flags |= IFF_NO_QUEUE; | 1275 | dev->priv_flags |= IFF_NO_QUEUE; |
1276 | |||
1277 | dev->min_mtu = 0; | ||
1278 | dev->max_mtu = 0; | ||
1276 | } | 1279 | } |
1277 | 1280 | ||
1278 | static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], | 1281 | static int vrf_validate(struct nlattr *tb[], struct nlattr *data[], |
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5209ee9aac47..2aae11feff0c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -2219,7 +2219,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, | |||
2219 | struct pcpu_sw_netstats *tx_stats, *rx_stats; | 2219 | struct pcpu_sw_netstats *tx_stats, *rx_stats; |
2220 | union vxlan_addr loopback; | 2220 | union vxlan_addr loopback; |
2221 | union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; | 2221 | union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; |
2222 | struct net_device *dev = skb->dev; | 2222 | struct net_device *dev; |
2223 | int len = skb->len; | 2223 | int len = skb->len; |
2224 | 2224 | ||
2225 | tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); | 2225 | tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); |
@@ -2239,9 +2239,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, | |||
2239 | #endif | 2239 | #endif |
2240 | } | 2240 | } |
2241 | 2241 | ||
2242 | rcu_read_lock(); | ||
2243 | dev = skb->dev; | ||
2244 | if (unlikely(!(dev->flags & IFF_UP))) { | ||
2245 | kfree_skb(skb); | ||
2246 | goto drop; | ||
2247 | } | ||
2248 | |||
2242 | if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) | 2249 | if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) |
2243 | vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, | 2250 | vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); |
2244 | vni); | ||
2245 | 2251 | ||
2246 | u64_stats_update_begin(&tx_stats->syncp); | 2252 | u64_stats_update_begin(&tx_stats->syncp); |
2247 | tx_stats->tx_packets++; | 2253 | tx_stats->tx_packets++; |
@@ -2254,8 +2260,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, | |||
2254 | rx_stats->rx_bytes += len; | 2260 | rx_stats->rx_bytes += len; |
2255 | u64_stats_update_end(&rx_stats->syncp); | 2261 | u64_stats_update_end(&rx_stats->syncp); |
2256 | } else { | 2262 | } else { |
2263 | drop: | ||
2257 | dev->stats.rx_dropped++; | 2264 | dev->stats.rx_dropped++; |
2258 | } | 2265 | } |
2266 | rcu_read_unlock(); | ||
2259 | } | 2267 | } |
2260 | 2268 | ||
2261 | static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, | 2269 | static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, |
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index c0b0f525c87c..27decf8ae840 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c | |||
@@ -1575,7 +1575,7 @@ try: | |||
1575 | dev->stats.tx_packets++; | 1575 | dev->stats.tx_packets++; |
1576 | dev->stats.tx_bytes += skb->len; | 1576 | dev->stats.tx_bytes += skb->len; |
1577 | } | 1577 | } |
1578 | dev_kfree_skb_irq(skb); | 1578 | dev_consume_skb_irq(skb); |
1579 | dpriv->tx_skbuff[cur] = NULL; | 1579 | dpriv->tx_skbuff[cur] = NULL; |
1580 | ++dpriv->tx_dirty; | 1580 | ++dpriv->tx_dirty; |
1581 | } else { | 1581 | } else { |
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 66d889d54e58..a08f04c3f644 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c | |||
@@ -482,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) | |||
482 | memset(priv->tx_buffer + | 482 | memset(priv->tx_buffer + |
483 | (be32_to_cpu(bd->buf) - priv->dma_tx_addr), | 483 | (be32_to_cpu(bd->buf) - priv->dma_tx_addr), |
484 | 0, skb->len); | 484 | 0, skb->len); |
485 | dev_kfree_skb_irq(skb); | 485 | dev_consume_skb_irq(skb); |
486 | 486 | ||
487 | priv->tx_skbuff[priv->skb_dirtytx] = NULL; | 487 | priv->tx_skbuff[priv->skb_dirtytx] = NULL; |
488 | priv->skb_dirtytx = | 488 | priv->skb_dirtytx = |
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 399b501f3c3c..e8891f5fc83a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c | |||
@@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { | |||
548 | { | 548 | { |
549 | .id = WCN3990_HW_1_0_DEV_VERSION, | 549 | .id = WCN3990_HW_1_0_DEV_VERSION, |
550 | .dev_id = 0, | 550 | .dev_id = 0, |
551 | .bus = ATH10K_BUS_PCI, | 551 | .bus = ATH10K_BUS_SNOC, |
552 | .name = "wcn3990 hw1.0", | 552 | .name = "wcn3990 hw1.0", |
553 | .continuous_frag_desc = true, | 553 | .continuous_frag_desc = true, |
554 | .tx_chain_mask = 0x7, | 554 | .tx_chain_mask = 0x7, |
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 491ca3c8b43c..83d5bceea08f 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config IWLWIFI | 1 | config IWLWIFI |
2 | tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " | 2 | tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " |
3 | depends on PCI && HAS_IOMEM | 3 | depends on PCI && HAS_IOMEM && CFG80211 |
4 | select FW_LOADER | 4 | select FW_LOADER |
5 | ---help--- | 5 | ---help--- |
6 | Select to build the driver supporting the: | 6 | Select to build the driver supporting the: |
@@ -47,6 +47,7 @@ if IWLWIFI | |||
47 | config IWLWIFI_LEDS | 47 | config IWLWIFI_LEDS |
48 | bool | 48 | bool |
49 | depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI | 49 | depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI |
50 | depends on IWLMVM || IWLDVM | ||
50 | select LEDS_TRIGGERS | 51 | select LEDS_TRIGGERS |
51 | select MAC80211_LEDS | 52 | select MAC80211_LEDS |
52 | default y | 53 | default y |
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 320edcac4699..6359053bd0c7 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -3554,7 +3554,7 @@ static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) | |||
3554 | goto out_err; | 3554 | goto out_err; |
3555 | } | 3555 | } |
3556 | 3556 | ||
3557 | genlmsg_reply(skb, info); | 3557 | res = genlmsg_reply(skb, info); |
3558 | break; | 3558 | break; |
3559 | } | 3559 | } |
3560 | 3560 | ||
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 497e762978cc..b2cabce1d74d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | |||
@@ -212,24 +212,24 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) | |||
212 | mt76x02_add_rate_power_offset(t, delta); | 212 | mt76x02_add_rate_power_offset(t, delta); |
213 | } | 213 | } |
214 | 214 | ||
215 | void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | 215 | void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp) |
216 | { | 216 | { |
217 | struct mt76x0_chan_map { | 217 | struct mt76x0_chan_map { |
218 | u8 chan; | 218 | u8 chan; |
219 | u8 offset; | 219 | u8 offset; |
220 | } chan_map[] = { | 220 | } chan_map[] = { |
221 | { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, | 221 | { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 }, |
222 | { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, | 222 | { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 }, |
223 | { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, | 223 | { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 }, |
224 | { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, | 224 | { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 }, |
225 | { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, | 225 | { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 }, |
226 | { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, | 226 | { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 }, |
227 | { 167, 17 }, { 171, 18 }, { 173, 19 }, | 227 | { 167, 34 }, { 171, 36 }, { 175, 38 }, |
228 | }; | 228 | }; |
229 | struct ieee80211_channel *chan = dev->mt76.chandef.chan; | 229 | struct ieee80211_channel *chan = dev->mt76.chandef.chan; |
230 | u8 offset, addr; | 230 | u8 offset, addr; |
231 | int i, idx = 0; | ||
231 | u16 data; | 232 | u16 data; |
232 | int i; | ||
233 | 233 | ||
234 | if (mt76x0_tssi_enabled(dev)) { | 234 | if (mt76x0_tssi_enabled(dev)) { |
235 | s8 target_power; | 235 | s8 target_power; |
@@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | |||
239 | else | 239 | else |
240 | data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); | 240 | data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); |
241 | target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; | 241 | target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; |
242 | info[0] = target_power + mt76x0_get_delta(dev); | 242 | *tp = target_power + mt76x0_get_delta(dev); |
243 | info[1] = 0; | ||
244 | 243 | ||
245 | return; | 244 | return; |
246 | } | 245 | } |
247 | 246 | ||
248 | for (i = 0; i < ARRAY_SIZE(chan_map); i++) { | 247 | for (i = 0; i < ARRAY_SIZE(chan_map); i++) { |
249 | if (chan_map[i].chan <= chan->hw_value) { | 248 | if (chan->hw_value <= chan_map[i].chan) { |
249 | idx = (chan->hw_value == chan_map[i].chan); | ||
250 | offset = chan_map[i].offset; | 250 | offset = chan_map[i].offset; |
251 | break; | 251 | break; |
252 | } | 252 | } |
@@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | |||
258 | addr = MT_EE_TX_POWER_DELTA_BW80 + offset; | 258 | addr = MT_EE_TX_POWER_DELTA_BW80 + offset; |
259 | } else { | 259 | } else { |
260 | switch (chan->hw_value) { | 260 | switch (chan->hw_value) { |
261 | case 42: | ||
262 | offset = 2; | ||
263 | break; | ||
261 | case 58: | 264 | case 58: |
262 | offset = 8; | 265 | offset = 8; |
263 | break; | 266 | break; |
264 | case 106: | 267 | case 106: |
265 | offset = 14; | 268 | offset = 14; |
266 | break; | 269 | break; |
267 | case 112: | 270 | case 122: |
268 | offset = 20; | 271 | offset = 20; |
269 | break; | 272 | break; |
270 | case 155: | 273 | case 155: |
@@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | |||
277 | } | 280 | } |
278 | 281 | ||
279 | data = mt76x02_eeprom_get(dev, addr); | 282 | data = mt76x02_eeprom_get(dev, addr); |
280 | 283 | *tp = data >> (8 * idx); | |
281 | info[0] = data; | 284 | if (*tp < 0 || *tp > 0x3f) |
282 | if (!info[0] || info[0] > 0x3f) | 285 | *tp = 5; |
283 | info[0] = 5; | ||
284 | |||
285 | info[1] = data >> 8; | ||
286 | if (!info[1] || info[1] > 0x3f) | ||
287 | info[1] = 5; | ||
288 | } | 286 | } |
289 | 287 | ||
290 | static int mt76x0_check_eeprom(struct mt76x02_dev *dev) | 288 | static int mt76x0_check_eeprom(struct mt76x02_dev *dev) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h index ee9ade9f3c8b..42b259f90b6d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h | |||
@@ -26,7 +26,7 @@ struct mt76x02_dev; | |||
26 | int mt76x0_eeprom_init(struct mt76x02_dev *dev); | 26 | int mt76x0_eeprom_init(struct mt76x02_dev *dev); |
27 | void mt76x0_read_rx_gain(struct mt76x02_dev *dev); | 27 | void mt76x0_read_rx_gain(struct mt76x02_dev *dev); |
28 | void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); | 28 | void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); |
29 | void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); | 29 | void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp); |
30 | 30 | ||
31 | static inline s8 s6_to_s8(u32 val) | 31 | static inline s8 s6_to_s8(u32 val) |
32 | { | 32 | { |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 1eb1a802ed20..b6166703ad76 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c | |||
@@ -845,17 +845,17 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev) | |||
845 | void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) | 845 | void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) |
846 | { | 846 | { |
847 | struct mt76_rate_power *t = &dev->mt76.rate_power; | 847 | struct mt76_rate_power *t = &dev->mt76.rate_power; |
848 | u8 info[2]; | 848 | s8 info; |
849 | 849 | ||
850 | mt76x0_get_tx_power_per_rate(dev); | 850 | mt76x0_get_tx_power_per_rate(dev); |
851 | mt76x0_get_power_info(dev, info); | 851 | mt76x0_get_power_info(dev, &info); |
852 | 852 | ||
853 | mt76x02_add_rate_power_offset(t, info[0]); | 853 | mt76x02_add_rate_power_offset(t, info); |
854 | mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); | 854 | mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); |
855 | dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); | 855 | dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); |
856 | mt76x02_add_rate_power_offset(t, -info[0]); | 856 | mt76x02_add_rate_power_offset(t, -info); |
857 | 857 | ||
858 | mt76x02_phy_set_txpower(dev, info[0], info[1]); | 858 | mt76x02_phy_set_txpower(dev, info, info); |
859 | } | 859 | } |
860 | 860 | ||
861 | void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) | 861 | void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index 0e6b43bb4678..a5ea3ba495a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c | |||
@@ -158,39 +158,49 @@ static const struct ieee80211_ops mt76x0u_ops = { | |||
158 | .get_txpower = mt76x02_get_txpower, | 158 | .get_txpower = mt76x02_get_txpower, |
159 | }; | 159 | }; |
160 | 160 | ||
161 | static int mt76x0u_register_device(struct mt76x02_dev *dev) | 161 | static int mt76x0u_init_hardware(struct mt76x02_dev *dev) |
162 | { | 162 | { |
163 | struct ieee80211_hw *hw = dev->mt76.hw; | ||
164 | int err; | 163 | int err; |
165 | 164 | ||
166 | err = mt76u_alloc_queues(&dev->mt76); | ||
167 | if (err < 0) | ||
168 | goto out_err; | ||
169 | |||
170 | err = mt76u_mcu_init_rx(&dev->mt76); | ||
171 | if (err < 0) | ||
172 | goto out_err; | ||
173 | |||
174 | mt76x0_chip_onoff(dev, true, true); | 165 | mt76x0_chip_onoff(dev, true, true); |
175 | if (!mt76x02_wait_for_mac(&dev->mt76)) { | 166 | |
176 | err = -ETIMEDOUT; | 167 | if (!mt76x02_wait_for_mac(&dev->mt76)) |
177 | goto out_err; | 168 | return -ETIMEDOUT; |
178 | } | ||
179 | 169 | ||
180 | err = mt76x0u_mcu_init(dev); | 170 | err = mt76x0u_mcu_init(dev); |
181 | if (err < 0) | 171 | if (err < 0) |
182 | goto out_err; | 172 | return err; |
183 | 173 | ||
184 | mt76x0_init_usb_dma(dev); | 174 | mt76x0_init_usb_dma(dev); |
185 | err = mt76x0_init_hardware(dev); | 175 | err = mt76x0_init_hardware(dev); |
186 | if (err < 0) | 176 | if (err < 0) |
187 | goto out_err; | 177 | return err; |
188 | 178 | ||
189 | mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); | 179 | mt76_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); |
190 | mt76_wr(dev, MT_TXOP_CTRL_CFG, | 180 | mt76_wr(dev, MT_TXOP_CTRL_CFG, |
191 | FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | | 181 | FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | |
192 | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); | 182 | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); |
193 | 183 | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static int mt76x0u_register_device(struct mt76x02_dev *dev) | ||
188 | { | ||
189 | struct ieee80211_hw *hw = dev->mt76.hw; | ||
190 | int err; | ||
191 | |||
192 | err = mt76u_alloc_queues(&dev->mt76); | ||
193 | if (err < 0) | ||
194 | goto out_err; | ||
195 | |||
196 | err = mt76u_mcu_init_rx(&dev->mt76); | ||
197 | if (err < 0) | ||
198 | goto out_err; | ||
199 | |||
200 | err = mt76x0u_init_hardware(dev); | ||
201 | if (err < 0) | ||
202 | goto out_err; | ||
203 | |||
194 | err = mt76x0_register_device(dev); | 204 | err = mt76x0_register_device(dev); |
195 | if (err < 0) | 205 | if (err < 0) |
196 | goto out_err; | 206 | goto out_err; |
@@ -301,6 +311,8 @@ static int __maybe_unused mt76x0_suspend(struct usb_interface *usb_intf, | |||
301 | 311 | ||
302 | mt76u_stop_queues(&dev->mt76); | 312 | mt76u_stop_queues(&dev->mt76); |
303 | mt76x0u_mac_stop(dev); | 313 | mt76x0u_mac_stop(dev); |
314 | clear_bit(MT76_STATE_MCU_RUNNING, &dev->mt76.state); | ||
315 | mt76x0_chip_onoff(dev, false, false); | ||
304 | usb_kill_urb(usb->mcu.res.urb); | 316 | usb_kill_urb(usb->mcu.res.urb); |
305 | 317 | ||
306 | return 0; | 318 | return 0; |
@@ -328,7 +340,7 @@ static int __maybe_unused mt76x0_resume(struct usb_interface *usb_intf) | |||
328 | tasklet_enable(&usb->rx_tasklet); | 340 | tasklet_enable(&usb->rx_tasklet); |
329 | tasklet_enable(&usb->tx_tasklet); | 341 | tasklet_enable(&usb->tx_tasklet); |
330 | 342 | ||
331 | ret = mt76x0_init_hardware(dev); | 343 | ret = mt76x0u_init_hardware(dev); |
332 | if (ret) | 344 | if (ret) |
333 | goto err; | 345 | goto err; |
334 | 346 | ||
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index bd10165d7eec..4d4b07701149 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c | |||
@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | sdio_claim_host(func); | 166 | sdio_claim_host(func); |
167 | /* | ||
168 | * To guarantee that the SDIO card is power cycled, as required to make | ||
169 | * the FW programming to succeed, let's do a brute force HW reset. | ||
170 | */ | ||
171 | mmc_hw_reset(card->host); | ||
172 | |||
167 | sdio_enable_func(func); | 173 | sdio_enable_func(func); |
168 | sdio_release_host(func); | 174 | sdio_release_host(func); |
169 | 175 | ||
@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) | |||
174 | { | 180 | { |
175 | struct sdio_func *func = dev_to_sdio_func(glue->dev); | 181 | struct sdio_func *func = dev_to_sdio_func(glue->dev); |
176 | struct mmc_card *card = func->card; | 182 | struct mmc_card *card = func->card; |
177 | int error; | ||
178 | 183 | ||
179 | sdio_claim_host(func); | 184 | sdio_claim_host(func); |
180 | sdio_disable_func(func); | 185 | sdio_disable_func(func); |
181 | sdio_release_host(func); | 186 | sdio_release_host(func); |
182 | 187 | ||
183 | /* Let runtime PM know the card is powered off */ | 188 | /* Let runtime PM know the card is powered off */ |
184 | error = pm_runtime_put(&card->dev); | 189 | pm_runtime_put(&card->dev); |
185 | if (error < 0 && error != -EBUSY) { | ||
186 | dev_err(&card->dev, "%s failed: %i\n", __func__, error); | ||
187 | |||
188 | return error; | ||
189 | } | ||
190 | |||
191 | return 0; | 190 | return 0; |
192 | } | 191 | } |
193 | 192 | ||
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 150e49723c15..6a9dd68c0f4f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c | |||
@@ -1253,6 +1253,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns, | |||
1253 | * effects say only one namespace is affected. | 1253 | * effects say only one namespace is affected. |
1254 | */ | 1254 | */ |
1255 | if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { | 1255 | if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { |
1256 | mutex_lock(&ctrl->scan_lock); | ||
1256 | nvme_start_freeze(ctrl); | 1257 | nvme_start_freeze(ctrl); |
1257 | nvme_wait_freeze(ctrl); | 1258 | nvme_wait_freeze(ctrl); |
1258 | } | 1259 | } |
@@ -1281,8 +1282,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects) | |||
1281 | */ | 1282 | */ |
1282 | if (effects & NVME_CMD_EFFECTS_LBCC) | 1283 | if (effects & NVME_CMD_EFFECTS_LBCC) |
1283 | nvme_update_formats(ctrl); | 1284 | nvme_update_formats(ctrl); |
1284 | if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) | 1285 | if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) { |
1285 | nvme_unfreeze(ctrl); | 1286 | nvme_unfreeze(ctrl); |
1287 | mutex_unlock(&ctrl->scan_lock); | ||
1288 | } | ||
1286 | if (effects & NVME_CMD_EFFECTS_CCC) | 1289 | if (effects & NVME_CMD_EFFECTS_CCC) |
1287 | nvme_init_identify(ctrl); | 1290 | nvme_init_identify(ctrl); |
1288 | if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) | 1291 | if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC)) |
@@ -3401,6 +3404,7 @@ static void nvme_scan_work(struct work_struct *work) | |||
3401 | if (nvme_identify_ctrl(ctrl, &id)) | 3404 | if (nvme_identify_ctrl(ctrl, &id)) |
3402 | return; | 3405 | return; |
3403 | 3406 | ||
3407 | mutex_lock(&ctrl->scan_lock); | ||
3404 | nn = le32_to_cpu(id->nn); | 3408 | nn = le32_to_cpu(id->nn); |
3405 | if (ctrl->vs >= NVME_VS(1, 1, 0) && | 3409 | if (ctrl->vs >= NVME_VS(1, 1, 0) && |
3406 | !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { | 3410 | !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) { |
@@ -3409,6 +3413,7 @@ static void nvme_scan_work(struct work_struct *work) | |||
3409 | } | 3413 | } |
3410 | nvme_scan_ns_sequential(ctrl, nn); | 3414 | nvme_scan_ns_sequential(ctrl, nn); |
3411 | out_free_id: | 3415 | out_free_id: |
3416 | mutex_unlock(&ctrl->scan_lock); | ||
3412 | kfree(id); | 3417 | kfree(id); |
3413 | down_write(&ctrl->namespaces_rwsem); | 3418 | down_write(&ctrl->namespaces_rwsem); |
3414 | list_sort(NULL, &ctrl->namespaces, ns_cmp); | 3419 | list_sort(NULL, &ctrl->namespaces, ns_cmp); |
@@ -3652,6 +3657,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, | |||
3652 | 3657 | ||
3653 | ctrl->state = NVME_CTRL_NEW; | 3658 | ctrl->state = NVME_CTRL_NEW; |
3654 | spin_lock_init(&ctrl->lock); | 3659 | spin_lock_init(&ctrl->lock); |
3660 | mutex_init(&ctrl->scan_lock); | ||
3655 | INIT_LIST_HEAD(&ctrl->namespaces); | 3661 | INIT_LIST_HEAD(&ctrl->namespaces); |
3656 | init_rwsem(&ctrl->namespaces_rwsem); | 3662 | init_rwsem(&ctrl->namespaces_rwsem); |
3657 | ctrl->dev = dev; | 3663 | ctrl->dev = dev; |
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index ab961bdeea89..c4a1bb41abf0 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h | |||
@@ -154,6 +154,7 @@ struct nvme_ctrl { | |||
154 | enum nvme_ctrl_state state; | 154 | enum nvme_ctrl_state state; |
155 | bool identified; | 155 | bool identified; |
156 | spinlock_t lock; | 156 | spinlock_t lock; |
157 | struct mutex scan_lock; | ||
157 | const struct nvme_ctrl_ops *ops; | 158 | const struct nvme_ctrl_ops *ops; |
158 | struct request_queue *admin_q; | 159 | struct request_queue *admin_q; |
159 | struct request_queue *connect_q; | 160 | struct request_queue *connect_q; |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 9bc585415d9b..7fee665ec45e 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -2557,27 +2557,18 @@ static void nvme_reset_work(struct work_struct *work) | |||
2557 | if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) | 2557 | if (dev->ctrl.ctrl_config & NVME_CC_ENABLE) |
2558 | nvme_dev_disable(dev, false); | 2558 | nvme_dev_disable(dev, false); |
2559 | 2559 | ||
2560 | /* | 2560 | mutex_lock(&dev->shutdown_lock); |
2561 | * Introduce CONNECTING state from nvme-fc/rdma transports to mark the | ||
2562 | * initializing procedure here. | ||
2563 | */ | ||
2564 | if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { | ||
2565 | dev_warn(dev->ctrl.device, | ||
2566 | "failed to mark controller CONNECTING\n"); | ||
2567 | goto out; | ||
2568 | } | ||
2569 | |||
2570 | result = nvme_pci_enable(dev); | 2561 | result = nvme_pci_enable(dev); |
2571 | if (result) | 2562 | if (result) |
2572 | goto out; | 2563 | goto out_unlock; |
2573 | 2564 | ||
2574 | result = nvme_pci_configure_admin_queue(dev); | 2565 | result = nvme_pci_configure_admin_queue(dev); |
2575 | if (result) | 2566 | if (result) |
2576 | goto out; | 2567 | goto out_unlock; |
2577 | 2568 | ||
2578 | result = nvme_alloc_admin_tags(dev); | 2569 | result = nvme_alloc_admin_tags(dev); |
2579 | if (result) | 2570 | if (result) |
2580 | goto out; | 2571 | goto out_unlock; |
2581 | 2572 | ||
2582 | /* | 2573 | /* |
2583 | * Limit the max command size to prevent iod->sg allocations going | 2574 | * Limit the max command size to prevent iod->sg allocations going |
@@ -2585,6 +2576,17 @@ static void nvme_reset_work(struct work_struct *work) | |||
2585 | */ | 2576 | */ |
2586 | dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; | 2577 | dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1; |
2587 | dev->ctrl.max_segments = NVME_MAX_SEGS; | 2578 | dev->ctrl.max_segments = NVME_MAX_SEGS; |
2579 | mutex_unlock(&dev->shutdown_lock); | ||
2580 | |||
2581 | /* | ||
2582 | * Introduce CONNECTING state from nvme-fc/rdma transports to mark the | ||
2583 | * initializing procedure here. | ||
2584 | */ | ||
2585 | if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) { | ||
2586 | dev_warn(dev->ctrl.device, | ||
2587 | "failed to mark controller CONNECTING\n"); | ||
2588 | goto out; | ||
2589 | } | ||
2588 | 2590 | ||
2589 | result = nvme_init_identify(&dev->ctrl); | 2591 | result = nvme_init_identify(&dev->ctrl); |
2590 | if (result) | 2592 | if (result) |
@@ -2649,6 +2651,8 @@ static void nvme_reset_work(struct work_struct *work) | |||
2649 | nvme_start_ctrl(&dev->ctrl); | 2651 | nvme_start_ctrl(&dev->ctrl); |
2650 | return; | 2652 | return; |
2651 | 2653 | ||
2654 | out_unlock: | ||
2655 | mutex_unlock(&dev->shutdown_lock); | ||
2652 | out: | 2656 | out: |
2653 | nvme_remove_dead_ctrl(dev, result); | 2657 | nvme_remove_dead_ctrl(dev, result); |
2654 | } | 2658 | } |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index b0a413f3f7ca..e2a879e93d86 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -639,8 +639,9 @@ static void quirk_synopsys_haps(struct pci_dev *pdev) | |||
639 | break; | 639 | break; |
640 | } | 640 | } |
641 | } | 641 | } |
642 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, | 642 | DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, |
643 | quirk_synopsys_haps); | 643 | PCI_CLASS_SERIAL_USB_XHCI, 0, |
644 | quirk_synopsys_haps); | ||
644 | 645 | ||
645 | /* | 646 | /* |
646 | * Let's make the southbridge information explicit instead of having to | 647 | * Let's make the southbridge information explicit instead of having to |
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 05044e323ea5..03ec7a5d9d0b 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
@@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { | |||
1513 | .matches = { | 1513 | .matches = { |
1514 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | 1514 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), |
1515 | DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), | 1515 | DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), |
1516 | DMI_MATCH(DMI_BOARD_VERSION, "1.0"), | 1516 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), |
1517 | }, | 1517 | }, |
1518 | }, | 1518 | }, |
1519 | { | 1519 | { |
@@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { | |||
1521 | .matches = { | 1521 | .matches = { |
1522 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), | 1522 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), |
1523 | DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), | 1523 | DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), |
1524 | DMI_MATCH(DMI_BOARD_VERSION, "1.0"), | 1524 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), |
1525 | }, | 1525 | }, |
1526 | }, | 1526 | }, |
1527 | { | 1527 | { |
@@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { | |||
1529 | .matches = { | 1529 | .matches = { |
1530 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | 1530 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), |
1531 | DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), | 1531 | DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), |
1532 | DMI_MATCH(DMI_BOARD_VERSION, "1.0"), | 1532 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), |
1533 | }, | 1533 | }, |
1534 | }, | 1534 | }, |
1535 | { | 1535 | { |
@@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { | |||
1537 | .matches = { | 1537 | .matches = { |
1538 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | 1538 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), |
1539 | DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), | 1539 | DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), |
1540 | DMI_MATCH(DMI_BOARD_VERSION, "1.0"), | 1540 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), |
1541 | }, | 1541 | }, |
1542 | }, | 1542 | }, |
1543 | {} | 1543 | {} |
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig index 1817786ab6aa..a005cbccb4f7 100644 --- a/drivers/pinctrl/mediatek/Kconfig +++ b/drivers/pinctrl/mediatek/Kconfig | |||
@@ -45,12 +45,14 @@ config PINCTRL_MT2701 | |||
45 | config PINCTRL_MT7623 | 45 | config PINCTRL_MT7623 |
46 | bool "Mediatek MT7623 pin control with generic binding" | 46 | bool "Mediatek MT7623 pin control with generic binding" |
47 | depends on MACH_MT7623 || COMPILE_TEST | 47 | depends on MACH_MT7623 || COMPILE_TEST |
48 | depends on OF | ||
48 | default MACH_MT7623 | 49 | default MACH_MT7623 |
49 | select PINCTRL_MTK_MOORE | 50 | select PINCTRL_MTK_MOORE |
50 | 51 | ||
51 | config PINCTRL_MT7629 | 52 | config PINCTRL_MT7629 |
52 | bool "Mediatek MT7629 pin control" | 53 | bool "Mediatek MT7629 pin control" |
53 | depends on MACH_MT7629 || COMPILE_TEST | 54 | depends on MACH_MT7629 || COMPILE_TEST |
55 | depends on OF | ||
54 | default MACH_MT7629 | 56 | default MACH_MT7629 |
55 | select PINCTRL_MTK_MOORE | 57 | select PINCTRL_MTK_MOORE |
56 | 58 | ||
@@ -92,6 +94,7 @@ config PINCTRL_MT6797 | |||
92 | 94 | ||
93 | config PINCTRL_MT7622 | 95 | config PINCTRL_MT7622 |
94 | bool "MediaTek MT7622 pin control" | 96 | bool "MediaTek MT7622 pin control" |
97 | depends on OF | ||
95 | depends on ARM64 || COMPILE_TEST | 98 | depends on ARM64 || COMPILE_TEST |
96 | default ARM64 && ARCH_MEDIATEK | 99 | default ARM64 && ARCH_MEDIATEK |
97 | select PINCTRL_MTK_MOORE | 100 | select PINCTRL_MTK_MOORE |
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c index c69ca95b1ad5..0f140a802137 100644 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c | |||
@@ -693,7 +693,7 @@ static const char * const sd_a_groups[] = { | |||
693 | 693 | ||
694 | static const char * const sdxc_a_groups[] = { | 694 | static const char * const sdxc_a_groups[] = { |
695 | "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", | 695 | "sdxc_d0_0_a", "sdxc_d13_0_a", "sdxc_d47_a", "sdxc_clk_a", |
696 | "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d0_13_1_a" | 696 | "sdxc_cmd_a", "sdxc_d0_1_a", "sdxc_d13_1_a" |
697 | }; | 697 | }; |
698 | 698 | ||
699 | static const char * const pcm_a_groups[] = { | 699 | static const char * const pcm_a_groups[] = { |
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index b03481ef99a1..98905d4a79ca 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c | |||
@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, | |||
832 | break; | 832 | break; |
833 | 833 | ||
834 | case MCP_TYPE_S18: | 834 | case MCP_TYPE_S18: |
835 | one_regmap_config = | ||
836 | devm_kmemdup(dev, &mcp23x17_regmap, | ||
837 | sizeof(struct regmap_config), GFP_KERNEL); | ||
838 | if (!one_regmap_config) | ||
839 | return -ENOMEM; | ||
835 | mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, | 840 | mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, |
836 | &mcp23x17_regmap); | 841 | one_regmap_config); |
837 | mcp->reg_shift = 1; | 842 | mcp->reg_shift = 1; |
838 | mcp->chip.ngpio = 16; | 843 | mcp->chip.ngpio = 16; |
839 | mcp->chip.label = "mcp23s18"; | 844 | mcp->chip.label = "mcp23s18"; |
diff --git a/drivers/pinctrl/qcom/pinctrl-qcs404.c b/drivers/pinctrl/qcom/pinctrl-qcs404.c index 7aae52a09ff0..4ffd56ff809e 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcs404.c +++ b/drivers/pinctrl/qcom/pinctrl-qcs404.c | |||
@@ -79,7 +79,7 @@ enum { | |||
79 | .intr_cfg_reg = 0, \ | 79 | .intr_cfg_reg = 0, \ |
80 | .intr_status_reg = 0, \ | 80 | .intr_status_reg = 0, \ |
81 | .intr_target_reg = 0, \ | 81 | .intr_target_reg = 0, \ |
82 | .tile = NORTH, \ | 82 | .tile = SOUTH, \ |
83 | .mux_bit = -1, \ | 83 | .mux_bit = -1, \ |
84 | .pull_bit = pull, \ | 84 | .pull_bit = pull, \ |
85 | .drv_bit = drv, \ | 85 | .drv_bit = drv, \ |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c index aa8b58125568..ef4268cc6227 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c | |||
@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 }; | |||
588 | static const struct sunxi_pinctrl_desc h6_pinctrl_data = { | 588 | static const struct sunxi_pinctrl_desc h6_pinctrl_data = { |
589 | .pins = h6_pins, | 589 | .pins = h6_pins, |
590 | .npins = ARRAY_SIZE(h6_pins), | 590 | .npins = ARRAY_SIZE(h6_pins), |
591 | .irq_banks = 3, | 591 | .irq_banks = 4, |
592 | .irq_bank_map = h6_irq_bank_map, | 592 | .irq_bank_map = h6_irq_bank_map, |
593 | .irq_read_needs_mux = true, | 593 | .irq_read_needs_mux = true, |
594 | }; | 594 | }; |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 5d9184d18c16..0e7fa69e93df 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c | |||
@@ -698,26 +698,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset) | |||
698 | { | 698 | { |
699 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); | 699 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); |
700 | unsigned short bank = offset / PINS_PER_BANK; | 700 | unsigned short bank = offset / PINS_PER_BANK; |
701 | struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; | 701 | unsigned short bank_offset = bank - pctl->desc->pin_base / |
702 | struct regulator *reg; | 702 | PINS_PER_BANK; |
703 | struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; | ||
704 | struct regulator *reg = s_reg->regulator; | ||
705 | char supply[16]; | ||
703 | int ret; | 706 | int ret; |
704 | 707 | ||
705 | reg = s_reg->regulator; | 708 | if (reg) { |
706 | if (!reg) { | ||
707 | char supply[16]; | ||
708 | |||
709 | snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); | ||
710 | reg = regulator_get(pctl->dev, supply); | ||
711 | if (IS_ERR(reg)) { | ||
712 | dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", | ||
713 | 'A' + bank); | ||
714 | return PTR_ERR(reg); | ||
715 | } | ||
716 | |||
717 | s_reg->regulator = reg; | ||
718 | refcount_set(&s_reg->refcount, 1); | ||
719 | } else { | ||
720 | refcount_inc(&s_reg->refcount); | 709 | refcount_inc(&s_reg->refcount); |
710 | return 0; | ||
711 | } | ||
712 | |||
713 | snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); | ||
714 | reg = regulator_get(pctl->dev, supply); | ||
715 | if (IS_ERR(reg)) { | ||
716 | dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", | ||
717 | 'A' + bank); | ||
718 | return PTR_ERR(reg); | ||
721 | } | 719 | } |
722 | 720 | ||
723 | ret = regulator_enable(reg); | 721 | ret = regulator_enable(reg); |
@@ -727,13 +725,13 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset) | |||
727 | goto out; | 725 | goto out; |
728 | } | 726 | } |
729 | 727 | ||
728 | s_reg->regulator = reg; | ||
729 | refcount_set(&s_reg->refcount, 1); | ||
730 | |||
730 | return 0; | 731 | return 0; |
731 | 732 | ||
732 | out: | 733 | out: |
733 | if (refcount_dec_and_test(&s_reg->refcount)) { | 734 | regulator_put(s_reg->regulator); |
734 | regulator_put(s_reg->regulator); | ||
735 | s_reg->regulator = NULL; | ||
736 | } | ||
737 | 735 | ||
738 | return ret; | 736 | return ret; |
739 | } | 737 | } |
@@ -742,7 +740,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset) | |||
742 | { | 740 | { |
743 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); | 741 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); |
744 | unsigned short bank = offset / PINS_PER_BANK; | 742 | unsigned short bank = offset / PINS_PER_BANK; |
745 | struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; | 743 | unsigned short bank_offset = bank - pctl->desc->pin_base / |
744 | PINS_PER_BANK; | ||
745 | struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; | ||
746 | 746 | ||
747 | if (!refcount_dec_and_test(&s_reg->refcount)) | 747 | if (!refcount_dec_and_test(&s_reg->refcount)) |
748 | return 0; | 748 | return 0; |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index e340d2a24b44..034c0317c8d6 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h | |||
@@ -136,7 +136,7 @@ struct sunxi_pinctrl { | |||
136 | struct gpio_chip *chip; | 136 | struct gpio_chip *chip; |
137 | const struct sunxi_pinctrl_desc *desc; | 137 | const struct sunxi_pinctrl_desc *desc; |
138 | struct device *dev; | 138 | struct device *dev; |
139 | struct sunxi_pinctrl_regulator regulators[12]; | 139 | struct sunxi_pinctrl_regulator regulators[9]; |
140 | struct irq_domain *domain; | 140 | struct irq_domain *domain; |
141 | struct sunxi_pinctrl_function *functions; | 141 | struct sunxi_pinctrl_function *functions; |
142 | unsigned nfunctions; | 142 | unsigned nfunctions; |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 4e7b55a14b1a..6e294b4d3635 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) | |||
4469 | usrparm.psf_data &= 0x7fffffffULL; | 4469 | usrparm.psf_data &= 0x7fffffffULL; |
4470 | usrparm.rssd_result &= 0x7fffffffULL; | 4470 | usrparm.rssd_result &= 0x7fffffffULL; |
4471 | } | 4471 | } |
4472 | /* at least 2 bytes are accessed and should be allocated */ | ||
4473 | if (usrparm.psf_data_len < 2) { | ||
4474 | DBF_DEV_EVENT(DBF_WARNING, device, | ||
4475 | "Symmetrix ioctl invalid data length %d", | ||
4476 | usrparm.psf_data_len); | ||
4477 | rc = -EINVAL; | ||
4478 | goto out; | ||
4479 | } | ||
4472 | /* alloc I/O data area */ | 4480 | /* alloc I/O data area */ |
4473 | psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); | 4481 | psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); |
4474 | rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); | 4482 | rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 48ea0004a56d..5a699746c357 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr) | |||
248 | static inline int ap_test_config_card_id(unsigned int id) | 248 | static inline int ap_test_config_card_id(unsigned int id) |
249 | { | 249 | { |
250 | if (!ap_configuration) /* QCI not supported */ | 250 | if (!ap_configuration) /* QCI not supported */ |
251 | return 1; | 251 | /* only ids 0...3F may be probed */ |
252 | return id < 0x40 ? 1 : 0; | ||
252 | return ap_test_config(ap_configuration->apm, id); | 253 | return ap_test_config(ap_configuration->apm, id); |
253 | } | 254 | } |
254 | 255 | ||
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 0ee026947f20..122059ecad84 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/hashtable.h> | 22 | #include <linux/hashtable.h> |
23 | #include <linux/ip.h> | 23 | #include <linux/ip.h> |
24 | #include <linux/refcount.h> | 24 | #include <linux/refcount.h> |
25 | #include <linux/workqueue.h> | ||
25 | 26 | ||
26 | #include <net/ipv6.h> | 27 | #include <net/ipv6.h> |
27 | #include <net/if_inet6.h> | 28 | #include <net/if_inet6.h> |
@@ -789,6 +790,7 @@ struct qeth_card { | |||
789 | struct qeth_seqno seqno; | 790 | struct qeth_seqno seqno; |
790 | struct qeth_card_options options; | 791 | struct qeth_card_options options; |
791 | 792 | ||
793 | struct workqueue_struct *event_wq; | ||
792 | wait_queue_head_t wait_q; | 794 | wait_queue_head_t wait_q; |
793 | spinlock_t mclock; | 795 | spinlock_t mclock; |
794 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | 796 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; |
@@ -962,7 +964,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[]; | |||
962 | extern const struct attribute_group qeth_device_attr_group; | 964 | extern const struct attribute_group qeth_device_attr_group; |
963 | extern const struct attribute_group qeth_device_blkt_group; | 965 | extern const struct attribute_group qeth_device_blkt_group; |
964 | extern const struct device_type qeth_generic_devtype; | 966 | extern const struct device_type qeth_generic_devtype; |
965 | extern struct workqueue_struct *qeth_wq; | ||
966 | 967 | ||
967 | int qeth_card_hw_is_reachable(struct qeth_card *); | 968 | int qeth_card_hw_is_reachable(struct qeth_card *); |
968 | const char *qeth_get_cardname_short(struct qeth_card *); | 969 | const char *qeth_get_cardname_short(struct qeth_card *); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e63e03143ca7..89f912213e62 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -74,8 +74,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, | |||
74 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); | 74 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); |
75 | static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); | 75 | static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); |
76 | 76 | ||
77 | struct workqueue_struct *qeth_wq; | 77 | static struct workqueue_struct *qeth_wq; |
78 | EXPORT_SYMBOL_GPL(qeth_wq); | ||
79 | 78 | ||
80 | int qeth_card_hw_is_reachable(struct qeth_card *card) | 79 | int qeth_card_hw_is_reachable(struct qeth_card *card) |
81 | { | 80 | { |
@@ -566,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card) | |||
566 | QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", | 565 | QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", |
567 | rc, CARD_DEVID(card)); | 566 | rc, CARD_DEVID(card)); |
568 | atomic_set(&channel->irq_pending, 0); | 567 | atomic_set(&channel->irq_pending, 0); |
568 | qeth_release_buffer(channel, iob); | ||
569 | card->read_or_write_problem = 1; | 569 | card->read_or_write_problem = 1; |
570 | qeth_schedule_recovery(card); | 570 | qeth_schedule_recovery(card); |
571 | wake_up(&card->wait_q); | 571 | wake_up(&card->wait_q); |
@@ -1127,6 +1127,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
1127 | rc = qeth_get_problem(card, cdev, irb); | 1127 | rc = qeth_get_problem(card, cdev, irb); |
1128 | if (rc) { | 1128 | if (rc) { |
1129 | card->read_or_write_problem = 1; | 1129 | card->read_or_write_problem = 1; |
1130 | if (iob) | ||
1131 | qeth_release_buffer(iob->channel, iob); | ||
1130 | qeth_clear_ipacmd_list(card); | 1132 | qeth_clear_ipacmd_list(card); |
1131 | qeth_schedule_recovery(card); | 1133 | qeth_schedule_recovery(card); |
1132 | goto out; | 1134 | goto out; |
@@ -1466,6 +1468,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) | |||
1466 | CARD_RDEV(card) = gdev->cdev[0]; | 1468 | CARD_RDEV(card) = gdev->cdev[0]; |
1467 | CARD_WDEV(card) = gdev->cdev[1]; | 1469 | CARD_WDEV(card) = gdev->cdev[1]; |
1468 | CARD_DDEV(card) = gdev->cdev[2]; | 1470 | CARD_DDEV(card) = gdev->cdev[2]; |
1471 | |||
1472 | card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev)); | ||
1473 | if (!card->event_wq) | ||
1474 | goto out_wq; | ||
1469 | if (qeth_setup_channel(&card->read, true)) | 1475 | if (qeth_setup_channel(&card->read, true)) |
1470 | goto out_ip; | 1476 | goto out_ip; |
1471 | if (qeth_setup_channel(&card->write, true)) | 1477 | if (qeth_setup_channel(&card->write, true)) |
@@ -1481,6 +1487,8 @@ out_data: | |||
1481 | out_channel: | 1487 | out_channel: |
1482 | qeth_clean_channel(&card->read); | 1488 | qeth_clean_channel(&card->read); |
1483 | out_ip: | 1489 | out_ip: |
1490 | destroy_workqueue(card->event_wq); | ||
1491 | out_wq: | ||
1484 | dev_set_drvdata(&gdev->dev, NULL); | 1492 | dev_set_drvdata(&gdev->dev, NULL); |
1485 | kfree(card); | 1493 | kfree(card); |
1486 | out: | 1494 | out: |
@@ -1809,6 +1817,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card, | |||
1809 | QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); | 1817 | QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); |
1810 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 1818 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
1811 | atomic_set(&channel->irq_pending, 0); | 1819 | atomic_set(&channel->irq_pending, 0); |
1820 | qeth_release_buffer(channel, iob); | ||
1812 | wake_up(&card->wait_q); | 1821 | wake_up(&card->wait_q); |
1813 | return rc; | 1822 | return rc; |
1814 | } | 1823 | } |
@@ -1878,6 +1887,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card, | |||
1878 | rc); | 1887 | rc); |
1879 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 1888 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1880 | atomic_set(&channel->irq_pending, 0); | 1889 | atomic_set(&channel->irq_pending, 0); |
1890 | qeth_release_buffer(channel, iob); | ||
1881 | wake_up(&card->wait_q); | 1891 | wake_up(&card->wait_q); |
1882 | return rc; | 1892 | return rc; |
1883 | } | 1893 | } |
@@ -2058,6 +2068,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
2058 | } | 2068 | } |
2059 | reply = qeth_alloc_reply(card); | 2069 | reply = qeth_alloc_reply(card); |
2060 | if (!reply) { | 2070 | if (!reply) { |
2071 | qeth_release_buffer(channel, iob); | ||
2061 | return -ENOMEM; | 2072 | return -ENOMEM; |
2062 | } | 2073 | } |
2063 | reply->callback = reply_cb; | 2074 | reply->callback = reply_cb; |
@@ -2389,11 +2400,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) | |||
2389 | return 0; | 2400 | return 0; |
2390 | } | 2401 | } |
2391 | 2402 | ||
2392 | static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) | 2403 | static void qeth_free_output_queue(struct qeth_qdio_out_q *q) |
2393 | { | 2404 | { |
2394 | if (!q) | 2405 | if (!q) |
2395 | return; | 2406 | return; |
2396 | 2407 | ||
2408 | qeth_clear_outq_buffers(q, 1); | ||
2397 | qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); | 2409 | qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); |
2398 | kfree(q); | 2410 | kfree(q); |
2399 | } | 2411 | } |
@@ -2467,10 +2479,8 @@ out_freeoutqbufs: | |||
2467 | card->qdio.out_qs[i]->bufs[j] = NULL; | 2479 | card->qdio.out_qs[i]->bufs[j] = NULL; |
2468 | } | 2480 | } |
2469 | out_freeoutq: | 2481 | out_freeoutq: |
2470 | while (i > 0) { | 2482 | while (i > 0) |
2471 | qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); | 2483 | qeth_free_output_queue(card->qdio.out_qs[--i]); |
2472 | qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); | ||
2473 | } | ||
2474 | kfree(card->qdio.out_qs); | 2484 | kfree(card->qdio.out_qs); |
2475 | card->qdio.out_qs = NULL; | 2485 | card->qdio.out_qs = NULL; |
2476 | out_freepool: | 2486 | out_freepool: |
@@ -2503,10 +2513,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) | |||
2503 | qeth_free_buffer_pool(card); | 2513 | qeth_free_buffer_pool(card); |
2504 | /* free outbound qdio_qs */ | 2514 | /* free outbound qdio_qs */ |
2505 | if (card->qdio.out_qs) { | 2515 | if (card->qdio.out_qs) { |
2506 | for (i = 0; i < card->qdio.no_out_queues; ++i) { | 2516 | for (i = 0; i < card->qdio.no_out_queues; i++) |
2507 | qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); | 2517 | qeth_free_output_queue(card->qdio.out_qs[i]); |
2508 | qeth_free_qdio_out_buf(card->qdio.out_qs[i]); | ||
2509 | } | ||
2510 | kfree(card->qdio.out_qs); | 2518 | kfree(card->qdio.out_qs); |
2511 | card->qdio.out_qs = NULL; | 2519 | card->qdio.out_qs = NULL; |
2512 | } | 2520 | } |
@@ -5028,6 +5036,7 @@ static void qeth_core_free_card(struct qeth_card *card) | |||
5028 | qeth_clean_channel(&card->read); | 5036 | qeth_clean_channel(&card->read); |
5029 | qeth_clean_channel(&card->write); | 5037 | qeth_clean_channel(&card->write); |
5030 | qeth_clean_channel(&card->data); | 5038 | qeth_clean_channel(&card->data); |
5039 | destroy_workqueue(card->event_wq); | ||
5031 | qeth_free_qdio_buffers(card); | 5040 | qeth_free_qdio_buffers(card); |
5032 | unregister_service_level(&card->qeth_service_level); | 5041 | unregister_service_level(&card->qeth_service_level); |
5033 | dev_set_drvdata(&card->gdev->dev, NULL); | 5042 | dev_set_drvdata(&card->gdev->dev, NULL); |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index f108d4b44605..a43de2f9bcac 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -369,6 +369,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) | |||
369 | qeth_clear_cmd_buffers(&card->read); | 369 | qeth_clear_cmd_buffers(&card->read); |
370 | qeth_clear_cmd_buffers(&card->write); | 370 | qeth_clear_cmd_buffers(&card->write); |
371 | } | 371 | } |
372 | |||
373 | flush_workqueue(card->event_wq); | ||
372 | } | 374 | } |
373 | 375 | ||
374 | static int qeth_l2_process_inbound_buffer(struct qeth_card *card, | 376 | static int qeth_l2_process_inbound_buffer(struct qeth_card *card, |
@@ -801,6 +803,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) | |||
801 | 803 | ||
802 | if (cgdev->state == CCWGROUP_ONLINE) | 804 | if (cgdev->state == CCWGROUP_ONLINE) |
803 | qeth_l2_set_offline(cgdev); | 805 | qeth_l2_set_offline(cgdev); |
806 | |||
807 | cancel_work_sync(&card->close_dev_work); | ||
804 | if (qeth_netdev_is_registered(card->dev)) | 808 | if (qeth_netdev_is_registered(card->dev)) |
805 | unregister_netdev(card->dev); | 809 | unregister_netdev(card->dev); |
806 | } | 810 | } |
@@ -1434,7 +1438,7 @@ static void qeth_bridge_state_change(struct qeth_card *card, | |||
1434 | data->card = card; | 1438 | data->card = card; |
1435 | memcpy(&data->qports, qports, | 1439 | memcpy(&data->qports, qports, |
1436 | sizeof(struct qeth_sbp_state_change) + extrasize); | 1440 | sizeof(struct qeth_sbp_state_change) + extrasize); |
1437 | queue_work(qeth_wq, &data->worker); | 1441 | queue_work(card->event_wq, &data->worker); |
1438 | } | 1442 | } |
1439 | 1443 | ||
1440 | struct qeth_bridge_host_data { | 1444 | struct qeth_bridge_host_data { |
@@ -1506,7 +1510,7 @@ static void qeth_bridge_host_event(struct qeth_card *card, | |||
1506 | data->card = card; | 1510 | data->card = card; |
1507 | memcpy(&data->hostevs, hostevs, | 1511 | memcpy(&data->hostevs, hostevs, |
1508 | sizeof(struct qeth_ipacmd_addr_change) + extrasize); | 1512 | sizeof(struct qeth_ipacmd_addr_change) + extrasize); |
1509 | queue_work(qeth_wq, &data->worker); | 1513 | queue_work(card->event_wq, &data->worker); |
1510 | } | 1514 | } |
1511 | 1515 | ||
1512 | /* SETBRIDGEPORT support; sending commands */ | 1516 | /* SETBRIDGEPORT support; sending commands */ |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 42a7cdc59b76..df34bff4ac31 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -1433,6 +1433,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) | |||
1433 | qeth_clear_cmd_buffers(&card->read); | 1433 | qeth_clear_cmd_buffers(&card->read); |
1434 | qeth_clear_cmd_buffers(&card->write); | 1434 | qeth_clear_cmd_buffers(&card->write); |
1435 | } | 1435 | } |
1436 | |||
1437 | flush_workqueue(card->event_wq); | ||
1436 | } | 1438 | } |
1437 | 1439 | ||
1438 | /* | 1440 | /* |
@@ -2338,6 +2340,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) | |||
2338 | if (cgdev->state == CCWGROUP_ONLINE) | 2340 | if (cgdev->state == CCWGROUP_ONLINE) |
2339 | qeth_l3_set_offline(cgdev); | 2341 | qeth_l3_set_offline(cgdev); |
2340 | 2342 | ||
2343 | cancel_work_sync(&card->close_dev_work); | ||
2341 | if (qeth_netdev_is_registered(card->dev)) | 2344 | if (qeth_netdev_is_registered(card->dev)) |
2342 | unregister_netdev(card->dev); | 2345 | unregister_netdev(card->dev); |
2343 | qeth_l3_clear_ip_htable(card, 0); | 2346 | qeth_l3_clear_ip_htable(card, 0); |
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index f83f79b07b50..07efcb9b5b94 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c | |||
@@ -280,7 +280,7 @@ static ssize_t asd_show_dev_rev(struct device *dev, | |||
280 | return snprintf(buf, PAGE_SIZE, "%s\n", | 280 | return snprintf(buf, PAGE_SIZE, "%s\n", |
281 | asd_dev_rev[asd_ha->revision_id]); | 281 | asd_dev_rev[asd_ha->revision_id]); |
282 | } | 282 | } |
283 | static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); | 283 | static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL); |
284 | 284 | ||
285 | static ssize_t asd_show_dev_bios_build(struct device *dev, | 285 | static ssize_t asd_show_dev_bios_build(struct device *dev, |
286 | struct device_attribute *attr,char *buf) | 286 | struct device_attribute *attr,char *buf) |
@@ -477,7 +477,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) | |||
477 | { | 477 | { |
478 | int err; | 478 | int err; |
479 | 479 | ||
480 | err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); | 480 | err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); |
481 | if (err) | 481 | if (err) |
482 | return err; | 482 | return err; |
483 | 483 | ||
@@ -499,13 +499,13 @@ err_update_bios: | |||
499 | err_biosb: | 499 | err_biosb: |
500 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); | 500 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); |
501 | err_rev: | 501 | err_rev: |
502 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); | 502 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); |
503 | return err; | 503 | return err; |
504 | } | 504 | } |
505 | 505 | ||
506 | static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) | 506 | static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) |
507 | { | 507 | { |
508 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); | 508 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); |
509 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); | 509 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); |
510 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); | 510 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); |
511 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); | 511 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); |
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index bfa13e3b191c..c8bad2c093b8 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c | |||
@@ -3687,6 +3687,7 @@ static int cxlflash_probe(struct pci_dev *pdev, | |||
3687 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; | 3687 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; |
3688 | 3688 | ||
3689 | cfg = shost_priv(host); | 3689 | cfg = shost_priv(host); |
3690 | cfg->state = STATE_PROBING; | ||
3690 | cfg->host = host; | 3691 | cfg->host = host; |
3691 | rc = alloc_mem(cfg); | 3692 | rc = alloc_mem(cfg); |
3692 | if (rc) { | 3693 | if (rc) { |
@@ -3775,6 +3776,7 @@ out: | |||
3775 | return rc; | 3776 | return rc; |
3776 | 3777 | ||
3777 | out_remove: | 3778 | out_remove: |
3779 | cfg->state = STATE_PROBED; | ||
3778 | cxlflash_remove(pdev); | 3780 | cxlflash_remove(pdev); |
3779 | goto out; | 3781 | goto out; |
3780 | } | 3782 | } |
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 9192a1d9dec6..dfba4921b265 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref) | |||
184 | struct fc_rport_priv *rdata; | 184 | struct fc_rport_priv *rdata; |
185 | 185 | ||
186 | rdata = container_of(kref, struct fc_rport_priv, kref); | 186 | rdata = container_of(kref, struct fc_rport_priv, kref); |
187 | WARN_ON(!list_empty(&rdata->peers)); | ||
188 | kfree_rcu(rdata, rcu); | 187 | kfree_rcu(rdata, rcu); |
189 | } | 188 | } |
190 | EXPORT_SYMBOL(fc_rport_destroy); | 189 | EXPORT_SYMBOL(fc_rport_destroy); |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b8d325ce8754..120fc520f27a 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1459,7 +1459,13 @@ static int iscsi_xmit_task(struct iscsi_conn *conn) | |||
1459 | if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) | 1459 | if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) |
1460 | return -ENODATA; | 1460 | return -ENODATA; |
1461 | 1461 | ||
1462 | spin_lock_bh(&conn->session->back_lock); | ||
1463 | if (conn->task == NULL) { | ||
1464 | spin_unlock_bh(&conn->session->back_lock); | ||
1465 | return -ENODATA; | ||
1466 | } | ||
1462 | __iscsi_get_task(task); | 1467 | __iscsi_get_task(task); |
1468 | spin_unlock_bh(&conn->session->back_lock); | ||
1463 | spin_unlock_bh(&conn->session->frwd_lock); | 1469 | spin_unlock_bh(&conn->session->frwd_lock); |
1464 | rc = conn->session->tt->xmit_task(task); | 1470 | rc = conn->session->tt->xmit_task(task); |
1465 | spin_lock_bh(&conn->session->frwd_lock); | 1471 | spin_lock_bh(&conn->session->frwd_lock); |
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 17eb4185f29d..f21c93bbb35c 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -828,6 +828,7 @@ static struct domain_device *sas_ex_discover_end_dev( | |||
828 | rphy = sas_end_device_alloc(phy->port); | 828 | rphy = sas_end_device_alloc(phy->port); |
829 | if (!rphy) | 829 | if (!rphy) |
830 | goto out_free; | 830 | goto out_free; |
831 | rphy->identify.phy_identifier = phy_id; | ||
831 | 832 | ||
832 | child->rphy = rphy; | 833 | child->rphy = rphy; |
833 | get_device(&rphy->dev); | 834 | get_device(&rphy->dev); |
@@ -854,6 +855,7 @@ static struct domain_device *sas_ex_discover_end_dev( | |||
854 | 855 | ||
855 | child->rphy = rphy; | 856 | child->rphy = rphy; |
856 | get_device(&rphy->dev); | 857 | get_device(&rphy->dev); |
858 | rphy->identify.phy_identifier = phy_id; | ||
857 | sas_fill_in_rphy(child, rphy); | 859 | sas_fill_in_rphy(child, rphy); |
858 | 860 | ||
859 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); | 861 | list_add_tail(&child->disco_list_node, &parent->port->disco_list); |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index aeeb0144bd55..8d1acc802a67 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -1785,13 +1785,13 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, | |||
1785 | 1785 | ||
1786 | /* Issue Marker IOCB */ | 1786 | /* Issue Marker IOCB */ |
1787 | qla2x00_marker(vha, vha->hw->req_q_map[0], | 1787 | qla2x00_marker(vha, vha->hw->req_q_map[0], |
1788 | vha->hw->rsp_q_map[0], sp->fcport->loop_id, lun, | 1788 | vha->hw->rsp_q_map[0], fcport->loop_id, lun, |
1789 | flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); | 1789 | flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); |
1790 | } | 1790 | } |
1791 | 1791 | ||
1792 | done_free_sp: | 1792 | done_free_sp: |
1793 | sp->free(sp); | 1793 | sp->free(sp); |
1794 | sp->fcport->flags &= ~FCF_ASYNC_SENT; | 1794 | fcport->flags &= ~FCF_ASYNC_SENT; |
1795 | done: | 1795 | done: |
1796 | return rval; | 1796 | return rval; |
1797 | } | 1797 | } |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 6d65ac584eba..f8d51c3d5582 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -655,6 +655,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result) | |||
655 | set_host_byte(cmd, DID_OK); | 655 | set_host_byte(cmd, DID_OK); |
656 | return BLK_STS_TARGET; | 656 | return BLK_STS_TARGET; |
657 | case DID_NEXUS_FAILURE: | 657 | case DID_NEXUS_FAILURE: |
658 | set_host_byte(cmd, DID_OK); | ||
658 | return BLK_STS_NEXUS; | 659 | return BLK_STS_NEXUS; |
659 | case DID_ALLOC_FAILURE: | 660 | case DID_ALLOC_FAILURE: |
660 | set_host_byte(cmd, DID_OK); | 661 | set_host_byte(cmd, DID_OK); |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b2da8a00ec33..5464d467e23e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2951,9 +2951,6 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) | |||
2951 | if (rot == 1) { | 2951 | if (rot == 1) { |
2952 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); | 2952 | blk_queue_flag_set(QUEUE_FLAG_NONROT, q); |
2953 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); | 2953 | blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); |
2954 | } else { | ||
2955 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); | ||
2956 | blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); | ||
2957 | } | 2954 | } |
2958 | 2955 | ||
2959 | if (sdkp->device->type == TYPE_ZBC) { | 2956 | if (sdkp->device->type == TYPE_ZBC) { |
@@ -3090,6 +3087,15 @@ static int sd_revalidate_disk(struct gendisk *disk) | |||
3090 | if (sdkp->media_present) { | 3087 | if (sdkp->media_present) { |
3091 | sd_read_capacity(sdkp, buffer); | 3088 | sd_read_capacity(sdkp, buffer); |
3092 | 3089 | ||
3090 | /* | ||
3091 | * set the default to rotational. All non-rotational devices | ||
3092 | * support the block characteristics VPD page, which will | ||
3093 | * cause this to be updated correctly and any device which | ||
3094 | * doesn't support it should be treated as rotational. | ||
3095 | */ | ||
3096 | blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); | ||
3097 | blk_queue_flag_set(QUEUE_FLAG_ADD_RANDOM, q); | ||
3098 | |||
3093 | if (scsi_device_supports_vpd(sdp)) { | 3099 | if (scsi_device_supports_vpd(sdp)) { |
3094 | sd_read_block_provisioning(sdkp); | 3100 | sd_read_block_provisioning(sdkp); |
3095 | sd_read_block_limits(sdkp); | 3101 | sd_read_block_limits(sdkp); |
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 83365b29a4d8..a340af797a85 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c | |||
@@ -142,10 +142,12 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, | |||
142 | return -EOPNOTSUPP; | 142 | return -EOPNOTSUPP; |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * Get a reply buffer for the number of requested zones plus a header. | 145 | * Get a reply buffer for the number of requested zones plus a header, |
146 | * For ATA, buffers must be aligned to 512B. | 146 | * without exceeding the device maximum command size. For ATA disks, |
147 | * buffers must be aligned to 512B. | ||
147 | */ | 148 | */ |
148 | buflen = roundup((nrz + 1) * 64, 512); | 149 | buflen = min(queue_max_hw_sectors(disk->queue) << 9, |
150 | roundup((nrz + 1) * 64, 512)); | ||
149 | buf = kmalloc(buflen, gfp_mask); | 151 | buf = kmalloc(buflen, gfp_mask); |
150 | if (!buf) | 152 | if (!buf) |
151 | return -ENOMEM; | 153 | return -ENOMEM; |
@@ -462,12 +464,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) | |||
462 | sdkp->device->use_10_for_rw = 0; | 464 | sdkp->device->use_10_for_rw = 0; |
463 | 465 | ||
464 | /* | 466 | /* |
465 | * If something changed, revalidate the disk zone bitmaps once we have | 467 | * Revalidate the disk zone bitmaps once the block device capacity is |
466 | * the capacity, that is on the second revalidate execution during disk | 468 | * set on the second revalidate execution during disk scan and if |
467 | * scan and always during normal revalidate. | 469 | * something changed when executing a normal revalidate. |
468 | */ | 470 | */ |
469 | if (sdkp->first_scan) | 471 | if (sdkp->first_scan) { |
472 | sdkp->zone_blocks = zone_blocks; | ||
473 | sdkp->nr_zones = nr_zones; | ||
470 | return 0; | 474 | return 0; |
475 | } | ||
476 | |||
471 | if (sdkp->zone_blocks != zone_blocks || | 477 | if (sdkp->zone_blocks != zone_blocks || |
472 | sdkp->nr_zones != nr_zones || | 478 | sdkp->nr_zones != nr_zones || |
473 | disk->queue->nr_zones != nr_zones) { | 479 | disk->queue->nr_zones != nr_zones) { |
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 52c153cd795a..636f83f781f5 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c | |||
@@ -1143,18 +1143,19 @@ static void qm_mr_process_task(struct work_struct *work); | |||
1143 | static irqreturn_t portal_isr(int irq, void *ptr) | 1143 | static irqreturn_t portal_isr(int irq, void *ptr) |
1144 | { | 1144 | { |
1145 | struct qman_portal *p = ptr; | 1145 | struct qman_portal *p = ptr; |
1146 | |||
1147 | u32 clear = QM_DQAVAIL_MASK | p->irq_sources; | ||
1148 | u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; | 1146 | u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; |
1147 | u32 clear = 0; | ||
1149 | 1148 | ||
1150 | if (unlikely(!is)) | 1149 | if (unlikely(!is)) |
1151 | return IRQ_NONE; | 1150 | return IRQ_NONE; |
1152 | 1151 | ||
1153 | /* DQRR-handling if it's interrupt-driven */ | 1152 | /* DQRR-handling if it's interrupt-driven */ |
1154 | if (is & QM_PIRQ_DQRI) | 1153 | if (is & QM_PIRQ_DQRI) { |
1155 | __poll_portal_fast(p, QMAN_POLL_LIMIT); | 1154 | __poll_portal_fast(p, QMAN_POLL_LIMIT); |
1155 | clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI; | ||
1156 | } | ||
1156 | /* Handling of anything else that's interrupt-driven */ | 1157 | /* Handling of anything else that's interrupt-driven */ |
1157 | clear |= __poll_portal_slow(p, is); | 1158 | clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW; |
1158 | qm_out(&p->p, QM_REG_ISR, clear); | 1159 | qm_out(&p->p, QM_REG_ISR, clear); |
1159 | return IRQ_HANDLED; | 1160 | return IRQ_HANDLED; |
1160 | } | 1161 | } |
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index 2848fa71a33d..d6248eecf123 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c | |||
@@ -170,7 +170,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev) | |||
170 | return -ENODEV; | 170 | return -ENODEV; |
171 | 171 | ||
172 | priv->last_link = 0; | 172 | priv->last_link = 0; |
173 | phy_start_aneg(phydev); | 173 | phy_start(phydev); |
174 | 174 | ||
175 | return 0; | 175 | return 0; |
176 | no_phy: | 176 | no_phy: |
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c index c92bbd05516e..005de0024dd4 100644 --- a/drivers/staging/speakup/spk_ttyio.c +++ b/drivers/staging/speakup/spk_ttyio.c | |||
@@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch) | |||
265 | return; | 265 | return; |
266 | } | 266 | } |
267 | 267 | ||
268 | speakup_tty->ops->send_xchar(speakup_tty, ch); | 268 | if (speakup_tty->ops->send_xchar) |
269 | speakup_tty->ops->send_xchar(speakup_tty, ch); | ||
269 | mutex_unlock(&speakup_tty_mutex); | 270 | mutex_unlock(&speakup_tty_mutex); |
270 | } | 271 | } |
271 | 272 | ||
@@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear) | |||
277 | return; | 278 | return; |
278 | } | 279 | } |
279 | 280 | ||
280 | speakup_tty->ops->tiocmset(speakup_tty, set, clear); | 281 | if (speakup_tty->ops->tiocmset) |
282 | speakup_tty->ops->tiocmset(speakup_tty, set, clear); | ||
281 | mutex_unlock(&speakup_tty_mutex); | 283 | mutex_unlock(&speakup_tty_mutex); |
282 | } | 284 | } |
283 | 285 | ||
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 72016d0dfca5..8e7fffbb8802 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -852,6 +852,12 @@ static ssize_t pi_prot_type_store(struct config_item *item, | |||
852 | return count; | 852 | return count; |
853 | } | 853 | } |
854 | 854 | ||
855 | /* always zero, but attr needs to remain RW to avoid userspace breakage */ | ||
856 | static ssize_t pi_prot_format_show(struct config_item *item, char *page) | ||
857 | { | ||
858 | return snprintf(page, PAGE_SIZE, "0\n"); | ||
859 | } | ||
860 | |||
855 | static ssize_t pi_prot_format_store(struct config_item *item, | 861 | static ssize_t pi_prot_format_store(struct config_item *item, |
856 | const char *page, size_t count) | 862 | const char *page, size_t count) |
857 | { | 863 | { |
@@ -1132,7 +1138,7 @@ CONFIGFS_ATTR(, emulate_3pc); | |||
1132 | CONFIGFS_ATTR(, emulate_pr); | 1138 | CONFIGFS_ATTR(, emulate_pr); |
1133 | CONFIGFS_ATTR(, pi_prot_type); | 1139 | CONFIGFS_ATTR(, pi_prot_type); |
1134 | CONFIGFS_ATTR_RO(, hw_pi_prot_type); | 1140 | CONFIGFS_ATTR_RO(, hw_pi_prot_type); |
1135 | CONFIGFS_ATTR_WO(, pi_prot_format); | 1141 | CONFIGFS_ATTR(, pi_prot_format); |
1136 | CONFIGFS_ATTR(, pi_prot_verify); | 1142 | CONFIGFS_ATTR(, pi_prot_verify); |
1137 | CONFIGFS_ATTR(, enforce_pr_isids); | 1143 | CONFIGFS_ATTR(, enforce_pr_isids); |
1138 | CONFIGFS_ATTR(, is_nonrot); | 1144 | CONFIGFS_ATTR(, is_nonrot); |
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index dfd23245f778..6fff16113628 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
@@ -774,7 +774,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) | |||
774 | 774 | ||
775 | cdev = __cpufreq_cooling_register(np, policy, capacitance); | 775 | cdev = __cpufreq_cooling_register(np, policy, capacitance); |
776 | if (IS_ERR(cdev)) { | 776 | if (IS_ERR(cdev)) { |
777 | pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n", | 777 | pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n", |
778 | policy->cpu, PTR_ERR(cdev)); | 778 | policy->cpu, PTR_ERR(cdev)); |
779 | cdev = NULL; | 779 | cdev = NULL; |
780 | } | 780 | } |
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 4bfdb4a1e47d..2df059cc07e2 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c | |||
@@ -867,14 +867,14 @@ __init *thermal_of_build_thermal_zone(struct device_node *np) | |||
867 | 867 | ||
868 | ret = of_property_read_u32(np, "polling-delay-passive", &prop); | 868 | ret = of_property_read_u32(np, "polling-delay-passive", &prop); |
869 | if (ret < 0) { | 869 | if (ret < 0) { |
870 | pr_err("missing polling-delay-passive property\n"); | 870 | pr_err("%pOFn: missing polling-delay-passive property\n", np); |
871 | goto free_tz; | 871 | goto free_tz; |
872 | } | 872 | } |
873 | tz->passive_delay = prop; | 873 | tz->passive_delay = prop; |
874 | 874 | ||
875 | ret = of_property_read_u32(np, "polling-delay", &prop); | 875 | ret = of_property_read_u32(np, "polling-delay", &prop); |
876 | if (ret < 0) { | 876 | if (ret < 0) { |
877 | pr_err("missing polling-delay property\n"); | 877 | pr_err("%pOFn: missing polling-delay property\n", np); |
878 | goto free_tz; | 878 | goto free_tz; |
879 | } | 879 | } |
880 | tz->polling_delay = prop; | 880 | tz->polling_delay = prop; |
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index e2c407656fa6..c1fdbc0b6840 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c | |||
@@ -357,6 +357,9 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, | |||
357 | if (dmacnt == 2) { | 357 | if (dmacnt == 2) { |
358 | data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), | 358 | data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), |
359 | GFP_KERNEL); | 359 | GFP_KERNEL); |
360 | if (!data->dma) | ||
361 | return -ENOMEM; | ||
362 | |||
360 | data->dma->fn = mtk8250_dma_filter; | 363 | data->dma->fn = mtk8250_dma_filter; |
361 | data->dma->rx_size = MTK_UART_RX_SIZE; | 364 | data->dma->rx_size = MTK_UART_RX_SIZE; |
362 | data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER; | 365 | data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER; |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index f80a300b5d68..48bd694a5fa1 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -3420,6 +3420,11 @@ static int | |||
3420 | serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) | 3420 | serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) |
3421 | { | 3421 | { |
3422 | int num_iomem, num_port, first_port = -1, i; | 3422 | int num_iomem, num_port, first_port = -1, i; |
3423 | int rc; | ||
3424 | |||
3425 | rc = serial_pci_is_class_communication(dev); | ||
3426 | if (rc) | ||
3427 | return rc; | ||
3423 | 3428 | ||
3424 | /* | 3429 | /* |
3425 | * Should we try to make guesses for multiport serial devices later? | 3430 | * Should we try to make guesses for multiport serial devices later? |
@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) | |||
3647 | 3652 | ||
3648 | board = &pci_boards[ent->driver_data]; | 3653 | board = &pci_boards[ent->driver_data]; |
3649 | 3654 | ||
3650 | rc = serial_pci_is_class_communication(dev); | ||
3651 | if (rc) | ||
3652 | return rc; | ||
3653 | |||
3654 | rc = serial_pci_is_blacklisted(dev); | 3655 | rc = serial_pci_is_blacklisted(dev); |
3655 | if (rc) | 3656 | if (rc) |
3656 | return rc; | 3657 | return rc; |
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 5c01bb6d1c24..556f50aa1b58 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c | |||
@@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty) | |||
130 | struct uart_port *port; | 130 | struct uart_port *port; |
131 | unsigned long flags; | 131 | unsigned long flags; |
132 | 132 | ||
133 | if (!state) | ||
134 | return; | ||
135 | |||
133 | port = uart_port_lock(state, flags); | 136 | port = uart_port_lock(state, flags); |
134 | __uart_start(tty); | 137 | __uart_start(tty); |
135 | uart_port_unlock(port, flags); | 138 | uart_port_unlock(port, flags); |
@@ -727,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty) | |||
727 | upstat_t mask = UPSTAT_SYNC_FIFO; | 730 | upstat_t mask = UPSTAT_SYNC_FIFO; |
728 | struct uart_port *port; | 731 | struct uart_port *port; |
729 | 732 | ||
733 | if (!state) | ||
734 | return; | ||
735 | |||
730 | port = uart_port_ref(state); | 736 | port = uart_port_ref(state); |
731 | if (!port) | 737 | if (!port) |
732 | return; | 738 | return; |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 8df0fd824520..64bbeb7d7e0c 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -1921,7 +1921,7 @@ out_nomem: | |||
1921 | 1921 | ||
1922 | static void sci_free_irq(struct sci_port *port) | 1922 | static void sci_free_irq(struct sci_port *port) |
1923 | { | 1923 | { |
1924 | int i; | 1924 | int i, j; |
1925 | 1925 | ||
1926 | /* | 1926 | /* |
1927 | * Intentionally in reverse order so we iterate over the muxed | 1927 | * Intentionally in reverse order so we iterate over the muxed |
@@ -1937,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port) | |||
1937 | if (unlikely(irq < 0)) | 1937 | if (unlikely(irq < 0)) |
1938 | continue; | 1938 | continue; |
1939 | 1939 | ||
1940 | /* Check if already freed (irq was muxed) */ | ||
1941 | for (j = 0; j < i; j++) | ||
1942 | if (port->irqs[j] == irq) | ||
1943 | j = i + 1; | ||
1944 | if (j > i) | ||
1945 | continue; | ||
1946 | |||
1940 | free_irq(port->irqs[i], port); | 1947 | free_irq(port->irqs[i], port); |
1941 | kfree(port->irqstr[i]); | 1948 | kfree(port->irqstr[i]); |
1942 | 1949 | ||
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index cb7fcd7c0ad8..c1e9ea621f41 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c | |||
@@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev) | |||
78 | for (i = 0; i < exynos->num_clks; i++) { | 78 | for (i = 0; i < exynos->num_clks; i++) { |
79 | ret = clk_prepare_enable(exynos->clks[i]); | 79 | ret = clk_prepare_enable(exynos->clks[i]); |
80 | if (ret) { | 80 | if (ret) { |
81 | while (--i > 0) | 81 | while (i-- > 0) |
82 | clk_disable_unprepare(exynos->clks[i]); | 82 | clk_disable_unprepare(exynos->clks[i]); |
83 | return ret; | 83 | return ret; |
84 | } | 84 | } |
@@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev) | |||
223 | for (i = 0; i < exynos->num_clks; i++) { | 223 | for (i = 0; i < exynos->num_clks; i++) { |
224 | ret = clk_prepare_enable(exynos->clks[i]); | 224 | ret = clk_prepare_enable(exynos->clks[i]); |
225 | if (ret) { | 225 | if (ret) { |
226 | while (--i > 0) | 226 | while (i-- > 0) |
227 | clk_disable_unprepare(exynos->clks[i]); | 227 | clk_disable_unprepare(exynos->clks[i]); |
228 | return ret; | 228 | return ret; |
229 | } | 229 | } |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index bed2ff42780b..6c9b76bcc2e1 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -1119,7 +1119,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, | |||
1119 | unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); | 1119 | unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); |
1120 | unsigned int rem = length % maxp; | 1120 | unsigned int rem = length % maxp; |
1121 | 1121 | ||
1122 | if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { | 1122 | if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) { |
1123 | struct dwc3 *dwc = dep->dwc; | 1123 | struct dwc3 *dwc = dep->dwc; |
1124 | struct dwc3_trb *trb; | 1124 | struct dwc3_trb *trb; |
1125 | 1125 | ||
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 660878a19505..b77f3126580e 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c | |||
@@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev) | |||
2083 | #if defined(PLX_PCI_RDK2) | 2083 | #if defined(PLX_PCI_RDK2) |
2084 | /* see if PCI int for us by checking irqstat */ | 2084 | /* see if PCI int for us by checking irqstat */ |
2085 | intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); | 2085 | intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); |
2086 | if (!intcsr & (1 << NET2272_PCI_IRQ)) { | 2086 | if (!(intcsr & (1 << NET2272_PCI_IRQ))) { |
2087 | spin_unlock(&dev->lock); | 2087 | spin_unlock(&dev->lock); |
2088 | return IRQ_NONE; | 2088 | return IRQ_NONE; |
2089 | } | 2089 | } |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index eae8b1b1b45b..ffe462a657b1 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
452 | } | 452 | } |
453 | 453 | ||
454 | if (request) { | 454 | if (request) { |
455 | u8 is_dma = 0; | ||
456 | bool short_packet = false; | ||
457 | 455 | ||
458 | trace_musb_req_tx(req); | 456 | trace_musb_req_tx(req); |
459 | 457 | ||
460 | if (dma && (csr & MUSB_TXCSR_DMAENAB)) { | 458 | if (dma && (csr & MUSB_TXCSR_DMAENAB)) { |
461 | is_dma = 1; | ||
462 | csr |= MUSB_TXCSR_P_WZC_BITS; | 459 | csr |= MUSB_TXCSR_P_WZC_BITS; |
463 | csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | | 460 | csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | |
464 | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); | 461 | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); |
@@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
476 | */ | 473 | */ |
477 | if ((request->zero && request->length) | 474 | if ((request->zero && request->length) |
478 | && (request->length % musb_ep->packet_sz == 0) | 475 | && (request->length % musb_ep->packet_sz == 0) |
479 | && (request->actual == request->length)) | 476 | && (request->actual == request->length)) { |
480 | short_packet = true; | ||
481 | 477 | ||
482 | if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) && | ||
483 | (is_dma && (!dma->desired_mode || | ||
484 | (request->actual & | ||
485 | (musb_ep->packet_sz - 1))))) | ||
486 | short_packet = true; | ||
487 | |||
488 | if (short_packet) { | ||
489 | /* | 478 | /* |
490 | * On DMA completion, FIFO may not be | 479 | * On DMA completion, FIFO may not be |
491 | * available yet... | 480 | * available yet... |
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index a688f7f87829..5fc6825745f2 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c | |||
@@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) | |||
346 | channel->status = MUSB_DMA_STATUS_FREE; | 346 | channel->status = MUSB_DMA_STATUS_FREE; |
347 | 347 | ||
348 | /* completed */ | 348 | /* completed */ |
349 | if ((devctl & MUSB_DEVCTL_HM) | 349 | if (musb_channel->transmit && |
350 | && (musb_channel->transmit) | 350 | (!channel->desired_mode || |
351 | && ((channel->desired_mode == 0) | 351 | (channel->actual_len % |
352 | || (channel->actual_len & | 352 | musb_channel->max_packet_sz))) { |
353 | (musb_channel->max_packet_sz - 1))) | ||
354 | ) { | ||
355 | u8 epnum = musb_channel->epnum; | 353 | u8 epnum = musb_channel->epnum; |
356 | int offset = musb->io.ep_offset(epnum, | 354 | int offset = musb->io.ep_offset(epnum, |
357 | MUSB_TXCSR); | 355 | MUSB_TXCSR); |
@@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) | |||
363 | */ | 361 | */ |
364 | musb_ep_select(mbase, epnum); | 362 | musb_ep_select(mbase, epnum); |
365 | txcsr = musb_readw(mbase, offset); | 363 | txcsr = musb_readw(mbase, offset); |
366 | txcsr &= ~(MUSB_TXCSR_DMAENAB | 364 | if (channel->desired_mode == 1) { |
365 | txcsr &= ~(MUSB_TXCSR_DMAENAB | ||
367 | | MUSB_TXCSR_AUTOSET); | 366 | | MUSB_TXCSR_AUTOSET); |
368 | musb_writew(mbase, offset, txcsr); | 367 | musb_writew(mbase, offset, txcsr); |
369 | /* Send out the packet */ | 368 | /* Send out the packet */ |
370 | txcsr &= ~MUSB_TXCSR_DMAMODE; | 369 | txcsr &= ~MUSB_TXCSR_DMAMODE; |
370 | txcsr |= MUSB_TXCSR_DMAENAB; | ||
371 | } | ||
371 | txcsr |= MUSB_TXCSR_TXPKTRDY; | 372 | txcsr |= MUSB_TXCSR_TXPKTRDY; |
372 | musb_writew(mbase, offset, txcsr); | 373 | musb_writew(mbase, offset, txcsr); |
373 | } | 374 | } |
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index d7312eed6088..91ea3083e7ad 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig | |||
@@ -21,7 +21,7 @@ config AB8500_USB | |||
21 | 21 | ||
22 | config FSL_USB2_OTG | 22 | config FSL_USB2_OTG |
23 | bool "Freescale USB OTG Transceiver Driver" | 23 | bool "Freescale USB OTG Transceiver Driver" |
24 | depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM | 24 | depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM |
25 | depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' | 25 | depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' |
26 | select USB_PHY | 26 | select USB_PHY |
27 | help | 27 | help |
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index 27bdb7222527..f5f0568d8533 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c | |||
@@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev) | |||
61 | if (ret) | 61 | if (ret) |
62 | return ret; | 62 | return ret; |
63 | 63 | ||
64 | ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy); | ||
65 | if (ret) | ||
66 | return ret; | ||
67 | am_phy->usb_phy_gen.phy.init = am335x_init; | 64 | am_phy->usb_phy_gen.phy.init = am335x_init; |
68 | am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; | 65 | am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; |
69 | 66 | ||
@@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev) | |||
82 | device_set_wakeup_enable(dev, false); | 79 | device_set_wakeup_enable(dev, false); |
83 | phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); | 80 | phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); |
84 | 81 | ||
85 | return 0; | 82 | return usb_add_phy_dev(&am_phy->usb_phy_gen.phy); |
86 | } | 83 | } |
87 | 84 | ||
88 | static int am335x_phy_remove(struct platform_device *pdev) | 85 | static int am335x_phy_remove(struct platform_device *pdev) |
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 4bc29b586698..f1c39a3c7534 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c | |||
@@ -2297,7 +2297,8 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) | |||
2297 | pdo_pps_apdo_max_voltage(snk)); | 2297 | pdo_pps_apdo_max_voltage(snk)); |
2298 | port->pps_data.max_curr = min_pps_apdo_current(src, snk); | 2298 | port->pps_data.max_curr = min_pps_apdo_current(src, snk); |
2299 | port->pps_data.out_volt = min(port->pps_data.max_volt, | 2299 | port->pps_data.out_volt = min(port->pps_data.max_volt, |
2300 | port->pps_data.out_volt); | 2300 | max(port->pps_data.min_volt, |
2301 | port->pps_data.out_volt)); | ||
2301 | port->pps_data.op_curr = min(port->pps_data.max_curr, | 2302 | port->pps_data.op_curr = min(port->pps_data.max_curr, |
2302 | port->pps_data.op_curr); | 2303 | port->pps_data.op_curr); |
2303 | } | 2304 | } |
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 24a129fcdd61..a2e5dc7716e2 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -1788,7 +1788,7 @@ static int log_used(struct vhost_virtqueue *vq, u64 used_offset, u64 len) | |||
1788 | 1788 | ||
1789 | ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, | 1789 | ret = translate_desc(vq, (uintptr_t)vq->used + used_offset, |
1790 | len, iov, 64, VHOST_ACCESS_WO); | 1790 | len, iov, 64, VHOST_ACCESS_WO); |
1791 | if (ret) | 1791 | if (ret < 0) |
1792 | return ret; | 1792 | return ret; |
1793 | 1793 | ||
1794 | for (i = 0; i < ret; i++) { | 1794 | for (i = 0; i < ret; i++) { |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index cd7e755484e3..a0b07c331255 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -152,7 +152,12 @@ struct vring_virtqueue { | |||
152 | /* Available for packed ring */ | 152 | /* Available for packed ring */ |
153 | struct { | 153 | struct { |
154 | /* Actual memory layout for this queue. */ | 154 | /* Actual memory layout for this queue. */ |
155 | struct vring_packed vring; | 155 | struct { |
156 | unsigned int num; | ||
157 | struct vring_packed_desc *desc; | ||
158 | struct vring_packed_desc_event *driver; | ||
159 | struct vring_packed_desc_event *device; | ||
160 | } vring; | ||
156 | 161 | ||
157 | /* Driver ring wrap counter. */ | 162 | /* Driver ring wrap counter. */ |
158 | bool avail_wrap_counter; | 163 | bool avail_wrap_counter; |
@@ -1609,6 +1614,9 @@ static struct virtqueue *vring_create_virtqueue_packed( | |||
1609 | !context; | 1614 | !context; |
1610 | vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); | 1615 | vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); |
1611 | 1616 | ||
1617 | if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) | ||
1618 | vq->weak_barriers = false; | ||
1619 | |||
1612 | vq->packed.ring_dma_addr = ring_dma_addr; | 1620 | vq->packed.ring_dma_addr = ring_dma_addr; |
1613 | vq->packed.driver_event_dma_addr = driver_event_dma_addr; | 1621 | vq->packed.driver_event_dma_addr = driver_event_dma_addr; |
1614 | vq->packed.device_event_dma_addr = device_event_dma_addr; | 1622 | vq->packed.device_event_dma_addr = device_event_dma_addr; |
@@ -2079,6 +2087,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, | |||
2079 | !context; | 2087 | !context; |
2080 | vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); | 2088 | vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); |
2081 | 2089 | ||
2090 | if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) | ||
2091 | vq->weak_barriers = false; | ||
2092 | |||
2082 | vq->split.queue_dma_addr = 0; | 2093 | vq->split.queue_dma_addr = 0; |
2083 | vq->split.queue_size_in_bytes = 0; | 2094 | vq->split.queue_size_in_bytes = 0; |
2084 | 2095 | ||
@@ -2213,6 +2224,8 @@ void vring_transport_features(struct virtio_device *vdev) | |||
2213 | break; | 2224 | break; |
2214 | case VIRTIO_F_RING_PACKED: | 2225 | case VIRTIO_F_RING_PACKED: |
2215 | break; | 2226 | break; |
2227 | case VIRTIO_F_ORDER_PLATFORM: | ||
2228 | break; | ||
2216 | default: | 2229 | default: |
2217 | /* We don't understand this bit. */ | 2230 | /* We don't understand this bit. */ |
2218 | __virtio_clear_bit(vdev, i); | 2231 | __virtio_clear_bit(vdev, i); |
diff --git a/fs/afs/cell.c b/fs/afs/cell.c index cf445dbd5f2e..9de46116c749 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c | |||
@@ -173,6 +173,7 @@ static struct afs_cell *afs_alloc_cell(struct afs_net *net, | |||
173 | 173 | ||
174 | rcu_assign_pointer(cell->vl_servers, vllist); | 174 | rcu_assign_pointer(cell->vl_servers, vllist); |
175 | cell->dns_expiry = TIME64_MAX; | 175 | cell->dns_expiry = TIME64_MAX; |
176 | __clear_bit(AFS_CELL_FL_NO_LOOKUP_YET, &cell->flags); | ||
176 | } else { | 177 | } else { |
177 | cell->dns_expiry = ktime_get_real_seconds(); | 178 | cell->dns_expiry = ktime_get_real_seconds(); |
178 | } | 179 | } |
@@ -1436,6 +1436,7 @@ static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) | |||
1436 | if (unlikely(!req->ki_filp)) | 1436 | if (unlikely(!req->ki_filp)) |
1437 | return -EBADF; | 1437 | return -EBADF; |
1438 | req->ki_complete = aio_complete_rw; | 1438 | req->ki_complete = aio_complete_rw; |
1439 | req->private = NULL; | ||
1439 | req->ki_pos = iocb->aio_offset; | 1440 | req->ki_pos = iocb->aio_offset; |
1440 | req->ki_flags = iocb_flags(req->ki_filp); | 1441 | req->ki_flags = iocb_flags(req->ki_filp); |
1441 | if (iocb->aio_flags & IOCB_FLAG_RESFD) | 1442 | if (iocb->aio_flags & IOCB_FLAG_RESFD) |
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index d0078cbb718b..e996174cbfc0 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c | |||
@@ -14,13 +14,30 @@ | |||
14 | #include <linux/err.h> | 14 | #include <linux/err.h> |
15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
16 | 16 | ||
17 | static inline bool spacetab(char c) { return c == ' ' || c == '\t'; } | ||
18 | static inline char *next_non_spacetab(char *first, const char *last) | ||
19 | { | ||
20 | for (; first <= last; first++) | ||
21 | if (!spacetab(*first)) | ||
22 | return first; | ||
23 | return NULL; | ||
24 | } | ||
25 | static inline char *next_terminator(char *first, const char *last) | ||
26 | { | ||
27 | for (; first <= last; first++) | ||
28 | if (spacetab(*first) || !*first) | ||
29 | return first; | ||
30 | return NULL; | ||
31 | } | ||
32 | |||
17 | static int load_script(struct linux_binprm *bprm) | 33 | static int load_script(struct linux_binprm *bprm) |
18 | { | 34 | { |
19 | const char *i_arg, *i_name; | 35 | const char *i_arg, *i_name; |
20 | char *cp; | 36 | char *cp, *buf_end; |
21 | struct file *file; | 37 | struct file *file; |
22 | int retval; | 38 | int retval; |
23 | 39 | ||
40 | /* Not ours to exec if we don't start with "#!". */ | ||
24 | if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) | 41 | if ((bprm->buf[0] != '#') || (bprm->buf[1] != '!')) |
25 | return -ENOEXEC; | 42 | return -ENOEXEC; |
26 | 43 | ||
@@ -33,23 +50,41 @@ static int load_script(struct linux_binprm *bprm) | |||
33 | if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) | 50 | if (bprm->interp_flags & BINPRM_FLAGS_PATH_INACCESSIBLE) |
34 | return -ENOENT; | 51 | return -ENOENT; |
35 | 52 | ||
36 | /* | 53 | /* Release since we are not mapping a binary into memory. */ |
37 | * This section does the #! interpretation. | ||
38 | * Sorta complicated, but hopefully it will work. -TYT | ||
39 | */ | ||
40 | |||
41 | allow_write_access(bprm->file); | 54 | allow_write_access(bprm->file); |
42 | fput(bprm->file); | 55 | fput(bprm->file); |
43 | bprm->file = NULL; | 56 | bprm->file = NULL; |
44 | 57 | ||
45 | for (cp = bprm->buf+2;; cp++) { | 58 | /* |
46 | if (cp >= bprm->buf + BINPRM_BUF_SIZE) | 59 | * This section handles parsing the #! line into separate |
60 | * interpreter path and argument strings. We must be careful | ||
61 | * because bprm->buf is not yet guaranteed to be NUL-terminated | ||
62 | * (though the buffer will have trailing NUL padding when the | ||
63 | * file size was smaller than the buffer size). | ||
64 | * | ||
65 | * We do not want to exec a truncated interpreter path, so either | ||
66 | * we find a newline (which indicates nothing is truncated), or | ||
67 | * we find a space/tab/NUL after the interpreter path (which | ||
68 | * itself may be preceded by spaces/tabs). Truncating the | ||
69 | * arguments is fine: the interpreter can re-read the script to | ||
70 | * parse them on its own. | ||
71 | */ | ||
72 | buf_end = bprm->buf + sizeof(bprm->buf) - 1; | ||
73 | cp = strnchr(bprm->buf, sizeof(bprm->buf), '\n'); | ||
74 | if (!cp) { | ||
75 | cp = next_non_spacetab(bprm->buf + 2, buf_end); | ||
76 | if (!cp) | ||
77 | return -ENOEXEC; /* Entire buf is spaces/tabs */ | ||
78 | /* | ||
79 | * If there is no later space/tab/NUL we must assume the | ||
80 | * interpreter path is truncated. | ||
81 | */ | ||
82 | if (!next_terminator(cp, buf_end)) | ||
47 | return -ENOEXEC; | 83 | return -ENOEXEC; |
48 | if (!*cp || (*cp == '\n')) | 84 | cp = buf_end; |
49 | break; | ||
50 | } | 85 | } |
86 | /* NUL-terminate the buffer and any trailing spaces/tabs. */ | ||
51 | *cp = '\0'; | 87 | *cp = '\0'; |
52 | |||
53 | while (cp > bprm->buf) { | 88 | while (cp > bprm->buf) { |
54 | cp--; | 89 | cp--; |
55 | if ((*cp == ' ') || (*cp == '\t')) | 90 | if ((*cp == ' ') || (*cp == '\t')) |
diff --git a/fs/buffer.c b/fs/buffer.c index 52d024bfdbc1..48318fb74938 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -200,6 +200,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) | |||
200 | struct buffer_head *head; | 200 | struct buffer_head *head; |
201 | struct page *page; | 201 | struct page *page; |
202 | int all_mapped = 1; | 202 | int all_mapped = 1; |
203 | static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1); | ||
203 | 204 | ||
204 | index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); | 205 | index = block >> (PAGE_SHIFT - bd_inode->i_blkbits); |
205 | page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); | 206 | page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED); |
@@ -227,15 +228,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) | |||
227 | * file io on the block device and getblk. It gets dealt with | 228 | * file io on the block device and getblk. It gets dealt with |
228 | * elsewhere, don't buffer_error if we had some unmapped buffers | 229 | * elsewhere, don't buffer_error if we had some unmapped buffers |
229 | */ | 230 | */ |
230 | if (all_mapped) { | 231 | ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE); |
231 | printk("__find_get_block_slow() failed. " | 232 | if (all_mapped && __ratelimit(&last_warned)) { |
232 | "block=%llu, b_blocknr=%llu\n", | 233 | printk("__find_get_block_slow() failed. block=%llu, " |
233 | (unsigned long long)block, | 234 | "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, " |
234 | (unsigned long long)bh->b_blocknr); | 235 | "device %pg blocksize: %d\n", |
235 | printk("b_state=0x%08lx, b_size=%zu\n", | 236 | (unsigned long long)block, |
236 | bh->b_state, bh->b_size); | 237 | (unsigned long long)bh->b_blocknr, |
237 | printk("device %pg blocksize: %d\n", bdev, | 238 | bh->b_state, bh->b_size, bdev, |
238 | 1 << bd_inode->i_blkbits); | 239 | 1 << bd_inode->i_blkbits); |
239 | } | 240 | } |
240 | out_unlock: | 241 | out_unlock: |
241 | spin_unlock(&bd_mapping->private_lock); | 242 | spin_unlock(&bd_mapping->private_lock); |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 041c27ea8de1..f74193da0e09 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
@@ -616,7 +616,8 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci, | |||
616 | capsnap->size); | 616 | capsnap->size); |
617 | 617 | ||
618 | spin_lock(&mdsc->snap_flush_lock); | 618 | spin_lock(&mdsc->snap_flush_lock); |
619 | list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); | 619 | if (list_empty(&ci->i_snap_flush_item)) |
620 | list_add_tail(&ci->i_snap_flush_item, &mdsc->snap_flush_list); | ||
620 | spin_unlock(&mdsc->snap_flush_lock); | 621 | spin_unlock(&mdsc->snap_flush_lock); |
621 | return 1; /* caller may want to ceph_flush_snaps */ | 622 | return 1; /* caller may want to ceph_flush_snaps */ |
622 | } | 623 | } |
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 13b01351dd1c..29c68c5d44d5 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c | |||
@@ -324,7 +324,7 @@ static struct dentry *failed_creating(struct dentry *dentry) | |||
324 | inode_unlock(d_inode(dentry->d_parent)); | 324 | inode_unlock(d_inode(dentry->d_parent)); |
325 | dput(dentry); | 325 | dput(dentry); |
326 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | 326 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); |
327 | return NULL; | 327 | return ERR_PTR(-ENOMEM); |
328 | } | 328 | } |
329 | 329 | ||
330 | static struct dentry *end_creating(struct dentry *dentry) | 330 | static struct dentry *end_creating(struct dentry *dentry) |
@@ -347,7 +347,7 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, | |||
347 | dentry = start_creating(name, parent); | 347 | dentry = start_creating(name, parent); |
348 | 348 | ||
349 | if (IS_ERR(dentry)) | 349 | if (IS_ERR(dentry)) |
350 | return NULL; | 350 | return dentry; |
351 | 351 | ||
352 | inode = debugfs_get_inode(dentry->d_sb); | 352 | inode = debugfs_get_inode(dentry->d_sb); |
353 | if (unlikely(!inode)) | 353 | if (unlikely(!inode)) |
@@ -386,7 +386,8 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, | |||
386 | * This function will return a pointer to a dentry if it succeeds. This | 386 | * This function will return a pointer to a dentry if it succeeds. This |
387 | * pointer must be passed to the debugfs_remove() function when the file is | 387 | * pointer must be passed to the debugfs_remove() function when the file is |
388 | * to be removed (no automatic cleanup happens if your module is unloaded, | 388 | * to be removed (no automatic cleanup happens if your module is unloaded, |
389 | * you are responsible here.) If an error occurs, %NULL will be returned. | 389 | * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be |
390 | * returned. | ||
390 | * | 391 | * |
391 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 392 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
392 | * returned. | 393 | * returned. |
@@ -464,7 +465,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe); | |||
464 | * This function will return a pointer to a dentry if it succeeds. This | 465 | * This function will return a pointer to a dentry if it succeeds. This |
465 | * pointer must be passed to the debugfs_remove() function when the file is | 466 | * pointer must be passed to the debugfs_remove() function when the file is |
466 | * to be removed (no automatic cleanup happens if your module is unloaded, | 467 | * to be removed (no automatic cleanup happens if your module is unloaded, |
467 | * you are responsible here.) If an error occurs, %NULL will be returned. | 468 | * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be |
469 | * returned. | ||
468 | * | 470 | * |
469 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 471 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
470 | * returned. | 472 | * returned. |
@@ -495,7 +497,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size); | |||
495 | * This function will return a pointer to a dentry if it succeeds. This | 497 | * This function will return a pointer to a dentry if it succeeds. This |
496 | * pointer must be passed to the debugfs_remove() function when the file is | 498 | * pointer must be passed to the debugfs_remove() function when the file is |
497 | * to be removed (no automatic cleanup happens if your module is unloaded, | 499 | * to be removed (no automatic cleanup happens if your module is unloaded, |
498 | * you are responsible here.) If an error occurs, %NULL will be returned. | 500 | * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be |
501 | * returned. | ||
499 | * | 502 | * |
500 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 503 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
501 | * returned. | 504 | * returned. |
@@ -506,7 +509,7 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) | |||
506 | struct inode *inode; | 509 | struct inode *inode; |
507 | 510 | ||
508 | if (IS_ERR(dentry)) | 511 | if (IS_ERR(dentry)) |
509 | return NULL; | 512 | return dentry; |
510 | 513 | ||
511 | inode = debugfs_get_inode(dentry->d_sb); | 514 | inode = debugfs_get_inode(dentry->d_sb); |
512 | if (unlikely(!inode)) | 515 | if (unlikely(!inode)) |
@@ -545,7 +548,7 @@ struct dentry *debugfs_create_automount(const char *name, | |||
545 | struct inode *inode; | 548 | struct inode *inode; |
546 | 549 | ||
547 | if (IS_ERR(dentry)) | 550 | if (IS_ERR(dentry)) |
548 | return NULL; | 551 | return dentry; |
549 | 552 | ||
550 | inode = debugfs_get_inode(dentry->d_sb); | 553 | inode = debugfs_get_inode(dentry->d_sb); |
551 | if (unlikely(!inode)) | 554 | if (unlikely(!inode)) |
@@ -581,8 +584,8 @@ EXPORT_SYMBOL(debugfs_create_automount); | |||
581 | * This function will return a pointer to a dentry if it succeeds. This | 584 | * This function will return a pointer to a dentry if it succeeds. This |
582 | * pointer must be passed to the debugfs_remove() function when the symbolic | 585 | * pointer must be passed to the debugfs_remove() function when the symbolic |
583 | * link is to be removed (no automatic cleanup happens if your module is | 586 | * link is to be removed (no automatic cleanup happens if your module is |
584 | * unloaded, you are responsible here.) If an error occurs, %NULL will be | 587 | * unloaded, you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) |
585 | * returned. | 588 | * will be returned. |
586 | * | 589 | * |
587 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 590 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
588 | * returned. | 591 | * returned. |
@@ -594,12 +597,12 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, | |||
594 | struct inode *inode; | 597 | struct inode *inode; |
595 | char *link = kstrdup(target, GFP_KERNEL); | 598 | char *link = kstrdup(target, GFP_KERNEL); |
596 | if (!link) | 599 | if (!link) |
597 | return NULL; | 600 | return ERR_PTR(-ENOMEM); |
598 | 601 | ||
599 | dentry = start_creating(name, parent); | 602 | dentry = start_creating(name, parent); |
600 | if (IS_ERR(dentry)) { | 603 | if (IS_ERR(dentry)) { |
601 | kfree(link); | 604 | kfree(link); |
602 | return NULL; | 605 | return dentry; |
603 | } | 606 | } |
604 | 607 | ||
605 | inode = debugfs_get_inode(dentry->d_sb); | 608 | inode = debugfs_get_inode(dentry->d_sb); |
@@ -787,6 +790,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, | |||
787 | struct dentry *dentry = NULL, *trap; | 790 | struct dentry *dentry = NULL, *trap; |
788 | struct name_snapshot old_name; | 791 | struct name_snapshot old_name; |
789 | 792 | ||
793 | if (IS_ERR(old_dir)) | ||
794 | return old_dir; | ||
795 | if (IS_ERR(new_dir)) | ||
796 | return new_dir; | ||
797 | if (IS_ERR_OR_NULL(old_dentry)) | ||
798 | return old_dentry; | ||
799 | |||
790 | trap = lock_rename(new_dir, old_dir); | 800 | trap = lock_rename(new_dir, old_dir); |
791 | /* Source or destination directories don't exist? */ | 801 | /* Source or destination directories don't exist? */ |
792 | if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) | 802 | if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) |
@@ -820,7 +830,9 @@ exit: | |||
820 | if (dentry && !IS_ERR(dentry)) | 830 | if (dentry && !IS_ERR(dentry)) |
821 | dput(dentry); | 831 | dput(dentry); |
822 | unlock_rename(new_dir, old_dir); | 832 | unlock_rename(new_dir, old_dir); |
823 | return NULL; | 833 | if (IS_ERR(dentry)) |
834 | return dentry; | ||
835 | return ERR_PTR(-EINVAL); | ||
824 | } | 836 | } |
825 | EXPORT_SYMBOL_GPL(debugfs_rename); | 837 | EXPORT_SYMBOL_GPL(debugfs_rename); |
826 | 838 | ||
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 712f00995390..5508baa11bb6 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c | |||
@@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
116 | goto out; | 116 | goto out; |
117 | } | 117 | } |
118 | 118 | ||
119 | ret = file_write_and_wait_range(file, start, end); | ||
120 | if (ret) | ||
121 | return ret; | ||
122 | |||
123 | if (!journal) { | 119 | if (!journal) { |
124 | struct writeback_control wbc = { | 120 | ret = __generic_file_fsync(file, start, end, datasync); |
125 | .sync_mode = WB_SYNC_ALL | ||
126 | }; | ||
127 | |||
128 | ret = ext4_write_inode(inode, &wbc); | ||
129 | if (!ret) | 121 | if (!ret) |
130 | ret = ext4_sync_parent(inode); | 122 | ret = ext4_sync_parent(inode); |
131 | if (test_opt(inode->i_sb, BARRIER)) | 123 | if (test_opt(inode->i_sb, BARRIER)) |
@@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
133 | goto out; | 125 | goto out; |
134 | } | 126 | } |
135 | 127 | ||
128 | ret = file_write_and_wait_range(file, start, end); | ||
129 | if (ret) | ||
130 | return ret; | ||
136 | /* | 131 | /* |
137 | * data=writeback,ordered: | 132 | * data=writeback,ordered: |
138 | * The caller's filemap_fdatawrite()/wait will sync the data. | 133 | * The caller's filemap_fdatawrite()/wait will sync the data. |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index a5e516a40e7a..809c0f2f9942 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -1742,7 +1742,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, | |||
1742 | req->in.h.nodeid = outarg->nodeid; | 1742 | req->in.h.nodeid = outarg->nodeid; |
1743 | req->in.numargs = 2; | 1743 | req->in.numargs = 2; |
1744 | req->in.argpages = 1; | 1744 | req->in.argpages = 1; |
1745 | req->page_descs[0].offset = offset; | ||
1746 | req->end = fuse_retrieve_end; | 1745 | req->end = fuse_retrieve_end; |
1747 | 1746 | ||
1748 | index = outarg->offset >> PAGE_SHIFT; | 1747 | index = outarg->offset >> PAGE_SHIFT; |
@@ -1757,6 +1756,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, | |||
1757 | 1756 | ||
1758 | this_num = min_t(unsigned, num, PAGE_SIZE - offset); | 1757 | this_num = min_t(unsigned, num, PAGE_SIZE - offset); |
1759 | req->pages[req->num_pages] = page; | 1758 | req->pages[req->num_pages] = page; |
1759 | req->page_descs[req->num_pages].offset = offset; | ||
1760 | req->page_descs[req->num_pages].length = this_num; | 1760 | req->page_descs[req->num_pages].length = this_num; |
1761 | req->num_pages++; | 1761 | req->num_pages++; |
1762 | 1762 | ||
@@ -2077,8 +2077,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, | |||
2077 | 2077 | ||
2078 | ret = fuse_dev_do_write(fud, &cs, len); | 2078 | ret = fuse_dev_do_write(fud, &cs, len); |
2079 | 2079 | ||
2080 | pipe_lock(pipe); | ||
2080 | for (idx = 0; idx < nbuf; idx++) | 2081 | for (idx = 0; idx < nbuf; idx++) |
2081 | pipe_buf_release(pipe, &bufs[idx]); | 2082 | pipe_buf_release(pipe, &bufs[idx]); |
2083 | pipe_unlock(pipe); | ||
2082 | 2084 | ||
2083 | out: | 2085 | out: |
2084 | kvfree(bufs); | 2086 | kvfree(bufs); |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ffaffe18352a..a59c16bd90ac 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1782,7 +1782,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req, | |||
1782 | spin_unlock(&fc->lock); | 1782 | spin_unlock(&fc->lock); |
1783 | 1783 | ||
1784 | dec_wb_stat(&bdi->wb, WB_WRITEBACK); | 1784 | dec_wb_stat(&bdi->wb, WB_WRITEBACK); |
1785 | dec_node_page_state(page, NR_WRITEBACK_TEMP); | 1785 | dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP); |
1786 | wb_writeout_inc(&bdi->wb); | 1786 | wb_writeout_inc(&bdi->wb); |
1787 | fuse_writepage_free(fc, new_req); | 1787 | fuse_writepage_free(fc, new_req); |
1788 | fuse_request_free(new_req); | 1788 | fuse_request_free(new_req); |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 76baaa6be393..c2d4099429be 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -628,6 +628,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns) | |||
628 | get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); | 628 | get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); |
629 | fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); | 629 | fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); |
630 | fc->user_ns = get_user_ns(user_ns); | 630 | fc->user_ns = get_user_ns(user_ns); |
631 | fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; | ||
631 | } | 632 | } |
632 | EXPORT_SYMBOL_GPL(fuse_conn_init); | 633 | EXPORT_SYMBOL_GPL(fuse_conn_init); |
633 | 634 | ||
@@ -1162,7 +1163,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
1162 | fc->user_id = d.user_id; | 1163 | fc->user_id = d.user_id; |
1163 | fc->group_id = d.group_id; | 1164 | fc->group_id = d.group_id; |
1164 | fc->max_read = max_t(unsigned, 4096, d.max_read); | 1165 | fc->max_read = max_t(unsigned, 4096, d.max_read); |
1165 | fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; | ||
1166 | 1166 | ||
1167 | /* Used by get_root_inode() */ | 1167 | /* Used by get_root_inode() */ |
1168 | sb->s_fs_info = fc; | 1168 | sb->s_fs_info = fc; |
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index f15b4c57c4bd..78510ab91835 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include "util.h" | 28 | #include "util.h" |
29 | #include "trans.h" | 29 | #include "trans.h" |
30 | #include "dir.h" | 30 | #include "dir.h" |
31 | #include "lops.h" | ||
32 | 31 | ||
33 | struct workqueue_struct *gfs2_freeze_wq; | 32 | struct workqueue_struct *gfs2_freeze_wq; |
34 | 33 | ||
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 5bfaf381921a..b8830fda51e8 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -733,7 +733,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, | |||
733 | lh->lh_crc = cpu_to_be32(crc); | 733 | lh->lh_crc = cpu_to_be32(crc); |
734 | 734 | ||
735 | gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr); | 735 | gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr); |
736 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags); | 736 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, op_flags); |
737 | log_flush_wait(sdp); | 737 | log_flush_wait(sdp); |
738 | } | 738 | } |
739 | 739 | ||
@@ -810,7 +810,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) | |||
810 | 810 | ||
811 | gfs2_ordered_write(sdp); | 811 | gfs2_ordered_write(sdp); |
812 | lops_before_commit(sdp, tr); | 812 | lops_before_commit(sdp, tr); |
813 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE); | 813 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, 0); |
814 | 814 | ||
815 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { | 815 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { |
816 | log_flush_wait(sdp); | 816 | log_flush_wait(sdp); |
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 94dcab655bc0..2295042bc625 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c | |||
@@ -17,9 +17,7 @@ | |||
17 | #include <linux/bio.h> | 17 | #include <linux/bio.h> |
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/list_sort.h> | 19 | #include <linux/list_sort.h> |
20 | #include <linux/blkdev.h> | ||
21 | 20 | ||
22 | #include "bmap.h" | ||
23 | #include "dir.h" | 21 | #include "dir.h" |
24 | #include "gfs2.h" | 22 | #include "gfs2.h" |
25 | #include "incore.h" | 23 | #include "incore.h" |
@@ -195,6 +193,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, | |||
195 | /** | 193 | /** |
196 | * gfs2_end_log_write - end of i/o to the log | 194 | * gfs2_end_log_write - end of i/o to the log |
197 | * @bio: The bio | 195 | * @bio: The bio |
196 | * @error: Status of i/o request | ||
198 | * | 197 | * |
199 | * Each bio_vec contains either data from the pagecache or data | 198 | * Each bio_vec contains either data from the pagecache or data |
200 | * relating to the log itself. Here we iterate over the bio_vec | 199 | * relating to the log itself. Here we iterate over the bio_vec |
@@ -231,19 +230,20 @@ static void gfs2_end_log_write(struct bio *bio) | |||
231 | /** | 230 | /** |
232 | * gfs2_log_submit_bio - Submit any pending log bio | 231 | * gfs2_log_submit_bio - Submit any pending log bio |
233 | * @biop: Address of the bio pointer | 232 | * @biop: Address of the bio pointer |
234 | * @opf: REQ_OP | op_flags | 233 | * @op: REQ_OP |
234 | * @op_flags: req_flag_bits | ||
235 | * | 235 | * |
236 | * Submit any pending part-built or full bio to the block device. If | 236 | * Submit any pending part-built or full bio to the block device. If |
237 | * there is no pending bio, then this is a no-op. | 237 | * there is no pending bio, then this is a no-op. |
238 | */ | 238 | */ |
239 | 239 | ||
240 | void gfs2_log_submit_bio(struct bio **biop, int opf) | 240 | void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags) |
241 | { | 241 | { |
242 | struct bio *bio = *biop; | 242 | struct bio *bio = *biop; |
243 | if (bio) { | 243 | if (bio) { |
244 | struct gfs2_sbd *sdp = bio->bi_private; | 244 | struct gfs2_sbd *sdp = bio->bi_private; |
245 | atomic_inc(&sdp->sd_log_in_flight); | 245 | atomic_inc(&sdp->sd_log_in_flight); |
246 | bio->bi_opf = opf; | 246 | bio_set_op_attrs(bio, op, op_flags); |
247 | submit_bio(bio); | 247 | submit_bio(bio); |
248 | *biop = NULL; | 248 | *biop = NULL; |
249 | } | 249 | } |
@@ -304,7 +304,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, | |||
304 | nblk >>= sdp->sd_fsb2bb_shift; | 304 | nblk >>= sdp->sd_fsb2bb_shift; |
305 | if (blkno == nblk && !flush) | 305 | if (blkno == nblk && !flush) |
306 | return bio; | 306 | return bio; |
307 | gfs2_log_submit_bio(biop, op); | 307 | gfs2_log_submit_bio(biop, op, 0); |
308 | } | 308 | } |
309 | 309 | ||
310 | *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); | 310 | *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); |
@@ -375,184 +375,6 @@ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page) | |||
375 | gfs2_log_bmap(sdp)); | 375 | gfs2_log_bmap(sdp)); |
376 | } | 376 | } |
377 | 377 | ||
378 | /** | ||
379 | * gfs2_end_log_read - end I/O callback for reads from the log | ||
380 | * @bio: The bio | ||
381 | * | ||
382 | * Simply unlock the pages in the bio. The main thread will wait on them and | ||
383 | * process them in order as necessary. | ||
384 | */ | ||
385 | |||
386 | static void gfs2_end_log_read(struct bio *bio) | ||
387 | { | ||
388 | struct page *page; | ||
389 | struct bio_vec *bvec; | ||
390 | int i; | ||
391 | |||
392 | bio_for_each_segment_all(bvec, bio, i) { | ||
393 | page = bvec->bv_page; | ||
394 | if (bio->bi_status) { | ||
395 | int err = blk_status_to_errno(bio->bi_status); | ||
396 | |||
397 | SetPageError(page); | ||
398 | mapping_set_error(page->mapping, err); | ||
399 | } | ||
400 | unlock_page(page); | ||
401 | } | ||
402 | |||
403 | bio_put(bio); | ||
404 | } | ||
405 | |||
406 | /** | ||
407 | * gfs2_jhead_pg_srch - Look for the journal head in a given page. | ||
408 | * @jd: The journal descriptor | ||
409 | * @page: The page to look in | ||
410 | * | ||
411 | * Returns: 1 if found, 0 otherwise. | ||
412 | */ | ||
413 | |||
414 | static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, | ||
415 | struct gfs2_log_header_host *head, | ||
416 | struct page *page) | ||
417 | { | ||
418 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); | ||
419 | struct gfs2_log_header_host uninitialized_var(lh); | ||
420 | void *kaddr = kmap_atomic(page); | ||
421 | unsigned int offset; | ||
422 | bool ret = false; | ||
423 | |||
424 | for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) { | ||
425 | if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) { | ||
426 | if (lh.lh_sequence > head->lh_sequence) | ||
427 | *head = lh; | ||
428 | else { | ||
429 | ret = true; | ||
430 | break; | ||
431 | } | ||
432 | } | ||
433 | } | ||
434 | kunmap_atomic(kaddr); | ||
435 | return ret; | ||
436 | } | ||
437 | |||
438 | /** | ||
439 | * gfs2_jhead_process_page - Search/cleanup a page | ||
440 | * @jd: The journal descriptor | ||
441 | * @index: Index of the page to look into | ||
442 | * @done: If set, perform only cleanup, else search and set if found. | ||
443 | * | ||
444 | * Find the page with 'index' in the journal's mapping. Search the page for | ||
445 | * the journal head if requested (cleanup == false). Release refs on the | ||
446 | * page so the page cache can reclaim it (put_page() twice). We grabbed a | ||
447 | * reference on this page two times, first when we did a find_or_create_page() | ||
448 | * to obtain the page to add it to the bio and second when we do a | ||
449 | * find_get_page() here to get the page to wait on while I/O on it is being | ||
450 | * completed. | ||
451 | * This function is also used to free up a page we might've grabbed but not | ||
452 | * used. Maybe we added it to a bio, but not submitted it for I/O. Or we | ||
453 | * submitted the I/O, but we already found the jhead so we only need to drop | ||
454 | * our references to the page. | ||
455 | */ | ||
456 | |||
457 | static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index, | ||
458 | struct gfs2_log_header_host *head, | ||
459 | bool *done) | ||
460 | { | ||
461 | struct page *page; | ||
462 | |||
463 | page = find_get_page(jd->jd_inode->i_mapping, index); | ||
464 | wait_on_page_locked(page); | ||
465 | |||
466 | if (PageError(page)) | ||
467 | *done = true; | ||
468 | |||
469 | if (!*done) | ||
470 | *done = gfs2_jhead_pg_srch(jd, head, page); | ||
471 | |||
472 | put_page(page); /* Once for find_get_page */ | ||
473 | put_page(page); /* Once more for find_or_create_page */ | ||
474 | } | ||
475 | |||
476 | /** | ||
477 | * gfs2_find_jhead - find the head of a log | ||
478 | * @jd: The journal descriptor | ||
479 | * @head: The log descriptor for the head of the log is returned here | ||
480 | * | ||
481 | * Do a search of a journal by reading it in large chunks using bios and find | ||
482 | * the valid log entry with the highest sequence number. (i.e. the log head) | ||
483 | * | ||
484 | * Returns: 0 on success, errno otherwise | ||
485 | */ | ||
486 | |||
487 | int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) | ||
488 | { | ||
489 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); | ||
490 | struct address_space *mapping = jd->jd_inode->i_mapping; | ||
491 | struct gfs2_journal_extent *je; | ||
492 | u32 block, read_idx = 0, submit_idx = 0, index = 0; | ||
493 | int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift; | ||
494 | int blocks_per_page = 1 << shift, sz, ret = 0; | ||
495 | struct bio *bio = NULL; | ||
496 | struct page *page; | ||
497 | bool done = false; | ||
498 | errseq_t since; | ||
499 | |||
500 | memset(head, 0, sizeof(*head)); | ||
501 | if (list_empty(&jd->extent_list)) | ||
502 | gfs2_map_journal_extents(sdp, jd); | ||
503 | |||
504 | since = filemap_sample_wb_err(mapping); | ||
505 | list_for_each_entry(je, &jd->extent_list, list) { | ||
506 | for (block = 0; block < je->blocks; block += blocks_per_page) { | ||
507 | index = (je->lblock + block) >> shift; | ||
508 | |||
509 | page = find_or_create_page(mapping, index, GFP_NOFS); | ||
510 | if (!page) { | ||
511 | ret = -ENOMEM; | ||
512 | done = true; | ||
513 | goto out; | ||
514 | } | ||
515 | |||
516 | if (bio) { | ||
517 | sz = bio_add_page(bio, page, PAGE_SIZE, 0); | ||
518 | if (sz == PAGE_SIZE) | ||
519 | goto page_added; | ||
520 | submit_idx = index; | ||
521 | submit_bio(bio); | ||
522 | bio = NULL; | ||
523 | } | ||
524 | |||
525 | bio = gfs2_log_alloc_bio(sdp, | ||
526 | je->dblock + (index << shift), | ||
527 | gfs2_end_log_read); | ||
528 | bio->bi_opf = REQ_OP_READ; | ||
529 | sz = bio_add_page(bio, page, PAGE_SIZE, 0); | ||
530 | gfs2_assert_warn(sdp, sz == PAGE_SIZE); | ||
531 | |||
532 | page_added: | ||
533 | if (submit_idx <= read_idx + BIO_MAX_PAGES) { | ||
534 | /* Keep at least one bio in flight */ | ||
535 | continue; | ||
536 | } | ||
537 | |||
538 | gfs2_jhead_process_page(jd, read_idx++, head, &done); | ||
539 | if (done) | ||
540 | goto out; /* found */ | ||
541 | } | ||
542 | } | ||
543 | |||
544 | out: | ||
545 | if (bio) | ||
546 | submit_bio(bio); | ||
547 | while (read_idx <= index) | ||
548 | gfs2_jhead_process_page(jd, read_idx++, head, &done); | ||
549 | |||
550 | if (!ret) | ||
551 | ret = filemap_check_wb_err(mapping, since); | ||
552 | |||
553 | return ret; | ||
554 | } | ||
555 | |||
556 | static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, | 378 | static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, |
557 | u32 ld_length, u32 ld_data1) | 379 | u32 ld_length, u32 ld_data1) |
558 | { | 380 | { |
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 331160fc568b..711c4d89c063 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h | |||
@@ -30,10 +30,8 @@ extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp); | |||
30 | extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, | 30 | extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, |
31 | unsigned size, unsigned offset, u64 blkno); | 31 | unsigned size, unsigned offset, u64 blkno); |
32 | extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); | 32 | extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); |
33 | extern void gfs2_log_submit_bio(struct bio **biop, int opf); | 33 | extern void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags); |
34 | extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); | 34 | extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); |
35 | extern int gfs2_find_jhead(struct gfs2_jdesc *jd, | ||
36 | struct gfs2_log_header_host *head); | ||
37 | 35 | ||
38 | static inline unsigned int buf_limit(struct gfs2_sbd *sdp) | 36 | static inline unsigned int buf_limit(struct gfs2_sbd *sdp) |
39 | { | 37 | { |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 1179763f6370..b041cb8ae383 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include "dir.h" | 41 | #include "dir.h" |
42 | #include "meta_io.h" | 42 | #include "meta_io.h" |
43 | #include "trace_gfs2.h" | 43 | #include "trace_gfs2.h" |
44 | #include "lops.h" | ||
45 | 44 | ||
46 | #define DO 0 | 45 | #define DO 0 |
47 | #define UNDO 1 | 46 | #define UNDO 1 |
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index 7389e445a7a7..2dac43065382 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c | |||
@@ -182,6 +182,129 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, | |||
182 | } | 182 | } |
183 | 183 | ||
184 | /** | 184 | /** |
185 | * find_good_lh - find a good log header | ||
186 | * @jd: the journal | ||
187 | * @blk: the segment to start searching from | ||
188 | * @lh: the log header to fill in | ||
189 | * @forward: if true search forward in the log, else search backward | ||
190 | * | ||
191 | * Call get_log_header() to get a log header for a segment, but if the | ||
192 | * segment is bad, either scan forward or backward until we find a good one. | ||
193 | * | ||
194 | * Returns: errno | ||
195 | */ | ||
196 | |||
197 | static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, | ||
198 | struct gfs2_log_header_host *head) | ||
199 | { | ||
200 | unsigned int orig_blk = *blk; | ||
201 | int error; | ||
202 | |||
203 | for (;;) { | ||
204 | error = get_log_header(jd, *blk, head); | ||
205 | if (error <= 0) | ||
206 | return error; | ||
207 | |||
208 | if (++*blk == jd->jd_blocks) | ||
209 | *blk = 0; | ||
210 | |||
211 | if (*blk == orig_blk) { | ||
212 | gfs2_consist_inode(GFS2_I(jd->jd_inode)); | ||
213 | return -EIO; | ||
214 | } | ||
215 | } | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * jhead_scan - make sure we've found the head of the log | ||
220 | * @jd: the journal | ||
221 | * @head: this is filled in with the log descriptor of the head | ||
222 | * | ||
223 | * At this point, seg and lh should be either the head of the log or just | ||
224 | * before. Scan forward until we find the head. | ||
225 | * | ||
226 | * Returns: errno | ||
227 | */ | ||
228 | |||
229 | static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) | ||
230 | { | ||
231 | unsigned int blk = head->lh_blkno; | ||
232 | struct gfs2_log_header_host lh; | ||
233 | int error; | ||
234 | |||
235 | for (;;) { | ||
236 | if (++blk == jd->jd_blocks) | ||
237 | blk = 0; | ||
238 | |||
239 | error = get_log_header(jd, blk, &lh); | ||
240 | if (error < 0) | ||
241 | return error; | ||
242 | if (error == 1) | ||
243 | continue; | ||
244 | |||
245 | if (lh.lh_sequence == head->lh_sequence) { | ||
246 | gfs2_consist_inode(GFS2_I(jd->jd_inode)); | ||
247 | return -EIO; | ||
248 | } | ||
249 | if (lh.lh_sequence < head->lh_sequence) | ||
250 | break; | ||
251 | |||
252 | *head = lh; | ||
253 | } | ||
254 | |||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | /** | ||
259 | * gfs2_find_jhead - find the head of a log | ||
260 | * @jd: the journal | ||
261 | * @head: the log descriptor for the head of the log is returned here | ||
262 | * | ||
263 | * Do a binary search of a journal and find the valid log entry with the | ||
264 | * highest sequence number. (i.e. the log head) | ||
265 | * | ||
266 | * Returns: errno | ||
267 | */ | ||
268 | |||
269 | int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) | ||
270 | { | ||
271 | struct gfs2_log_header_host lh_1, lh_m; | ||
272 | u32 blk_1, blk_2, blk_m; | ||
273 | int error; | ||
274 | |||
275 | blk_1 = 0; | ||
276 | blk_2 = jd->jd_blocks - 1; | ||
277 | |||
278 | for (;;) { | ||
279 | blk_m = (blk_1 + blk_2) / 2; | ||
280 | |||
281 | error = find_good_lh(jd, &blk_1, &lh_1); | ||
282 | if (error) | ||
283 | return error; | ||
284 | |||
285 | error = find_good_lh(jd, &blk_m, &lh_m); | ||
286 | if (error) | ||
287 | return error; | ||
288 | |||
289 | if (blk_1 == blk_m || blk_m == blk_2) | ||
290 | break; | ||
291 | |||
292 | if (lh_1.lh_sequence <= lh_m.lh_sequence) | ||
293 | blk_1 = blk_m; | ||
294 | else | ||
295 | blk_2 = blk_m; | ||
296 | } | ||
297 | |||
298 | error = jhead_scan(jd, &lh_1); | ||
299 | if (error) | ||
300 | return error; | ||
301 | |||
302 | *head = lh_1; | ||
303 | |||
304 | return error; | ||
305 | } | ||
306 | |||
307 | /** | ||
185 | * foreach_descriptor - go through the active part of the log | 308 | * foreach_descriptor - go through the active part of the log |
186 | * @jd: the journal | 309 | * @jd: the journal |
187 | * @start: the first log header in the active region | 310 | * @start: the first log header in the active region |
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h index 99575ab81202..11d81248be85 100644 --- a/fs/gfs2/recovery.h +++ b/fs/gfs2/recovery.h | |||
@@ -27,6 +27,8 @@ extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where) | |||
27 | extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where); | 27 | extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where); |
28 | extern void gfs2_revoke_clean(struct gfs2_jdesc *jd); | 28 | extern void gfs2_revoke_clean(struct gfs2_jdesc *jd); |
29 | 29 | ||
30 | extern int gfs2_find_jhead(struct gfs2_jdesc *jd, | ||
31 | struct gfs2_log_header_host *head); | ||
30 | extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); | 32 | extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); |
31 | extern void gfs2_recover_func(struct work_struct *work); | 33 | extern void gfs2_recover_func(struct work_struct *work); |
32 | extern int __get_log_header(struct gfs2_sbd *sdp, | 34 | extern int __get_log_header(struct gfs2_sbd *sdp, |
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index d4b11c903971..ca71163ff7cf 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include "util.h" | 45 | #include "util.h" |
46 | #include "sys.h" | 46 | #include "sys.h" |
47 | #include "xattr.h" | 47 | #include "xattr.h" |
48 | #include "lops.h" | ||
49 | 48 | ||
50 | #define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x) | 49 | #define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x) |
51 | 50 | ||
diff --git a/fs/inode.c b/fs/inode.c index 0cd47fe0dbe5..73432e64f874 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item, | |||
730 | return LRU_REMOVED; | 730 | return LRU_REMOVED; |
731 | } | 731 | } |
732 | 732 | ||
733 | /* | 733 | /* recently referenced inodes get one more pass */ |
734 | * Recently referenced inodes and inodes with many attached pages | 734 | if (inode->i_state & I_REFERENCED) { |
735 | * get one more pass. | ||
736 | */ | ||
737 | if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) { | ||
738 | inode->i_state &= ~I_REFERENCED; | 735 | inode->i_state &= ~I_REFERENCED; |
739 | spin_unlock(&inode->i_lock); | 736 | spin_unlock(&inode->i_lock); |
740 | return LRU_ROTATE; | 737 | return LRU_ROTATE; |
diff --git a/fs/namespace.c b/fs/namespace.c index a677b59efd74..678ef175d63a 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -2698,7 +2698,6 @@ static long exact_copy_from_user(void *to, const void __user * from, | |||
2698 | if (!access_ok(from, n)) | 2698 | if (!access_ok(from, n)) |
2699 | return n; | 2699 | return n; |
2700 | 2700 | ||
2701 | current->kernel_uaccess_faults_ok++; | ||
2702 | while (n) { | 2701 | while (n) { |
2703 | if (__get_user(c, f)) { | 2702 | if (__get_user(c, f)) { |
2704 | memset(t, 0, n); | 2703 | memset(t, 0, n); |
@@ -2708,7 +2707,6 @@ static long exact_copy_from_user(void *to, const void __user * from, | |||
2708 | f++; | 2707 | f++; |
2709 | n--; | 2708 | n--; |
2710 | } | 2709 | } |
2711 | current->kernel_uaccess_faults_ok--; | ||
2712 | return n; | 2710 | return n; |
2713 | } | 2711 | } |
2714 | 2712 | ||
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 3f23b6840547..bf34ddaa2ad7 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/keyctl.h> | 44 | #include <linux/keyctl.h> |
45 | #include <linux/key-type.h> | 45 | #include <linux/key-type.h> |
46 | #include <keys/user-type.h> | 46 | #include <keys/user-type.h> |
47 | #include <keys/request_key_auth-type.h> | ||
47 | #include <linux/module.h> | 48 | #include <linux/module.h> |
48 | 49 | ||
49 | #include "internal.h" | 50 | #include "internal.h" |
@@ -59,7 +60,7 @@ static struct key_type key_type_id_resolver_legacy; | |||
59 | struct idmap_legacy_upcalldata { | 60 | struct idmap_legacy_upcalldata { |
60 | struct rpc_pipe_msg pipe_msg; | 61 | struct rpc_pipe_msg pipe_msg; |
61 | struct idmap_msg idmap_msg; | 62 | struct idmap_msg idmap_msg; |
62 | struct key_construction *key_cons; | 63 | struct key *authkey; |
63 | struct idmap *idmap; | 64 | struct idmap *idmap; |
64 | }; | 65 | }; |
65 | 66 | ||
@@ -384,7 +385,7 @@ static const match_table_t nfs_idmap_tokens = { | |||
384 | { Opt_find_err, NULL } | 385 | { Opt_find_err, NULL } |
385 | }; | 386 | }; |
386 | 387 | ||
387 | static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *); | 388 | static int nfs_idmap_legacy_upcall(struct key *, void *); |
388 | static ssize_t idmap_pipe_downcall(struct file *, const char __user *, | 389 | static ssize_t idmap_pipe_downcall(struct file *, const char __user *, |
389 | size_t); | 390 | size_t); |
390 | static void idmap_release_pipe(struct inode *); | 391 | static void idmap_release_pipe(struct inode *); |
@@ -549,11 +550,12 @@ nfs_idmap_prepare_pipe_upcall(struct idmap *idmap, | |||
549 | static void | 550 | static void |
550 | nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret) | 551 | nfs_idmap_complete_pipe_upcall_locked(struct idmap *idmap, int ret) |
551 | { | 552 | { |
552 | struct key_construction *cons = idmap->idmap_upcall_data->key_cons; | 553 | struct key *authkey = idmap->idmap_upcall_data->authkey; |
553 | 554 | ||
554 | kfree(idmap->idmap_upcall_data); | 555 | kfree(idmap->idmap_upcall_data); |
555 | idmap->idmap_upcall_data = NULL; | 556 | idmap->idmap_upcall_data = NULL; |
556 | complete_request_key(cons, ret); | 557 | complete_request_key(authkey, ret); |
558 | key_put(authkey); | ||
557 | } | 559 | } |
558 | 560 | ||
559 | static void | 561 | static void |
@@ -563,15 +565,14 @@ nfs_idmap_abort_pipe_upcall(struct idmap *idmap, int ret) | |||
563 | nfs_idmap_complete_pipe_upcall_locked(idmap, ret); | 565 | nfs_idmap_complete_pipe_upcall_locked(idmap, ret); |
564 | } | 566 | } |
565 | 567 | ||
566 | static int nfs_idmap_legacy_upcall(struct key_construction *cons, | 568 | static int nfs_idmap_legacy_upcall(struct key *authkey, void *aux) |
567 | const char *op, | ||
568 | void *aux) | ||
569 | { | 569 | { |
570 | struct idmap_legacy_upcalldata *data; | 570 | struct idmap_legacy_upcalldata *data; |
571 | struct request_key_auth *rka = get_request_key_auth(authkey); | ||
571 | struct rpc_pipe_msg *msg; | 572 | struct rpc_pipe_msg *msg; |
572 | struct idmap_msg *im; | 573 | struct idmap_msg *im; |
573 | struct idmap *idmap = (struct idmap *)aux; | 574 | struct idmap *idmap = (struct idmap *)aux; |
574 | struct key *key = cons->key; | 575 | struct key *key = rka->target_key; |
575 | int ret = -ENOKEY; | 576 | int ret = -ENOKEY; |
576 | 577 | ||
577 | if (!aux) | 578 | if (!aux) |
@@ -586,7 +587,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, | |||
586 | msg = &data->pipe_msg; | 587 | msg = &data->pipe_msg; |
587 | im = &data->idmap_msg; | 588 | im = &data->idmap_msg; |
588 | data->idmap = idmap; | 589 | data->idmap = idmap; |
589 | data->key_cons = cons; | 590 | data->authkey = key_get(authkey); |
590 | 591 | ||
591 | ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); | 592 | ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); |
592 | if (ret < 0) | 593 | if (ret < 0) |
@@ -604,7 +605,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, | |||
604 | out2: | 605 | out2: |
605 | kfree(data); | 606 | kfree(data); |
606 | out1: | 607 | out1: |
607 | complete_request_key(cons, ret); | 608 | complete_request_key(authkey, ret); |
608 | return ret; | 609 | return ret; |
609 | } | 610 | } |
610 | 611 | ||
@@ -651,9 +652,10 @@ out: | |||
651 | static ssize_t | 652 | static ssize_t |
652 | idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | 653 | idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) |
653 | { | 654 | { |
655 | struct request_key_auth *rka; | ||
654 | struct rpc_inode *rpci = RPC_I(file_inode(filp)); | 656 | struct rpc_inode *rpci = RPC_I(file_inode(filp)); |
655 | struct idmap *idmap = (struct idmap *)rpci->private; | 657 | struct idmap *idmap = (struct idmap *)rpci->private; |
656 | struct key_construction *cons; | 658 | struct key *authkey; |
657 | struct idmap_msg im; | 659 | struct idmap_msg im; |
658 | size_t namelen_in; | 660 | size_t namelen_in; |
659 | int ret = -ENOKEY; | 661 | int ret = -ENOKEY; |
@@ -665,7 +667,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
665 | if (idmap->idmap_upcall_data == NULL) | 667 | if (idmap->idmap_upcall_data == NULL) |
666 | goto out_noupcall; | 668 | goto out_noupcall; |
667 | 669 | ||
668 | cons = idmap->idmap_upcall_data->key_cons; | 670 | authkey = idmap->idmap_upcall_data->authkey; |
671 | rka = get_request_key_auth(authkey); | ||
669 | 672 | ||
670 | if (mlen != sizeof(im)) { | 673 | if (mlen != sizeof(im)) { |
671 | ret = -ENOSPC; | 674 | ret = -ENOSPC; |
@@ -690,9 +693,9 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) | |||
690 | 693 | ||
691 | ret = nfs_idmap_read_and_verify_message(&im, | 694 | ret = nfs_idmap_read_and_verify_message(&im, |
692 | &idmap->idmap_upcall_data->idmap_msg, | 695 | &idmap->idmap_upcall_data->idmap_msg, |
693 | cons->key, cons->authkey); | 696 | rka->target_key, authkey); |
694 | if (ret >= 0) { | 697 | if (ret >= 0) { |
695 | key_set_timeout(cons->key, nfs_idmap_cache_timeout); | 698 | key_set_timeout(rka->target_key, nfs_idmap_cache_timeout); |
696 | ret = mlen; | 699 | ret = mlen; |
697 | } | 700 | } |
698 | 701 | ||
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index f12cb31a41e5..d09c9f878141 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -238,9 +238,9 @@ out: | |||
238 | } | 238 | } |
239 | 239 | ||
240 | /* A writeback failed: mark the page as bad, and invalidate the page cache */ | 240 | /* A writeback failed: mark the page as bad, and invalidate the page cache */ |
241 | static void nfs_set_pageerror(struct page *page) | 241 | static void nfs_set_pageerror(struct address_space *mapping) |
242 | { | 242 | { |
243 | nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); | 243 | nfs_zap_mapping(mapping->host, mapping); |
244 | } | 244 | } |
245 | 245 | ||
246 | /* | 246 | /* |
@@ -994,7 +994,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr) | |||
994 | nfs_list_remove_request(req); | 994 | nfs_list_remove_request(req); |
995 | if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && | 995 | if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && |
996 | (hdr->good_bytes < bytes)) { | 996 | (hdr->good_bytes < bytes)) { |
997 | nfs_set_pageerror(req->wb_page); | 997 | nfs_set_pageerror(page_file_mapping(req->wb_page)); |
998 | nfs_context_set_write_error(req->wb_context, hdr->error); | 998 | nfs_context_set_write_error(req->wb_context, hdr->error); |
999 | goto remove_req; | 999 | goto remove_req; |
1000 | } | 1000 | } |
@@ -1348,7 +1348,8 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
1348 | unsigned int offset, unsigned int count) | 1348 | unsigned int offset, unsigned int count) |
1349 | { | 1349 | { |
1350 | struct nfs_open_context *ctx = nfs_file_open_context(file); | 1350 | struct nfs_open_context *ctx = nfs_file_open_context(file); |
1351 | struct inode *inode = page_file_mapping(page)->host; | 1351 | struct address_space *mapping = page_file_mapping(page); |
1352 | struct inode *inode = mapping->host; | ||
1352 | int status = 0; | 1353 | int status = 0; |
1353 | 1354 | ||
1354 | nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); | 1355 | nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); |
@@ -1366,7 +1367,7 @@ int nfs_updatepage(struct file *file, struct page *page, | |||
1366 | 1367 | ||
1367 | status = nfs_writepage_setup(ctx, page, offset, count); | 1368 | status = nfs_writepage_setup(ctx, page, offset, count); |
1368 | if (status < 0) | 1369 | if (status < 0) |
1369 | nfs_set_pageerror(page); | 1370 | nfs_set_pageerror(mapping); |
1370 | else | 1371 | else |
1371 | __set_page_dirty_nobuffers(page); | 1372 | __set_page_dirty_nobuffers(page); |
1372 | out: | 1373 | out: |
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index b33f9785b756..72a7681f4046 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c | |||
@@ -1239,8 +1239,8 @@ static __net_init int nfsd_init_net(struct net *net) | |||
1239 | retval = nfsd_idmap_init(net); | 1239 | retval = nfsd_idmap_init(net); |
1240 | if (retval) | 1240 | if (retval) |
1241 | goto out_idmap_error; | 1241 | goto out_idmap_error; |
1242 | nn->nfsd4_lease = 45; /* default lease time */ | 1242 | nn->nfsd4_lease = 90; /* default lease time */ |
1243 | nn->nfsd4_grace = 45; | 1243 | nn->nfsd4_grace = 90; |
1244 | nn->somebody_reclaimed = false; | 1244 | nn->somebody_reclaimed = false; |
1245 | nn->clverifier_counter = prandom_u32(); | 1245 | nn->clverifier_counter = prandom_u32(); |
1246 | nn->clientid_counter = prandom_u32(); | 1246 | nn->clientid_counter = prandom_u32(); |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 9824e32b2f23..7dc98e14655d 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -557,9 +557,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst, | |||
557 | loff_t cloned; | 557 | loff_t cloned; |
558 | 558 | ||
559 | cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); | 559 | cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); |
560 | if (cloned < 0) | ||
561 | return nfserrno(cloned); | ||
560 | if (count && cloned != count) | 562 | if (count && cloned != count) |
561 | cloned = -EINVAL; | 563 | return nfserrno(-EINVAL); |
562 | return nfserrno(cloned < 0 ? cloned : 0); | 564 | return 0; |
563 | } | 565 | } |
564 | 566 | ||
565 | ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, | 567 | ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, |
diff --git a/fs/proc/base.c b/fs/proc/base.c index 633a63462573..f5ed9512d193 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -1086,10 +1086,6 @@ static int __set_oom_adj(struct file *file, int oom_adj, bool legacy) | |||
1086 | 1086 | ||
1087 | task_lock(p); | 1087 | task_lock(p); |
1088 | if (!p->vfork_done && process_shares_mm(p, mm)) { | 1088 | if (!p->vfork_done && process_shares_mm(p, mm)) { |
1089 | pr_info("updating oom_score_adj for %d (%s) from %d to %d because it shares mm with %d (%s). Report if this is unexpected.\n", | ||
1090 | task_pid_nr(p), p->comm, | ||
1091 | p->signal->oom_score_adj, oom_adj, | ||
1092 | task_pid_nr(task), task->comm); | ||
1093 | p->signal->oom_score_adj = oom_adj; | 1089 | p->signal->oom_score_adj = oom_adj; |
1094 | if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE)) | 1090 | if (!legacy && has_capability_noaudit(current, CAP_SYS_RESOURCE)) |
1095 | p->signal->oom_score_adj_min = (short)oom_adj; | 1091 | p->signal->oom_score_adj_min = (short)oom_adj; |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f0ec9edab2f3..85b0ef890b28 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -423,7 +423,7 @@ struct mem_size_stats { | |||
423 | }; | 423 | }; |
424 | 424 | ||
425 | static void smaps_account(struct mem_size_stats *mss, struct page *page, | 425 | static void smaps_account(struct mem_size_stats *mss, struct page *page, |
426 | bool compound, bool young, bool dirty) | 426 | bool compound, bool young, bool dirty, bool locked) |
427 | { | 427 | { |
428 | int i, nr = compound ? 1 << compound_order(page) : 1; | 428 | int i, nr = compound ? 1 << compound_order(page) : 1; |
429 | unsigned long size = nr * PAGE_SIZE; | 429 | unsigned long size = nr * PAGE_SIZE; |
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, | |||
450 | else | 450 | else |
451 | mss->private_clean += size; | 451 | mss->private_clean += size; |
452 | mss->pss += (u64)size << PSS_SHIFT; | 452 | mss->pss += (u64)size << PSS_SHIFT; |
453 | if (locked) | ||
454 | mss->pss_locked += (u64)size << PSS_SHIFT; | ||
453 | return; | 455 | return; |
454 | } | 456 | } |
455 | 457 | ||
456 | for (i = 0; i < nr; i++, page++) { | 458 | for (i = 0; i < nr; i++, page++) { |
457 | int mapcount = page_mapcount(page); | 459 | int mapcount = page_mapcount(page); |
460 | unsigned long pss = (PAGE_SIZE << PSS_SHIFT); | ||
458 | 461 | ||
459 | if (mapcount >= 2) { | 462 | if (mapcount >= 2) { |
460 | if (dirty || PageDirty(page)) | 463 | if (dirty || PageDirty(page)) |
461 | mss->shared_dirty += PAGE_SIZE; | 464 | mss->shared_dirty += PAGE_SIZE; |
462 | else | 465 | else |
463 | mss->shared_clean += PAGE_SIZE; | 466 | mss->shared_clean += PAGE_SIZE; |
464 | mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; | 467 | mss->pss += pss / mapcount; |
468 | if (locked) | ||
469 | mss->pss_locked += pss / mapcount; | ||
465 | } else { | 470 | } else { |
466 | if (dirty || PageDirty(page)) | 471 | if (dirty || PageDirty(page)) |
467 | mss->private_dirty += PAGE_SIZE; | 472 | mss->private_dirty += PAGE_SIZE; |
468 | else | 473 | else |
469 | mss->private_clean += PAGE_SIZE; | 474 | mss->private_clean += PAGE_SIZE; |
470 | mss->pss += PAGE_SIZE << PSS_SHIFT; | 475 | mss->pss += pss; |
476 | if (locked) | ||
477 | mss->pss_locked += pss; | ||
471 | } | 478 | } |
472 | } | 479 | } |
473 | } | 480 | } |
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, | |||
490 | { | 497 | { |
491 | struct mem_size_stats *mss = walk->private; | 498 | struct mem_size_stats *mss = walk->private; |
492 | struct vm_area_struct *vma = walk->vma; | 499 | struct vm_area_struct *vma = walk->vma; |
500 | bool locked = !!(vma->vm_flags & VM_LOCKED); | ||
493 | struct page *page = NULL; | 501 | struct page *page = NULL; |
494 | 502 | ||
495 | if (pte_present(*pte)) { | 503 | if (pte_present(*pte)) { |
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, | |||
532 | if (!page) | 540 | if (!page) |
533 | return; | 541 | return; |
534 | 542 | ||
535 | smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte)); | 543 | smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked); |
536 | } | 544 | } |
537 | 545 | ||
538 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 546 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, | |||
541 | { | 549 | { |
542 | struct mem_size_stats *mss = walk->private; | 550 | struct mem_size_stats *mss = walk->private; |
543 | struct vm_area_struct *vma = walk->vma; | 551 | struct vm_area_struct *vma = walk->vma; |
552 | bool locked = !!(vma->vm_flags & VM_LOCKED); | ||
544 | struct page *page; | 553 | struct page *page; |
545 | 554 | ||
546 | /* FOLL_DUMP will return -EFAULT on huge zero page */ | 555 | /* FOLL_DUMP will return -EFAULT on huge zero page */ |
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, | |||
555 | /* pass */; | 564 | /* pass */; |
556 | else | 565 | else |
557 | VM_BUG_ON_PAGE(1, page); | 566 | VM_BUG_ON_PAGE(1, page); |
558 | smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); | 567 | smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked); |
559 | } | 568 | } |
560 | #else | 569 | #else |
561 | static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, | 570 | static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, |
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma, | |||
737 | } | 746 | } |
738 | } | 747 | } |
739 | #endif | 748 | #endif |
740 | |||
741 | /* mmap_sem is held in m_start */ | 749 | /* mmap_sem is held in m_start */ |
742 | walk_page_vma(vma, &smaps_walk); | 750 | walk_page_vma(vma, &smaps_walk); |
743 | if (vma->vm_flags & VM_LOCKED) | ||
744 | mss->pss_locked += mss->pss; | ||
745 | } | 751 | } |
746 | 752 | ||
747 | #define SEQ_PUT_DEC(str, val) \ | 753 | #define SEQ_PUT_DEC(str, val) \ |
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index 1c8eecfe52b8..6acf1bfa0bfe 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c | |||
@@ -768,18 +768,23 @@ xrep_findroot_block( | |||
768 | if (!uuid_equal(&btblock->bb_u.s.bb_uuid, | 768 | if (!uuid_equal(&btblock->bb_u.s.bb_uuid, |
769 | &mp->m_sb.sb_meta_uuid)) | 769 | &mp->m_sb.sb_meta_uuid)) |
770 | goto out; | 770 | goto out; |
771 | /* | ||
772 | * Read verifiers can reference b_ops, so we set the pointer | ||
773 | * here. If the verifier fails we'll reset the buffer state | ||
774 | * to what it was before we touched the buffer. | ||
775 | */ | ||
776 | bp->b_ops = fab->buf_ops; | ||
771 | fab->buf_ops->verify_read(bp); | 777 | fab->buf_ops->verify_read(bp); |
772 | if (bp->b_error) { | 778 | if (bp->b_error) { |
779 | bp->b_ops = NULL; | ||
773 | bp->b_error = 0; | 780 | bp->b_error = 0; |
774 | goto out; | 781 | goto out; |
775 | } | 782 | } |
776 | 783 | ||
777 | /* | 784 | /* |
778 | * Some read verifiers will (re)set b_ops, so we must be | 785 | * Some read verifiers will (re)set b_ops, so we must be |
779 | * careful not to blow away any such assignment. | 786 | * careful not to change b_ops after running the verifier. |
780 | */ | 787 | */ |
781 | if (!bp->b_ops) | ||
782 | bp->b_ops = fab->buf_ops; | ||
783 | } | 788 | } |
784 | 789 | ||
785 | /* | 790 | /* |
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 338b9d9984e0..d9048bcea49c 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -449,6 +449,7 @@ xfs_map_blocks( | |||
449 | } | 449 | } |
450 | 450 | ||
451 | wpc->imap = imap; | 451 | wpc->imap = imap; |
452 | xfs_trim_extent_eof(&wpc->imap, ip); | ||
452 | trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap); | 453 | trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap); |
453 | return 0; | 454 | return 0; |
454 | allocate_blocks: | 455 | allocate_blocks: |
@@ -459,6 +460,7 @@ allocate_blocks: | |||
459 | ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF || | 460 | ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF || |
460 | imap.br_startoff + imap.br_blockcount <= cow_fsb); | 461 | imap.br_startoff + imap.br_blockcount <= cow_fsb); |
461 | wpc->imap = imap; | 462 | wpc->imap = imap; |
463 | xfs_trim_extent_eof(&wpc->imap, ip); | ||
462 | trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap); | 464 | trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap); |
463 | return 0; | 465 | return 0; |
464 | } | 466 | } |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index eedc5e0156ff..4f5f2ff3f70f 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -776,10 +776,26 @@ _xfs_buf_read( | |||
776 | } | 776 | } |
777 | 777 | ||
778 | /* | 778 | /* |
779 | * Set buffer ops on an unchecked buffer and validate it, if possible. | ||
780 | * | ||
779 | * If the caller passed in an ops structure and the buffer doesn't have ops | 781 | * If the caller passed in an ops structure and the buffer doesn't have ops |
780 | * assigned, set the ops and use them to verify the contents. If the contents | 782 | * assigned, set the ops and use them to verify the contents. If the contents |
781 | * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no | 783 | * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no |
782 | * recorded errors and is already in XBF_DONE state. | 784 | * recorded errors and is already in XBF_DONE state. |
785 | * | ||
786 | * Under normal operations, every in-core buffer must have buffer ops assigned | ||
787 | * to them when the buffer is read in from disk so that we can validate the | ||
788 | * metadata. | ||
789 | * | ||
790 | * However, there are two scenarios where one can encounter in-core buffers | ||
791 | * that don't have buffer ops. The first is during log recovery of buffers on | ||
792 | * a V4 filesystem, though these buffers are purged at the end of recovery. | ||
793 | * | ||
794 | * The other is online repair, which tries to match arbitrary metadata blocks | ||
795 | * with btree types in order to find the root. If online repair doesn't match | ||
796 | * the buffer with /any/ btree type, the buffer remains in memory in DONE state | ||
797 | * with no ops, and a subsequent read_buf call from elsewhere will not set the | ||
798 | * ops. This function helps us fix this situation. | ||
783 | */ | 799 | */ |
784 | int | 800 | int |
785 | xfs_buf_ensure_ops( | 801 | xfs_buf_ensure_ops( |
@@ -1536,8 +1552,7 @@ __xfs_buf_submit( | |||
1536 | xfs_buf_ioerror(bp, -EIO); | 1552 | xfs_buf_ioerror(bp, -EIO); |
1537 | bp->b_flags &= ~XBF_DONE; | 1553 | bp->b_flags &= ~XBF_DONE; |
1538 | xfs_buf_stale(bp); | 1554 | xfs_buf_stale(bp); |
1539 | if (bp->b_flags & XBF_ASYNC) | 1555 | xfs_buf_ioend(bp); |
1540 | xfs_buf_ioend(bp); | ||
1541 | return -EIO; | 1556 | return -EIO; |
1542 | } | 1557 | } |
1543 | 1558 | ||
diff --git a/include/uapi/asm-generic/shmparam.h b/include/asm-generic/shmparam.h index 8b78c0ba08b1..8b78c0ba08b1 100644 --- a/include/uapi/asm-generic/shmparam.h +++ b/include/asm-generic/shmparam.h | |||
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h index b53be41929be..04f7ac345984 100644 --- a/include/dt-bindings/clock/imx8mq-clock.h +++ b/include/dt-bindings/clock/imx8mq-clock.h | |||
@@ -350,7 +350,7 @@ | |||
350 | #define IMX8MQ_CLK_VPU_G2_ROOT 241 | 350 | #define IMX8MQ_CLK_VPU_G2_ROOT 241 |
351 | 351 | ||
352 | /* SCCG PLL GATE */ | 352 | /* SCCG PLL GATE */ |
353 | #define IMX8MQ_SYS1_PLL_OUT 232 | 353 | #define IMX8MQ_SYS1_PLL_OUT 242 |
354 | #define IMX8MQ_SYS2_PLL_OUT 243 | 354 | #define IMX8MQ_SYS2_PLL_OUT 243 |
355 | #define IMX8MQ_SYS3_PLL_OUT 244 | 355 | #define IMX8MQ_SYS3_PLL_OUT 244 |
356 | #define IMX8MQ_DRAM_PLL_OUT 245 | 356 | #define IMX8MQ_DRAM_PLL_OUT 245 |
@@ -372,24 +372,24 @@ | |||
372 | /* txesc clock */ | 372 | /* txesc clock */ |
373 | #define IMX8MQ_CLK_DSI_IPG_DIV 256 | 373 | #define IMX8MQ_CLK_DSI_IPG_DIV 256 |
374 | 374 | ||
375 | #define IMX8MQ_CLK_TMU_ROOT 265 | 375 | #define IMX8MQ_CLK_TMU_ROOT 257 |
376 | 376 | ||
377 | /* Display root clocks */ | 377 | /* Display root clocks */ |
378 | #define IMX8MQ_CLK_DISP_AXI_ROOT 266 | 378 | #define IMX8MQ_CLK_DISP_AXI_ROOT 258 |
379 | #define IMX8MQ_CLK_DISP_APB_ROOT 267 | 379 | #define IMX8MQ_CLK_DISP_APB_ROOT 259 |
380 | #define IMX8MQ_CLK_DISP_RTRM_ROOT 268 | 380 | #define IMX8MQ_CLK_DISP_RTRM_ROOT 260 |
381 | 381 | ||
382 | #define IMX8MQ_CLK_OCOTP_ROOT 269 | 382 | #define IMX8MQ_CLK_OCOTP_ROOT 261 |
383 | 383 | ||
384 | #define IMX8MQ_CLK_DRAM_ALT_ROOT 270 | 384 | #define IMX8MQ_CLK_DRAM_ALT_ROOT 262 |
385 | #define IMX8MQ_CLK_DRAM_CORE 271 | 385 | #define IMX8MQ_CLK_DRAM_CORE 263 |
386 | 386 | ||
387 | #define IMX8MQ_CLK_MU_ROOT 272 | 387 | #define IMX8MQ_CLK_MU_ROOT 264 |
388 | #define IMX8MQ_VIDEO2_PLL_OUT 273 | 388 | #define IMX8MQ_VIDEO2_PLL_OUT 265 |
389 | 389 | ||
390 | #define IMX8MQ_CLK_CLKO2 274 | 390 | #define IMX8MQ_CLK_CLKO2 266 |
391 | 391 | ||
392 | #define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 275 | 392 | #define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 267 |
393 | 393 | ||
394 | #define IMX8MQ_CLK_END 276 | 394 | #define IMX8MQ_CLK_END 268 |
395 | #endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ | 395 | #endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ |
diff --git a/include/keys/request_key_auth-type.h b/include/keys/request_key_auth-type.h new file mode 100644 index 000000000000..a726dd3f1dc6 --- /dev/null +++ b/include/keys/request_key_auth-type.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* request_key authorisation token key type | ||
2 | * | ||
3 | * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _KEYS_REQUEST_KEY_AUTH_TYPE_H | ||
13 | #define _KEYS_REQUEST_KEY_AUTH_TYPE_H | ||
14 | |||
15 | #include <linux/key.h> | ||
16 | |||
17 | /* | ||
18 | * Authorisation record for request_key(). | ||
19 | */ | ||
20 | struct request_key_auth { | ||
21 | struct key *target_key; | ||
22 | struct key *dest_keyring; | ||
23 | const struct cred *cred; | ||
24 | void *callout_info; | ||
25 | size_t callout_len; | ||
26 | pid_t pid; | ||
27 | char op[8]; | ||
28 | } __randomize_layout; | ||
29 | |||
30 | static inline struct request_key_auth *get_request_key_auth(const struct key *key) | ||
31 | { | ||
32 | return key->payload.data[0]; | ||
33 | } | ||
34 | |||
35 | |||
36 | #endif /* _KEYS_REQUEST_KEY_AUTH_TYPE_H */ | ||
diff --git a/include/keys/user-type.h b/include/keys/user-type.h index e098cbe27db5..12babe991594 100644 --- a/include/keys/user-type.h +++ b/include/keys/user-type.h | |||
@@ -31,7 +31,7 @@ | |||
31 | struct user_key_payload { | 31 | struct user_key_payload { |
32 | struct rcu_head rcu; /* RCU destructor */ | 32 | struct rcu_head rcu; /* RCU destructor */ |
33 | unsigned short datalen; /* length of this data */ | 33 | unsigned short datalen; /* length of this data */ |
34 | char data[0]; /* actual data */ | 34 | char data[0] __aligned(__alignof__(u64)); /* actual data */ |
35 | }; | 35 | }; |
36 | 36 | ||
37 | extern struct key_type key_type_user; | 37 | extern struct key_type key_type_user; |
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 4f31f96bbfab..c36c86f1ec9a 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h | |||
@@ -100,7 +100,7 @@ enum vgic_irq_config { | |||
100 | }; | 100 | }; |
101 | 101 | ||
102 | struct vgic_irq { | 102 | struct vgic_irq { |
103 | spinlock_t irq_lock; /* Protects the content of the struct */ | 103 | raw_spinlock_t irq_lock; /* Protects the content of the struct */ |
104 | struct list_head lpi_list; /* Used to link all LPIs together */ | 104 | struct list_head lpi_list; /* Used to link all LPIs together */ |
105 | struct list_head ap_list; | 105 | struct list_head ap_list; |
106 | 106 | ||
@@ -256,7 +256,7 @@ struct vgic_dist { | |||
256 | u64 propbaser; | 256 | u64 propbaser; |
257 | 257 | ||
258 | /* Protects the lpi_list and the count value below. */ | 258 | /* Protects the lpi_list and the count value below. */ |
259 | spinlock_t lpi_list_lock; | 259 | raw_spinlock_t lpi_list_lock; |
260 | struct list_head lpi_list_head; | 260 | struct list_head lpi_list_head; |
261 | int lpi_list_count; | 261 | int lpi_list_count; |
262 | 262 | ||
@@ -307,7 +307,7 @@ struct vgic_cpu { | |||
307 | unsigned int used_lrs; | 307 | unsigned int used_lrs; |
308 | struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; | 308 | struct vgic_irq private_irqs[VGIC_NR_PRIVATE_IRQS]; |
309 | 309 | ||
310 | spinlock_t ap_list_lock; /* Protects the ap_list */ | 310 | raw_spinlock_t ap_list_lock; /* Protects the ap_list */ |
311 | 311 | ||
312 | /* | 312 | /* |
313 | * List of IRQs that this VCPU should consider because they are either | 313 | * List of IRQs that this VCPU should consider because they are either |
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 8804753805ac..7bb2d8de9f30 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
@@ -116,7 +116,13 @@ extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); | |||
116 | 116 | ||
117 | static inline sector_t blk_rq_trace_sector(struct request *rq) | 117 | static inline sector_t blk_rq_trace_sector(struct request *rq) |
118 | { | 118 | { |
119 | return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq); | 119 | /* |
120 | * Tracing should ignore starting sector for passthrough requests and | ||
121 | * requests where starting sector didn't get set. | ||
122 | */ | ||
123 | if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1) | ||
124 | return 0; | ||
125 | return blk_rq_pos(rq); | ||
120 | } | 126 | } |
121 | 127 | ||
122 | static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) | 128 | static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) |
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 19f32b0c29af..6b318efd8a74 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h | |||
@@ -34,6 +34,7 @@ | |||
34 | #ifndef __has_attribute | 34 | #ifndef __has_attribute |
35 | # define __has_attribute(x) __GCC4_has_attribute_##x | 35 | # define __has_attribute(x) __GCC4_has_attribute_##x |
36 | # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) | 36 | # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) |
37 | # define __GCC4_has_attribute___copy__ 0 | ||
37 | # define __GCC4_has_attribute___designated_init__ 0 | 38 | # define __GCC4_has_attribute___designated_init__ 0 |
38 | # define __GCC4_has_attribute___externally_visible__ 1 | 39 | # define __GCC4_has_attribute___externally_visible__ 1 |
39 | # define __GCC4_has_attribute___noclone__ 1 | 40 | # define __GCC4_has_attribute___noclone__ 1 |
@@ -101,6 +102,19 @@ | |||
101 | #define __attribute_const__ __attribute__((__const__)) | 102 | #define __attribute_const__ __attribute__((__const__)) |
102 | 103 | ||
103 | /* | 104 | /* |
105 | * Optional: only supported since gcc >= 9 | ||
106 | * Optional: not supported by clang | ||
107 | * Optional: not supported by icc | ||
108 | * | ||
109 | * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute | ||
110 | */ | ||
111 | #if __has_attribute(__copy__) | ||
112 | # define __copy(symbol) __attribute__((__copy__(symbol))) | ||
113 | #else | ||
114 | # define __copy(symbol) | ||
115 | #endif | ||
116 | |||
117 | /* | ||
104 | * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' | 118 | * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' |
105 | * attribute warnings entirely and for good") for more information. | 119 | * attribute warnings entirely and for good") for more information. |
106 | * | 120 | * |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 45ff763fba76..28604a8d0aa9 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -1198,8 +1198,6 @@ static inline bool efi_enabled(int feature) | |||
1198 | extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); | 1198 | extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); |
1199 | 1199 | ||
1200 | extern bool efi_is_table_address(unsigned long phys_addr); | 1200 | extern bool efi_is_table_address(unsigned long phys_addr); |
1201 | |||
1202 | extern int efi_apply_persistent_mem_reservations(void); | ||
1203 | #else | 1201 | #else |
1204 | static inline bool efi_enabled(int feature) | 1202 | static inline bool efi_enabled(int feature) |
1205 | { | 1203 | { |
@@ -1218,11 +1216,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr) | |||
1218 | { | 1216 | { |
1219 | return false; | 1217 | return false; |
1220 | } | 1218 | } |
1221 | |||
1222 | static inline int efi_apply_persistent_mem_reservations(void) | ||
1223 | { | ||
1224 | return 0; | ||
1225 | } | ||
1226 | #endif | 1219 | #endif |
1227 | 1220 | ||
1228 | extern int efi_status_to_err(efi_status_t status); | 1221 | extern int efi_status_to_err(efi_status_t status); |
diff --git a/include/linux/filter.h b/include/linux/filter.h index ad106d845b22..e532fcc6e4b5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb) | |||
591 | return qdisc_skb_cb(skb)->data; | 591 | return qdisc_skb_cb(skb)->data; |
592 | } | 592 | } |
593 | 593 | ||
594 | static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | 594 | static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, |
595 | struct sk_buff *skb) | 595 | struct sk_buff *skb) |
596 | { | 596 | { |
597 | u8 *cb_data = bpf_skb_cb(skb); | 597 | u8 *cb_data = bpf_skb_cb(skb); |
598 | u8 cb_saved[BPF_SKB_CB_LEN]; | 598 | u8 cb_saved[BPF_SKB_CB_LEN]; |
@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | |||
611 | return res; | 611 | return res; |
612 | } | 612 | } |
613 | 613 | ||
614 | static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | ||
615 | struct sk_buff *skb) | ||
616 | { | ||
617 | u32 res; | ||
618 | |||
619 | preempt_disable(); | ||
620 | res = __bpf_prog_run_save_cb(prog, skb); | ||
621 | preempt_enable(); | ||
622 | return res; | ||
623 | } | ||
624 | |||
614 | static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, | 625 | static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, |
615 | struct sk_buff *skb) | 626 | struct sk_buff *skb) |
616 | { | 627 | { |
617 | u8 *cb_data = bpf_skb_cb(skb); | 628 | u8 *cb_data = bpf_skb_cb(skb); |
629 | u32 res; | ||
618 | 630 | ||
619 | if (unlikely(prog->cb_access)) | 631 | if (unlikely(prog->cb_access)) |
620 | memset(cb_data, 0, BPF_SKB_CB_LEN); | 632 | memset(cb_data, 0, BPF_SKB_CB_LEN); |
621 | 633 | ||
622 | return BPF_PROG_RUN(prog, skb); | 634 | preempt_disable(); |
635 | res = BPF_PROG_RUN(prog, skb); | ||
636 | preempt_enable(); | ||
637 | return res; | ||
623 | } | 638 | } |
624 | 639 | ||
625 | static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, | 640 | static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, |
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h index 8663f216c563..2d6100edf204 100644 --- a/include/linux/hid-debug.h +++ b/include/linux/hid-debug.h | |||
@@ -24,7 +24,10 @@ | |||
24 | 24 | ||
25 | #ifdef CONFIG_DEBUG_FS | 25 | #ifdef CONFIG_DEBUG_FS |
26 | 26 | ||
27 | #include <linux/kfifo.h> | ||
28 | |||
27 | #define HID_DEBUG_BUFSIZE 512 | 29 | #define HID_DEBUG_BUFSIZE 512 |
30 | #define HID_DEBUG_FIFOSIZE 512 | ||
28 | 31 | ||
29 | void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); | 32 | void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); |
30 | void hid_dump_report(struct hid_device *, int , u8 *, int); | 33 | void hid_dump_report(struct hid_device *, int , u8 *, int); |
@@ -37,11 +40,8 @@ void hid_debug_init(void); | |||
37 | void hid_debug_exit(void); | 40 | void hid_debug_exit(void); |
38 | void hid_debug_event(struct hid_device *, char *); | 41 | void hid_debug_event(struct hid_device *, char *); |
39 | 42 | ||
40 | |||
41 | struct hid_debug_list { | 43 | struct hid_debug_list { |
42 | char *hid_debug_buf; | 44 | DECLARE_KFIFO_PTR(hid_debug_fifo, char); |
43 | int head; | ||
44 | int tail; | ||
45 | struct fasync_struct *fasync; | 45 | struct fasync_struct *fasync; |
46 | struct hid_device *hdev; | 46 | struct hid_device *hdev; |
47 | struct list_head node; | 47 | struct list_head node; |
@@ -64,4 +64,3 @@ struct hid_debug_list { | |||
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | #endif | 66 | #endif |
67 | |||
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 071b4cbdf010..c848a7cc502e 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -319,7 +319,7 @@ | |||
319 | #define GITS_TYPER_PLPIS (1UL << 0) | 319 | #define GITS_TYPER_PLPIS (1UL << 0) |
320 | #define GITS_TYPER_VLPIS (1UL << 1) | 320 | #define GITS_TYPER_VLPIS (1UL << 1) |
321 | #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 | 321 | #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 |
322 | #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1) | 322 | #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1) |
323 | #define GITS_TYPER_IDBITS_SHIFT 8 | 323 | #define GITS_TYPER_IDBITS_SHIFT 8 |
324 | #define GITS_TYPER_DEVBITS_SHIFT 13 | 324 | #define GITS_TYPER_DEVBITS_SHIFT 13 |
325 | #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) | 325 | #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) |
diff --git a/include/linux/key-type.h b/include/linux/key-type.h index bc9af551fc83..e49d1de0614e 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h | |||
@@ -21,15 +21,6 @@ struct kernel_pkey_query; | |||
21 | struct kernel_pkey_params; | 21 | struct kernel_pkey_params; |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * key under-construction record | ||
25 | * - passed to the request_key actor if supplied | ||
26 | */ | ||
27 | struct key_construction { | ||
28 | struct key *key; /* key being constructed */ | ||
29 | struct key *authkey;/* authorisation for key being constructed */ | ||
30 | }; | ||
31 | |||
32 | /* | ||
33 | * Pre-parsed payload, used by key add, update and instantiate. | 24 | * Pre-parsed payload, used by key add, update and instantiate. |
34 | * | 25 | * |
35 | * This struct will be cleared and data and datalen will be set with the data | 26 | * This struct will be cleared and data and datalen will be set with the data |
@@ -50,8 +41,7 @@ struct key_preparsed_payload { | |||
50 | time64_t expiry; /* Expiry time of key */ | 41 | time64_t expiry; /* Expiry time of key */ |
51 | } __randomize_layout; | 42 | } __randomize_layout; |
52 | 43 | ||
53 | typedef int (*request_key_actor_t)(struct key_construction *key, | 44 | typedef int (*request_key_actor_t)(struct key *auth_key, void *aux); |
54 | const char *op, void *aux); | ||
55 | 45 | ||
56 | /* | 46 | /* |
57 | * Preparsed matching criterion. | 47 | * Preparsed matching criterion. |
@@ -181,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key, | |||
181 | const void *data, | 171 | const void *data, |
182 | size_t datalen, | 172 | size_t datalen, |
183 | struct key *keyring, | 173 | struct key *keyring, |
184 | struct key *instkey); | 174 | struct key *authkey); |
185 | extern int key_reject_and_link(struct key *key, | 175 | extern int key_reject_and_link(struct key *key, |
186 | unsigned timeout, | 176 | unsigned timeout, |
187 | unsigned error, | 177 | unsigned error, |
188 | struct key *keyring, | 178 | struct key *keyring, |
189 | struct key *instkey); | 179 | struct key *authkey); |
190 | extern void complete_request_key(struct key_construction *cons, int error); | 180 | extern void complete_request_key(struct key *authkey, int error); |
191 | 181 | ||
192 | static inline int key_negate_and_link(struct key *key, | 182 | static inline int key_negate_and_link(struct key *key, |
193 | unsigned timeout, | 183 | unsigned timeout, |
194 | struct key *keyring, | 184 | struct key *keyring, |
195 | struct key *instkey) | 185 | struct key *authkey) |
196 | { | 186 | { |
197 | return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey); | 187 | return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey); |
198 | } | 188 | } |
199 | 189 | ||
200 | extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); | 190 | extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 64c41cf45590..859b55b66db2 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -29,9 +29,6 @@ extern unsigned long max_pfn; | |||
29 | */ | 29 | */ |
30 | extern unsigned long long max_possible_pfn; | 30 | extern unsigned long long max_possible_pfn; |
31 | 31 | ||
32 | #define INIT_MEMBLOCK_REGIONS 128 | ||
33 | #define INIT_PHYSMEM_REGIONS 4 | ||
34 | |||
35 | /** | 32 | /** |
36 | * enum memblock_flags - definition of memory region attributes | 33 | * enum memblock_flags - definition of memory region attributes |
37 | * @MEMBLOCK_NONE: no special request | 34 | * @MEMBLOCK_NONE: no special request |
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index de7377815b6b..8ef330027b13 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
@@ -308,6 +308,7 @@ struct mmc_card { | |||
308 | unsigned int nr_parts; | 308 | unsigned int nr_parts; |
309 | 309 | ||
310 | unsigned int bouncesz; /* Bounce buffer size */ | 310 | unsigned int bouncesz; /* Bounce buffer size */ |
311 | struct workqueue_struct *complete_wq; /* Private workqueue */ | ||
311 | }; | 312 | }; |
312 | 313 | ||
313 | static inline bool mmc_large_sector(struct mmc_card *card) | 314 | static inline bool mmc_large_sector(struct mmc_card *card) |
diff --git a/include/linux/module.h b/include/linux/module.h index 8fa38d3e7538..f5bc4c046461 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -129,13 +129,13 @@ extern void cleanup_module(void); | |||
129 | #define module_init(initfn) \ | 129 | #define module_init(initfn) \ |
130 | static inline initcall_t __maybe_unused __inittest(void) \ | 130 | static inline initcall_t __maybe_unused __inittest(void) \ |
131 | { return initfn; } \ | 131 | { return initfn; } \ |
132 | int init_module(void) __attribute__((alias(#initfn))); | 132 | int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); |
133 | 133 | ||
134 | /* This is only required if you want to be unloadable. */ | 134 | /* This is only required if you want to be unloadable. */ |
135 | #define module_exit(exitfn) \ | 135 | #define module_exit(exitfn) \ |
136 | static inline exitcall_t __maybe_unused __exittest(void) \ | 136 | static inline exitcall_t __maybe_unused __exittest(void) \ |
137 | { return exitfn; } \ | 137 | { return exitfn; } \ |
138 | void cleanup_module(void) __attribute__((alias(#exitfn))); | 138 | void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); |
139 | 139 | ||
140 | #endif | 140 | #endif |
141 | 141 | ||
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 2b2a6dce1630..4c76fe2c8488 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
@@ -11,6 +11,8 @@ | |||
11 | #define _LINUX_NETDEV_FEATURES_H | 11 | #define _LINUX_NETDEV_FEATURES_H |
12 | 12 | ||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/bitops.h> | ||
15 | #include <asm/byteorder.h> | ||
14 | 16 | ||
15 | typedef u64 netdev_features_t; | 17 | typedef u64 netdev_features_t; |
16 | 18 | ||
@@ -154,8 +156,26 @@ enum { | |||
154 | #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) | 156 | #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) |
155 | #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) | 157 | #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) |
156 | 158 | ||
157 | #define for_each_netdev_feature(mask_addr, bit) \ | 159 | /* Finds the next feature with the highest number of the range of start till 0. |
158 | for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) | 160 | */ |
161 | static inline int find_next_netdev_feature(u64 feature, unsigned long start) | ||
162 | { | ||
163 | /* like BITMAP_LAST_WORD_MASK() for u64 | ||
164 | * this sets the most significant 64 - start to 0. | ||
165 | */ | ||
166 | feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); | ||
167 | |||
168 | return fls64(feature) - 1; | ||
169 | } | ||
170 | |||
171 | /* This goes for the MSB to the LSB through the set feature bits, | ||
172 | * mask_addr should be a u64 and bit an int | ||
173 | */ | ||
174 | #define for_each_netdev_feature(mask_addr, bit) \ | ||
175 | for ((bit) = find_next_netdev_feature((mask_addr), \ | ||
176 | NETDEV_FEATURE_COUNT); \ | ||
177 | (bit) >= 0; \ | ||
178 | (bit) = find_next_netdev_feature((mask_addr), (bit) - 1)) | ||
159 | 179 | ||
160 | /* Features valid for ethtool to change */ | 180 | /* Features valid for ethtool to change */ |
161 | /* = all defined minus driver/device-class-related */ | 181 | /* = all defined minus driver/device-class-related */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1377d085ef99..86dbb3e29139 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1483,6 +1483,7 @@ struct net_device_ops { | |||
1483 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook | 1483 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook |
1484 | * @IFF_FAILOVER: device is a failover master device | 1484 | * @IFF_FAILOVER: device is a failover master device |
1485 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device | 1485 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device |
1486 | * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device | ||
1486 | */ | 1487 | */ |
1487 | enum netdev_priv_flags { | 1488 | enum netdev_priv_flags { |
1488 | IFF_802_1Q_VLAN = 1<<0, | 1489 | IFF_802_1Q_VLAN = 1<<0, |
@@ -1514,6 +1515,7 @@ enum netdev_priv_flags { | |||
1514 | IFF_NO_RX_HANDLER = 1<<26, | 1515 | IFF_NO_RX_HANDLER = 1<<26, |
1515 | IFF_FAILOVER = 1<<27, | 1516 | IFF_FAILOVER = 1<<27, |
1516 | IFF_FAILOVER_SLAVE = 1<<28, | 1517 | IFF_FAILOVER_SLAVE = 1<<28, |
1518 | IFF_L3MDEV_RX_HANDLER = 1<<29, | ||
1517 | }; | 1519 | }; |
1518 | 1520 | ||
1519 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | 1521 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN |
@@ -1544,6 +1546,7 @@ enum netdev_priv_flags { | |||
1544 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER | 1546 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER |
1545 | #define IFF_FAILOVER IFF_FAILOVER | 1547 | #define IFF_FAILOVER IFF_FAILOVER |
1546 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE | 1548 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE |
1549 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER | ||
1547 | 1550 | ||
1548 | /** | 1551 | /** |
1549 | * struct net_device - The DEVICE structure. | 1552 | * struct net_device - The DEVICE structure. |
@@ -4549,6 +4552,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev) | |||
4549 | return dev->priv_flags & IFF_SUPP_NOFCS; | 4552 | return dev->priv_flags & IFF_SUPP_NOFCS; |
4550 | } | 4553 | } |
4551 | 4554 | ||
4555 | static inline bool netif_has_l3_rx_handler(const struct net_device *dev) | ||
4556 | { | ||
4557 | return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; | ||
4558 | } | ||
4559 | |||
4552 | static inline bool netif_is_l3_master(const struct net_device *dev) | 4560 | static inline bool netif_is_l3_master(const struct net_device *dev) |
4553 | { | 4561 | { |
4554 | return dev->priv_flags & IFF_L3MDEV_MASTER; | 4562 | return dev->priv_flags & IFF_L3MDEV_MASTER; |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1d5c551a5add..e1a051724f7e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -447,6 +447,11 @@ struct pmu { | |||
447 | * Filter events for PMU-specific reasons. | 447 | * Filter events for PMU-specific reasons. |
448 | */ | 448 | */ |
449 | int (*filter_match) (struct perf_event *event); /* optional */ | 449 | int (*filter_match) (struct perf_event *event); /* optional */ |
450 | |||
451 | /* | ||
452 | * Check period value for PERF_EVENT_IOC_PERIOD ioctl. | ||
453 | */ | ||
454 | int (*check_period) (struct perf_event *event, u64 value); /* optional */ | ||
450 | }; | 455 | }; |
451 | 456 | ||
452 | enum perf_addr_filter_action_t { | 457 | enum perf_addr_filter_action_t { |
diff --git a/include/linux/phy.h b/include/linux/phy.h index ef20aeea10cc..333b56d8f746 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -674,26 +674,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, | |||
674 | size_t phy_speeds(unsigned int *speeds, size_t size, | 674 | size_t phy_speeds(unsigned int *speeds, size_t size, |
675 | unsigned long *mask); | 675 | unsigned long *mask); |
676 | 676 | ||
677 | static inline bool __phy_is_started(struct phy_device *phydev) | ||
678 | { | ||
679 | WARN_ON(!mutex_is_locked(&phydev->lock)); | ||
680 | |||
681 | return phydev->state >= PHY_UP; | ||
682 | } | ||
683 | |||
684 | /** | 677 | /** |
685 | * phy_is_started - Convenience function to check whether PHY is started | 678 | * phy_is_started - Convenience function to check whether PHY is started |
686 | * @phydev: The phy_device struct | 679 | * @phydev: The phy_device struct |
687 | */ | 680 | */ |
688 | static inline bool phy_is_started(struct phy_device *phydev) | 681 | static inline bool phy_is_started(struct phy_device *phydev) |
689 | { | 682 | { |
690 | bool started; | 683 | return phydev->state >= PHY_UP; |
691 | |||
692 | mutex_lock(&phydev->lock); | ||
693 | started = __phy_is_started(phydev); | ||
694 | mutex_unlock(&phydev->lock); | ||
695 | |||
696 | return started; | ||
697 | } | 684 | } |
698 | 685 | ||
699 | void phy_resolve_aneg_linkmode(struct phy_device *phydev); | 686 | void phy_resolve_aneg_linkmode(struct phy_device *phydev); |
@@ -1005,6 +992,14 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev) | |||
1005 | { | 992 | { |
1006 | return 0; | 993 | return 0; |
1007 | } | 994 | } |
995 | static inline int genphy_no_ack_interrupt(struct phy_device *phydev) | ||
996 | { | ||
997 | return 0; | ||
998 | } | ||
999 | static inline int genphy_no_config_intr(struct phy_device *phydev) | ||
1000 | { | ||
1001 | return 0; | ||
1002 | } | ||
1008 | int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, | 1003 | int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, |
1009 | u16 regnum); | 1004 | u16 regnum); |
1010 | int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, | 1005 | int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, |
diff --git a/include/linux/sched.h b/include/linux/sched.h index bba3afb4e9bf..f9b43c989577 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -739,12 +739,6 @@ struct task_struct { | |||
739 | unsigned use_memdelay:1; | 739 | unsigned use_memdelay:1; |
740 | #endif | 740 | #endif |
741 | 741 | ||
742 | /* | ||
743 | * May usercopy functions fault on kernel addresses? | ||
744 | * This is not just a single bit because this can potentially nest. | ||
745 | */ | ||
746 | unsigned int kernel_uaccess_faults_ok; | ||
747 | |||
748 | unsigned long atomic_flags; /* Flags requiring atomic access. */ | 742 | unsigned long atomic_flags; /* Flags requiring atomic access. */ |
749 | 743 | ||
750 | struct restart_block restart_block; | 744 | struct restart_block restart_block; |
diff --git a/include/linux/signal.h b/include/linux/signal.h index cc7e2c1cd444..9702016734b1 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -392,7 +392,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig); | |||
392 | #endif | 392 | #endif |
393 | 393 | ||
394 | #define siginmask(sig, mask) \ | 394 | #define siginmask(sig, mask) \ |
395 | ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) | 395 | ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) |
396 | 396 | ||
397 | #define SIG_KERNEL_ONLY_MASK (\ | 397 | #define SIG_KERNEL_ONLY_MASK (\ |
398 | rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) | 398 | rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 95d25b010a25..bdb9563c64a0 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -2434,7 +2434,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb, | |||
2434 | 2434 | ||
2435 | if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) | 2435 | if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) |
2436 | skb_set_transport_header(skb, keys.control.thoff); | 2436 | skb_set_transport_header(skb, keys.control.thoff); |
2437 | else | 2437 | else if (offset_hint >= 0) |
2438 | skb_set_transport_header(skb, offset_hint); | 2438 | skb_set_transport_header(skb, offset_hint); |
2439 | } | 2439 | } |
2440 | 2440 | ||
@@ -4212,6 +4212,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb) | |||
4212 | return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; | 4212 | return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; |
4213 | } | 4213 | } |
4214 | 4214 | ||
4215 | static inline bool skb_is_gso_tcp(const struct sk_buff *skb) | ||
4216 | { | ||
4217 | return skb_is_gso(skb) && | ||
4218 | skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); | ||
4219 | } | ||
4220 | |||
4215 | static inline void skb_gso_reset(struct sk_buff *skb) | 4221 | static inline void skb_gso_reset(struct sk_buff *skb) |
4216 | { | 4222 | { |
4217 | skb_shinfo(skb)->gso_size = 0; | 4223 | skb_shinfo(skb)->gso_size = 0; |
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 7ddfc65586b0..4335bd771ce5 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
@@ -184,6 +184,7 @@ struct plat_stmmacenet_data { | |||
184 | struct clk *pclk; | 184 | struct clk *pclk; |
185 | struct clk *clk_ptp_ref; | 185 | struct clk *clk_ptp_ref; |
186 | unsigned int clk_ptp_rate; | 186 | unsigned int clk_ptp_rate; |
187 | unsigned int clk_ref_rate; | ||
187 | struct reset_control *stmmac_rst; | 188 | struct reset_control *stmmac_rst; |
188 | struct stmmac_axi *axi; | 189 | struct stmmac_axi *axi; |
189 | int has_gmac4; | 190 | int has_gmac4; |
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index cb462f9ab7dd..e0348cb0a1dd 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
@@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, | |||
57 | 57 | ||
58 | if (!skb_partial_csum_set(skb, start, off)) | 58 | if (!skb_partial_csum_set(skb, start, off)) |
59 | return -EINVAL; | 59 | return -EINVAL; |
60 | } else { | ||
61 | /* gso packets without NEEDS_CSUM do not set transport_offset. | ||
62 | * probe and drop if does not match one of the above types. | ||
63 | */ | ||
64 | if (gso_type && skb->network_header) { | ||
65 | if (!skb->protocol) | ||
66 | virtio_net_hdr_set_proto(skb, hdr); | ||
67 | retry: | ||
68 | skb_probe_transport_header(skb, -1); | ||
69 | if (!skb_transport_header_was_set(skb)) { | ||
70 | /* UFO does not specify ipv4 or 6: try both */ | ||
71 | if (gso_type & SKB_GSO_UDP && | ||
72 | skb->protocol == htons(ETH_P_IP)) { | ||
73 | skb->protocol = htons(ETH_P_IPV6); | ||
74 | goto retry; | ||
75 | } | ||
76 | return -EINVAL; | ||
77 | } | ||
78 | } | ||
60 | } | 79 | } |
61 | 80 | ||
62 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 81 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 00b5e7825508..74ff688568a0 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h | |||
@@ -39,6 +39,7 @@ struct inet_peer { | |||
39 | 39 | ||
40 | u32 metrics[RTAX_MAX]; | 40 | u32 metrics[RTAX_MAX]; |
41 | u32 rate_tokens; /* rate limiting for ICMP */ | 41 | u32 rate_tokens; /* rate limiting for ICMP */ |
42 | u32 n_redirects; | ||
42 | unsigned long rate_last; | 43 | unsigned long rate_last; |
43 | /* | 44 | /* |
44 | * Once inet_peer is queued for deletion (refcnt == 0), following field | 45 | * Once inet_peer is queued for deletion (refcnt == 0), following field |
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 78fa0ac4613c..5175fd63cd82 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h | |||
@@ -153,7 +153,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto) | |||
153 | 153 | ||
154 | if (netif_is_l3_slave(skb->dev)) | 154 | if (netif_is_l3_slave(skb->dev)) |
155 | master = netdev_master_upper_dev_get_rcu(skb->dev); | 155 | master = netdev_master_upper_dev_get_rcu(skb->dev); |
156 | else if (netif_is_l3_master(skb->dev)) | 156 | else if (netif_is_l3_master(skb->dev) || |
157 | netif_has_l3_rx_handler(skb->dev)) | ||
157 | master = skb->dev; | 158 | master = skb->dev; |
158 | 159 | ||
159 | if (master && master->l3mdev_ops->l3mdev_l3_rcv) | 160 | if (master && master->l3mdev_ops->l3mdev_l3_rcv) |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 841835a387e1..b4984bbbe157 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -469,9 +469,7 @@ struct nft_set_binding { | |||
469 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | 469 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, |
470 | struct nft_set_binding *binding); | 470 | struct nft_set_binding *binding); |
471 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, | 471 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, |
472 | struct nft_set_binding *binding); | 472 | struct nft_set_binding *binding, bool commit); |
473 | void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
474 | struct nft_set_binding *binding); | ||
475 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); | 473 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); |
476 | 474 | ||
477 | /** | 475 | /** |
@@ -721,6 +719,13 @@ struct nft_expr_type { | |||
721 | #define NFT_EXPR_STATEFUL 0x1 | 719 | #define NFT_EXPR_STATEFUL 0x1 |
722 | #define NFT_EXPR_GC 0x2 | 720 | #define NFT_EXPR_GC 0x2 |
723 | 721 | ||
722 | enum nft_trans_phase { | ||
723 | NFT_TRANS_PREPARE, | ||
724 | NFT_TRANS_ABORT, | ||
725 | NFT_TRANS_COMMIT, | ||
726 | NFT_TRANS_RELEASE | ||
727 | }; | ||
728 | |||
724 | /** | 729 | /** |
725 | * struct nft_expr_ops - nf_tables expression operations | 730 | * struct nft_expr_ops - nf_tables expression operations |
726 | * | 731 | * |
@@ -750,7 +755,8 @@ struct nft_expr_ops { | |||
750 | void (*activate)(const struct nft_ctx *ctx, | 755 | void (*activate)(const struct nft_ctx *ctx, |
751 | const struct nft_expr *expr); | 756 | const struct nft_expr *expr); |
752 | void (*deactivate)(const struct nft_ctx *ctx, | 757 | void (*deactivate)(const struct nft_ctx *ctx, |
753 | const struct nft_expr *expr); | 758 | const struct nft_expr *expr, |
759 | enum nft_trans_phase phase); | ||
754 | void (*destroy)(const struct nft_ctx *ctx, | 760 | void (*destroy)(const struct nft_ctx *ctx, |
755 | const struct nft_expr *expr); | 761 | const struct nft_expr *expr); |
756 | void (*destroy_clone)(const struct nft_ctx *ctx, | 762 | void (*destroy_clone)(const struct nft_ctx *ctx, |
@@ -1323,12 +1329,15 @@ struct nft_trans_rule { | |||
1323 | struct nft_trans_set { | 1329 | struct nft_trans_set { |
1324 | struct nft_set *set; | 1330 | struct nft_set *set; |
1325 | u32 set_id; | 1331 | u32 set_id; |
1332 | bool bound; | ||
1326 | }; | 1333 | }; |
1327 | 1334 | ||
1328 | #define nft_trans_set(trans) \ | 1335 | #define nft_trans_set(trans) \ |
1329 | (((struct nft_trans_set *)trans->data)->set) | 1336 | (((struct nft_trans_set *)trans->data)->set) |
1330 | #define nft_trans_set_id(trans) \ | 1337 | #define nft_trans_set_id(trans) \ |
1331 | (((struct nft_trans_set *)trans->data)->set_id) | 1338 | (((struct nft_trans_set *)trans->data)->set_id) |
1339 | #define nft_trans_set_bound(trans) \ | ||
1340 | (((struct nft_trans_set *)trans->data)->bound) | ||
1332 | 1341 | ||
1333 | struct nft_trans_chain { | 1342 | struct nft_trans_chain { |
1334 | bool update; | 1343 | bool update; |
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h index b669fe6dbc3b..98f31c7ea23d 100644 --- a/include/net/phonet/pep.h +++ b/include/net/phonet/pep.h | |||
@@ -63,10 +63,11 @@ struct pnpipehdr { | |||
63 | u8 state_after_reset; /* reset request */ | 63 | u8 state_after_reset; /* reset request */ |
64 | u8 error_code; /* any response */ | 64 | u8 error_code; /* any response */ |
65 | u8 pep_type; /* status indication */ | 65 | u8 pep_type; /* status indication */ |
66 | u8 data[1]; | 66 | u8 data0; /* anything else */ |
67 | }; | 67 | }; |
68 | u8 data[]; | ||
68 | }; | 69 | }; |
69 | #define other_pep_type data[1] | 70 | #define other_pep_type data[0] |
70 | 71 | ||
71 | static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb) | 72 | static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb) |
72 | { | 73 | { |
diff --git a/include/net/sock.h b/include/net/sock.h index 2b229f7be8eb..f43f935cb113 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1277,7 +1277,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk) | |||
1277 | percpu_counter_inc(sk->sk_prot->sockets_allocated); | 1277 | percpu_counter_inc(sk->sk_prot->sockets_allocated); |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | static inline int | 1280 | static inline u64 |
1281 | sk_sockets_allocated_read_positive(struct sock *sk) | 1281 | sk_sockets_allocated_read_positive(struct sock *sk) |
1282 | { | 1282 | { |
1283 | return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); | 1283 | return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 7298a53b9702..85386becbaea 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -853,7 +853,7 @@ static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols) | |||
853 | xfrm_pol_put(pols[i]); | 853 | xfrm_pol_put(pols[i]); |
854 | } | 854 | } |
855 | 855 | ||
856 | void __xfrm_state_destroy(struct xfrm_state *); | 856 | void __xfrm_state_destroy(struct xfrm_state *, bool); |
857 | 857 | ||
858 | static inline void __xfrm_state_put(struct xfrm_state *x) | 858 | static inline void __xfrm_state_put(struct xfrm_state *x) |
859 | { | 859 | { |
@@ -863,7 +863,13 @@ static inline void __xfrm_state_put(struct xfrm_state *x) | |||
863 | static inline void xfrm_state_put(struct xfrm_state *x) | 863 | static inline void xfrm_state_put(struct xfrm_state *x) |
864 | { | 864 | { |
865 | if (refcount_dec_and_test(&x->refcnt)) | 865 | if (refcount_dec_and_test(&x->refcnt)) |
866 | __xfrm_state_destroy(x); | 866 | __xfrm_state_destroy(x, false); |
867 | } | ||
868 | |||
869 | static inline void xfrm_state_put_sync(struct xfrm_state *x) | ||
870 | { | ||
871 | if (refcount_dec_and_test(&x->refcnt)) | ||
872 | __xfrm_state_destroy(x, true); | ||
867 | } | 873 | } |
868 | 874 | ||
869 | static inline void xfrm_state_hold(struct xfrm_state *x) | 875 | static inline void xfrm_state_hold(struct xfrm_state *x) |
@@ -1590,7 +1596,7 @@ struct xfrmk_spdinfo { | |||
1590 | 1596 | ||
1591 | struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); | 1597 | struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq); |
1592 | int xfrm_state_delete(struct xfrm_state *x); | 1598 | int xfrm_state_delete(struct xfrm_state *x); |
1593 | int xfrm_state_flush(struct net *net, u8 proto, bool task_valid); | 1599 | int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync); |
1594 | int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); | 1600 | int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid); |
1595 | void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); | 1601 | void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si); |
1596 | void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); | 1602 | void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si); |
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index 0cdc3999ecfa..c5188ff724d1 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h | |||
@@ -173,7 +173,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) | |||
173 | if (snd_BUG_ON(!stream)) | 173 | if (snd_BUG_ON(!stream)) |
174 | return; | 174 | return; |
175 | 175 | ||
176 | stream->runtime->state = SNDRV_PCM_STATE_SETUP; | 176 | if (stream->direction == SND_COMPRESS_PLAYBACK) |
177 | stream->runtime->state = SNDRV_PCM_STATE_SETUP; | ||
178 | else | ||
179 | stream->runtime->state = SNDRV_PCM_STATE_PREPARED; | ||
180 | |||
177 | wake_up(&stream->runtime->sleep); | 181 | wake_up(&stream->runtime->sleep); |
178 | } | 182 | } |
179 | 183 | ||
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 7fa48b100936..cc7c8d42d4fd 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h | |||
@@ -68,6 +68,7 @@ struct hda_bus { | |||
68 | unsigned int response_reset:1; /* controller was reset */ | 68 | unsigned int response_reset:1; /* controller was reset */ |
69 | unsigned int in_reset:1; /* during reset operation */ | 69 | unsigned int in_reset:1; /* during reset operation */ |
70 | unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ | 70 | unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ |
71 | unsigned int bus_probing :1; /* during probing process */ | ||
71 | 72 | ||
72 | int primary_dig_out_type; /* primary digital out PCM type */ | 73 | int primary_dig_out_type; /* primary digital out PCM type */ |
73 | unsigned int mixer_assigned; /* codec addr for mixer name */ | 74 | unsigned int mixer_assigned; /* codec addr for mixer name */ |
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index 14565d703291..e8baca85bac6 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h | |||
@@ -137,15 +137,21 @@ enum { | |||
137 | INET_DIAG_TCLASS, | 137 | INET_DIAG_TCLASS, |
138 | INET_DIAG_SKMEMINFO, | 138 | INET_DIAG_SKMEMINFO, |
139 | INET_DIAG_SHUTDOWN, | 139 | INET_DIAG_SHUTDOWN, |
140 | INET_DIAG_DCTCPINFO, | 140 | |
141 | INET_DIAG_PROTOCOL, /* response attribute only */ | 141 | /* |
142 | * Next extenstions cannot be requested in struct inet_diag_req_v2: | ||
143 | * its field idiag_ext has only 8 bits. | ||
144 | */ | ||
145 | |||
146 | INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */ | ||
147 | INET_DIAG_PROTOCOL, /* response attribute only */ | ||
142 | INET_DIAG_SKV6ONLY, | 148 | INET_DIAG_SKV6ONLY, |
143 | INET_DIAG_LOCALS, | 149 | INET_DIAG_LOCALS, |
144 | INET_DIAG_PEERS, | 150 | INET_DIAG_PEERS, |
145 | INET_DIAG_PAD, | 151 | INET_DIAG_PAD, |
146 | INET_DIAG_MARK, | 152 | INET_DIAG_MARK, /* only with CAP_NET_ADMIN */ |
147 | INET_DIAG_BBRINFO, | 153 | INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */ |
148 | INET_DIAG_CLASS_ID, | 154 | INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */ |
149 | INET_DIAG_MD5SIG, | 155 | INET_DIAG_MD5SIG, |
150 | __INET_DIAG_MAX, | 156 | __INET_DIAG_MAX, |
151 | }; | 157 | }; |
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 1196e1c1d4f6..ff8e7dc9d4dd 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h | |||
@@ -79,6 +79,12 @@ | |||
79 | #define VIRTIO_F_RING_PACKED 34 | 79 | #define VIRTIO_F_RING_PACKED 34 |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * This feature indicates that memory accesses by the driver and the | ||
83 | * device are ordered in a way described by the platform. | ||
84 | */ | ||
85 | #define VIRTIO_F_ORDER_PLATFORM 36 | ||
86 | |||
87 | /* | ||
82 | * Does the device support Single Root I/O Virtualization? | 88 | * Does the device support Single Root I/O Virtualization? |
83 | */ | 89 | */ |
84 | #define VIRTIO_F_SR_IOV 37 | 90 | #define VIRTIO_F_SR_IOV 37 |
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 2414f8af26b3..4c4e24c291a5 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h | |||
@@ -213,14 +213,4 @@ struct vring_packed_desc { | |||
213 | __le16 flags; | 213 | __le16 flags; |
214 | }; | 214 | }; |
215 | 215 | ||
216 | struct vring_packed { | ||
217 | unsigned int num; | ||
218 | |||
219 | struct vring_packed_desc *desc; | ||
220 | |||
221 | struct vring_packed_desc_event *driver; | ||
222 | |||
223 | struct vring_packed_desc_event *device; | ||
224 | }; | ||
225 | |||
226 | #endif /* _UAPI_LINUX_VIRTIO_RING_H */ | 216 | #endif /* _UAPI_LINUX_VIRTIO_RING_H */ |
diff --git a/init/initramfs.c b/init/initramfs.c index 7cea802d00ef..fca899622937 100644 --- a/init/initramfs.c +++ b/init/initramfs.c | |||
@@ -550,6 +550,7 @@ skip: | |||
550 | initrd_end = 0; | 550 | initrd_end = 0; |
551 | } | 551 | } |
552 | 552 | ||
553 | #ifdef CONFIG_BLK_DEV_RAM | ||
553 | #define BUF_SIZE 1024 | 554 | #define BUF_SIZE 1024 |
554 | static void __init clean_rootfs(void) | 555 | static void __init clean_rootfs(void) |
555 | { | 556 | { |
@@ -596,6 +597,7 @@ static void __init clean_rootfs(void) | |||
596 | ksys_close(fd); | 597 | ksys_close(fd); |
597 | kfree(buf); | 598 | kfree(buf); |
598 | } | 599 | } |
600 | #endif | ||
599 | 601 | ||
600 | static int __init populate_rootfs(void) | 602 | static int __init populate_rootfs(void) |
601 | { | 603 | { |
@@ -638,10 +640,8 @@ static int __init populate_rootfs(void) | |||
638 | printk(KERN_INFO "Unpacking initramfs...\n"); | 640 | printk(KERN_INFO "Unpacking initramfs...\n"); |
639 | err = unpack_to_rootfs((char *)initrd_start, | 641 | err = unpack_to_rootfs((char *)initrd_start, |
640 | initrd_end - initrd_start); | 642 | initrd_end - initrd_start); |
641 | if (err) { | 643 | if (err) |
642 | printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); | 644 | printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); |
643 | clean_rootfs(); | ||
644 | } | ||
645 | free_initrd(); | 645 | free_initrd(); |
646 | #endif | 646 | #endif |
647 | } | 647 | } |
diff --git a/init/main.c b/init/main.c index e2e80ca3165a..c86a1c8f19f4 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -695,7 +695,6 @@ asmlinkage __visible void __init start_kernel(void) | |||
695 | initrd_start = 0; | 695 | initrd_start = 0; |
696 | } | 696 | } |
697 | #endif | 697 | #endif |
698 | page_ext_init(); | ||
699 | kmemleak_init(); | 698 | kmemleak_init(); |
700 | setup_per_cpu_pageset(); | 699 | setup_per_cpu_pageset(); |
701 | numa_policy_init(); | 700 | numa_policy_init(); |
@@ -1131,6 +1130,8 @@ static noinline void __init kernel_init_freeable(void) | |||
1131 | sched_init_smp(); | 1130 | sched_init_smp(); |
1132 | 1131 | ||
1133 | page_alloc_init_late(); | 1132 | page_alloc_init_late(); |
1133 | /* Initialize page ext after all struct pages are initialized. */ | ||
1134 | page_ext_init(); | ||
1134 | 1135 | ||
1135 | do_basic_setup(); | 1136 | do_basic_setup(); |
1136 | 1137 | ||
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index befe570be5ba..c57bd10340ed 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c | |||
@@ -1459,7 +1459,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env, | |||
1459 | 1459 | ||
1460 | /* "typedef void new_void", "const void"...etc */ | 1460 | /* "typedef void new_void", "const void"...etc */ |
1461 | if (!btf_type_is_void(next_type) && | 1461 | if (!btf_type_is_void(next_type) && |
1462 | !btf_type_is_fwd(next_type)) { | 1462 | !btf_type_is_fwd(next_type) && |
1463 | !btf_type_is_func_proto(next_type)) { | ||
1463 | btf_verifier_log_type(env, v->t, "Invalid type_id"); | 1464 | btf_verifier_log_type(env, v->t, "Invalid type_id"); |
1464 | return -EINVAL; | 1465 | return -EINVAL; |
1465 | } | 1466 | } |
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index ab612fe9862f..d17d05570a3f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c | |||
@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, | |||
572 | bpf_compute_and_save_data_end(skb, &saved_data_end); | 572 | bpf_compute_and_save_data_end(skb, &saved_data_end); |
573 | 573 | ||
574 | ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, | 574 | ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, |
575 | bpf_prog_run_save_cb); | 575 | __bpf_prog_run_save_cb); |
576 | bpf_restore_data_end(skb, saved_data_end); | 576 | bpf_restore_data_end(skb, saved_data_end); |
577 | __skb_pull(skb, offset); | 577 | __skb_pull(skb, offset); |
578 | skb->sk = save_sk; | 578 | skb->sk = save_sk; |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 4b7c76765d9d..f9274114c88d 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) | |||
686 | } | 686 | } |
687 | 687 | ||
688 | if (htab_is_prealloc(htab)) { | 688 | if (htab_is_prealloc(htab)) { |
689 | pcpu_freelist_push(&htab->freelist, &l->fnode); | 689 | __pcpu_freelist_push(&htab->freelist, &l->fnode); |
690 | } else { | 690 | } else { |
691 | atomic_dec(&htab->count); | 691 | atomic_dec(&htab->count); |
692 | l->htab = htab; | 692 | l->htab = htab; |
@@ -748,7 +748,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, | |||
748 | } else { | 748 | } else { |
749 | struct pcpu_freelist_node *l; | 749 | struct pcpu_freelist_node *l; |
750 | 750 | ||
751 | l = pcpu_freelist_pop(&htab->freelist); | 751 | l = __pcpu_freelist_pop(&htab->freelist); |
752 | if (!l) | 752 | if (!l) |
753 | return ERR_PTR(-E2BIG); | 753 | return ERR_PTR(-E2BIG); |
754 | l_new = container_of(l, struct htab_elem, fnode); | 754 | l_new = container_of(l, struct htab_elem, fnode); |
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c index abf1002080df..93a5cbbde421 100644 --- a/kernel/bpf/lpm_trie.c +++ b/kernel/bpf/lpm_trie.c | |||
@@ -471,6 +471,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key) | |||
471 | } | 471 | } |
472 | 472 | ||
473 | if (!node || node->prefixlen != key->prefixlen || | 473 | if (!node || node->prefixlen != key->prefixlen || |
474 | node->prefixlen != matchlen || | ||
474 | (node->flags & LPM_TREE_NODE_FLAG_IM)) { | 475 | (node->flags & LPM_TREE_NODE_FLAG_IM)) { |
475 | ret = -ENOENT; | 476 | ret = -ENOENT; |
476 | goto out; | 477 | goto out; |
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c index 673fa6fe2d73..0c1b4ba9e90e 100644 --- a/kernel/bpf/percpu_freelist.c +++ b/kernel/bpf/percpu_freelist.c | |||
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s) | |||
28 | free_percpu(s->freelist); | 28 | free_percpu(s->freelist); |
29 | } | 29 | } |
30 | 30 | ||
31 | static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, | 31 | static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head, |
32 | struct pcpu_freelist_node *node) | 32 | struct pcpu_freelist_node *node) |
33 | { | 33 | { |
34 | raw_spin_lock(&head->lock); | 34 | raw_spin_lock(&head->lock); |
35 | node->next = head->first; | 35 | node->next = head->first; |
@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, | |||
37 | raw_spin_unlock(&head->lock); | 37 | raw_spin_unlock(&head->lock); |
38 | } | 38 | } |
39 | 39 | ||
40 | void pcpu_freelist_push(struct pcpu_freelist *s, | 40 | void __pcpu_freelist_push(struct pcpu_freelist *s, |
41 | struct pcpu_freelist_node *node) | 41 | struct pcpu_freelist_node *node) |
42 | { | 42 | { |
43 | struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); | 43 | struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); |
44 | 44 | ||
45 | __pcpu_freelist_push(head, node); | 45 | ___pcpu_freelist_push(head, node); |
46 | } | ||
47 | |||
48 | void pcpu_freelist_push(struct pcpu_freelist *s, | ||
49 | struct pcpu_freelist_node *node) | ||
50 | { | ||
51 | unsigned long flags; | ||
52 | |||
53 | local_irq_save(flags); | ||
54 | __pcpu_freelist_push(s, node); | ||
55 | local_irq_restore(flags); | ||
46 | } | 56 | } |
47 | 57 | ||
48 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, | 58 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, |
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, | |||
63 | for_each_possible_cpu(cpu) { | 73 | for_each_possible_cpu(cpu) { |
64 | again: | 74 | again: |
65 | head = per_cpu_ptr(s->freelist, cpu); | 75 | head = per_cpu_ptr(s->freelist, cpu); |
66 | __pcpu_freelist_push(head, buf); | 76 | ___pcpu_freelist_push(head, buf); |
67 | i++; | 77 | i++; |
68 | buf += elem_size; | 78 | buf += elem_size; |
69 | if (i == nr_elems) | 79 | if (i == nr_elems) |
@@ -74,14 +84,12 @@ again: | |||
74 | local_irq_restore(flags); | 84 | local_irq_restore(flags); |
75 | } | 85 | } |
76 | 86 | ||
77 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) | 87 | struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s) |
78 | { | 88 | { |
79 | struct pcpu_freelist_head *head; | 89 | struct pcpu_freelist_head *head; |
80 | struct pcpu_freelist_node *node; | 90 | struct pcpu_freelist_node *node; |
81 | unsigned long flags; | ||
82 | int orig_cpu, cpu; | 91 | int orig_cpu, cpu; |
83 | 92 | ||
84 | local_irq_save(flags); | ||
85 | orig_cpu = cpu = raw_smp_processor_id(); | 93 | orig_cpu = cpu = raw_smp_processor_id(); |
86 | while (1) { | 94 | while (1) { |
87 | head = per_cpu_ptr(s->freelist, cpu); | 95 | head = per_cpu_ptr(s->freelist, cpu); |
@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) | |||
89 | node = head->first; | 97 | node = head->first; |
90 | if (node) { | 98 | if (node) { |
91 | head->first = node->next; | 99 | head->first = node->next; |
92 | raw_spin_unlock_irqrestore(&head->lock, flags); | 100 | raw_spin_unlock(&head->lock); |
93 | return node; | 101 | return node; |
94 | } | 102 | } |
95 | raw_spin_unlock(&head->lock); | 103 | raw_spin_unlock(&head->lock); |
96 | cpu = cpumask_next(cpu, cpu_possible_mask); | 104 | cpu = cpumask_next(cpu, cpu_possible_mask); |
97 | if (cpu >= nr_cpu_ids) | 105 | if (cpu >= nr_cpu_ids) |
98 | cpu = 0; | 106 | cpu = 0; |
99 | if (cpu == orig_cpu) { | 107 | if (cpu == orig_cpu) |
100 | local_irq_restore(flags); | ||
101 | return NULL; | 108 | return NULL; |
102 | } | ||
103 | } | 109 | } |
104 | } | 110 | } |
111 | |||
112 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) | ||
113 | { | ||
114 | struct pcpu_freelist_node *ret; | ||
115 | unsigned long flags; | ||
116 | |||
117 | local_irq_save(flags); | ||
118 | ret = __pcpu_freelist_pop(s); | ||
119 | local_irq_restore(flags); | ||
120 | return ret; | ||
121 | } | ||
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h index 3049aae8ea1e..c3960118e617 100644 --- a/kernel/bpf/percpu_freelist.h +++ b/kernel/bpf/percpu_freelist.h | |||
@@ -22,8 +22,12 @@ struct pcpu_freelist_node { | |||
22 | struct pcpu_freelist_node *next; | 22 | struct pcpu_freelist_node *next; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | /* pcpu_freelist_* do spin_lock_irqsave. */ | ||
25 | void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); | 26 | void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); |
26 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); | 27 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); |
28 | /* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */ | ||
29 | void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); | ||
30 | struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *); | ||
27 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, | 31 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, |
28 | u32 nr_elems); | 32 | u32 nr_elems); |
29 | int pcpu_freelist_init(struct pcpu_freelist *); | 33 | int pcpu_freelist_init(struct pcpu_freelist *); |
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c index d43b14535827..950ab2f28922 100644 --- a/kernel/bpf/stackmap.c +++ b/kernel/bpf/stackmap.c | |||
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry) | |||
44 | struct stack_map_irq_work *work; | 44 | struct stack_map_irq_work *work; |
45 | 45 | ||
46 | work = container_of(entry, struct stack_map_irq_work, irq_work); | 46 | work = container_of(entry, struct stack_map_irq_work, irq_work); |
47 | up_read(work->sem); | 47 | up_read_non_owner(work->sem); |
48 | work->sem = NULL; | 48 | work->sem = NULL; |
49 | } | 49 | } |
50 | 50 | ||
@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs, | |||
338 | } else { | 338 | } else { |
339 | work->sem = ¤t->mm->mmap_sem; | 339 | work->sem = ¤t->mm->mmap_sem; |
340 | irq_work_queue(&work->irq_work); | 340 | irq_work_queue(&work->irq_work); |
341 | /* | ||
342 | * The irq_work will release the mmap_sem with | ||
343 | * up_read_non_owner(). The rwsem_release() is called | ||
344 | * here to release the lock from lockdep's perspective. | ||
345 | */ | ||
346 | rwsem_release(¤t->mm->mmap_sem.dep_map, 1, _RET_IP_); | ||
341 | } | 347 | } |
342 | } | 348 | } |
343 | 349 | ||
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b155cd17c1bd..8577bb7f8be6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -713,8 +713,13 @@ static int map_lookup_elem(union bpf_attr *attr) | |||
713 | 713 | ||
714 | if (bpf_map_is_dev_bound(map)) { | 714 | if (bpf_map_is_dev_bound(map)) { |
715 | err = bpf_map_offload_lookup_elem(map, key, value); | 715 | err = bpf_map_offload_lookup_elem(map, key, value); |
716 | } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || | 716 | goto done; |
717 | map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { | 717 | } |
718 | |||
719 | preempt_disable(); | ||
720 | this_cpu_inc(bpf_prog_active); | ||
721 | if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || | ||
722 | map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { | ||
718 | err = bpf_percpu_hash_copy(map, key, value); | 723 | err = bpf_percpu_hash_copy(map, key, value); |
719 | } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { | 724 | } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { |
720 | err = bpf_percpu_array_copy(map, key, value); | 725 | err = bpf_percpu_array_copy(map, key, value); |
@@ -744,7 +749,10 @@ static int map_lookup_elem(union bpf_attr *attr) | |||
744 | } | 749 | } |
745 | rcu_read_unlock(); | 750 | rcu_read_unlock(); |
746 | } | 751 | } |
752 | this_cpu_dec(bpf_prog_active); | ||
753 | preempt_enable(); | ||
747 | 754 | ||
755 | done: | ||
748 | if (err) | 756 | if (err) |
749 | goto free_value; | 757 | goto free_value; |
750 | 758 | ||
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 56674a7c3778..8f295b790297 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off, | |||
1617 | return 0; | 1617 | return 0; |
1618 | } | 1618 | } |
1619 | 1619 | ||
1620 | static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, | 1620 | static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, |
1621 | int size, enum bpf_access_type t) | 1621 | u32 regno, int off, int size, |
1622 | enum bpf_access_type t) | ||
1622 | { | 1623 | { |
1623 | struct bpf_reg_state *regs = cur_regs(env); | 1624 | struct bpf_reg_state *regs = cur_regs(env); |
1624 | struct bpf_reg_state *reg = ®s[regno]; | 1625 | struct bpf_reg_state *reg = ®s[regno]; |
1625 | struct bpf_insn_access_aux info; | 1626 | struct bpf_insn_access_aux info = {}; |
1626 | 1627 | ||
1627 | if (reg->smin_value < 0) { | 1628 | if (reg->smin_value < 0) { |
1628 | verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", | 1629 | verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", |
@@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, | |||
1636 | return -EACCES; | 1637 | return -EACCES; |
1637 | } | 1638 | } |
1638 | 1639 | ||
1640 | env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size; | ||
1641 | |||
1639 | return 0; | 1642 | return 0; |
1640 | } | 1643 | } |
1641 | 1644 | ||
@@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn | |||
2032 | verbose(env, "cannot write into socket\n"); | 2035 | verbose(env, "cannot write into socket\n"); |
2033 | return -EACCES; | 2036 | return -EACCES; |
2034 | } | 2037 | } |
2035 | err = check_sock_access(env, regno, off, size, t); | 2038 | err = check_sock_access(env, insn_idx, regno, off, size, t); |
2036 | if (!err && value_regno >= 0) | 2039 | if (!err && value_regno >= 0) |
2037 | mark_reg_unknown(env, regs, value_regno); | 2040 | mark_reg_unknown(env, regs, value_regno); |
2038 | } else { | 2041 | } else { |
diff --git a/kernel/events/core.c b/kernel/events/core.c index e5ede6918050..26d6edab051a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event, | |||
4963 | } | 4963 | } |
4964 | } | 4964 | } |
4965 | 4965 | ||
4966 | static int perf_event_check_period(struct perf_event *event, u64 value) | ||
4967 | { | ||
4968 | return event->pmu->check_period(event, value); | ||
4969 | } | ||
4970 | |||
4966 | static int perf_event_period(struct perf_event *event, u64 __user *arg) | 4971 | static int perf_event_period(struct perf_event *event, u64 __user *arg) |
4967 | { | 4972 | { |
4968 | u64 value; | 4973 | u64 value; |
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) | |||
4979 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) | 4984 | if (event->attr.freq && value > sysctl_perf_event_sample_rate) |
4980 | return -EINVAL; | 4985 | return -EINVAL; |
4981 | 4986 | ||
4987 | if (perf_event_check_period(event, value)) | ||
4988 | return -EINVAL; | ||
4989 | |||
4982 | event_function_call(event, __perf_event_period, &value); | 4990 | event_function_call(event, __perf_event_period, &value); |
4983 | 4991 | ||
4984 | return 0; | 4992 | return 0; |
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu) | |||
9391 | return 0; | 9399 | return 0; |
9392 | } | 9400 | } |
9393 | 9401 | ||
9402 | static int perf_event_nop_int(struct perf_event *event, u64 value) | ||
9403 | { | ||
9404 | return 0; | ||
9405 | } | ||
9406 | |||
9394 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); | 9407 | static DEFINE_PER_CPU(unsigned int, nop_txn_flags); |
9395 | 9408 | ||
9396 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) | 9409 | static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags) |
@@ -9691,6 +9704,9 @@ got_cpu_context: | |||
9691 | pmu->pmu_disable = perf_pmu_nop_void; | 9704 | pmu->pmu_disable = perf_pmu_nop_void; |
9692 | } | 9705 | } |
9693 | 9706 | ||
9707 | if (!pmu->check_period) | ||
9708 | pmu->check_period = perf_event_nop_int; | ||
9709 | |||
9694 | if (!pmu->event_idx) | 9710 | if (!pmu->event_idx) |
9695 | pmu->event_idx = perf_event_idx_default; | 9711 | pmu->event_idx = perf_event_idx_default; |
9696 | 9712 | ||
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 4a9937076331..5ab4fe3b1dcc 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
@@ -734,6 +734,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) | |||
734 | size = sizeof(struct ring_buffer); | 734 | size = sizeof(struct ring_buffer); |
735 | size += nr_pages * sizeof(void *); | 735 | size += nr_pages * sizeof(void *); |
736 | 736 | ||
737 | if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER) | ||
738 | goto fail; | ||
739 | |||
737 | rb = kzalloc(size, GFP_KERNEL); | 740 | rb = kzalloc(size, GFP_KERNEL); |
738 | if (!rb) | 741 | if (!rb) |
739 | goto fail; | 742 | goto fail; |
diff --git a/kernel/futex.c b/kernel/futex.c index 113f1c042250..6968923053ff 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -2217,11 +2217,11 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) | |||
2217 | * decrement the counter at queue_unlock() when some error has | 2217 | * decrement the counter at queue_unlock() when some error has |
2218 | * occurred and we don't end up adding the task to the list. | 2218 | * occurred and we don't end up adding the task to the list. |
2219 | */ | 2219 | */ |
2220 | hb_waiters_inc(hb); | 2220 | hb_waiters_inc(hb); /* implies smp_mb(); (A) */ |
2221 | 2221 | ||
2222 | q->lock_ptr = &hb->lock; | 2222 | q->lock_ptr = &hb->lock; |
2223 | 2223 | ||
2224 | spin_lock(&hb->lock); /* implies smp_mb(); (A) */ | 2224 | spin_lock(&hb->lock); |
2225 | return hb; | 2225 | return hb; |
2226 | } | 2226 | } |
2227 | 2227 | ||
@@ -2857,35 +2857,39 @@ retry_private: | |||
2857 | * and BUG when futex_unlock_pi() interleaves with this. | 2857 | * and BUG when futex_unlock_pi() interleaves with this. |
2858 | * | 2858 | * |
2859 | * Therefore acquire wait_lock while holding hb->lock, but drop the | 2859 | * Therefore acquire wait_lock while holding hb->lock, but drop the |
2860 | * latter before calling rt_mutex_start_proxy_lock(). This still fully | 2860 | * latter before calling __rt_mutex_start_proxy_lock(). This |
2861 | * serializes against futex_unlock_pi() as that does the exact same | 2861 | * interleaves with futex_unlock_pi() -- which does a similar lock |
2862 | * lock handoff sequence. | 2862 | * handoff -- such that the latter can observe the futex_q::pi_state |
2863 | * before __rt_mutex_start_proxy_lock() is done. | ||
2863 | */ | 2864 | */ |
2864 | raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); | 2865 | raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); |
2865 | spin_unlock(q.lock_ptr); | 2866 | spin_unlock(q.lock_ptr); |
2867 | /* | ||
2868 | * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter | ||
2869 | * such that futex_unlock_pi() is guaranteed to observe the waiter when | ||
2870 | * it sees the futex_q::pi_state. | ||
2871 | */ | ||
2866 | ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); | 2872 | ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); |
2867 | raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); | 2873 | raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); |
2868 | 2874 | ||
2869 | if (ret) { | 2875 | if (ret) { |
2870 | if (ret == 1) | 2876 | if (ret == 1) |
2871 | ret = 0; | 2877 | ret = 0; |
2872 | 2878 | goto cleanup; | |
2873 | spin_lock(q.lock_ptr); | ||
2874 | goto no_block; | ||
2875 | } | 2879 | } |
2876 | 2880 | ||
2877 | |||
2878 | if (unlikely(to)) | 2881 | if (unlikely(to)) |
2879 | hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); | 2882 | hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); |
2880 | 2883 | ||
2881 | ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); | 2884 | ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); |
2882 | 2885 | ||
2886 | cleanup: | ||
2883 | spin_lock(q.lock_ptr); | 2887 | spin_lock(q.lock_ptr); |
2884 | /* | 2888 | /* |
2885 | * If we failed to acquire the lock (signal/timeout), we must | 2889 | * If we failed to acquire the lock (deadlock/signal/timeout), we must |
2886 | * first acquire the hb->lock before removing the lock from the | 2890 | * first acquire the hb->lock before removing the lock from the |
2887 | * rt_mutex waitqueue, such that we can keep the hb and rt_mutex | 2891 | * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait |
2888 | * wait lists consistent. | 2892 | * lists consistent. |
2889 | * | 2893 | * |
2890 | * In particular; it is important that futex_unlock_pi() can not | 2894 | * In particular; it is important that futex_unlock_pi() can not |
2891 | * observe this inconsistency. | 2895 | * observe this inconsistency. |
@@ -3009,6 +3013,10 @@ retry: | |||
3009 | * there is no point where we hold neither; and therefore | 3013 | * there is no point where we hold neither; and therefore |
3010 | * wake_futex_pi() must observe a state consistent with what we | 3014 | * wake_futex_pi() must observe a state consistent with what we |
3011 | * observed. | 3015 | * observed. |
3016 | * | ||
3017 | * In particular; this forces __rt_mutex_start_proxy() to | ||
3018 | * complete such that we're guaranteed to observe the | ||
3019 | * rt_waiter. Also see the WARN in wake_futex_pi(). | ||
3012 | */ | 3020 | */ |
3013 | raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); | 3021 | raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
3014 | spin_unlock(&hb->lock); | 3022 | spin_unlock(&hb->lock); |
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 581edcc63c26..978d63a8261c 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock, | |||
1726 | rt_mutex_set_owner(lock, NULL); | 1726 | rt_mutex_set_owner(lock, NULL); |
1727 | } | 1727 | } |
1728 | 1728 | ||
1729 | /** | ||
1730 | * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task | ||
1731 | * @lock: the rt_mutex to take | ||
1732 | * @waiter: the pre-initialized rt_mutex_waiter | ||
1733 | * @task: the task to prepare | ||
1734 | * | ||
1735 | * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock | ||
1736 | * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. | ||
1737 | * | ||
1738 | * NOTE: does _NOT_ remove the @waiter on failure; must either call | ||
1739 | * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this. | ||
1740 | * | ||
1741 | * Returns: | ||
1742 | * 0 - task blocked on lock | ||
1743 | * 1 - acquired the lock for task, caller should wake it up | ||
1744 | * <0 - error | ||
1745 | * | ||
1746 | * Special API call for PI-futex support. | ||
1747 | */ | ||
1729 | int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, | 1748 | int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, |
1730 | struct rt_mutex_waiter *waiter, | 1749 | struct rt_mutex_waiter *waiter, |
1731 | struct task_struct *task) | 1750 | struct task_struct *task) |
1732 | { | 1751 | { |
1733 | int ret; | 1752 | int ret; |
1734 | 1753 | ||
1754 | lockdep_assert_held(&lock->wait_lock); | ||
1755 | |||
1735 | if (try_to_take_rt_mutex(lock, task, NULL)) | 1756 | if (try_to_take_rt_mutex(lock, task, NULL)) |
1736 | return 1; | 1757 | return 1; |
1737 | 1758 | ||
@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, | |||
1749 | ret = 0; | 1770 | ret = 0; |
1750 | } | 1771 | } |
1751 | 1772 | ||
1752 | if (unlikely(ret)) | ||
1753 | remove_waiter(lock, waiter); | ||
1754 | |||
1755 | debug_rt_mutex_print_deadlock(waiter); | 1773 | debug_rt_mutex_print_deadlock(waiter); |
1756 | 1774 | ||
1757 | return ret; | 1775 | return ret; |
@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, | |||
1763 | * @waiter: the pre-initialized rt_mutex_waiter | 1781 | * @waiter: the pre-initialized rt_mutex_waiter |
1764 | * @task: the task to prepare | 1782 | * @task: the task to prepare |
1765 | * | 1783 | * |
1784 | * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock | ||
1785 | * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. | ||
1786 | * | ||
1787 | * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter | ||
1788 | * on failure. | ||
1789 | * | ||
1766 | * Returns: | 1790 | * Returns: |
1767 | * 0 - task blocked on lock | 1791 | * 0 - task blocked on lock |
1768 | * 1 - acquired the lock for task, caller should wake it up | 1792 | * 1 - acquired the lock for task, caller should wake it up |
1769 | * <0 - error | 1793 | * <0 - error |
1770 | * | 1794 | * |
1771 | * Special API call for FUTEX_REQUEUE_PI support. | 1795 | * Special API call for PI-futex support. |
1772 | */ | 1796 | */ |
1773 | int rt_mutex_start_proxy_lock(struct rt_mutex *lock, | 1797 | int rt_mutex_start_proxy_lock(struct rt_mutex *lock, |
1774 | struct rt_mutex_waiter *waiter, | 1798 | struct rt_mutex_waiter *waiter, |
@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, | |||
1778 | 1802 | ||
1779 | raw_spin_lock_irq(&lock->wait_lock); | 1803 | raw_spin_lock_irq(&lock->wait_lock); |
1780 | ret = __rt_mutex_start_proxy_lock(lock, waiter, task); | 1804 | ret = __rt_mutex_start_proxy_lock(lock, waiter, task); |
1805 | if (unlikely(ret)) | ||
1806 | remove_waiter(lock, waiter); | ||
1781 | raw_spin_unlock_irq(&lock->wait_lock); | 1807 | raw_spin_unlock_irq(&lock->wait_lock); |
1782 | 1808 | ||
1783 | return ret; | 1809 | return ret; |
@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, | |||
1845 | * @lock: the rt_mutex we were woken on | 1871 | * @lock: the rt_mutex we were woken on |
1846 | * @waiter: the pre-initialized rt_mutex_waiter | 1872 | * @waiter: the pre-initialized rt_mutex_waiter |
1847 | * | 1873 | * |
1848 | * Attempt to clean up after a failed rt_mutex_wait_proxy_lock(). | 1874 | * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or |
1875 | * rt_mutex_wait_proxy_lock(). | ||
1849 | * | 1876 | * |
1850 | * Unless we acquired the lock; we're still enqueued on the wait-list and can | 1877 | * Unless we acquired the lock; we're still enqueued on the wait-list and can |
1851 | * in fact still be granted ownership until we're removed. Therefore we can | 1878 | * in fact still be granted ownership until we're removed. Therefore we can |
diff --git a/kernel/relay.c b/kernel/relay.c index 04f248644e06..9e0f52375487 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan, | |||
428 | dentry = chan->cb->create_buf_file(tmpname, chan->parent, | 428 | dentry = chan->cb->create_buf_file(tmpname, chan->parent, |
429 | S_IRUSR, buf, | 429 | S_IRUSR, buf, |
430 | &chan->is_global); | 430 | &chan->is_global); |
431 | if (IS_ERR(dentry)) | ||
432 | dentry = NULL; | ||
431 | 433 | ||
432 | kfree(tmpname); | 434 | kfree(tmpname); |
433 | 435 | ||
@@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu) | |||
461 | dentry = chan->cb->create_buf_file(NULL, NULL, | 463 | dentry = chan->cb->create_buf_file(NULL, NULL, |
462 | S_IRUSR, buf, | 464 | S_IRUSR, buf, |
463 | &chan->is_global); | 465 | &chan->is_global); |
464 | if (WARN_ON(dentry)) | 466 | if (IS_ERR_OR_NULL(dentry)) |
465 | goto free_buf; | 467 | goto free_buf; |
466 | } | 468 | } |
467 | 469 | ||
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index c3484785b179..0e97ca9306ef 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c | |||
@@ -322,7 +322,7 @@ static bool update_stats(struct psi_group *group) | |||
322 | expires = group->next_update; | 322 | expires = group->next_update; |
323 | if (now < expires) | 323 | if (now < expires) |
324 | goto out; | 324 | goto out; |
325 | if (now - expires > psi_period) | 325 | if (now - expires >= psi_period) |
326 | missed_periods = div_u64(now - expires, psi_period); | 326 | missed_periods = div_u64(now - expires, psi_period); |
327 | 327 | ||
328 | /* | 328 | /* |
diff --git a/kernel/signal.c b/kernel/signal.c index e1d7ad8e6ab1..57b7771e20d7 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -688,6 +688,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in | |||
688 | } | 688 | } |
689 | EXPORT_SYMBOL_GPL(dequeue_signal); | 689 | EXPORT_SYMBOL_GPL(dequeue_signal); |
690 | 690 | ||
691 | static int dequeue_synchronous_signal(kernel_siginfo_t *info) | ||
692 | { | ||
693 | struct task_struct *tsk = current; | ||
694 | struct sigpending *pending = &tsk->pending; | ||
695 | struct sigqueue *q, *sync = NULL; | ||
696 | |||
697 | /* | ||
698 | * Might a synchronous signal be in the queue? | ||
699 | */ | ||
700 | if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) | ||
701 | return 0; | ||
702 | |||
703 | /* | ||
704 | * Return the first synchronous signal in the queue. | ||
705 | */ | ||
706 | list_for_each_entry(q, &pending->list, list) { | ||
707 | /* Synchronous signals have a postive si_code */ | ||
708 | if ((q->info.si_code > SI_USER) && | ||
709 | (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { | ||
710 | sync = q; | ||
711 | goto next; | ||
712 | } | ||
713 | } | ||
714 | return 0; | ||
715 | next: | ||
716 | /* | ||
717 | * Check if there is another siginfo for the same signal. | ||
718 | */ | ||
719 | list_for_each_entry_continue(q, &pending->list, list) { | ||
720 | if (q->info.si_signo == sync->info.si_signo) | ||
721 | goto still_pending; | ||
722 | } | ||
723 | |||
724 | sigdelset(&pending->signal, sync->info.si_signo); | ||
725 | recalc_sigpending(); | ||
726 | still_pending: | ||
727 | list_del_init(&sync->list); | ||
728 | copy_siginfo(info, &sync->info); | ||
729 | __sigqueue_free(sync); | ||
730 | return info->si_signo; | ||
731 | } | ||
732 | |||
691 | /* | 733 | /* |
692 | * Tell a process that it has a new active signal.. | 734 | * Tell a process that it has a new active signal.. |
693 | * | 735 | * |
@@ -1057,10 +1099,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc | |||
1057 | 1099 | ||
1058 | result = TRACE_SIGNAL_DELIVERED; | 1100 | result = TRACE_SIGNAL_DELIVERED; |
1059 | /* | 1101 | /* |
1060 | * Skip useless siginfo allocation for SIGKILL SIGSTOP, | 1102 | * Skip useless siginfo allocation for SIGKILL and kernel threads. |
1061 | * and kernel threads. | ||
1062 | */ | 1103 | */ |
1063 | if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD)) | 1104 | if ((sig == SIGKILL) || (t->flags & PF_KTHREAD)) |
1064 | goto out_set; | 1105 | goto out_set; |
1065 | 1106 | ||
1066 | /* | 1107 | /* |
@@ -2394,6 +2435,14 @@ relock: | |||
2394 | goto relock; | 2435 | goto relock; |
2395 | } | 2436 | } |
2396 | 2437 | ||
2438 | /* Has this task already been marked for death? */ | ||
2439 | if (signal_group_exit(signal)) { | ||
2440 | ksig->info.si_signo = signr = SIGKILL; | ||
2441 | sigdelset(¤t->pending.signal, SIGKILL); | ||
2442 | recalc_sigpending(); | ||
2443 | goto fatal; | ||
2444 | } | ||
2445 | |||
2397 | for (;;) { | 2446 | for (;;) { |
2398 | struct k_sigaction *ka; | 2447 | struct k_sigaction *ka; |
2399 | 2448 | ||
@@ -2407,7 +2456,15 @@ relock: | |||
2407 | goto relock; | 2456 | goto relock; |
2408 | } | 2457 | } |
2409 | 2458 | ||
2410 | signr = dequeue_signal(current, ¤t->blocked, &ksig->info); | 2459 | /* |
2460 | * Signals generated by the execution of an instruction | ||
2461 | * need to be delivered before any other pending signals | ||
2462 | * so that the instruction pointer in the signal stack | ||
2463 | * frame points to the faulting instruction. | ||
2464 | */ | ||
2465 | signr = dequeue_synchronous_signal(&ksig->info); | ||
2466 | if (!signr) | ||
2467 | signr = dequeue_signal(current, ¤t->blocked, &ksig->info); | ||
2411 | 2468 | ||
2412 | if (!signr) | 2469 | if (!signr) |
2413 | break; /* will return 0 */ | 2470 | break; /* will return 0 */ |
@@ -2489,6 +2546,7 @@ relock: | |||
2489 | continue; | 2546 | continue; |
2490 | } | 2547 | } |
2491 | 2548 | ||
2549 | fatal: | ||
2492 | spin_unlock_irq(&sighand->siglock); | 2550 | spin_unlock_irq(&sighand->siglock); |
2493 | 2551 | ||
2494 | /* | 2552 | /* |
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 8b068adb9da1..f1a86a0d881d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
@@ -1204,22 +1204,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog * | |||
1204 | 1204 | ||
1205 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | 1205 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) |
1206 | { | 1206 | { |
1207 | int err; | 1207 | return __bpf_probe_register(btp, prog); |
1208 | |||
1209 | mutex_lock(&bpf_event_mutex); | ||
1210 | err = __bpf_probe_register(btp, prog); | ||
1211 | mutex_unlock(&bpf_event_mutex); | ||
1212 | return err; | ||
1213 | } | 1208 | } |
1214 | 1209 | ||
1215 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | 1210 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) |
1216 | { | 1211 | { |
1217 | int err; | 1212 | return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); |
1218 | |||
1219 | mutex_lock(&bpf_event_mutex); | ||
1220 | err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); | ||
1221 | mutex_unlock(&bpf_event_mutex); | ||
1222 | return err; | ||
1223 | } | 1213 | } |
1224 | 1214 | ||
1225 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, | 1215 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c521b7347482..c4238b441624 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file | |||
3384 | const char tgid_space[] = " "; | 3384 | const char tgid_space[] = " "; |
3385 | const char space[] = " "; | 3385 | const char space[] = " "; |
3386 | 3386 | ||
3387 | print_event_info(buf, m); | ||
3388 | |||
3387 | seq_printf(m, "# %s _-----=> irqs-off\n", | 3389 | seq_printf(m, "# %s _-----=> irqs-off\n", |
3388 | tgid ? tgid_space : space); | 3390 | tgid ? tgid_space : space); |
3389 | seq_printf(m, "# %s / _----=> need-resched\n", | 3391 | seq_printf(m, "# %s / _----=> need-resched\n", |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index d5fb09ebba8b..9eaf07f99212 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = { | |||
861 | static nokprobe_inline int | 861 | static nokprobe_inline int |
862 | fetch_store_strlen(unsigned long addr) | 862 | fetch_store_strlen(unsigned long addr) |
863 | { | 863 | { |
864 | mm_segment_t old_fs; | ||
865 | int ret, len = 0; | 864 | int ret, len = 0; |
866 | u8 c; | 865 | u8 c; |
867 | 866 | ||
868 | old_fs = get_fs(); | ||
869 | set_fs(KERNEL_DS); | ||
870 | pagefault_disable(); | ||
871 | |||
872 | do { | 867 | do { |
873 | ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1); | 868 | ret = probe_mem_read(&c, (u8 *)addr + len, 1); |
874 | len++; | 869 | len++; |
875 | } while (c && ret == 0 && len < MAX_STRING_SIZE); | 870 | } while (c && ret == 0 && len < MAX_STRING_SIZE); |
876 | 871 | ||
877 | pagefault_enable(); | ||
878 | set_fs(old_fs); | ||
879 | |||
880 | return (ret < 0) ? ret : len; | 872 | return (ret < 0) ? ret : len; |
881 | } | 873 | } |
882 | 874 | ||
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index 5c56afc17cf8..4737bb8c07a3 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h | |||
@@ -180,10 +180,12 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs, | |||
180 | if (unlikely(arg->dynamic)) | 180 | if (unlikely(arg->dynamic)) |
181 | *dl = make_data_loc(maxlen, dyndata - base); | 181 | *dl = make_data_loc(maxlen, dyndata - base); |
182 | ret = process_fetch_insn(arg->code, regs, dl, base); | 182 | ret = process_fetch_insn(arg->code, regs, dl, base); |
183 | if (unlikely(ret < 0 && arg->dynamic)) | 183 | if (unlikely(ret < 0 && arg->dynamic)) { |
184 | *dl = make_data_loc(0, dyndata - base); | 184 | *dl = make_data_loc(0, dyndata - base); |
185 | else | 185 | } else { |
186 | dyndata += ret; | 186 | dyndata += ret; |
187 | maxlen -= ret; | ||
188 | } | ||
187 | } | 189 | } |
188 | } | 190 | } |
189 | 191 | ||
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index e335576b9411..9bde07c06362 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright (C) IBM Corporation, 2010-2012 | 5 | * Copyright (C) IBM Corporation, 2010-2012 |
6 | * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> | 6 | * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> |
7 | */ | 7 | */ |
8 | #define pr_fmt(fmt) "trace_kprobe: " fmt | 8 | #define pr_fmt(fmt) "trace_uprobe: " fmt |
9 | 9 | ||
10 | #include <linux/ctype.h> | 10 | #include <linux/ctype.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
@@ -160,6 +160,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base) | |||
160 | if (ret >= 0) { | 160 | if (ret >= 0) { |
161 | if (ret == maxlen) | 161 | if (ret == maxlen) |
162 | dst[ret - 1] = '\0'; | 162 | dst[ret - 1] = '\0'; |
163 | else | ||
164 | /* | ||
165 | * Include the terminating null byte. In this case it | ||
166 | * was copied by strncpy_from_user but not accounted | ||
167 | * for in ret. | ||
168 | */ | ||
169 | ret++; | ||
163 | *(u32 *)dest = make_data_loc(ret, (void *)dst - base); | 170 | *(u32 *)dest = make_data_loc(ret, (void *)dst - base); |
164 | } | 171 | } |
165 | 172 | ||
diff --git a/lib/assoc_array.c b/lib/assoc_array.c index c6659cb37033..59875eb278ea 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c | |||
@@ -768,9 +768,11 @@ all_leaves_cluster_together: | |||
768 | new_s0->index_key[i] = | 768 | new_s0->index_key[i] = |
769 | ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); | 769 | ops->get_key_chunk(index_key, i * ASSOC_ARRAY_KEY_CHUNK_SIZE); |
770 | 770 | ||
771 | blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); | 771 | if (level & ASSOC_ARRAY_KEY_CHUNK_MASK) { |
772 | pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); | 772 | blank = ULONG_MAX << (level & ASSOC_ARRAY_KEY_CHUNK_MASK); |
773 | new_s0->index_key[keylen - 1] &= ~blank; | 773 | pr_devel("blank off [%zu] %d: %lx\n", keylen - 1, level, blank); |
774 | new_s0->index_key[keylen - 1] &= ~blank; | ||
775 | } | ||
774 | 776 | ||
775 | /* This now reduces to a node splitting exercise for which we'll need | 777 | /* This now reduces to a node splitting exercise for which we'll need |
776 | * to regenerate the disparity table. | 778 | * to regenerate the disparity table. |
diff --git a/lib/crc32.c b/lib/crc32.c index 45b1d67a1767..4a20455d1f61 100644 --- a/lib/crc32.c +++ b/lib/crc32.c | |||
@@ -206,8 +206,8 @@ u32 __pure __weak __crc32c_le(u32 crc, unsigned char const *p, size_t len) | |||
206 | EXPORT_SYMBOL(crc32_le); | 206 | EXPORT_SYMBOL(crc32_le); |
207 | EXPORT_SYMBOL(__crc32c_le); | 207 | EXPORT_SYMBOL(__crc32c_le); |
208 | 208 | ||
209 | u32 crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); | 209 | u32 __pure crc32_le_base(u32, unsigned char const *, size_t) __alias(crc32_le); |
210 | u32 __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); | 210 | u32 __pure __crc32c_le_base(u32, unsigned char const *, size_t) __alias(__crc32c_le); |
211 | 211 | ||
212 | /* | 212 | /* |
213 | * This multiplies the polynomials x and y modulo the given modulus. | 213 | * This multiplies the polynomials x and y modulo the given modulus. |
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 6a8ac7626797..e52f8cafe227 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c | |||
@@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt) | |||
541 | static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, | 541 | static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, |
542 | int cnt, bool slow) | 542 | int cnt, bool slow) |
543 | { | 543 | { |
544 | struct rhltable rhlt; | 544 | struct rhltable *rhlt; |
545 | unsigned int i, ret; | 545 | unsigned int i, ret; |
546 | const char *key; | 546 | const char *key; |
547 | int err = 0; | 547 | int err = 0; |
548 | 548 | ||
549 | err = rhltable_init(&rhlt, &test_rht_params_dup); | 549 | rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL); |
550 | if (WARN_ON(err)) | 550 | if (WARN_ON(!rhlt)) |
551 | return -EINVAL; | ||
552 | |||
553 | err = rhltable_init(rhlt, &test_rht_params_dup); | ||
554 | if (WARN_ON(err)) { | ||
555 | kfree(rhlt); | ||
551 | return err; | 556 | return err; |
557 | } | ||
552 | 558 | ||
553 | for (i = 0; i < cnt; i++) { | 559 | for (i = 0; i < cnt; i++) { |
554 | rhl_test_objects[i].value.tid = i; | 560 | rhl_test_objects[i].value.tid = i; |
555 | key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead); | 561 | key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead); |
556 | key += test_rht_params_dup.key_offset; | 562 | key += test_rht_params_dup.key_offset; |
557 | 563 | ||
558 | if (slow) { | 564 | if (slow) { |
559 | err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key, | 565 | err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key, |
560 | &rhl_test_objects[i].list_node.rhead)); | 566 | &rhl_test_objects[i].list_node.rhead)); |
561 | if (err == -EAGAIN) | 567 | if (err == -EAGAIN) |
562 | err = 0; | 568 | err = 0; |
563 | } else | 569 | } else |
564 | err = rhltable_insert(&rhlt, | 570 | err = rhltable_insert(rhlt, |
565 | &rhl_test_objects[i].list_node, | 571 | &rhl_test_objects[i].list_node, |
566 | test_rht_params_dup); | 572 | test_rht_params_dup); |
567 | if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) | 573 | if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) |
568 | goto skip_print; | 574 | goto skip_print; |
569 | } | 575 | } |
570 | 576 | ||
571 | ret = print_ht(&rhlt); | 577 | ret = print_ht(rhlt); |
572 | WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); | 578 | WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); |
573 | 579 | ||
574 | skip_print: | 580 | skip_print: |
575 | rhltable_destroy(&rhlt); | 581 | rhltable_destroy(rhlt); |
582 | kfree(rhlt); | ||
576 | 583 | ||
577 | return 0; | 584 | return 0; |
578 | } | 585 | } |
diff --git a/mm/debug.c b/mm/debug.c index 0abb987dad9b..1611cf00a137 100644 --- a/mm/debug.c +++ b/mm/debug.c | |||
@@ -44,7 +44,7 @@ const struct trace_print_flags vmaflag_names[] = { | |||
44 | 44 | ||
45 | void __dump_page(struct page *page, const char *reason) | 45 | void __dump_page(struct page *page, const char *reason) |
46 | { | 46 | { |
47 | struct address_space *mapping = page_mapping(page); | 47 | struct address_space *mapping; |
48 | bool page_poisoned = PagePoisoned(page); | 48 | bool page_poisoned = PagePoisoned(page); |
49 | int mapcount; | 49 | int mapcount; |
50 | 50 | ||
@@ -58,6 +58,8 @@ void __dump_page(struct page *page, const char *reason) | |||
58 | goto hex_only; | 58 | goto hex_only; |
59 | } | 59 | } |
60 | 60 | ||
61 | mapping = page_mapping(page); | ||
62 | |||
61 | /* | 63 | /* |
62 | * Avoid VM_BUG_ON() in page_mapcount(). | 64 | * Avoid VM_BUG_ON() in page_mapcount(). |
63 | * page->_mapcount space in struct page is used by sl[aou]b pages to | 65 | * page->_mapcount space in struct page is used by sl[aou]b pages to |
@@ -1674,7 +1674,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | |||
1674 | if (!pmd_present(pmd)) | 1674 | if (!pmd_present(pmd)) |
1675 | return 0; | 1675 | return 0; |
1676 | 1676 | ||
1677 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) { | 1677 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || |
1678 | pmd_devmap(pmd))) { | ||
1678 | /* | 1679 | /* |
1679 | * NUMA hinting faults need to be handled in the GUP | 1680 | * NUMA hinting faults need to be handled in the GUP |
1680 | * slowpath for accounting purposes and so that they | 1681 | * slowpath for accounting purposes and so that they |
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index e2bb06c1b45e..5d1065efbd47 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile | |||
@@ -7,6 +7,8 @@ KCOV_INSTRUMENT := n | |||
7 | 7 | ||
8 | CFLAGS_REMOVE_common.o = -pg | 8 | CFLAGS_REMOVE_common.o = -pg |
9 | CFLAGS_REMOVE_generic.o = -pg | 9 | CFLAGS_REMOVE_generic.o = -pg |
10 | CFLAGS_REMOVE_tags.o = -pg | ||
11 | |||
10 | # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 | 12 | # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 |
11 | # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 | 13 | # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 |
12 | 14 | ||
diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 73c9cbfdedf4..09b534fbba17 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c | |||
@@ -361,10 +361,15 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object) | |||
361 | * get different tags. | 361 | * get different tags. |
362 | */ | 362 | */ |
363 | static u8 assign_tag(struct kmem_cache *cache, const void *object, | 363 | static u8 assign_tag(struct kmem_cache *cache, const void *object, |
364 | bool init, bool krealloc) | 364 | bool init, bool keep_tag) |
365 | { | 365 | { |
366 | /* Reuse the same tag for krealloc'ed objects. */ | 366 | /* |
367 | if (krealloc) | 367 | * 1. When an object is kmalloc()'ed, two hooks are called: |
368 | * kasan_slab_alloc() and kasan_kmalloc(). We assign the | ||
369 | * tag only in the first one. | ||
370 | * 2. We reuse the same tag for krealloc'ed objects. | ||
371 | */ | ||
372 | if (keep_tag) | ||
368 | return get_tag(object); | 373 | return get_tag(object); |
369 | 374 | ||
370 | /* | 375 | /* |
@@ -405,12 +410,6 @@ void * __must_check kasan_init_slab_obj(struct kmem_cache *cache, | |||
405 | return (void *)object; | 410 | return (void *)object; |
406 | } | 411 | } |
407 | 412 | ||
408 | void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, | ||
409 | gfp_t flags) | ||
410 | { | ||
411 | return kasan_kmalloc(cache, object, cache->object_size, flags); | ||
412 | } | ||
413 | |||
414 | static inline bool shadow_invalid(u8 tag, s8 shadow_byte) | 413 | static inline bool shadow_invalid(u8 tag, s8 shadow_byte) |
415 | { | 414 | { |
416 | if (IS_ENABLED(CONFIG_KASAN_GENERIC)) | 415 | if (IS_ENABLED(CONFIG_KASAN_GENERIC)) |
@@ -467,7 +466,7 @@ bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip) | |||
467 | } | 466 | } |
468 | 467 | ||
469 | static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, | 468 | static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, |
470 | size_t size, gfp_t flags, bool krealloc) | 469 | size_t size, gfp_t flags, bool keep_tag) |
471 | { | 470 | { |
472 | unsigned long redzone_start; | 471 | unsigned long redzone_start; |
473 | unsigned long redzone_end; | 472 | unsigned long redzone_end; |
@@ -485,7 +484,7 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, | |||
485 | KASAN_SHADOW_SCALE_SIZE); | 484 | KASAN_SHADOW_SCALE_SIZE); |
486 | 485 | ||
487 | if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) | 486 | if (IS_ENABLED(CONFIG_KASAN_SW_TAGS)) |
488 | tag = assign_tag(cache, object, false, krealloc); | 487 | tag = assign_tag(cache, object, false, keep_tag); |
489 | 488 | ||
490 | /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ | 489 | /* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */ |
491 | kasan_unpoison_shadow(set_tag(object, tag), size); | 490 | kasan_unpoison_shadow(set_tag(object, tag), size); |
@@ -498,10 +497,16 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object, | |||
498 | return set_tag(object, tag); | 497 | return set_tag(object, tag); |
499 | } | 498 | } |
500 | 499 | ||
500 | void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object, | ||
501 | gfp_t flags) | ||
502 | { | ||
503 | return __kasan_kmalloc(cache, object, cache->object_size, flags, false); | ||
504 | } | ||
505 | |||
501 | void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, | 506 | void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object, |
502 | size_t size, gfp_t flags) | 507 | size_t size, gfp_t flags) |
503 | { | 508 | { |
504 | return __kasan_kmalloc(cache, object, size, flags, false); | 509 | return __kasan_kmalloc(cache, object, size, flags, true); |
505 | } | 510 | } |
506 | EXPORT_SYMBOL(kasan_kmalloc); | 511 | EXPORT_SYMBOL(kasan_kmalloc); |
507 | 512 | ||
diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 0777649e07c4..63fca3172659 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c | |||
@@ -46,7 +46,7 @@ void kasan_init_tags(void) | |||
46 | int cpu; | 46 | int cpu; |
47 | 47 | ||
48 | for_each_possible_cpu(cpu) | 48 | for_each_possible_cpu(cpu) |
49 | per_cpu(prng_state, cpu) = get_random_u32(); | 49 | per_cpu(prng_state, cpu) = (u32)get_cycles(); |
50 | } | 50 | } |
51 | 51 | ||
52 | /* | 52 | /* |
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index f9d9dc250428..707fa5579f66 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
@@ -574,6 +574,7 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, | |||
574 | unsigned long flags; | 574 | unsigned long flags; |
575 | struct kmemleak_object *object, *parent; | 575 | struct kmemleak_object *object, *parent; |
576 | struct rb_node **link, *rb_parent; | 576 | struct rb_node **link, *rb_parent; |
577 | unsigned long untagged_ptr; | ||
577 | 578 | ||
578 | object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); | 579 | object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp)); |
579 | if (!object) { | 580 | if (!object) { |
@@ -619,8 +620,9 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size, | |||
619 | 620 | ||
620 | write_lock_irqsave(&kmemleak_lock, flags); | 621 | write_lock_irqsave(&kmemleak_lock, flags); |
621 | 622 | ||
622 | min_addr = min(min_addr, ptr); | 623 | untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr); |
623 | max_addr = max(max_addr, ptr + size); | 624 | min_addr = min(min_addr, untagged_ptr); |
625 | max_addr = max(max_addr, untagged_ptr + size); | ||
624 | link = &object_tree_root.rb_node; | 626 | link = &object_tree_root.rb_node; |
625 | rb_parent = NULL; | 627 | rb_parent = NULL; |
626 | while (*link) { | 628 | while (*link) { |
@@ -1333,6 +1335,7 @@ static void scan_block(void *_start, void *_end, | |||
1333 | unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); | 1335 | unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); |
1334 | unsigned long *end = _end - (BYTES_PER_POINTER - 1); | 1336 | unsigned long *end = _end - (BYTES_PER_POINTER - 1); |
1335 | unsigned long flags; | 1337 | unsigned long flags; |
1338 | unsigned long untagged_ptr; | ||
1336 | 1339 | ||
1337 | read_lock_irqsave(&kmemleak_lock, flags); | 1340 | read_lock_irqsave(&kmemleak_lock, flags); |
1338 | for (ptr = start; ptr < end; ptr++) { | 1341 | for (ptr = start; ptr < end; ptr++) { |
@@ -1347,7 +1350,8 @@ static void scan_block(void *_start, void *_end, | |||
1347 | pointer = *ptr; | 1350 | pointer = *ptr; |
1348 | kasan_enable_current(); | 1351 | kasan_enable_current(); |
1349 | 1352 | ||
1350 | if (pointer < min_addr || pointer >= max_addr) | 1353 | untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer); |
1354 | if (untagged_ptr < min_addr || untagged_ptr >= max_addr) | ||
1351 | continue; | 1355 | continue; |
1352 | 1356 | ||
1353 | /* | 1357 | /* |
diff --git a/mm/maccess.c b/mm/maccess.c index f3416632e5a4..ec00be51a24f 100644 --- a/mm/maccess.c +++ b/mm/maccess.c | |||
@@ -30,10 +30,8 @@ long __probe_kernel_read(void *dst, const void *src, size_t size) | |||
30 | 30 | ||
31 | set_fs(KERNEL_DS); | 31 | set_fs(KERNEL_DS); |
32 | pagefault_disable(); | 32 | pagefault_disable(); |
33 | current->kernel_uaccess_faults_ok++; | ||
34 | ret = __copy_from_user_inatomic(dst, | 33 | ret = __copy_from_user_inatomic(dst, |
35 | (__force const void __user *)src, size); | 34 | (__force const void __user *)src, size); |
36 | current->kernel_uaccess_faults_ok--; | ||
37 | pagefault_enable(); | 35 | pagefault_enable(); |
38 | set_fs(old_fs); | 36 | set_fs(old_fs); |
39 | 37 | ||
@@ -60,9 +58,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size) | |||
60 | 58 | ||
61 | set_fs(KERNEL_DS); | 59 | set_fs(KERNEL_DS); |
62 | pagefault_disable(); | 60 | pagefault_disable(); |
63 | current->kernel_uaccess_faults_ok++; | ||
64 | ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); | 61 | ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); |
65 | current->kernel_uaccess_faults_ok--; | ||
66 | pagefault_enable(); | 62 | pagefault_enable(); |
67 | set_fs(old_fs); | 63 | set_fs(old_fs); |
68 | 64 | ||
@@ -98,13 +94,11 @@ long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count) | |||
98 | 94 | ||
99 | set_fs(KERNEL_DS); | 95 | set_fs(KERNEL_DS); |
100 | pagefault_disable(); | 96 | pagefault_disable(); |
101 | current->kernel_uaccess_faults_ok++; | ||
102 | 97 | ||
103 | do { | 98 | do { |
104 | ret = __get_user(*dst++, (const char __user __force *)src++); | 99 | ret = __get_user(*dst++, (const char __user __force *)src++); |
105 | } while (dst[-1] && ret == 0 && src - unsafe_addr < count); | 100 | } while (dst[-1] && ret == 0 && src - unsafe_addr < count); |
106 | 101 | ||
107 | current->kernel_uaccess_faults_ok--; | ||
108 | dst[-1] = '\0'; | 102 | dst[-1] = '\0'; |
109 | pagefault_enable(); | 103 | pagefault_enable(); |
110 | set_fs(old_fs); | 104 | set_fs(old_fs); |
diff --git a/mm/memblock.c b/mm/memblock.c index 022d4cbb3618..ea31045ba704 100644 --- a/mm/memblock.c +++ b/mm/memblock.c | |||
@@ -26,6 +26,13 @@ | |||
26 | 26 | ||
27 | #include "internal.h" | 27 | #include "internal.h" |
28 | 28 | ||
29 | #define INIT_MEMBLOCK_REGIONS 128 | ||
30 | #define INIT_PHYSMEM_REGIONS 4 | ||
31 | |||
32 | #ifndef INIT_MEMBLOCK_RESERVED_REGIONS | ||
33 | # define INIT_MEMBLOCK_RESERVED_REGIONS INIT_MEMBLOCK_REGIONS | ||
34 | #endif | ||
35 | |||
29 | /** | 36 | /** |
30 | * DOC: memblock overview | 37 | * DOC: memblock overview |
31 | * | 38 | * |
@@ -92,7 +99,7 @@ unsigned long max_pfn; | |||
92 | unsigned long long max_possible_pfn; | 99 | unsigned long long max_possible_pfn; |
93 | 100 | ||
94 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | 101 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; |
95 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock; | 102 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock; |
96 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | 103 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
97 | static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; | 104 | static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock; |
98 | #endif | 105 | #endif |
@@ -105,7 +112,7 @@ struct memblock memblock __initdata_memblock = { | |||
105 | 112 | ||
106 | .reserved.regions = memblock_reserved_init_regions, | 113 | .reserved.regions = memblock_reserved_init_regions, |
107 | .reserved.cnt = 1, /* empty dummy entry */ | 114 | .reserved.cnt = 1, /* empty dummy entry */ |
108 | .reserved.max = INIT_MEMBLOCK_REGIONS, | 115 | .reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS, |
109 | .reserved.name = "reserved", | 116 | .reserved.name = "reserved", |
110 | 117 | ||
111 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP | 118 | #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 124e794867c5..1ad28323fb9f 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1188,11 +1188,13 @@ static inline int pageblock_free(struct page *page) | |||
1188 | return PageBuddy(page) && page_order(page) >= pageblock_order; | 1188 | return PageBuddy(page) && page_order(page) >= pageblock_order; |
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | /* Return the start of the next active pageblock after a given page */ | 1191 | /* Return the pfn of the start of the next active pageblock after a given pfn */ |
1192 | static struct page *next_active_pageblock(struct page *page) | 1192 | static unsigned long next_active_pageblock(unsigned long pfn) |
1193 | { | 1193 | { |
1194 | struct page *page = pfn_to_page(pfn); | ||
1195 | |||
1194 | /* Ensure the starting page is pageblock-aligned */ | 1196 | /* Ensure the starting page is pageblock-aligned */ |
1195 | BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1)); | 1197 | BUG_ON(pfn & (pageblock_nr_pages - 1)); |
1196 | 1198 | ||
1197 | /* If the entire pageblock is free, move to the end of free page */ | 1199 | /* If the entire pageblock is free, move to the end of free page */ |
1198 | if (pageblock_free(page)) { | 1200 | if (pageblock_free(page)) { |
@@ -1200,16 +1202,16 @@ static struct page *next_active_pageblock(struct page *page) | |||
1200 | /* be careful. we don't have locks, page_order can be changed.*/ | 1202 | /* be careful. we don't have locks, page_order can be changed.*/ |
1201 | order = page_order(page); | 1203 | order = page_order(page); |
1202 | if ((order < MAX_ORDER) && (order >= pageblock_order)) | 1204 | if ((order < MAX_ORDER) && (order >= pageblock_order)) |
1203 | return page + (1 << order); | 1205 | return pfn + (1 << order); |
1204 | } | 1206 | } |
1205 | 1207 | ||
1206 | return page + pageblock_nr_pages; | 1208 | return pfn + pageblock_nr_pages; |
1207 | } | 1209 | } |
1208 | 1210 | ||
1209 | static bool is_pageblock_removable_nolock(struct page *page) | 1211 | static bool is_pageblock_removable_nolock(unsigned long pfn) |
1210 | { | 1212 | { |
1213 | struct page *page = pfn_to_page(pfn); | ||
1211 | struct zone *zone; | 1214 | struct zone *zone; |
1212 | unsigned long pfn; | ||
1213 | 1215 | ||
1214 | /* | 1216 | /* |
1215 | * We have to be careful here because we are iterating over memory | 1217 | * We have to be careful here because we are iterating over memory |
@@ -1232,13 +1234,14 @@ static bool is_pageblock_removable_nolock(struct page *page) | |||
1232 | /* Checks if this range of memory is likely to be hot-removable. */ | 1234 | /* Checks if this range of memory is likely to be hot-removable. */ |
1233 | bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) | 1235 | bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) |
1234 | { | 1236 | { |
1235 | struct page *page = pfn_to_page(start_pfn); | 1237 | unsigned long end_pfn, pfn; |
1236 | unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page))); | 1238 | |
1237 | struct page *end_page = pfn_to_page(end_pfn); | 1239 | end_pfn = min(start_pfn + nr_pages, |
1240 | zone_end_pfn(page_zone(pfn_to_page(start_pfn)))); | ||
1238 | 1241 | ||
1239 | /* Check the starting page of each pageblock within the range */ | 1242 | /* Check the starting page of each pageblock within the range */ |
1240 | for (; page < end_page; page = next_active_pageblock(page)) { | 1243 | for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) { |
1241 | if (!is_pageblock_removable_nolock(page)) | 1244 | if (!is_pageblock_removable_nolock(pfn)) |
1242 | return false; | 1245 | return false; |
1243 | cond_resched(); | 1246 | cond_resched(); |
1244 | } | 1247 | } |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index d4496d9d34f5..ee2bce59d2bf 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -1314,7 +1314,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode, | |||
1314 | nodemask_t *nodes) | 1314 | nodemask_t *nodes) |
1315 | { | 1315 | { |
1316 | unsigned long copy = ALIGN(maxnode-1, 64) / 8; | 1316 | unsigned long copy = ALIGN(maxnode-1, 64) / 8; |
1317 | const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long); | 1317 | unsigned int nbytes = BITS_TO_LONGS(nr_node_ids) * sizeof(long); |
1318 | 1318 | ||
1319 | if (copy > nbytes) { | 1319 | if (copy > nbytes) { |
1320 | if (copy > PAGE_SIZE) | 1320 | if (copy > PAGE_SIZE) |
@@ -1491,7 +1491,7 @@ static int kernel_get_mempolicy(int __user *policy, | |||
1491 | int uninitialized_var(pval); | 1491 | int uninitialized_var(pval); |
1492 | nodemask_t nodes; | 1492 | nodemask_t nodes; |
1493 | 1493 | ||
1494 | if (nmask != NULL && maxnode < MAX_NUMNODES) | 1494 | if (nmask != NULL && maxnode < nr_node_ids) |
1495 | return -EINVAL; | 1495 | return -EINVAL; |
1496 | 1496 | ||
1497 | err = do_get_mempolicy(&pval, &nodes, addr, flags); | 1497 | err = do_get_mempolicy(&pval, &nodes, addr, flags); |
@@ -1527,7 +1527,7 @@ COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, | |||
1527 | unsigned long nr_bits, alloc_size; | 1527 | unsigned long nr_bits, alloc_size; |
1528 | DECLARE_BITMAP(bm, MAX_NUMNODES); | 1528 | DECLARE_BITMAP(bm, MAX_NUMNODES); |
1529 | 1529 | ||
1530 | nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES); | 1530 | nr_bits = min_t(unsigned long, maxnode-1, nr_node_ids); |
1531 | alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; | 1531 | alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8; |
1532 | 1532 | ||
1533 | if (nmask) | 1533 | if (nmask) |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 35fdde041f5c..0b9f577b1a2a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -2170,6 +2170,18 @@ static inline void boost_watermark(struct zone *zone) | |||
2170 | 2170 | ||
2171 | max_boost = mult_frac(zone->_watermark[WMARK_HIGH], | 2171 | max_boost = mult_frac(zone->_watermark[WMARK_HIGH], |
2172 | watermark_boost_factor, 10000); | 2172 | watermark_boost_factor, 10000); |
2173 | |||
2174 | /* | ||
2175 | * high watermark may be uninitialised if fragmentation occurs | ||
2176 | * very early in boot so do not boost. We do not fall | ||
2177 | * through and boost by pageblock_nr_pages as failing | ||
2178 | * allocations that early means that reclaim is not going | ||
2179 | * to help and it may even be impossible to reclaim the | ||
2180 | * boosted watermark resulting in a hang. | ||
2181 | */ | ||
2182 | if (!max_boost) | ||
2183 | return; | ||
2184 | |||
2173 | max_boost = max(pageblock_nr_pages, max_boost); | 2185 | max_boost = max(pageblock_nr_pages, max_boost); |
2174 | 2186 | ||
2175 | zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, | 2187 | zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, |
@@ -4675,11 +4687,11 @@ refill: | |||
4675 | /* Even if we own the page, we do not use atomic_set(). | 4687 | /* Even if we own the page, we do not use atomic_set(). |
4676 | * This would break get_page_unless_zero() users. | 4688 | * This would break get_page_unless_zero() users. |
4677 | */ | 4689 | */ |
4678 | page_ref_add(page, size - 1); | 4690 | page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); |
4679 | 4691 | ||
4680 | /* reset page count bias and offset to start of new frag */ | 4692 | /* reset page count bias and offset to start of new frag */ |
4681 | nc->pfmemalloc = page_is_pfmemalloc(page); | 4693 | nc->pfmemalloc = page_is_pfmemalloc(page); |
4682 | nc->pagecnt_bias = size; | 4694 | nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; |
4683 | nc->offset = size; | 4695 | nc->offset = size; |
4684 | } | 4696 | } |
4685 | 4697 | ||
@@ -4695,10 +4707,10 @@ refill: | |||
4695 | size = nc->size; | 4707 | size = nc->size; |
4696 | #endif | 4708 | #endif |
4697 | /* OK, page count is 0, we can safely set it */ | 4709 | /* OK, page count is 0, we can safely set it */ |
4698 | set_page_count(page, size); | 4710 | set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); |
4699 | 4711 | ||
4700 | /* reset page count bias and offset to start of new frag */ | 4712 | /* reset page count bias and offset to start of new frag */ |
4701 | nc->pagecnt_bias = size; | 4713 | nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; |
4702 | offset = size - fragsz; | 4714 | offset = size - fragsz; |
4703 | } | 4715 | } |
4704 | 4716 | ||
diff --git a/mm/page_ext.c b/mm/page_ext.c index ae44f7adbe07..8c78b8d45117 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c | |||
@@ -398,10 +398,8 @@ void __init page_ext_init(void) | |||
398 | * We know some arch can have a nodes layout such as | 398 | * We know some arch can have a nodes layout such as |
399 | * -------------pfn--------------> | 399 | * -------------pfn--------------> |
400 | * N0 | N1 | N2 | N0 | N1 | N2|.... | 400 | * N0 | N1 | N2 | N0 | N1 | N2|.... |
401 | * | ||
402 | * Take into account DEFERRED_STRUCT_PAGE_INIT. | ||
403 | */ | 401 | */ |
404 | if (early_pfn_to_nid(pfn) != nid) | 402 | if (pfn_to_nid(pfn) != nid) |
405 | continue; | 403 | continue; |
406 | if (init_section_page_ext(pfn, nid)) | 404 | if (init_section_page_ext(pfn, nid)) |
407 | goto oom; | 405 | goto oom; |
diff --git a/mm/shmem.c b/mm/shmem.c index 6ece1e2fe76e..2c012eee133d 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -2848,16 +2848,20 @@ static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode, | |||
2848 | static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) | 2848 | static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) |
2849 | { | 2849 | { |
2850 | struct inode *inode = d_inode(old_dentry); | 2850 | struct inode *inode = d_inode(old_dentry); |
2851 | int ret; | 2851 | int ret = 0; |
2852 | 2852 | ||
2853 | /* | 2853 | /* |
2854 | * No ordinary (disk based) filesystem counts links as inodes; | 2854 | * No ordinary (disk based) filesystem counts links as inodes; |
2855 | * but each new link needs a new dentry, pinning lowmem, and | 2855 | * but each new link needs a new dentry, pinning lowmem, and |
2856 | * tmpfs dentries cannot be pruned until they are unlinked. | 2856 | * tmpfs dentries cannot be pruned until they are unlinked. |
2857 | * But if an O_TMPFILE file is linked into the tmpfs, the | ||
2858 | * first link must skip that, to get the accounting right. | ||
2857 | */ | 2859 | */ |
2858 | ret = shmem_reserve_inode(inode->i_sb); | 2860 | if (inode->i_nlink) { |
2859 | if (ret) | 2861 | ret = shmem_reserve_inode(inode->i_sb); |
2860 | goto out; | 2862 | if (ret) |
2863 | goto out; | ||
2864 | } | ||
2861 | 2865 | ||
2862 | dir->i_size += BOGO_DIRENT_SIZE; | 2866 | dir->i_size += BOGO_DIRENT_SIZE; |
2863 | inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); | 2867 | inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode); |
@@ -2359,7 +2359,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep, | |||
2359 | void *freelist; | 2359 | void *freelist; |
2360 | void *addr = page_address(page); | 2360 | void *addr = page_address(page); |
2361 | 2361 | ||
2362 | page->s_mem = kasan_reset_tag(addr) + colour_off; | 2362 | page->s_mem = addr + colour_off; |
2363 | page->active = 0; | 2363 | page->active = 0; |
2364 | 2364 | ||
2365 | if (OBJFREELIST_SLAB(cachep)) | 2365 | if (OBJFREELIST_SLAB(cachep)) |
@@ -2368,6 +2368,7 @@ static void *alloc_slabmgmt(struct kmem_cache *cachep, | |||
2368 | /* Slab management obj is off-slab. */ | 2368 | /* Slab management obj is off-slab. */ |
2369 | freelist = kmem_cache_alloc_node(cachep->freelist_cache, | 2369 | freelist = kmem_cache_alloc_node(cachep->freelist_cache, |
2370 | local_flags, nodeid); | 2370 | local_flags, nodeid); |
2371 | freelist = kasan_reset_tag(freelist); | ||
2371 | if (!freelist) | 2372 | if (!freelist) |
2372 | return NULL; | 2373 | return NULL; |
2373 | } else { | 2374 | } else { |
@@ -2681,6 +2682,13 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep, | |||
2681 | 2682 | ||
2682 | offset *= cachep->colour_off; | 2683 | offset *= cachep->colour_off; |
2683 | 2684 | ||
2685 | /* | ||
2686 | * Call kasan_poison_slab() before calling alloc_slabmgmt(), so | ||
2687 | * page_address() in the latter returns a non-tagged pointer, | ||
2688 | * as it should be for slab pages. | ||
2689 | */ | ||
2690 | kasan_poison_slab(page); | ||
2691 | |||
2684 | /* Get slab management. */ | 2692 | /* Get slab management. */ |
2685 | freelist = alloc_slabmgmt(cachep, page, offset, | 2693 | freelist = alloc_slabmgmt(cachep, page, offset, |
2686 | local_flags & ~GFP_CONSTRAINT_MASK, page_node); | 2694 | local_flags & ~GFP_CONSTRAINT_MASK, page_node); |
@@ -2689,7 +2697,6 @@ static struct page *cache_grow_begin(struct kmem_cache *cachep, | |||
2689 | 2697 | ||
2690 | slab_map_pages(cachep, page, freelist); | 2698 | slab_map_pages(cachep, page, freelist); |
2691 | 2699 | ||
2692 | kasan_poison_slab(page); | ||
2693 | cache_init_objs(cachep, page); | 2700 | cache_init_objs(cachep, page); |
2694 | 2701 | ||
2695 | if (gfpflags_allow_blocking(local_flags)) | 2702 | if (gfpflags_allow_blocking(local_flags)) |
@@ -3540,7 +3547,6 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | |||
3540 | { | 3547 | { |
3541 | void *ret = slab_alloc(cachep, flags, _RET_IP_); | 3548 | void *ret = slab_alloc(cachep, flags, _RET_IP_); |
3542 | 3549 | ||
3543 | ret = kasan_slab_alloc(cachep, ret, flags); | ||
3544 | trace_kmem_cache_alloc(_RET_IP_, ret, | 3550 | trace_kmem_cache_alloc(_RET_IP_, ret, |
3545 | cachep->object_size, cachep->size, flags); | 3551 | cachep->object_size, cachep->size, flags); |
3546 | 3552 | ||
@@ -3630,7 +3636,6 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid) | |||
3630 | { | 3636 | { |
3631 | void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); | 3637 | void *ret = slab_alloc_node(cachep, flags, nodeid, _RET_IP_); |
3632 | 3638 | ||
3633 | ret = kasan_slab_alloc(cachep, ret, flags); | ||
3634 | trace_kmem_cache_alloc_node(_RET_IP_, ret, | 3639 | trace_kmem_cache_alloc_node(_RET_IP_, ret, |
3635 | cachep->object_size, cachep->size, | 3640 | cachep->object_size, cachep->size, |
3636 | flags, nodeid); | 3641 | flags, nodeid); |
@@ -4408,6 +4413,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, | |||
4408 | unsigned int objnr; | 4413 | unsigned int objnr; |
4409 | unsigned long offset; | 4414 | unsigned long offset; |
4410 | 4415 | ||
4416 | ptr = kasan_reset_tag(ptr); | ||
4417 | |||
4411 | /* Find and validate object. */ | 4418 | /* Find and validate object. */ |
4412 | cachep = page->slab_cache; | 4419 | cachep = page->slab_cache; |
4413 | objnr = obj_to_index(cachep, page, (void *)ptr); | 4420 | objnr = obj_to_index(cachep, page, (void *)ptr); |
@@ -437,11 +437,10 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, | |||
437 | 437 | ||
438 | flags &= gfp_allowed_mask; | 438 | flags &= gfp_allowed_mask; |
439 | for (i = 0; i < size; i++) { | 439 | for (i = 0; i < size; i++) { |
440 | void *object = p[i]; | 440 | p[i] = kasan_slab_alloc(s, p[i], flags); |
441 | 441 | /* As p[i] might get tagged, call kmemleak hook after KASAN. */ | |
442 | kmemleak_alloc_recursive(object, s->object_size, 1, | 442 | kmemleak_alloc_recursive(p[i], s->object_size, 1, |
443 | s->flags, flags); | 443 | s->flags, flags); |
444 | p[i] = kasan_slab_alloc(s, object, flags); | ||
445 | } | 444 | } |
446 | 445 | ||
447 | if (memcg_kmem_enabled()) | 446 | if (memcg_kmem_enabled()) |
diff --git a/mm/slab_common.c b/mm/slab_common.c index 81732d05e74a..f9d89c1b5977 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -1228,8 +1228,9 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) | |||
1228 | flags |= __GFP_COMP; | 1228 | flags |= __GFP_COMP; |
1229 | page = alloc_pages(flags, order); | 1229 | page = alloc_pages(flags, order); |
1230 | ret = page ? page_address(page) : NULL; | 1230 | ret = page ? page_address(page) : NULL; |
1231 | kmemleak_alloc(ret, size, 1, flags); | ||
1232 | ret = kasan_kmalloc_large(ret, size, flags); | 1231 | ret = kasan_kmalloc_large(ret, size, flags); |
1232 | /* As ret might get tagged, call kmemleak hook after KASAN. */ | ||
1233 | kmemleak_alloc(ret, size, 1, flags); | ||
1233 | return ret; | 1234 | return ret; |
1234 | } | 1235 | } |
1235 | EXPORT_SYMBOL(kmalloc_order); | 1236 | EXPORT_SYMBOL(kmalloc_order); |
@@ -249,7 +249,18 @@ static inline void *freelist_ptr(const struct kmem_cache *s, void *ptr, | |||
249 | unsigned long ptr_addr) | 249 | unsigned long ptr_addr) |
250 | { | 250 | { |
251 | #ifdef CONFIG_SLAB_FREELIST_HARDENED | 251 | #ifdef CONFIG_SLAB_FREELIST_HARDENED |
252 | return (void *)((unsigned long)ptr ^ s->random ^ ptr_addr); | 252 | /* |
253 | * When CONFIG_KASAN_SW_TAGS is enabled, ptr_addr might be tagged. | ||
254 | * Normally, this doesn't cause any issues, as both set_freepointer() | ||
255 | * and get_freepointer() are called with a pointer with the same tag. | ||
256 | * However, there are some issues with CONFIG_SLUB_DEBUG code. For | ||
257 | * example, when __free_slub() iterates over objects in a cache, it | ||
258 | * passes untagged pointers to check_object(). check_object() in turns | ||
259 | * calls get_freepointer() with an untagged pointer, which causes the | ||
260 | * freepointer to be restored incorrectly. | ||
261 | */ | ||
262 | return (void *)((unsigned long)ptr ^ s->random ^ | ||
263 | (unsigned long)kasan_reset_tag((void *)ptr_addr)); | ||
253 | #else | 264 | #else |
254 | return ptr; | 265 | return ptr; |
255 | #endif | 266 | #endif |
@@ -303,15 +314,10 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp) | |||
303 | __p < (__addr) + (__objects) * (__s)->size; \ | 314 | __p < (__addr) + (__objects) * (__s)->size; \ |
304 | __p += (__s)->size) | 315 | __p += (__s)->size) |
305 | 316 | ||
306 | #define for_each_object_idx(__p, __idx, __s, __addr, __objects) \ | ||
307 | for (__p = fixup_red_left(__s, __addr), __idx = 1; \ | ||
308 | __idx <= __objects; \ | ||
309 | __p += (__s)->size, __idx++) | ||
310 | |||
311 | /* Determine object index from a given position */ | 317 | /* Determine object index from a given position */ |
312 | static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr) | 318 | static inline unsigned int slab_index(void *p, struct kmem_cache *s, void *addr) |
313 | { | 319 | { |
314 | return (p - addr) / s->size; | 320 | return (kasan_reset_tag(p) - addr) / s->size; |
315 | } | 321 | } |
316 | 322 | ||
317 | static inline unsigned int order_objects(unsigned int order, unsigned int size) | 323 | static inline unsigned int order_objects(unsigned int order, unsigned int size) |
@@ -507,6 +513,7 @@ static inline int check_valid_pointer(struct kmem_cache *s, | |||
507 | return 1; | 513 | return 1; |
508 | 514 | ||
509 | base = page_address(page); | 515 | base = page_address(page); |
516 | object = kasan_reset_tag(object); | ||
510 | object = restore_red_left(s, object); | 517 | object = restore_red_left(s, object); |
511 | if (object < base || object >= base + page->objects * s->size || | 518 | if (object < base || object >= base + page->objects * s->size || |
512 | (object - base) % s->size) { | 519 | (object - base) % s->size) { |
@@ -1075,6 +1082,16 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page, | |||
1075 | init_tracking(s, object); | 1082 | init_tracking(s, object); |
1076 | } | 1083 | } |
1077 | 1084 | ||
1085 | static void setup_page_debug(struct kmem_cache *s, void *addr, int order) | ||
1086 | { | ||
1087 | if (!(s->flags & SLAB_POISON)) | ||
1088 | return; | ||
1089 | |||
1090 | metadata_access_enable(); | ||
1091 | memset(addr, POISON_INUSE, PAGE_SIZE << order); | ||
1092 | metadata_access_disable(); | ||
1093 | } | ||
1094 | |||
1078 | static inline int alloc_consistency_checks(struct kmem_cache *s, | 1095 | static inline int alloc_consistency_checks(struct kmem_cache *s, |
1079 | struct page *page, | 1096 | struct page *page, |
1080 | void *object, unsigned long addr) | 1097 | void *object, unsigned long addr) |
@@ -1330,6 +1347,8 @@ slab_flags_t kmem_cache_flags(unsigned int object_size, | |||
1330 | #else /* !CONFIG_SLUB_DEBUG */ | 1347 | #else /* !CONFIG_SLUB_DEBUG */ |
1331 | static inline void setup_object_debug(struct kmem_cache *s, | 1348 | static inline void setup_object_debug(struct kmem_cache *s, |
1332 | struct page *page, void *object) {} | 1349 | struct page *page, void *object) {} |
1350 | static inline void setup_page_debug(struct kmem_cache *s, | ||
1351 | void *addr, int order) {} | ||
1333 | 1352 | ||
1334 | static inline int alloc_debug_processing(struct kmem_cache *s, | 1353 | static inline int alloc_debug_processing(struct kmem_cache *s, |
1335 | struct page *page, void *object, unsigned long addr) { return 0; } | 1354 | struct page *page, void *object, unsigned long addr) { return 0; } |
@@ -1374,8 +1393,10 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, | |||
1374 | */ | 1393 | */ |
1375 | static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) | 1394 | static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags) |
1376 | { | 1395 | { |
1396 | ptr = kasan_kmalloc_large(ptr, size, flags); | ||
1397 | /* As ptr might get tagged, call kmemleak hook after KASAN. */ | ||
1377 | kmemleak_alloc(ptr, size, 1, flags); | 1398 | kmemleak_alloc(ptr, size, 1, flags); |
1378 | return kasan_kmalloc_large(ptr, size, flags); | 1399 | return ptr; |
1379 | } | 1400 | } |
1380 | 1401 | ||
1381 | static __always_inline void kfree_hook(void *x) | 1402 | static __always_inline void kfree_hook(void *x) |
@@ -1641,27 +1662,25 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) | |||
1641 | if (page_is_pfmemalloc(page)) | 1662 | if (page_is_pfmemalloc(page)) |
1642 | SetPageSlabPfmemalloc(page); | 1663 | SetPageSlabPfmemalloc(page); |
1643 | 1664 | ||
1665 | kasan_poison_slab(page); | ||
1666 | |||
1644 | start = page_address(page); | 1667 | start = page_address(page); |
1645 | 1668 | ||
1646 | if (unlikely(s->flags & SLAB_POISON)) | 1669 | setup_page_debug(s, start, order); |
1647 | memset(start, POISON_INUSE, PAGE_SIZE << order); | ||
1648 | |||
1649 | kasan_poison_slab(page); | ||
1650 | 1670 | ||
1651 | shuffle = shuffle_freelist(s, page); | 1671 | shuffle = shuffle_freelist(s, page); |
1652 | 1672 | ||
1653 | if (!shuffle) { | 1673 | if (!shuffle) { |
1654 | for_each_object_idx(p, idx, s, start, page->objects) { | ||
1655 | if (likely(idx < page->objects)) { | ||
1656 | next = p + s->size; | ||
1657 | next = setup_object(s, page, next); | ||
1658 | set_freepointer(s, p, next); | ||
1659 | } else | ||
1660 | set_freepointer(s, p, NULL); | ||
1661 | } | ||
1662 | start = fixup_red_left(s, start); | 1674 | start = fixup_red_left(s, start); |
1663 | start = setup_object(s, page, start); | 1675 | start = setup_object(s, page, start); |
1664 | page->freelist = start; | 1676 | page->freelist = start; |
1677 | for (idx = 0, p = start; idx < page->objects - 1; idx++) { | ||
1678 | next = p + s->size; | ||
1679 | next = setup_object(s, page, next); | ||
1680 | set_freepointer(s, p, next); | ||
1681 | p = next; | ||
1682 | } | ||
1683 | set_freepointer(s, p, NULL); | ||
1665 | } | 1684 | } |
1666 | 1685 | ||
1667 | page->inuse = page->objects; | 1686 | page->inuse = page->objects; |
@@ -320,11 +320,6 @@ static inline void activate_page_drain(int cpu) | |||
320 | { | 320 | { |
321 | } | 321 | } |
322 | 322 | ||
323 | static bool need_activate_page_drain(int cpu) | ||
324 | { | ||
325 | return false; | ||
326 | } | ||
327 | |||
328 | void activate_page(struct page *page) | 323 | void activate_page(struct page *page) |
329 | { | 324 | { |
330 | struct zone *zone = page_zone(page); | 325 | struct zone *zone = page_zone(page); |
@@ -653,13 +648,15 @@ void lru_add_drain(void) | |||
653 | put_cpu(); | 648 | put_cpu(); |
654 | } | 649 | } |
655 | 650 | ||
651 | #ifdef CONFIG_SMP | ||
652 | |||
653 | static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); | ||
654 | |||
656 | static void lru_add_drain_per_cpu(struct work_struct *dummy) | 655 | static void lru_add_drain_per_cpu(struct work_struct *dummy) |
657 | { | 656 | { |
658 | lru_add_drain(); | 657 | lru_add_drain(); |
659 | } | 658 | } |
660 | 659 | ||
661 | static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); | ||
662 | |||
663 | /* | 660 | /* |
664 | * Doesn't need any cpu hotplug locking because we do rely on per-cpu | 661 | * Doesn't need any cpu hotplug locking because we do rely on per-cpu |
665 | * kworkers being shut down before our page_alloc_cpu_dead callback is | 662 | * kworkers being shut down before our page_alloc_cpu_dead callback is |
@@ -702,6 +699,12 @@ void lru_add_drain_all(void) | |||
702 | 699 | ||
703 | mutex_unlock(&lock); | 700 | mutex_unlock(&lock); |
704 | } | 701 | } |
702 | #else | ||
703 | void lru_add_drain_all(void) | ||
704 | { | ||
705 | lru_add_drain(); | ||
706 | } | ||
707 | #endif | ||
705 | 708 | ||
706 | /** | 709 | /** |
707 | * release_pages - batched put_page() | 710 | * release_pages - batched put_page() |
@@ -150,7 +150,7 @@ void *memdup_user(const void __user *src, size_t len) | |||
150 | { | 150 | { |
151 | void *p; | 151 | void *p; |
152 | 152 | ||
153 | p = kmalloc_track_caller(len, GFP_USER); | 153 | p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); |
154 | if (!p) | 154 | if (!p) |
155 | return ERR_PTR(-ENOMEM); | 155 | return ERR_PTR(-ENOMEM); |
156 | 156 | ||
diff --git a/mm/vmscan.c b/mm/vmscan.c index a714c4f800e9..e979705bbf32 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, | |||
491 | delta = freeable / 2; | 491 | delta = freeable / 2; |
492 | } | 492 | } |
493 | 493 | ||
494 | /* | ||
495 | * Make sure we apply some minimal pressure on default priority | ||
496 | * even on small cgroups. Stale objects are not only consuming memory | ||
497 | * by themselves, but can also hold a reference to a dying cgroup, | ||
498 | * preventing it from being reclaimed. A dying cgroup with all | ||
499 | * corresponding structures like per-cpu stats and kmem caches | ||
500 | * can be really big, so it may lead to a significant waste of memory. | ||
501 | */ | ||
502 | delta = max_t(unsigned long long, delta, min(freeable, batch_size)); | ||
503 | |||
504 | total_scan += delta; | 494 | total_scan += delta; |
505 | if (total_scan < 0) { | 495 | if (total_scan < 0) { |
506 | pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", | 496 | pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", |
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index e8090f099eb8..ef0dec20c7d8 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c | |||
@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) | |||
104 | 104 | ||
105 | ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); | 105 | ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); |
106 | 106 | ||
107 | /* free the TID stats immediately */ | ||
108 | cfg80211_sinfo_release_content(&sinfo); | ||
109 | |||
107 | dev_put(real_netdev); | 110 | dev_put(real_netdev); |
108 | if (ret == -ENOENT) { | 111 | if (ret == -ENOENT) { |
109 | /* Node is not associated anymore! It would be | 112 | /* Node is not associated anymore! It would be |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 508f4416dfc9..415d494cbe22 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include "main.h" | 20 | #include "main.h" |
21 | 21 | ||
22 | #include <linux/atomic.h> | 22 | #include <linux/atomic.h> |
23 | #include <linux/bug.h> | ||
24 | #include <linux/byteorder/generic.h> | 23 | #include <linux/byteorder/generic.h> |
25 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
26 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) | |||
179 | parent_dev = __dev_get_by_index((struct net *)parent_net, | 178 | parent_dev = __dev_get_by_index((struct net *)parent_net, |
180 | dev_get_iflink(net_dev)); | 179 | dev_get_iflink(net_dev)); |
181 | /* if we got a NULL parent_dev there is something broken.. */ | 180 | /* if we got a NULL parent_dev there is something broken.. */ |
182 | if (WARN(!parent_dev, "Cannot find parent device")) | 181 | if (!parent_dev) { |
182 | pr_err("Cannot find parent device\n"); | ||
183 | return false; | 183 | return false; |
184 | } | ||
184 | 185 | ||
185 | if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) | 186 | if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) |
186 | return false; | 187 | return false; |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 5db5a0a4c959..ffc83bebfe40 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -221,10 +221,14 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, | |||
221 | 221 | ||
222 | netif_trans_update(soft_iface); | 222 | netif_trans_update(soft_iface); |
223 | vid = batadv_get_vid(skb, 0); | 223 | vid = batadv_get_vid(skb, 0); |
224 | |||
225 | skb_reset_mac_header(skb); | ||
224 | ethhdr = eth_hdr(skb); | 226 | ethhdr = eth_hdr(skb); |
225 | 227 | ||
226 | switch (ntohs(ethhdr->h_proto)) { | 228 | switch (ntohs(ethhdr->h_proto)) { |
227 | case ETH_P_8021Q: | 229 | case ETH_P_8021Q: |
230 | if (!pskb_may_pull(skb, sizeof(*vhdr))) | ||
231 | goto dropped; | ||
228 | vhdr = vlan_eth_hdr(skb); | 232 | vhdr = vlan_eth_hdr(skb); |
229 | 233 | ||
230 | /* drop batman-in-batman packets to prevent loops */ | 234 | /* drop batman-in-batman packets to prevent loops */ |
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index fa2644d276ef..e31e1b20f7f4 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c | |||
@@ -13,27 +13,13 @@ | |||
13 | #include <net/sock.h> | 13 | #include <net/sock.h> |
14 | #include <net/tcp.h> | 14 | #include <net/tcp.h> |
15 | 15 | ||
16 | static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, | 16 | static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, |
17 | struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) | 17 | u32 *retval, u32 *time) |
18 | { | ||
19 | u32 ret; | ||
20 | |||
21 | preempt_disable(); | ||
22 | rcu_read_lock(); | ||
23 | bpf_cgroup_storage_set(storage); | ||
24 | ret = BPF_PROG_RUN(prog, ctx); | ||
25 | rcu_read_unlock(); | ||
26 | preempt_enable(); | ||
27 | |||
28 | return ret; | ||
29 | } | ||
30 | |||
31 | static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret, | ||
32 | u32 *time) | ||
33 | { | 18 | { |
34 | struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; | 19 | struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 }; |
35 | enum bpf_cgroup_storage_type stype; | 20 | enum bpf_cgroup_storage_type stype; |
36 | u64 time_start, time_spent = 0; | 21 | u64 time_start, time_spent = 0; |
22 | int ret = 0; | ||
37 | u32 i; | 23 | u32 i; |
38 | 24 | ||
39 | for_each_cgroup_storage_type(stype) { | 25 | for_each_cgroup_storage_type(stype) { |
@@ -48,25 +34,42 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *ret, | |||
48 | 34 | ||
49 | if (!repeat) | 35 | if (!repeat) |
50 | repeat = 1; | 36 | repeat = 1; |
37 | |||
38 | rcu_read_lock(); | ||
39 | preempt_disable(); | ||
51 | time_start = ktime_get_ns(); | 40 | time_start = ktime_get_ns(); |
52 | for (i = 0; i < repeat; i++) { | 41 | for (i = 0; i < repeat; i++) { |
53 | *ret = bpf_test_run_one(prog, ctx, storage); | 42 | bpf_cgroup_storage_set(storage); |
43 | *retval = BPF_PROG_RUN(prog, ctx); | ||
44 | |||
45 | if (signal_pending(current)) { | ||
46 | ret = -EINTR; | ||
47 | break; | ||
48 | } | ||
49 | |||
54 | if (need_resched()) { | 50 | if (need_resched()) { |
55 | if (signal_pending(current)) | ||
56 | break; | ||
57 | time_spent += ktime_get_ns() - time_start; | 51 | time_spent += ktime_get_ns() - time_start; |
52 | preempt_enable(); | ||
53 | rcu_read_unlock(); | ||
54 | |||
58 | cond_resched(); | 55 | cond_resched(); |
56 | |||
57 | rcu_read_lock(); | ||
58 | preempt_disable(); | ||
59 | time_start = ktime_get_ns(); | 59 | time_start = ktime_get_ns(); |
60 | } | 60 | } |
61 | } | 61 | } |
62 | time_spent += ktime_get_ns() - time_start; | 62 | time_spent += ktime_get_ns() - time_start; |
63 | preempt_enable(); | ||
64 | rcu_read_unlock(); | ||
65 | |||
63 | do_div(time_spent, repeat); | 66 | do_div(time_spent, repeat); |
64 | *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; | 67 | *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; |
65 | 68 | ||
66 | for_each_cgroup_storage_type(stype) | 69 | for_each_cgroup_storage_type(stype) |
67 | bpf_cgroup_storage_free(storage[stype]); | 70 | bpf_cgroup_storage_free(storage[stype]); |
68 | 71 | ||
69 | return 0; | 72 | return ret; |
70 | } | 73 | } |
71 | 74 | ||
72 | static int bpf_test_finish(const union bpf_attr *kattr, | 75 | static int bpf_test_finish(const union bpf_attr *kattr, |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 3aeff0895669..ac92b2eb32b1 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -1204,14 +1204,7 @@ static void br_multicast_query_received(struct net_bridge *br, | |||
1204 | return; | 1204 | return; |
1205 | 1205 | ||
1206 | br_multicast_update_query_timer(br, query, max_delay); | 1206 | br_multicast_update_query_timer(br, query, max_delay); |
1207 | 1207 | br_multicast_mark_router(br, port); | |
1208 | /* Based on RFC4541, section 2.1.1 IGMP Forwarding Rules, | ||
1209 | * the arrival port for IGMP Queries where the source address | ||
1210 | * is 0.0.0.0 should not be added to router port list. | ||
1211 | */ | ||
1212 | if ((saddr->proto == htons(ETH_P_IP) && saddr->u.ip4) || | ||
1213 | saddr->proto == htons(ETH_P_IPV6)) | ||
1214 | br_multicast_mark_router(br, port); | ||
1215 | } | 1208 | } |
1216 | 1209 | ||
1217 | static void br_ip4_multicast_query(struct net_bridge *br, | 1210 | static void br_ip4_multicast_query(struct net_bridge *br, |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 3661cdd927f1..7e71b0df1fbc 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -2058,6 +2058,8 @@ static int process_connect(struct ceph_connection *con) | |||
2058 | dout("process_connect on %p tag %d\n", con, (int)con->in_tag); | 2058 | dout("process_connect on %p tag %d\n", con, (int)con->in_tag); |
2059 | 2059 | ||
2060 | if (con->auth) { | 2060 | if (con->auth) { |
2061 | int len = le32_to_cpu(con->in_reply.authorizer_len); | ||
2062 | |||
2061 | /* | 2063 | /* |
2062 | * Any connection that defines ->get_authorizer() | 2064 | * Any connection that defines ->get_authorizer() |
2063 | * should also define ->add_authorizer_challenge() and | 2065 | * should also define ->add_authorizer_challenge() and |
@@ -2067,8 +2069,7 @@ static int process_connect(struct ceph_connection *con) | |||
2067 | */ | 2069 | */ |
2068 | if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) { | 2070 | if (con->in_reply.tag == CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER) { |
2069 | ret = con->ops->add_authorizer_challenge( | 2071 | ret = con->ops->add_authorizer_challenge( |
2070 | con, con->auth->authorizer_reply_buf, | 2072 | con, con->auth->authorizer_reply_buf, len); |
2071 | le32_to_cpu(con->in_reply.authorizer_len)); | ||
2072 | if (ret < 0) | 2073 | if (ret < 0) |
2073 | return ret; | 2074 | return ret; |
2074 | 2075 | ||
@@ -2078,10 +2079,12 @@ static int process_connect(struct ceph_connection *con) | |||
2078 | return 0; | 2079 | return 0; |
2079 | } | 2080 | } |
2080 | 2081 | ||
2081 | ret = con->ops->verify_authorizer_reply(con); | 2082 | if (len) { |
2082 | if (ret < 0) { | 2083 | ret = con->ops->verify_authorizer_reply(con); |
2083 | con->error_msg = "bad authorize reply"; | 2084 | if (ret < 0) { |
2084 | return ret; | 2085 | con->error_msg = "bad authorize reply"; |
2086 | return ret; | ||
2087 | } | ||
2085 | } | 2088 | } |
2086 | } | 2089 | } |
2087 | 2090 | ||
diff --git a/net/compat.c b/net/compat.c index 959d1c51826d..3d348198004f 100644 --- a/net/compat.c +++ b/net/compat.c | |||
@@ -388,8 +388,12 @@ static int __compat_sys_setsockopt(int fd, int level, int optname, | |||
388 | char __user *optval, unsigned int optlen) | 388 | char __user *optval, unsigned int optlen) |
389 | { | 389 | { |
390 | int err; | 390 | int err; |
391 | struct socket *sock = sockfd_lookup(fd, &err); | 391 | struct socket *sock; |
392 | |||
393 | if (optlen > INT_MAX) | ||
394 | return -EINVAL; | ||
392 | 395 | ||
396 | sock = sockfd_lookup(fd, &err); | ||
393 | if (sock) { | 397 | if (sock) { |
394 | err = security_socket_setsockopt(sock, level, optname); | 398 | err = security_socket_setsockopt(sock, level, optname); |
395 | if (err) { | 399 | if (err) { |
diff --git a/net/core/dev.c b/net/core/dev.c index 8e276e0192a1..5d03889502eb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -8152,7 +8152,7 @@ static netdev_features_t netdev_sync_upper_features(struct net_device *lower, | |||
8152 | netdev_features_t feature; | 8152 | netdev_features_t feature; |
8153 | int feature_bit; | 8153 | int feature_bit; |
8154 | 8154 | ||
8155 | for_each_netdev_feature(&upper_disables, feature_bit) { | 8155 | for_each_netdev_feature(upper_disables, feature_bit) { |
8156 | feature = __NETIF_F_BIT(feature_bit); | 8156 | feature = __NETIF_F_BIT(feature_bit); |
8157 | if (!(upper->wanted_features & feature) | 8157 | if (!(upper->wanted_features & feature) |
8158 | && (features & feature)) { | 8158 | && (features & feature)) { |
@@ -8172,7 +8172,7 @@ static void netdev_sync_lower_features(struct net_device *upper, | |||
8172 | netdev_features_t feature; | 8172 | netdev_features_t feature; |
8173 | int feature_bit; | 8173 | int feature_bit; |
8174 | 8174 | ||
8175 | for_each_netdev_feature(&upper_disables, feature_bit) { | 8175 | for_each_netdev_feature(upper_disables, feature_bit) { |
8176 | feature = __NETIF_F_BIT(feature_bit); | 8176 | feature = __NETIF_F_BIT(feature_bit); |
8177 | if (!(features & feature) && (lower->features & feature)) { | 8177 | if (!(features & feature) && (lower->features & feature)) { |
8178 | netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", | 8178 | netdev_dbg(upper, "Disabling feature %pNF on lower dev %s.\n", |
diff --git a/net/core/filter.c b/net/core/filter.c index 7559d6835ecb..f7d0004fc160 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -2789,8 +2789,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) | |||
2789 | u32 off = skb_mac_header_len(skb); | 2789 | u32 off = skb_mac_header_len(skb); |
2790 | int ret; | 2790 | int ret; |
2791 | 2791 | ||
2792 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2792 | if (!skb_is_gso_tcp(skb)) |
2793 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2794 | return -ENOTSUPP; | 2793 | return -ENOTSUPP; |
2795 | 2794 | ||
2796 | ret = skb_cow(skb, len_diff); | 2795 | ret = skb_cow(skb, len_diff); |
@@ -2831,8 +2830,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) | |||
2831 | u32 off = skb_mac_header_len(skb); | 2830 | u32 off = skb_mac_header_len(skb); |
2832 | int ret; | 2831 | int ret; |
2833 | 2832 | ||
2834 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2833 | if (!skb_is_gso_tcp(skb)) |
2835 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2836 | return -ENOTSUPP; | 2834 | return -ENOTSUPP; |
2837 | 2835 | ||
2838 | ret = skb_unclone(skb, GFP_ATOMIC); | 2836 | ret = skb_unclone(skb, GFP_ATOMIC); |
@@ -2957,8 +2955,7 @@ static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) | |||
2957 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); | 2955 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); |
2958 | int ret; | 2956 | int ret; |
2959 | 2957 | ||
2960 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2958 | if (!skb_is_gso_tcp(skb)) |
2961 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2962 | return -ENOTSUPP; | 2959 | return -ENOTSUPP; |
2963 | 2960 | ||
2964 | ret = skb_cow(skb, len_diff); | 2961 | ret = skb_cow(skb, len_diff); |
@@ -2987,8 +2984,7 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) | |||
2987 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); | 2984 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); |
2988 | int ret; | 2985 | int ret; |
2989 | 2986 | ||
2990 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ | 2987 | if (!skb_is_gso_tcp(skb)) |
2991 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | ||
2992 | return -ENOTSUPP; | 2988 | return -ENOTSUPP; |
2993 | 2989 | ||
2994 | ret = skb_unclone(skb, GFP_ATOMIC); | 2990 | ret = skb_unclone(skb, GFP_ATOMIC); |
@@ -4112,10 +4108,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, | |||
4112 | /* Only some socketops are supported */ | 4108 | /* Only some socketops are supported */ |
4113 | switch (optname) { | 4109 | switch (optname) { |
4114 | case SO_RCVBUF: | 4110 | case SO_RCVBUF: |
4111 | val = min_t(u32, val, sysctl_rmem_max); | ||
4115 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | 4112 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; |
4116 | sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); | 4113 | sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); |
4117 | break; | 4114 | break; |
4118 | case SO_SNDBUF: | 4115 | case SO_SNDBUF: |
4116 | val = min_t(u32, val, sysctl_wmem_max); | ||
4119 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | 4117 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; |
4120 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); | 4118 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); |
4121 | break; | 4119 | break; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 26d848484912..2415d9cb9b89 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -356,6 +356,8 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) | |||
356 | */ | 356 | */ |
357 | void *netdev_alloc_frag(unsigned int fragsz) | 357 | void *netdev_alloc_frag(unsigned int fragsz) |
358 | { | 358 | { |
359 | fragsz = SKB_DATA_ALIGN(fragsz); | ||
360 | |||
359 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC); | 361 | return __netdev_alloc_frag(fragsz, GFP_ATOMIC); |
360 | } | 362 | } |
361 | EXPORT_SYMBOL(netdev_alloc_frag); | 363 | EXPORT_SYMBOL(netdev_alloc_frag); |
@@ -369,6 +371,8 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) | |||
369 | 371 | ||
370 | void *napi_alloc_frag(unsigned int fragsz) | 372 | void *napi_alloc_frag(unsigned int fragsz) |
371 | { | 373 | { |
374 | fragsz = SKB_DATA_ALIGN(fragsz); | ||
375 | |||
372 | return __napi_alloc_frag(fragsz, GFP_ATOMIC); | 376 | return __napi_alloc_frag(fragsz, GFP_ATOMIC); |
373 | } | 377 | } |
374 | EXPORT_SYMBOL(napi_alloc_frag); | 378 | EXPORT_SYMBOL(napi_alloc_frag); |
diff --git a/net/core/skmsg.c b/net/core/skmsg.c index d6d5c20d7044..8c826603bf36 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c | |||
@@ -545,8 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc) | |||
545 | struct sk_psock *psock = container_of(gc, struct sk_psock, gc); | 545 | struct sk_psock *psock = container_of(gc, struct sk_psock, gc); |
546 | 546 | ||
547 | /* No sk_callback_lock since already detached. */ | 547 | /* No sk_callback_lock since already detached. */ |
548 | if (psock->parser.enabled) | 548 | strp_done(&psock->parser.strp); |
549 | strp_done(&psock->parser.strp); | ||
550 | 549 | ||
551 | cancel_work_sync(&psock->work); | 550 | cancel_work_sync(&psock->work); |
552 | 551 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index 6aa2e7e0b4fb..bc3512f230a3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -2380,7 +2380,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) | |||
2380 | } | 2380 | } |
2381 | 2381 | ||
2382 | if (sk_has_memory_pressure(sk)) { | 2382 | if (sk_has_memory_pressure(sk)) { |
2383 | int alloc; | 2383 | u64 alloc; |
2384 | 2384 | ||
2385 | if (!sk_under_memory_pressure(sk)) | 2385 | if (!sk_under_memory_pressure(sk)) |
2386 | return 1; | 2386 | return 1; |
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 6eb837a47b5c..baaaeb2b2c42 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h | |||
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk, | |||
202 | static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, | 202 | static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, |
203 | u8 pkt, u8 opt, u8 *val, u8 len) | 203 | u8 pkt, u8 opt, u8 *val, u8 len) |
204 | { | 204 | { |
205 | if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL) | 205 | if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options) |
206 | return 0; | 206 | return 0; |
207 | return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); | 207 | return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); |
208 | } | 208 | } |
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, | |||
214 | static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, | 214 | static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, |
215 | u8 pkt, u8 opt, u8 *val, u8 len) | 215 | u8 pkt, u8 opt, u8 *val, u8 len) |
216 | { | 216 | { |
217 | if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL) | 217 | if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options) |
218 | return 0; | 218 | return 0; |
219 | return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); | 219 | return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); |
220 | } | 220 | } |
diff --git a/net/dsa/master.c b/net/dsa/master.c index 71bb15f491c8..54f5551fb799 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c | |||
@@ -205,6 +205,8 @@ static void dsa_master_reset_mtu(struct net_device *dev) | |||
205 | rtnl_unlock(); | 205 | rtnl_unlock(); |
206 | } | 206 | } |
207 | 207 | ||
208 | static struct lock_class_key dsa_master_addr_list_lock_key; | ||
209 | |||
208 | int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) | 210 | int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) |
209 | { | 211 | { |
210 | int ret; | 212 | int ret; |
@@ -218,6 +220,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) | |||
218 | wmb(); | 220 | wmb(); |
219 | 221 | ||
220 | dev->dsa_ptr = cpu_dp; | 222 | dev->dsa_ptr = cpu_dp; |
223 | lockdep_set_class(&dev->addr_list_lock, | ||
224 | &dsa_master_addr_list_lock_key); | ||
221 | 225 | ||
222 | ret = dsa_master_ethtool_setup(dev); | 226 | ret = dsa_master_ethtool_setup(dev); |
223 | if (ret) | 227 | if (ret) |
diff --git a/net/dsa/port.c b/net/dsa/port.c index 2d7e01b23572..2a2a878b5ce3 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c | |||
@@ -69,7 +69,6 @@ static void dsa_port_set_state_now(struct dsa_port *dp, u8 state) | |||
69 | 69 | ||
70 | int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) | 70 | int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) |
71 | { | 71 | { |
72 | u8 stp_state = dp->bridge_dev ? BR_STATE_BLOCKING : BR_STATE_FORWARDING; | ||
73 | struct dsa_switch *ds = dp->ds; | 72 | struct dsa_switch *ds = dp->ds; |
74 | int port = dp->index; | 73 | int port = dp->index; |
75 | int err; | 74 | int err; |
@@ -80,7 +79,8 @@ int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy) | |||
80 | return err; | 79 | return err; |
81 | } | 80 | } |
82 | 81 | ||
83 | dsa_port_set_state_now(dp, stp_state); | 82 | if (!dp->bridge_dev) |
83 | dsa_port_set_state_now(dp, BR_STATE_FORWARDING); | ||
84 | 84 | ||
85 | return 0; | 85 | return 0; |
86 | } | 86 | } |
@@ -90,7 +90,8 @@ void dsa_port_disable(struct dsa_port *dp, struct phy_device *phy) | |||
90 | struct dsa_switch *ds = dp->ds; | 90 | struct dsa_switch *ds = dp->ds; |
91 | int port = dp->index; | 91 | int port = dp->index; |
92 | 92 | ||
93 | dsa_port_set_state_now(dp, BR_STATE_DISABLED); | 93 | if (!dp->bridge_dev) |
94 | dsa_port_set_state_now(dp, BR_STATE_DISABLED); | ||
94 | 95 | ||
95 | if (ds->ops->port_disable) | 96 | if (ds->ops->port_disable) |
96 | ds->ops->port_disable(ds, port, phy); | 97 | ds->ops->port_disable(ds, port, phy); |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index a3fcc1d01615..a1c9fe155057 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev) | |||
140 | static void dsa_slave_change_rx_flags(struct net_device *dev, int change) | 140 | static void dsa_slave_change_rx_flags(struct net_device *dev, int change) |
141 | { | 141 | { |
142 | struct net_device *master = dsa_slave_to_master(dev); | 142 | struct net_device *master = dsa_slave_to_master(dev); |
143 | 143 | if (dev->flags & IFF_UP) { | |
144 | if (change & IFF_ALLMULTI) | 144 | if (change & IFF_ALLMULTI) |
145 | dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1); | 145 | dev_set_allmulti(master, |
146 | if (change & IFF_PROMISC) | 146 | dev->flags & IFF_ALLMULTI ? 1 : -1); |
147 | dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1); | 147 | if (change & IFF_PROMISC) |
148 | dev_set_promiscuity(master, | ||
149 | dev->flags & IFF_PROMISC ? 1 : -1); | ||
150 | } | ||
148 | } | 151 | } |
149 | 152 | ||
150 | static void dsa_slave_set_rx_mode(struct net_device *dev) | 153 | static void dsa_slave_set_rx_mode(struct net_device *dev) |
@@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) | |||
639 | int ret; | 642 | int ret; |
640 | 643 | ||
641 | /* Port's PHY and MAC both need to be EEE capable */ | 644 | /* Port's PHY and MAC both need to be EEE capable */ |
642 | if (!dev->phydev && !dp->pl) | 645 | if (!dev->phydev || !dp->pl) |
643 | return -ENODEV; | 646 | return -ENODEV; |
644 | 647 | ||
645 | if (!ds->ops->set_mac_eee) | 648 | if (!ds->ops->set_mac_eee) |
@@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) | |||
659 | int ret; | 662 | int ret; |
660 | 663 | ||
661 | /* Port's PHY and MAC both need to be EEE capable */ | 664 | /* Port's PHY and MAC both need to be EEE capable */ |
662 | if (!dev->phydev && !dp->pl) | 665 | if (!dev->phydev || !dp->pl) |
663 | return -ENODEV; | 666 | return -ENODEV; |
664 | 667 | ||
665 | if (!ds->ops->get_mac_eee) | 668 | if (!ds->ops->get_mac_eee) |
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 5459f41fc26f..10e809b296ec 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -328,7 +328,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * | |||
328 | skb->len += tailen; | 328 | skb->len += tailen; |
329 | skb->data_len += tailen; | 329 | skb->data_len += tailen; |
330 | skb->truesize += tailen; | 330 | skb->truesize += tailen; |
331 | if (sk) | 331 | if (sk && sk_fullsock(sk)) |
332 | refcount_add(tailen, &sk->sk_wmem_alloc); | 332 | refcount_add(tailen, &sk->sk_wmem_alloc); |
333 | 333 | ||
334 | goto out; | 334 | goto out; |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 1a4e9ff02762..5731670c560b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk, | |||
108 | + nla_total_size(1) /* INET_DIAG_TOS */ | 108 | + nla_total_size(1) /* INET_DIAG_TOS */ |
109 | + nla_total_size(1) /* INET_DIAG_TCLASS */ | 109 | + nla_total_size(1) /* INET_DIAG_TCLASS */ |
110 | + nla_total_size(4) /* INET_DIAG_MARK */ | 110 | + nla_total_size(4) /* INET_DIAG_MARK */ |
111 | + nla_total_size(4) /* INET_DIAG_CLASS_ID */ | ||
111 | + nla_total_size(sizeof(struct inet_diag_meminfo)) | 112 | + nla_total_size(sizeof(struct inet_diag_meminfo)) |
112 | + nla_total_size(sizeof(struct inet_diag_msg)) | 113 | + nla_total_size(sizeof(struct inet_diag_msg)) |
113 | + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) | 114 | + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) |
@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | |||
287 | goto errout; | 288 | goto errout; |
288 | } | 289 | } |
289 | 290 | ||
290 | if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) { | 291 | if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || |
292 | ext & (1 << (INET_DIAG_TCLASS - 1))) { | ||
291 | u32 classid = 0; | 293 | u32 classid = 0; |
292 | 294 | ||
293 | #ifdef CONFIG_SOCK_CGROUP_DATA | 295 | #ifdef CONFIG_SOCK_CGROUP_DATA |
294 | classid = sock_cgroup_classid(&sk->sk_cgrp_data); | 296 | classid = sock_cgroup_classid(&sk->sk_cgrp_data); |
295 | #endif | 297 | #endif |
298 | /* Fallback to socket priority if class id isn't set. | ||
299 | * Classful qdiscs use it as direct reference to class. | ||
300 | * For cgroup2 classid is always zero. | ||
301 | */ | ||
302 | if (!classid) | ||
303 | classid = sk->sk_priority; | ||
296 | 304 | ||
297 | if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) | 305 | if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) |
298 | goto errout; | 306 | goto errout; |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index d757b9642d0d..be778599bfed 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, | |||
216 | atomic_set(&p->rid, 0); | 216 | atomic_set(&p->rid, 0); |
217 | p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; | 217 | p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; |
218 | p->rate_tokens = 0; | 218 | p->rate_tokens = 0; |
219 | p->n_redirects = 0; | ||
219 | /* 60*HZ is arbitrary, but chosen enough high so that the first | 220 | /* 60*HZ is arbitrary, but chosen enough high so that the first |
220 | * calculation of tokens is at its maximum. | 221 | * calculation of tokens is at its maximum. |
221 | */ | 222 | */ |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 20a64fe6254b..6ae89f2b541b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -1455,12 +1455,31 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
1455 | { | 1455 | { |
1456 | struct ip_tunnel *t = netdev_priv(dev); | 1456 | struct ip_tunnel *t = netdev_priv(dev); |
1457 | struct ip_tunnel_parm *p = &t->parms; | 1457 | struct ip_tunnel_parm *p = &t->parms; |
1458 | __be16 o_flags = p->o_flags; | ||
1459 | |||
1460 | if (t->erspan_ver == 1 || t->erspan_ver == 2) { | ||
1461 | if (!t->collect_md) | ||
1462 | o_flags |= TUNNEL_KEY; | ||
1463 | |||
1464 | if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) | ||
1465 | goto nla_put_failure; | ||
1466 | |||
1467 | if (t->erspan_ver == 1) { | ||
1468 | if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) | ||
1469 | goto nla_put_failure; | ||
1470 | } else { | ||
1471 | if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir)) | ||
1472 | goto nla_put_failure; | ||
1473 | if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid)) | ||
1474 | goto nla_put_failure; | ||
1475 | } | ||
1476 | } | ||
1458 | 1477 | ||
1459 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || | 1478 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || |
1460 | nla_put_be16(skb, IFLA_GRE_IFLAGS, | 1479 | nla_put_be16(skb, IFLA_GRE_IFLAGS, |
1461 | gre_tnl_flags_to_gre_flags(p->i_flags)) || | 1480 | gre_tnl_flags_to_gre_flags(p->i_flags)) || |
1462 | nla_put_be16(skb, IFLA_GRE_OFLAGS, | 1481 | nla_put_be16(skb, IFLA_GRE_OFLAGS, |
1463 | gre_tnl_flags_to_gre_flags(p->o_flags)) || | 1482 | gre_tnl_flags_to_gre_flags(o_flags)) || |
1464 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || | 1483 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || |
1465 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || | 1484 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || |
1466 | nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || | 1485 | nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || |
@@ -1490,19 +1509,6 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
1490 | goto nla_put_failure; | 1509 | goto nla_put_failure; |
1491 | } | 1510 | } |
1492 | 1511 | ||
1493 | if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver)) | ||
1494 | goto nla_put_failure; | ||
1495 | |||
1496 | if (t->erspan_ver == 1) { | ||
1497 | if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index)) | ||
1498 | goto nla_put_failure; | ||
1499 | } else if (t->erspan_ver == 2) { | ||
1500 | if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir)) | ||
1501 | goto nla_put_failure; | ||
1502 | if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid)) | ||
1503 | goto nla_put_failure; | ||
1504 | } | ||
1505 | |||
1506 | return 0; | 1512 | return 0; |
1507 | 1513 | ||
1508 | nla_put_failure: | 1514 | nla_put_failure: |
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index 2687db015b6f..fa2ba7c500e4 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c | |||
@@ -215,6 +215,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, | |||
215 | 215 | ||
216 | /* Change outer to look like the reply to an incoming packet */ | 216 | /* Change outer to look like the reply to an incoming packet */ |
217 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | 217 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); |
218 | target.dst.protonum = IPPROTO_ICMP; | ||
218 | if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) | 219 | if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) |
219 | return 0; | 220 | return 0; |
220 | 221 | ||
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c index a0aa13bcabda..0a8a60c1bf9a 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c | |||
@@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset) | |||
105 | int snmp_version(void *context, size_t hdrlen, unsigned char tag, | 105 | int snmp_version(void *context, size_t hdrlen, unsigned char tag, |
106 | const void *data, size_t datalen) | 106 | const void *data, size_t datalen) |
107 | { | 107 | { |
108 | if (datalen != 1) | ||
109 | return -EINVAL; | ||
108 | if (*(unsigned char *)data > 1) | 110 | if (*(unsigned char *)data > 1) |
109 | return -ENOTSUPP; | 111 | return -ENOTSUPP; |
110 | return 1; | 112 | return 1; |
@@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag, | |||
114 | const void *data, size_t datalen) | 116 | const void *data, size_t datalen) |
115 | { | 117 | { |
116 | struct snmp_ctx *ctx = (struct snmp_ctx *)context; | 118 | struct snmp_ctx *ctx = (struct snmp_ctx *)context; |
117 | __be32 *pdata = (__be32 *)data; | 119 | __be32 *pdata; |
118 | 120 | ||
121 | if (datalen != 4) | ||
122 | return -EINVAL; | ||
123 | pdata = (__be32 *)data; | ||
119 | if (*pdata == ctx->from) { | 124 | if (*pdata == ctx->from) { |
120 | pr_debug("%s: %pI4 to %pI4\n", __func__, | 125 | pr_debug("%s: %pI4 to %pI4\n", __func__, |
121 | (void *)&ctx->from, (void *)&ctx->to); | 126 | (void *)&ctx->from, (void *)&ctx->to); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ce92f73cf104..5163b64f8fb3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
887 | /* No redirected packets during ip_rt_redirect_silence; | 887 | /* No redirected packets during ip_rt_redirect_silence; |
888 | * reset the algorithm. | 888 | * reset the algorithm. |
889 | */ | 889 | */ |
890 | if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) | 890 | if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) { |
891 | peer->rate_tokens = 0; | 891 | peer->rate_tokens = 0; |
892 | peer->n_redirects = 0; | ||
893 | } | ||
892 | 894 | ||
893 | /* Too many ignored redirects; do not send anything | 895 | /* Too many ignored redirects; do not send anything |
894 | * set dst.rate_last to the last seen redirected packet. | 896 | * set dst.rate_last to the last seen redirected packet. |
895 | */ | 897 | */ |
896 | if (peer->rate_tokens >= ip_rt_redirect_number) { | 898 | if (peer->n_redirects >= ip_rt_redirect_number) { |
897 | peer->rate_last = jiffies; | 899 | peer->rate_last = jiffies; |
898 | goto out_put_peer; | 900 | goto out_put_peer; |
899 | } | 901 | } |
@@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
910 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); | 912 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); |
911 | peer->rate_last = jiffies; | 913 | peer->rate_last = jiffies; |
912 | ++peer->rate_tokens; | 914 | ++peer->rate_tokens; |
915 | ++peer->n_redirects; | ||
913 | #ifdef CONFIG_IP_ROUTE_VERBOSE | 916 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
914 | if (log_martians && | 917 | if (log_martians && |
915 | peer->rate_tokens == ip_rt_redirect_number) | 918 | peer->rate_tokens == ip_rt_redirect_number) |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 2079145a3b7c..cf3c5095c10e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -2528,6 +2528,7 @@ void tcp_write_queue_purge(struct sock *sk) | |||
2528 | sk_mem_reclaim(sk); | 2528 | sk_mem_reclaim(sk); |
2529 | tcp_clear_all_retrans_hints(tcp_sk(sk)); | 2529 | tcp_clear_all_retrans_hints(tcp_sk(sk)); |
2530 | tcp_sk(sk)->packets_out = 0; | 2530 | tcp_sk(sk)->packets_out = 0; |
2531 | inet_csk(sk)->icsk_backoff = 0; | ||
2531 | } | 2532 | } |
2532 | 2533 | ||
2533 | int tcp_disconnect(struct sock *sk, int flags) | 2534 | int tcp_disconnect(struct sock *sk, int flags) |
@@ -2576,7 +2577,6 @@ int tcp_disconnect(struct sock *sk, int flags) | |||
2576 | tp->write_seq += tp->max_window + 2; | 2577 | tp->write_seq += tp->max_window + 2; |
2577 | if (tp->write_seq == 0) | 2578 | if (tp->write_seq == 0) |
2578 | tp->write_seq = 1; | 2579 | tp->write_seq = 1; |
2579 | icsk->icsk_backoff = 0; | ||
2580 | tp->snd_cwnd = 2; | 2580 | tp->snd_cwnd = 2; |
2581 | icsk->icsk_probes_out = 0; | 2581 | icsk->icsk_probes_out = 0; |
2582 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; | 2582 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index efc6fef692ff..ec3cea9d6828 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -536,12 +536,15 @@ int tcp_v4_err(struct sk_buff *icmp_skb, u32 info) | |||
536 | if (sock_owned_by_user(sk)) | 536 | if (sock_owned_by_user(sk)) |
537 | break; | 537 | break; |
538 | 538 | ||
539 | skb = tcp_rtx_queue_head(sk); | ||
540 | if (WARN_ON_ONCE(!skb)) | ||
541 | break; | ||
542 | |||
539 | icsk->icsk_backoff--; | 543 | icsk->icsk_backoff--; |
540 | icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : | 544 | icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : |
541 | TCP_TIMEOUT_INIT; | 545 | TCP_TIMEOUT_INIT; |
542 | icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); | 546 | icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); |
543 | 547 | ||
544 | skb = tcp_rtx_queue_head(sk); | ||
545 | 548 | ||
546 | tcp_mstamp_refresh(tp); | 549 | tcp_mstamp_refresh(tp); |
547 | delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); | 550 | delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 730bc44dbad9..ccc78f3a4b60 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2347,6 +2347,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
2347 | /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ | 2347 | /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ |
2348 | skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; | 2348 | skb->skb_mstamp_ns = tp->tcp_wstamp_ns = tp->tcp_clock_cache; |
2349 | list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); | 2349 | list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); |
2350 | tcp_init_tso_segs(skb, mss_now); | ||
2350 | goto repair; /* Skip network transmission */ | 2351 | goto repair; /* Skip network transmission */ |
2351 | } | 2352 | } |
2352 | 2353 | ||
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5c3cd5d84a6f..372fdc5381a9 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -562,10 +562,12 @@ static int __udp4_lib_err_encap_no_sk(struct sk_buff *skb, u32 info) | |||
562 | 562 | ||
563 | for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { | 563 | for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { |
564 | int (*handler)(struct sk_buff *skb, u32 info); | 564 | int (*handler)(struct sk_buff *skb, u32 info); |
565 | const struct ip_tunnel_encap_ops *encap; | ||
565 | 566 | ||
566 | if (!iptun_encaps[i]) | 567 | encap = rcu_dereference(iptun_encaps[i]); |
568 | if (!encap) | ||
567 | continue; | 569 | continue; |
568 | handler = rcu_dereference(iptun_encaps[i]->err_handler); | 570 | handler = encap->err_handler; |
569 | if (handler && !handler(skb, info)) | 571 | if (handler && !handler(skb, info)) |
570 | return 0; | 572 | return 0; |
571 | } | 573 | } |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 84c358804355..72ffd3d760ff 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1165,7 +1165,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) | |||
1165 | list_for_each_entry(ifa, &idev->addr_list, if_list) { | 1165 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
1166 | if (ifa == ifp) | 1166 | if (ifa == ifp) |
1167 | continue; | 1167 | continue; |
1168 | if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr, | 1168 | if (ifa->prefix_len != ifp->prefix_len || |
1169 | !ipv6_prefix_equal(&ifa->addr, &ifp->addr, | ||
1169 | ifp->prefix_len)) | 1170 | ifp->prefix_len)) |
1170 | continue; | 1171 | continue; |
1171 | if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) | 1172 | if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) |
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 5afe9f83374d..239d4a65ad6e 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -296,7 +296,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info | |||
296 | skb->len += tailen; | 296 | skb->len += tailen; |
297 | skb->data_len += tailen; | 297 | skb->data_len += tailen; |
298 | skb->truesize += tailen; | 298 | skb->truesize += tailen; |
299 | if (sk) | 299 | if (sk && sk_fullsock(sk)) |
300 | refcount_add(tailen, &sk->sk_wmem_alloc); | 300 | refcount_add(tailen, &sk->sk_wmem_alloc); |
301 | 301 | ||
302 | goto out; | 302 | goto out; |
diff --git a/net/ipv6/fou6.c b/net/ipv6/fou6.c index b858bd5280bf..867474abe269 100644 --- a/net/ipv6/fou6.c +++ b/net/ipv6/fou6.c | |||
@@ -72,7 +72,7 @@ static int gue6_build_header(struct sk_buff *skb, struct ip_tunnel_encap *e, | |||
72 | 72 | ||
73 | static int gue6_err_proto_handler(int proto, struct sk_buff *skb, | 73 | static int gue6_err_proto_handler(int proto, struct sk_buff *skb, |
74 | struct inet6_skb_parm *opt, | 74 | struct inet6_skb_parm *opt, |
75 | u8 type, u8 code, int offset, u32 info) | 75 | u8 type, u8 code, int offset, __be32 info) |
76 | { | 76 | { |
77 | const struct inet6_protocol *ipprot; | 77 | const struct inet6_protocol *ipprot; |
78 | 78 | ||
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4416368dbd49..26f25b6e2833 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -1719,6 +1719,27 @@ static int ip6erspan_tap_validate(struct nlattr *tb[], struct nlattr *data[], | |||
1719 | return 0; | 1719 | return 0; |
1720 | } | 1720 | } |
1721 | 1721 | ||
1722 | static void ip6erspan_set_version(struct nlattr *data[], | ||
1723 | struct __ip6_tnl_parm *parms) | ||
1724 | { | ||
1725 | if (!data) | ||
1726 | return; | ||
1727 | |||
1728 | parms->erspan_ver = 1; | ||
1729 | if (data[IFLA_GRE_ERSPAN_VER]) | ||
1730 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); | ||
1731 | |||
1732 | if (parms->erspan_ver == 1) { | ||
1733 | if (data[IFLA_GRE_ERSPAN_INDEX]) | ||
1734 | parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); | ||
1735 | } else if (parms->erspan_ver == 2) { | ||
1736 | if (data[IFLA_GRE_ERSPAN_DIR]) | ||
1737 | parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); | ||
1738 | if (data[IFLA_GRE_ERSPAN_HWID]) | ||
1739 | parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); | ||
1740 | } | ||
1741 | } | ||
1742 | |||
1722 | static void ip6gre_netlink_parms(struct nlattr *data[], | 1743 | static void ip6gre_netlink_parms(struct nlattr *data[], |
1723 | struct __ip6_tnl_parm *parms) | 1744 | struct __ip6_tnl_parm *parms) |
1724 | { | 1745 | { |
@@ -1767,20 +1788,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[], | |||
1767 | 1788 | ||
1768 | if (data[IFLA_GRE_COLLECT_METADATA]) | 1789 | if (data[IFLA_GRE_COLLECT_METADATA]) |
1769 | parms->collect_md = true; | 1790 | parms->collect_md = true; |
1770 | |||
1771 | parms->erspan_ver = 1; | ||
1772 | if (data[IFLA_GRE_ERSPAN_VER]) | ||
1773 | parms->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]); | ||
1774 | |||
1775 | if (parms->erspan_ver == 1) { | ||
1776 | if (data[IFLA_GRE_ERSPAN_INDEX]) | ||
1777 | parms->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]); | ||
1778 | } else if (parms->erspan_ver == 2) { | ||
1779 | if (data[IFLA_GRE_ERSPAN_DIR]) | ||
1780 | parms->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]); | ||
1781 | if (data[IFLA_GRE_ERSPAN_HWID]) | ||
1782 | parms->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]); | ||
1783 | } | ||
1784 | } | 1791 | } |
1785 | 1792 | ||
1786 | static int ip6gre_tap_init(struct net_device *dev) | 1793 | static int ip6gre_tap_init(struct net_device *dev) |
@@ -2098,12 +2105,31 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
2098 | { | 2105 | { |
2099 | struct ip6_tnl *t = netdev_priv(dev); | 2106 | struct ip6_tnl *t = netdev_priv(dev); |
2100 | struct __ip6_tnl_parm *p = &t->parms; | 2107 | struct __ip6_tnl_parm *p = &t->parms; |
2108 | __be16 o_flags = p->o_flags; | ||
2109 | |||
2110 | if (p->erspan_ver == 1 || p->erspan_ver == 2) { | ||
2111 | if (!p->collect_md) | ||
2112 | o_flags |= TUNNEL_KEY; | ||
2113 | |||
2114 | if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver)) | ||
2115 | goto nla_put_failure; | ||
2116 | |||
2117 | if (p->erspan_ver == 1) { | ||
2118 | if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index)) | ||
2119 | goto nla_put_failure; | ||
2120 | } else { | ||
2121 | if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir)) | ||
2122 | goto nla_put_failure; | ||
2123 | if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid)) | ||
2124 | goto nla_put_failure; | ||
2125 | } | ||
2126 | } | ||
2101 | 2127 | ||
2102 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || | 2128 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || |
2103 | nla_put_be16(skb, IFLA_GRE_IFLAGS, | 2129 | nla_put_be16(skb, IFLA_GRE_IFLAGS, |
2104 | gre_tnl_flags_to_gre_flags(p->i_flags)) || | 2130 | gre_tnl_flags_to_gre_flags(p->i_flags)) || |
2105 | nla_put_be16(skb, IFLA_GRE_OFLAGS, | 2131 | nla_put_be16(skb, IFLA_GRE_OFLAGS, |
2106 | gre_tnl_flags_to_gre_flags(p->o_flags)) || | 2132 | gre_tnl_flags_to_gre_flags(o_flags)) || |
2107 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || | 2133 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || |
2108 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || | 2134 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || |
2109 | nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || | 2135 | nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || |
@@ -2112,8 +2138,7 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
2112 | nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || | 2138 | nla_put_u8(skb, IFLA_GRE_ENCAP_LIMIT, p->encap_limit) || |
2113 | nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || | 2139 | nla_put_be32(skb, IFLA_GRE_FLOWINFO, p->flowinfo) || |
2114 | nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) || | 2140 | nla_put_u32(skb, IFLA_GRE_FLAGS, p->flags) || |
2115 | nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark) || | 2141 | nla_put_u32(skb, IFLA_GRE_FWMARK, p->fwmark)) |
2116 | nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index)) | ||
2117 | goto nla_put_failure; | 2142 | goto nla_put_failure; |
2118 | 2143 | ||
2119 | if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, | 2144 | if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE, |
@@ -2131,19 +2156,6 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
2131 | goto nla_put_failure; | 2156 | goto nla_put_failure; |
2132 | } | 2157 | } |
2133 | 2158 | ||
2134 | if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, p->erspan_ver)) | ||
2135 | goto nla_put_failure; | ||
2136 | |||
2137 | if (p->erspan_ver == 1) { | ||
2138 | if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, p->index)) | ||
2139 | goto nla_put_failure; | ||
2140 | } else if (p->erspan_ver == 2) { | ||
2141 | if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, p->dir)) | ||
2142 | goto nla_put_failure; | ||
2143 | if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, p->hwid)) | ||
2144 | goto nla_put_failure; | ||
2145 | } | ||
2146 | |||
2147 | return 0; | 2159 | return 0; |
2148 | 2160 | ||
2149 | nla_put_failure: | 2161 | nla_put_failure: |
@@ -2198,6 +2210,7 @@ static int ip6erspan_newlink(struct net *src_net, struct net_device *dev, | |||
2198 | int err; | 2210 | int err; |
2199 | 2211 | ||
2200 | ip6gre_netlink_parms(data, &nt->parms); | 2212 | ip6gre_netlink_parms(data, &nt->parms); |
2213 | ip6erspan_set_version(data, &nt->parms); | ||
2201 | ign = net_generic(net, ip6gre_net_id); | 2214 | ign = net_generic(net, ip6gre_net_id); |
2202 | 2215 | ||
2203 | if (nt->parms.collect_md) { | 2216 | if (nt->parms.collect_md) { |
@@ -2243,6 +2256,7 @@ static int ip6erspan_changelink(struct net_device *dev, struct nlattr *tb[], | |||
2243 | if (IS_ERR(t)) | 2256 | if (IS_ERR(t)) |
2244 | return PTR_ERR(t); | 2257 | return PTR_ERR(t); |
2245 | 2258 | ||
2259 | ip6erspan_set_version(data, &p); | ||
2246 | ip6gre_tunnel_unlink_md(ign, t); | 2260 | ip6gre_tunnel_unlink_md(ign, t); |
2247 | ip6gre_tunnel_unlink(ign, t); | 2261 | ip6gre_tunnel_unlink(ign, t); |
2248 | ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); | 2262 | ip6erspan_tnl_change(t, &p, !tb[IFLA_MTU]); |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 8b075f0bc351..6d0b1f3e927b 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb) | |||
23 | struct sock *sk = sk_to_full_sk(skb->sk); | 23 | struct sock *sk = sk_to_full_sk(skb->sk); |
24 | unsigned int hh_len; | 24 | unsigned int hh_len; |
25 | struct dst_entry *dst; | 25 | struct dst_entry *dst; |
26 | int strict = (ipv6_addr_type(&iph->daddr) & | ||
27 | (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); | ||
26 | struct flowi6 fl6 = { | 28 | struct flowi6 fl6 = { |
27 | .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if : | 29 | .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if : |
28 | rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0, | 30 | strict ? skb_dst(skb)->dev->ifindex : 0, |
29 | .flowi6_mark = skb->mark, | 31 | .flowi6_mark = skb->mark, |
30 | .flowi6_uid = sock_net_uid(net, sk), | 32 | .flowi6_uid = sock_net_uid(net, sk), |
31 | .daddr = iph->daddr, | 33 | .daddr = iph->daddr, |
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index 23022447eb49..7a41ee3c11b4 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c | |||
@@ -226,6 +226,7 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, | |||
226 | } | 226 | } |
227 | 227 | ||
228 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | 228 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); |
229 | target.dst.protonum = IPPROTO_ICMPV6; | ||
229 | if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) | 230 | if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) |
230 | return 0; | 231 | return 0; |
231 | 232 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 964491cf3672..ce15dc4ccbfa 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1274,18 +1274,29 @@ static DEFINE_SPINLOCK(rt6_exception_lock); | |||
1274 | static void rt6_remove_exception(struct rt6_exception_bucket *bucket, | 1274 | static void rt6_remove_exception(struct rt6_exception_bucket *bucket, |
1275 | struct rt6_exception *rt6_ex) | 1275 | struct rt6_exception *rt6_ex) |
1276 | { | 1276 | { |
1277 | struct fib6_info *from; | ||
1277 | struct net *net; | 1278 | struct net *net; |
1278 | 1279 | ||
1279 | if (!bucket || !rt6_ex) | 1280 | if (!bucket || !rt6_ex) |
1280 | return; | 1281 | return; |
1281 | 1282 | ||
1282 | net = dev_net(rt6_ex->rt6i->dst.dev); | 1283 | net = dev_net(rt6_ex->rt6i->dst.dev); |
1284 | net->ipv6.rt6_stats->fib_rt_cache--; | ||
1285 | |||
1286 | /* purge completely the exception to allow releasing the held resources: | ||
1287 | * some [sk] cache may keep the dst around for unlimited time | ||
1288 | */ | ||
1289 | from = rcu_dereference_protected(rt6_ex->rt6i->from, | ||
1290 | lockdep_is_held(&rt6_exception_lock)); | ||
1291 | rcu_assign_pointer(rt6_ex->rt6i->from, NULL); | ||
1292 | fib6_info_release(from); | ||
1293 | dst_dev_put(&rt6_ex->rt6i->dst); | ||
1294 | |||
1283 | hlist_del_rcu(&rt6_ex->hlist); | 1295 | hlist_del_rcu(&rt6_ex->hlist); |
1284 | dst_release(&rt6_ex->rt6i->dst); | 1296 | dst_release(&rt6_ex->rt6i->dst); |
1285 | kfree_rcu(rt6_ex, rcu); | 1297 | kfree_rcu(rt6_ex, rcu); |
1286 | WARN_ON_ONCE(!bucket->depth); | 1298 | WARN_ON_ONCE(!bucket->depth); |
1287 | bucket->depth--; | 1299 | bucket->depth--; |
1288 | net->ipv6.rt6_stats->fib_rt_cache--; | ||
1289 | } | 1300 | } |
1290 | 1301 | ||
1291 | /* Remove oldest rt6_ex in bucket and free the memory | 1302 | /* Remove oldest rt6_ex in bucket and free the memory |
@@ -1599,15 +1610,15 @@ static int rt6_remove_exception_rt(struct rt6_info *rt) | |||
1599 | static void rt6_update_exception_stamp_rt(struct rt6_info *rt) | 1610 | static void rt6_update_exception_stamp_rt(struct rt6_info *rt) |
1600 | { | 1611 | { |
1601 | struct rt6_exception_bucket *bucket; | 1612 | struct rt6_exception_bucket *bucket; |
1602 | struct fib6_info *from = rt->from; | ||
1603 | struct in6_addr *src_key = NULL; | 1613 | struct in6_addr *src_key = NULL; |
1604 | struct rt6_exception *rt6_ex; | 1614 | struct rt6_exception *rt6_ex; |
1605 | 1615 | struct fib6_info *from; | |
1606 | if (!from || | ||
1607 | !(rt->rt6i_flags & RTF_CACHE)) | ||
1608 | return; | ||
1609 | 1616 | ||
1610 | rcu_read_lock(); | 1617 | rcu_read_lock(); |
1618 | from = rcu_dereference(rt->from); | ||
1619 | if (!from || !(rt->rt6i_flags & RTF_CACHE)) | ||
1620 | goto unlock; | ||
1621 | |||
1611 | bucket = rcu_dereference(from->rt6i_exception_bucket); | 1622 | bucket = rcu_dereference(from->rt6i_exception_bucket); |
1612 | 1623 | ||
1613 | #ifdef CONFIG_IPV6_SUBTREES | 1624 | #ifdef CONFIG_IPV6_SUBTREES |
@@ -1626,6 +1637,7 @@ static void rt6_update_exception_stamp_rt(struct rt6_info *rt) | |||
1626 | if (rt6_ex) | 1637 | if (rt6_ex) |
1627 | rt6_ex->stamp = jiffies; | 1638 | rt6_ex->stamp = jiffies; |
1628 | 1639 | ||
1640 | unlock: | ||
1629 | rcu_read_unlock(); | 1641 | rcu_read_unlock(); |
1630 | } | 1642 | } |
1631 | 1643 | ||
@@ -2742,20 +2754,24 @@ static int ip6_route_check_nh_onlink(struct net *net, | |||
2742 | u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN; | 2754 | u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN; |
2743 | const struct in6_addr *gw_addr = &cfg->fc_gateway; | 2755 | const struct in6_addr *gw_addr = &cfg->fc_gateway; |
2744 | u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT; | 2756 | u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT; |
2757 | struct fib6_info *from; | ||
2745 | struct rt6_info *grt; | 2758 | struct rt6_info *grt; |
2746 | int err; | 2759 | int err; |
2747 | 2760 | ||
2748 | err = 0; | 2761 | err = 0; |
2749 | grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0); | 2762 | grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0); |
2750 | if (grt) { | 2763 | if (grt) { |
2764 | rcu_read_lock(); | ||
2765 | from = rcu_dereference(grt->from); | ||
2751 | if (!grt->dst.error && | 2766 | if (!grt->dst.error && |
2752 | /* ignore match if it is the default route */ | 2767 | /* ignore match if it is the default route */ |
2753 | grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) && | 2768 | from && !ipv6_addr_any(&from->fib6_dst.addr) && |
2754 | (grt->rt6i_flags & flags || dev != grt->dst.dev)) { | 2769 | (grt->rt6i_flags & flags || dev != grt->dst.dev)) { |
2755 | NL_SET_ERR_MSG(extack, | 2770 | NL_SET_ERR_MSG(extack, |
2756 | "Nexthop has invalid gateway or device mismatch"); | 2771 | "Nexthop has invalid gateway or device mismatch"); |
2757 | err = -EINVAL; | 2772 | err = -EINVAL; |
2758 | } | 2773 | } |
2774 | rcu_read_unlock(); | ||
2759 | 2775 | ||
2760 | ip6_rt_put(grt); | 2776 | ip6_rt_put(grt); |
2761 | } | 2777 | } |
@@ -4649,7 +4665,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, | |||
4649 | table = rt->fib6_table->tb6_id; | 4665 | table = rt->fib6_table->tb6_id; |
4650 | else | 4666 | else |
4651 | table = RT6_TABLE_UNSPEC; | 4667 | table = RT6_TABLE_UNSPEC; |
4652 | rtm->rtm_table = table; | 4668 | rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT; |
4653 | if (nla_put_u32(skb, RTA_TABLE, table)) | 4669 | if (nla_put_u32(skb, RTA_TABLE, table)) |
4654 | goto nla_put_failure; | 4670 | goto nla_put_failure; |
4655 | 4671 | ||
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index 8d0ba757a46c..9b2f272ca164 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c | |||
@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info) | |||
221 | rcu_read_unlock(); | 221 | rcu_read_unlock(); |
222 | 222 | ||
223 | genlmsg_end(msg, hdr); | 223 | genlmsg_end(msg, hdr); |
224 | genlmsg_reply(msg, info); | 224 | return genlmsg_reply(msg, info); |
225 | |||
226 | return 0; | ||
227 | 225 | ||
228 | nla_put_failure: | 226 | nla_put_failure: |
229 | rcu_read_unlock(); | 227 | rcu_read_unlock(); |
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 8181ee7e1e27..ee5403cbe655 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c | |||
@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) | |||
146 | } else { | 146 | } else { |
147 | ip6_flow_hdr(hdr, 0, flowlabel); | 147 | ip6_flow_hdr(hdr, 0, flowlabel); |
148 | hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); | 148 | hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); |
149 | |||
150 | memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); | ||
149 | } | 151 | } |
150 | 152 | ||
151 | hdr->nexthdr = NEXTHDR_ROUTING; | 153 | hdr->nexthdr = NEXTHDR_ROUTING; |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1e03305c0549..e8a1dabef803 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
546 | } | 546 | } |
547 | 547 | ||
548 | err = 0; | 548 | err = 0; |
549 | if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) | 549 | if (__in6_dev_get(skb->dev) && |
550 | !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) | ||
550 | goto out; | 551 | goto out; |
551 | 552 | ||
552 | if (t->parms.iph.daddr == 0) | 553 | if (t->parms.iph.daddr == 0) |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 2596ffdeebea..b444483cdb2b 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -288,8 +288,8 @@ int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, | |||
288 | int peeked, peeking, off; | 288 | int peeked, peeking, off; |
289 | int err; | 289 | int err; |
290 | int is_udplite = IS_UDPLITE(sk); | 290 | int is_udplite = IS_UDPLITE(sk); |
291 | struct udp_mib __percpu *mib; | ||
291 | bool checksum_valid = false; | 292 | bool checksum_valid = false; |
292 | struct udp_mib *mib; | ||
293 | int is_udp4; | 293 | int is_udp4; |
294 | 294 | ||
295 | if (flags & MSG_ERRQUEUE) | 295 | if (flags & MSG_ERRQUEUE) |
@@ -420,17 +420,19 @@ EXPORT_SYMBOL(udpv6_encap_enable); | |||
420 | */ | 420 | */ |
421 | static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, | 421 | static int __udp6_lib_err_encap_no_sk(struct sk_buff *skb, |
422 | struct inet6_skb_parm *opt, | 422 | struct inet6_skb_parm *opt, |
423 | u8 type, u8 code, int offset, u32 info) | 423 | u8 type, u8 code, int offset, __be32 info) |
424 | { | 424 | { |
425 | int i; | 425 | int i; |
426 | 426 | ||
427 | for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { | 427 | for (i = 0; i < MAX_IPTUN_ENCAP_OPS; i++) { |
428 | int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, | 428 | int (*handler)(struct sk_buff *skb, struct inet6_skb_parm *opt, |
429 | u8 type, u8 code, int offset, u32 info); | 429 | u8 type, u8 code, int offset, __be32 info); |
430 | const struct ip6_tnl_encap_ops *encap; | ||
430 | 431 | ||
431 | if (!ip6tun_encaps[i]) | 432 | encap = rcu_dereference(ip6tun_encaps[i]); |
433 | if (!encap) | ||
432 | continue; | 434 | continue; |
433 | handler = rcu_dereference(ip6tun_encaps[i]->err_handler); | 435 | handler = encap->err_handler; |
434 | if (handler && !handler(skb, opt, type, code, offset, info)) | 436 | if (handler && !handler(skb, opt, type, code, offset, info)) |
435 | return 0; | 437 | return 0; |
436 | } | 438 | } |
diff --git a/net/ipv6/xfrm6_tunnel.c b/net/ipv6/xfrm6_tunnel.c index f5b4febeaa25..bc65db782bfb 100644 --- a/net/ipv6/xfrm6_tunnel.c +++ b/net/ipv6/xfrm6_tunnel.c | |||
@@ -344,8 +344,8 @@ static void __net_exit xfrm6_tunnel_net_exit(struct net *net) | |||
344 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); | 344 | struct xfrm6_tunnel_net *xfrm6_tn = xfrm6_tunnel_pernet(net); |
345 | unsigned int i; | 345 | unsigned int i; |
346 | 346 | ||
347 | xfrm_state_flush(net, IPSEC_PROTO_ANY, false); | ||
348 | xfrm_flush_gc(); | 347 | xfrm_flush_gc(); |
348 | xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true); | ||
349 | 349 | ||
350 | for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) | 350 | for (i = 0; i < XFRM6_TUNNEL_SPI_BYADDR_HSIZE; i++) |
351 | WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); | 351 | WARN_ON_ONCE(!hlist_empty(&xfrm6_tn->spi_byaddr[i])); |
diff --git a/net/key/af_key.c b/net/key/af_key.c index 655c787f9d54..5651c29cb5bd 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c | |||
@@ -196,30 +196,22 @@ static int pfkey_release(struct socket *sock) | |||
196 | return 0; | 196 | return 0; |
197 | } | 197 | } |
198 | 198 | ||
199 | static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, | 199 | static int pfkey_broadcast_one(struct sk_buff *skb, gfp_t allocation, |
200 | gfp_t allocation, struct sock *sk) | 200 | struct sock *sk) |
201 | { | 201 | { |
202 | int err = -ENOBUFS; | 202 | int err = -ENOBUFS; |
203 | 203 | ||
204 | sock_hold(sk); | 204 | if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) |
205 | if (*skb2 == NULL) { | 205 | return err; |
206 | if (refcount_read(&skb->users) != 1) { | 206 | |
207 | *skb2 = skb_clone(skb, allocation); | 207 | skb = skb_clone(skb, allocation); |
208 | } else { | 208 | |
209 | *skb2 = skb; | 209 | if (skb) { |
210 | refcount_inc(&skb->users); | 210 | skb_set_owner_r(skb, sk); |
211 | } | 211 | skb_queue_tail(&sk->sk_receive_queue, skb); |
212 | } | 212 | sk->sk_data_ready(sk); |
213 | if (*skb2 != NULL) { | 213 | err = 0; |
214 | if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) { | ||
215 | skb_set_owner_r(*skb2, sk); | ||
216 | skb_queue_tail(&sk->sk_receive_queue, *skb2); | ||
217 | sk->sk_data_ready(sk); | ||
218 | *skb2 = NULL; | ||
219 | err = 0; | ||
220 | } | ||
221 | } | 214 | } |
222 | sock_put(sk); | ||
223 | return err; | 215 | return err; |
224 | } | 216 | } |
225 | 217 | ||
@@ -234,7 +226,6 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
234 | { | 226 | { |
235 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); | 227 | struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id); |
236 | struct sock *sk; | 228 | struct sock *sk; |
237 | struct sk_buff *skb2 = NULL; | ||
238 | int err = -ESRCH; | 229 | int err = -ESRCH; |
239 | 230 | ||
240 | /* XXX Do we need something like netlink_overrun? I think | 231 | /* XXX Do we need something like netlink_overrun? I think |
@@ -253,7 +244,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
253 | * socket. | 244 | * socket. |
254 | */ | 245 | */ |
255 | if (pfk->promisc) | 246 | if (pfk->promisc) |
256 | pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); | 247 | pfkey_broadcast_one(skb, GFP_ATOMIC, sk); |
257 | 248 | ||
258 | /* the exact target will be processed later */ | 249 | /* the exact target will be processed later */ |
259 | if (sk == one_sk) | 250 | if (sk == one_sk) |
@@ -268,7 +259,7 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
268 | continue; | 259 | continue; |
269 | } | 260 | } |
270 | 261 | ||
271 | err2 = pfkey_broadcast_one(skb, &skb2, GFP_ATOMIC, sk); | 262 | err2 = pfkey_broadcast_one(skb, GFP_ATOMIC, sk); |
272 | 263 | ||
273 | /* Error is cleared after successful sending to at least one | 264 | /* Error is cleared after successful sending to at least one |
274 | * registered KM */ | 265 | * registered KM */ |
@@ -278,9 +269,8 @@ static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation, | |||
278 | rcu_read_unlock(); | 269 | rcu_read_unlock(); |
279 | 270 | ||
280 | if (one_sk != NULL) | 271 | if (one_sk != NULL) |
281 | err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk); | 272 | err = pfkey_broadcast_one(skb, allocation, one_sk); |
282 | 273 | ||
283 | kfree_skb(skb2); | ||
284 | kfree_skb(skb); | 274 | kfree_skb(skb); |
285 | return err; | 275 | return err; |
286 | } | 276 | } |
@@ -1783,7 +1773,7 @@ static int pfkey_flush(struct sock *sk, struct sk_buff *skb, const struct sadb_m | |||
1783 | if (proto == 0) | 1773 | if (proto == 0) |
1784 | return -EINVAL; | 1774 | return -EINVAL; |
1785 | 1775 | ||
1786 | err = xfrm_state_flush(net, proto, true); | 1776 | err = xfrm_state_flush(net, proto, true, false); |
1787 | err2 = unicast_flush_resp(sk, hdr); | 1777 | err2 = unicast_flush_resp(sk, hdr); |
1788 | if (err || err2) { | 1778 | if (err || err2) { |
1789 | if (err == -ESRCH) /* empty table - go quietly */ | 1779 | if (err == -ESRCH) /* empty table - go quietly */ |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 26f1d435696a..fed6becc5daf 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -83,8 +83,7 @@ | |||
83 | #define L2TP_SLFLAG_S 0x40000000 | 83 | #define L2TP_SLFLAG_S 0x40000000 |
84 | #define L2TP_SL_SEQ_MASK 0x00ffffff | 84 | #define L2TP_SL_SEQ_MASK 0x00ffffff |
85 | 85 | ||
86 | #define L2TP_HDR_SIZE_SEQ 10 | 86 | #define L2TP_HDR_SIZE_MAX 14 |
87 | #define L2TP_HDR_SIZE_NOSEQ 6 | ||
88 | 87 | ||
89 | /* Default trace flags */ | 88 | /* Default trace flags */ |
90 | #define L2TP_DEFAULT_DEBUG_FLAGS 0 | 89 | #define L2TP_DEFAULT_DEBUG_FLAGS 0 |
@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) | |||
808 | __skb_pull(skb, sizeof(struct udphdr)); | 807 | __skb_pull(skb, sizeof(struct udphdr)); |
809 | 808 | ||
810 | /* Short packet? */ | 809 | /* Short packet? */ |
811 | if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { | 810 | if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) { |
812 | l2tp_info(tunnel, L2TP_MSG_DATA, | 811 | l2tp_info(tunnel, L2TP_MSG_DATA, |
813 | "%s: recv short packet (len=%d)\n", | 812 | "%s: recv short packet (len=%d)\n", |
814 | tunnel->name, skb->len); | 813 | tunnel->name, skb->len); |
@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) | |||
884 | goto error; | 883 | goto error; |
885 | } | 884 | } |
886 | 885 | ||
886 | if (tunnel->version == L2TP_HDR_VER_3 && | ||
887 | l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) | ||
888 | goto error; | ||
889 | |||
887 | l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); | 890 | l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); |
888 | l2tp_session_dec_refcount(session); | 891 | l2tp_session_dec_refcount(session); |
889 | 892 | ||
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 9c9afe94d389..b2ce90260c35 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel) | |||
301 | } | 301 | } |
302 | #endif | 302 | #endif |
303 | 303 | ||
304 | static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb, | ||
305 | unsigned char **ptr, unsigned char **optr) | ||
306 | { | ||
307 | int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session); | ||
308 | |||
309 | if (opt_len > 0) { | ||
310 | int off = *ptr - *optr; | ||
311 | |||
312 | if (!pskb_may_pull(skb, off + opt_len)) | ||
313 | return -1; | ||
314 | |||
315 | if (skb->data != *optr) { | ||
316 | *optr = skb->data; | ||
317 | *ptr = skb->data + off; | ||
318 | } | ||
319 | } | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
304 | #define l2tp_printk(ptr, type, func, fmt, ...) \ | 324 | #define l2tp_printk(ptr, type, func, fmt, ...) \ |
305 | do { \ | 325 | do { \ |
306 | if (((ptr)->debug) & (type)) \ | 326 | if (((ptr)->debug) & (type)) \ |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 35f6f86d4dcc..d4c60523c549 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb) | |||
165 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); | 165 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); |
166 | } | 166 | } |
167 | 167 | ||
168 | if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) | ||
169 | goto discard_sess; | ||
170 | |||
168 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); | 171 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); |
169 | l2tp_session_dec_refcount(session); | 172 | l2tp_session_dec_refcount(session); |
170 | 173 | ||
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 237f1a4a0b0c..0ae6899edac0 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb) | |||
178 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); | 178 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); |
179 | } | 179 | } |
180 | 180 | ||
181 | if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) | ||
182 | goto discard_sess; | ||
183 | |||
181 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); | 184 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); |
182 | l2tp_session_dec_refcount(session); | 185 | l2tp_session_dec_refcount(session); |
183 | 186 | ||
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 69e831bc317b..54821fb1a960 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2010, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH |
11 | * Copyright (C) 2018 Intel Corporation | 11 | * Copyright (C) 2018 - 2019 Intel Corporation |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License version 2 as | 14 | * it under the terms of the GNU General Public License version 2 as |
@@ -366,6 +366,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
366 | 366 | ||
367 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); | 367 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); |
368 | 368 | ||
369 | ieee80211_agg_stop_txq(sta, tid); | ||
370 | |||
369 | spin_unlock_bh(&sta->lock); | 371 | spin_unlock_bh(&sta->lock); |
370 | 372 | ||
371 | ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", | 373 | ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 2493c74c2d37..96496b2c1670 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -941,6 +941,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
941 | BSS_CHANGED_P2P_PS | | 941 | BSS_CHANGED_P2P_PS | |
942 | BSS_CHANGED_TXPOWER; | 942 | BSS_CHANGED_TXPOWER; |
943 | int err; | 943 | int err; |
944 | int prev_beacon_int; | ||
944 | 945 | ||
945 | old = sdata_dereference(sdata->u.ap.beacon, sdata); | 946 | old = sdata_dereference(sdata->u.ap.beacon, sdata); |
946 | if (old) | 947 | if (old) |
@@ -963,6 +964,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
963 | 964 | ||
964 | sdata->needed_rx_chains = sdata->local->rx_chains; | 965 | sdata->needed_rx_chains = sdata->local->rx_chains; |
965 | 966 | ||
967 | prev_beacon_int = sdata->vif.bss_conf.beacon_int; | ||
966 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; | 968 | sdata->vif.bss_conf.beacon_int = params->beacon_interval; |
967 | 969 | ||
968 | if (params->he_cap) | 970 | if (params->he_cap) |
@@ -974,8 +976,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, | |||
974 | if (!err) | 976 | if (!err) |
975 | ieee80211_vif_copy_chanctx_to_vlans(sdata, false); | 977 | ieee80211_vif_copy_chanctx_to_vlans(sdata, false); |
976 | mutex_unlock(&local->mtx); | 978 | mutex_unlock(&local->mtx); |
977 | if (err) | 979 | if (err) { |
980 | sdata->vif.bss_conf.beacon_int = prev_beacon_int; | ||
978 | return err; | 981 | return err; |
982 | } | ||
979 | 983 | ||
980 | /* | 984 | /* |
981 | * Apply control port protocol, this allows us to | 985 | * Apply control port protocol, this allows us to |
diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 87a729926734..977dea436ee8 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c | |||
@@ -615,13 +615,13 @@ struct ieee80211_hw *ieee80211_alloc_hw_nm(size_t priv_data_len, | |||
615 | * We need a bit of data queued to build aggregates properly, so | 615 | * We need a bit of data queued to build aggregates properly, so |
616 | * instruct the TCP stack to allow more than a single ms of data | 616 | * instruct the TCP stack to allow more than a single ms of data |
617 | * to be queued in the stack. The value is a bit-shift of 1 | 617 | * to be queued in the stack. The value is a bit-shift of 1 |
618 | * second, so 8 is ~4ms of queued data. Only affects local TCP | 618 | * second, so 7 is ~8ms of queued data. Only affects local TCP |
619 | * sockets. | 619 | * sockets. |
620 | * This is the default, anyhow - drivers may need to override it | 620 | * This is the default, anyhow - drivers may need to override it |
621 | * for local reasons (longer buffers, longer completion time, or | 621 | * for local reasons (longer buffers, longer completion time, or |
622 | * similar). | 622 | * similar). |
623 | */ | 623 | */ |
624 | local->hw.tx_sk_pacing_shift = 8; | 624 | local->hw.tx_sk_pacing_shift = 7; |
625 | 625 | ||
626 | /* set up some defaults */ | 626 | /* set up some defaults */ |
627 | local->hw.queues = 1; | 627 | local->hw.queues = 1; |
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h index cad6592c52a1..2ec7011a4d07 100644 --- a/net/mac80211/mesh.h +++ b/net/mac80211/mesh.h | |||
@@ -70,6 +70,7 @@ enum mesh_deferred_task_flags { | |||
70 | * @dst: mesh path destination mac address | 70 | * @dst: mesh path destination mac address |
71 | * @mpp: mesh proxy mac address | 71 | * @mpp: mesh proxy mac address |
72 | * @rhash: rhashtable list pointer | 72 | * @rhash: rhashtable list pointer |
73 | * @walk_list: linked list containing all mesh_path objects. | ||
73 | * @gate_list: list pointer for known gates list | 74 | * @gate_list: list pointer for known gates list |
74 | * @sdata: mesh subif | 75 | * @sdata: mesh subif |
75 | * @next_hop: mesh neighbor to which frames for this destination will be | 76 | * @next_hop: mesh neighbor to which frames for this destination will be |
@@ -105,6 +106,7 @@ struct mesh_path { | |||
105 | u8 dst[ETH_ALEN]; | 106 | u8 dst[ETH_ALEN]; |
106 | u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ | 107 | u8 mpp[ETH_ALEN]; /* used for MPP or MAP */ |
107 | struct rhash_head rhash; | 108 | struct rhash_head rhash; |
109 | struct hlist_node walk_list; | ||
108 | struct hlist_node gate_list; | 110 | struct hlist_node gate_list; |
109 | struct ieee80211_sub_if_data *sdata; | 111 | struct ieee80211_sub_if_data *sdata; |
110 | struct sta_info __rcu *next_hop; | 112 | struct sta_info __rcu *next_hop; |
@@ -133,12 +135,16 @@ struct mesh_path { | |||
133 | * gate's mpath may or may not be resolved and active. | 135 | * gate's mpath may or may not be resolved and active. |
134 | * @gates_lock: protects updates to known_gates | 136 | * @gates_lock: protects updates to known_gates |
135 | * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr | 137 | * @rhead: the rhashtable containing struct mesh_paths, keyed by dest addr |
138 | * @walk_head: linked list containging all mesh_path objects | ||
139 | * @walk_lock: lock protecting walk_head | ||
136 | * @entries: number of entries in the table | 140 | * @entries: number of entries in the table |
137 | */ | 141 | */ |
138 | struct mesh_table { | 142 | struct mesh_table { |
139 | struct hlist_head known_gates; | 143 | struct hlist_head known_gates; |
140 | spinlock_t gates_lock; | 144 | spinlock_t gates_lock; |
141 | struct rhashtable rhead; | 145 | struct rhashtable rhead; |
146 | struct hlist_head walk_head; | ||
147 | spinlock_t walk_lock; | ||
142 | atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ | 148 | atomic_t entries; /* Up to MAX_MESH_NEIGHBOURS */ |
143 | }; | 149 | }; |
144 | 150 | ||
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c index a5125624a76d..88a6d5e18ccc 100644 --- a/net/mac80211/mesh_pathtbl.c +++ b/net/mac80211/mesh_pathtbl.c | |||
@@ -59,8 +59,10 @@ static struct mesh_table *mesh_table_alloc(void) | |||
59 | return NULL; | 59 | return NULL; |
60 | 60 | ||
61 | INIT_HLIST_HEAD(&newtbl->known_gates); | 61 | INIT_HLIST_HEAD(&newtbl->known_gates); |
62 | INIT_HLIST_HEAD(&newtbl->walk_head); | ||
62 | atomic_set(&newtbl->entries, 0); | 63 | atomic_set(&newtbl->entries, 0); |
63 | spin_lock_init(&newtbl->gates_lock); | 64 | spin_lock_init(&newtbl->gates_lock); |
65 | spin_lock_init(&newtbl->walk_lock); | ||
64 | 66 | ||
65 | return newtbl; | 67 | return newtbl; |
66 | } | 68 | } |
@@ -249,28 +251,15 @@ mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst) | |||
249 | static struct mesh_path * | 251 | static struct mesh_path * |
250 | __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) | 252 | __mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx) |
251 | { | 253 | { |
252 | int i = 0, ret; | 254 | int i = 0; |
253 | struct mesh_path *mpath = NULL; | 255 | struct mesh_path *mpath; |
254 | struct rhashtable_iter iter; | ||
255 | |||
256 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
257 | if (ret) | ||
258 | return NULL; | ||
259 | |||
260 | rhashtable_walk_start(&iter); | ||
261 | 256 | ||
262 | while ((mpath = rhashtable_walk_next(&iter))) { | 257 | hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { |
263 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
264 | continue; | ||
265 | if (IS_ERR(mpath)) | ||
266 | break; | ||
267 | if (i++ == idx) | 258 | if (i++ == idx) |
268 | break; | 259 | break; |
269 | } | 260 | } |
270 | rhashtable_walk_stop(&iter); | ||
271 | rhashtable_walk_exit(&iter); | ||
272 | 261 | ||
273 | if (IS_ERR(mpath) || !mpath) | 262 | if (!mpath) |
274 | return NULL; | 263 | return NULL; |
275 | 264 | ||
276 | if (mpath_expired(mpath)) { | 265 | if (mpath_expired(mpath)) { |
@@ -432,6 +421,7 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, | |||
432 | return ERR_PTR(-ENOMEM); | 421 | return ERR_PTR(-ENOMEM); |
433 | 422 | ||
434 | tbl = sdata->u.mesh.mesh_paths; | 423 | tbl = sdata->u.mesh.mesh_paths; |
424 | spin_lock_bh(&tbl->walk_lock); | ||
435 | do { | 425 | do { |
436 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, | 426 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, |
437 | &new_mpath->rhash, | 427 | &new_mpath->rhash, |
@@ -441,20 +431,20 @@ struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata, | |||
441 | mpath = rhashtable_lookup_fast(&tbl->rhead, | 431 | mpath = rhashtable_lookup_fast(&tbl->rhead, |
442 | dst, | 432 | dst, |
443 | mesh_rht_params); | 433 | mesh_rht_params); |
444 | 434 | else if (!ret) | |
435 | hlist_add_head(&new_mpath->walk_list, &tbl->walk_head); | ||
445 | } while (unlikely(ret == -EEXIST && !mpath)); | 436 | } while (unlikely(ret == -EEXIST && !mpath)); |
437 | spin_unlock_bh(&tbl->walk_lock); | ||
446 | 438 | ||
447 | if (ret && ret != -EEXIST) | 439 | if (ret) { |
448 | return ERR_PTR(ret); | ||
449 | |||
450 | /* At this point either new_mpath was added, or we found a | ||
451 | * matching entry already in the table; in the latter case | ||
452 | * free the unnecessary new entry. | ||
453 | */ | ||
454 | if (ret == -EEXIST) { | ||
455 | kfree(new_mpath); | 440 | kfree(new_mpath); |
441 | |||
442 | if (ret != -EEXIST) | ||
443 | return ERR_PTR(ret); | ||
444 | |||
456 | new_mpath = mpath; | 445 | new_mpath = mpath; |
457 | } | 446 | } |
447 | |||
458 | sdata->u.mesh.mesh_paths_generation++; | 448 | sdata->u.mesh.mesh_paths_generation++; |
459 | return new_mpath; | 449 | return new_mpath; |
460 | } | 450 | } |
@@ -480,9 +470,17 @@ int mpp_path_add(struct ieee80211_sub_if_data *sdata, | |||
480 | 470 | ||
481 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); | 471 | memcpy(new_mpath->mpp, mpp, ETH_ALEN); |
482 | tbl = sdata->u.mesh.mpp_paths; | 472 | tbl = sdata->u.mesh.mpp_paths; |
473 | |||
474 | spin_lock_bh(&tbl->walk_lock); | ||
483 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, | 475 | ret = rhashtable_lookup_insert_fast(&tbl->rhead, |
484 | &new_mpath->rhash, | 476 | &new_mpath->rhash, |
485 | mesh_rht_params); | 477 | mesh_rht_params); |
478 | if (!ret) | ||
479 | hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head); | ||
480 | spin_unlock_bh(&tbl->walk_lock); | ||
481 | |||
482 | if (ret) | ||
483 | kfree(new_mpath); | ||
486 | 484 | ||
487 | sdata->u.mesh.mpp_paths_generation++; | 485 | sdata->u.mesh.mpp_paths_generation++; |
488 | return ret; | 486 | return ret; |
@@ -503,20 +501,9 @@ void mesh_plink_broken(struct sta_info *sta) | |||
503 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; | 501 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; |
504 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; | 502 | static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; |
505 | struct mesh_path *mpath; | 503 | struct mesh_path *mpath; |
506 | struct rhashtable_iter iter; | ||
507 | int ret; | ||
508 | |||
509 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
510 | if (ret) | ||
511 | return; | ||
512 | 504 | ||
513 | rhashtable_walk_start(&iter); | 505 | rcu_read_lock(); |
514 | 506 | hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) { | |
515 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
516 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
517 | continue; | ||
518 | if (IS_ERR(mpath)) | ||
519 | break; | ||
520 | if (rcu_access_pointer(mpath->next_hop) == sta && | 507 | if (rcu_access_pointer(mpath->next_hop) == sta && |
521 | mpath->flags & MESH_PATH_ACTIVE && | 508 | mpath->flags & MESH_PATH_ACTIVE && |
522 | !(mpath->flags & MESH_PATH_FIXED)) { | 509 | !(mpath->flags & MESH_PATH_FIXED)) { |
@@ -530,8 +517,7 @@ void mesh_plink_broken(struct sta_info *sta) | |||
530 | WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); | 517 | WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast); |
531 | } | 518 | } |
532 | } | 519 | } |
533 | rhashtable_walk_stop(&iter); | 520 | rcu_read_unlock(); |
534 | rhashtable_walk_exit(&iter); | ||
535 | } | 521 | } |
536 | 522 | ||
537 | static void mesh_path_free_rcu(struct mesh_table *tbl, | 523 | static void mesh_path_free_rcu(struct mesh_table *tbl, |
@@ -551,6 +537,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, | |||
551 | 537 | ||
552 | static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) | 538 | static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath) |
553 | { | 539 | { |
540 | hlist_del_rcu(&mpath->walk_list); | ||
554 | rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); | 541 | rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params); |
555 | mesh_path_free_rcu(tbl, mpath); | 542 | mesh_path_free_rcu(tbl, mpath); |
556 | } | 543 | } |
@@ -571,27 +558,14 @@ void mesh_path_flush_by_nexthop(struct sta_info *sta) | |||
571 | struct ieee80211_sub_if_data *sdata = sta->sdata; | 558 | struct ieee80211_sub_if_data *sdata = sta->sdata; |
572 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; | 559 | struct mesh_table *tbl = sdata->u.mesh.mesh_paths; |
573 | struct mesh_path *mpath; | 560 | struct mesh_path *mpath; |
574 | struct rhashtable_iter iter; | 561 | struct hlist_node *n; |
575 | int ret; | ||
576 | |||
577 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
578 | if (ret) | ||
579 | return; | ||
580 | |||
581 | rhashtable_walk_start(&iter); | ||
582 | |||
583 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
584 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
585 | continue; | ||
586 | if (IS_ERR(mpath)) | ||
587 | break; | ||
588 | 562 | ||
563 | spin_lock_bh(&tbl->walk_lock); | ||
564 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { | ||
589 | if (rcu_access_pointer(mpath->next_hop) == sta) | 565 | if (rcu_access_pointer(mpath->next_hop) == sta) |
590 | __mesh_path_del(tbl, mpath); | 566 | __mesh_path_del(tbl, mpath); |
591 | } | 567 | } |
592 | 568 | spin_unlock_bh(&tbl->walk_lock); | |
593 | rhashtable_walk_stop(&iter); | ||
594 | rhashtable_walk_exit(&iter); | ||
595 | } | 569 | } |
596 | 570 | ||
597 | static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, | 571 | static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, |
@@ -599,51 +573,26 @@ static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata, | |||
599 | { | 573 | { |
600 | struct mesh_table *tbl = sdata->u.mesh.mpp_paths; | 574 | struct mesh_table *tbl = sdata->u.mesh.mpp_paths; |
601 | struct mesh_path *mpath; | 575 | struct mesh_path *mpath; |
602 | struct rhashtable_iter iter; | 576 | struct hlist_node *n; |
603 | int ret; | ||
604 | |||
605 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
606 | if (ret) | ||
607 | return; | ||
608 | |||
609 | rhashtable_walk_start(&iter); | ||
610 | |||
611 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
612 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
613 | continue; | ||
614 | if (IS_ERR(mpath)) | ||
615 | break; | ||
616 | 577 | ||
578 | spin_lock_bh(&tbl->walk_lock); | ||
579 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { | ||
617 | if (ether_addr_equal(mpath->mpp, proxy)) | 580 | if (ether_addr_equal(mpath->mpp, proxy)) |
618 | __mesh_path_del(tbl, mpath); | 581 | __mesh_path_del(tbl, mpath); |
619 | } | 582 | } |
620 | 583 | spin_unlock_bh(&tbl->walk_lock); | |
621 | rhashtable_walk_stop(&iter); | ||
622 | rhashtable_walk_exit(&iter); | ||
623 | } | 584 | } |
624 | 585 | ||
625 | static void table_flush_by_iface(struct mesh_table *tbl) | 586 | static void table_flush_by_iface(struct mesh_table *tbl) |
626 | { | 587 | { |
627 | struct mesh_path *mpath; | 588 | struct mesh_path *mpath; |
628 | struct rhashtable_iter iter; | 589 | struct hlist_node *n; |
629 | int ret; | ||
630 | |||
631 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_ATOMIC); | ||
632 | if (ret) | ||
633 | return; | ||
634 | |||
635 | rhashtable_walk_start(&iter); | ||
636 | 590 | ||
637 | while ((mpath = rhashtable_walk_next(&iter))) { | 591 | spin_lock_bh(&tbl->walk_lock); |
638 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | 592 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { |
639 | continue; | ||
640 | if (IS_ERR(mpath)) | ||
641 | break; | ||
642 | __mesh_path_del(tbl, mpath); | 593 | __mesh_path_del(tbl, mpath); |
643 | } | 594 | } |
644 | 595 | spin_unlock_bh(&tbl->walk_lock); | |
645 | rhashtable_walk_stop(&iter); | ||
646 | rhashtable_walk_exit(&iter); | ||
647 | } | 596 | } |
648 | 597 | ||
649 | /** | 598 | /** |
@@ -675,15 +624,15 @@ static int table_path_del(struct mesh_table *tbl, | |||
675 | { | 624 | { |
676 | struct mesh_path *mpath; | 625 | struct mesh_path *mpath; |
677 | 626 | ||
678 | rcu_read_lock(); | 627 | spin_lock_bh(&tbl->walk_lock); |
679 | mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); | 628 | mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params); |
680 | if (!mpath) { | 629 | if (!mpath) { |
681 | rcu_read_unlock(); | 630 | spin_unlock_bh(&tbl->walk_lock); |
682 | return -ENXIO; | 631 | return -ENXIO; |
683 | } | 632 | } |
684 | 633 | ||
685 | __mesh_path_del(tbl, mpath); | 634 | __mesh_path_del(tbl, mpath); |
686 | rcu_read_unlock(); | 635 | spin_unlock_bh(&tbl->walk_lock); |
687 | return 0; | 636 | return 0; |
688 | } | 637 | } |
689 | 638 | ||
@@ -854,28 +803,16 @@ void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata, | |||
854 | struct mesh_table *tbl) | 803 | struct mesh_table *tbl) |
855 | { | 804 | { |
856 | struct mesh_path *mpath; | 805 | struct mesh_path *mpath; |
857 | struct rhashtable_iter iter; | 806 | struct hlist_node *n; |
858 | int ret; | ||
859 | 807 | ||
860 | ret = rhashtable_walk_init(&tbl->rhead, &iter, GFP_KERNEL); | 808 | spin_lock_bh(&tbl->walk_lock); |
861 | if (ret) | 809 | hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) { |
862 | return; | ||
863 | |||
864 | rhashtable_walk_start(&iter); | ||
865 | |||
866 | while ((mpath = rhashtable_walk_next(&iter))) { | ||
867 | if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN) | ||
868 | continue; | ||
869 | if (IS_ERR(mpath)) | ||
870 | break; | ||
871 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && | 810 | if ((!(mpath->flags & MESH_PATH_RESOLVING)) && |
872 | (!(mpath->flags & MESH_PATH_FIXED)) && | 811 | (!(mpath->flags & MESH_PATH_FIXED)) && |
873 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) | 812 | time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) |
874 | __mesh_path_del(tbl, mpath); | 813 | __mesh_path_del(tbl, mpath); |
875 | } | 814 | } |
876 | 815 | spin_unlock_bh(&tbl->walk_lock); | |
877 | rhashtable_walk_stop(&iter); | ||
878 | rhashtable_walk_exit(&iter); | ||
879 | } | 816 | } |
880 | 817 | ||
881 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) | 818 | void mesh_path_expire(struct ieee80211_sub_if_data *sdata) |
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bb4d71efb6fb..c2a6da5d80da 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c | |||
@@ -2644,6 +2644,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
2644 | struct ieee80211_sub_if_data *sdata = rx->sdata; | 2644 | struct ieee80211_sub_if_data *sdata = rx->sdata; |
2645 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; | 2645 | struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; |
2646 | u16 ac, q, hdrlen; | 2646 | u16 ac, q, hdrlen; |
2647 | int tailroom = 0; | ||
2647 | 2648 | ||
2648 | hdr = (struct ieee80211_hdr *) skb->data; | 2649 | hdr = (struct ieee80211_hdr *) skb->data; |
2649 | hdrlen = ieee80211_hdrlen(hdr->frame_control); | 2650 | hdrlen = ieee80211_hdrlen(hdr->frame_control); |
@@ -2732,8 +2733,12 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx) | |||
2732 | if (!ifmsh->mshcfg.dot11MeshForwarding) | 2733 | if (!ifmsh->mshcfg.dot11MeshForwarding) |
2733 | goto out; | 2734 | goto out; |
2734 | 2735 | ||
2736 | if (sdata->crypto_tx_tailroom_needed_cnt) | ||
2737 | tailroom = IEEE80211_ENCRYPT_TAILROOM; | ||
2738 | |||
2735 | fwd_skb = skb_copy_expand(skb, local->tx_headroom + | 2739 | fwd_skb = skb_copy_expand(skb, local->tx_headroom + |
2736 | sdata->encrypt_headroom, 0, GFP_ATOMIC); | 2740 | sdata->encrypt_headroom, |
2741 | tailroom, GFP_ATOMIC); | ||
2737 | if (!fwd_skb) | 2742 | if (!fwd_skb) |
2738 | goto out; | 2743 | goto out; |
2739 | 2744 | ||
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f170d6c6629a..928f13a208b0 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, | |||
1938 | int head_need, bool may_encrypt) | 1938 | int head_need, bool may_encrypt) |
1939 | { | 1939 | { |
1940 | struct ieee80211_local *local = sdata->local; | 1940 | struct ieee80211_local *local = sdata->local; |
1941 | struct ieee80211_hdr *hdr; | ||
1942 | bool enc_tailroom; | ||
1941 | int tail_need = 0; | 1943 | int tail_need = 0; |
1942 | 1944 | ||
1943 | if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { | 1945 | hdr = (struct ieee80211_hdr *) skb->data; |
1946 | enc_tailroom = may_encrypt && | ||
1947 | (sdata->crypto_tx_tailroom_needed_cnt || | ||
1948 | ieee80211_is_mgmt(hdr->frame_control)); | ||
1949 | |||
1950 | if (enc_tailroom) { | ||
1944 | tail_need = IEEE80211_ENCRYPT_TAILROOM; | 1951 | tail_need = IEEE80211_ENCRYPT_TAILROOM; |
1945 | tail_need -= skb_tailroom(skb); | 1952 | tail_need -= skb_tailroom(skb); |
1946 | tail_need = max_t(int, tail_need, 0); | 1953 | tail_need = max_t(int, tail_need, 0); |
@@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, | |||
1948 | 1955 | ||
1949 | if (skb_cloned(skb) && | 1956 | if (skb_cloned(skb) && |
1950 | (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || | 1957 | (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || |
1951 | !skb_clone_writable(skb, ETH_HLEN) || | 1958 | !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom)) |
1952 | (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt))) | ||
1953 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); | 1959 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); |
1954 | else if (head_need || tail_need) | 1960 | else if (head_need || tail_need) |
1955 | I802_DEBUG_INC(local->tx_expand_skb_head); | 1961 | I802_DEBUG_INC(local->tx_expand_skb_head); |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d0eb38b890aa..ba950ae974fc 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> | 5 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> |
6 | * Copyright 2013-2014 Intel Mobile Communications GmbH | 6 | * Copyright 2013-2014 Intel Mobile Communications GmbH |
7 | * Copyright (C) 2015-2017 Intel Deutschland GmbH | 7 | * Copyright (C) 2015-2017 Intel Deutschland GmbH |
8 | * Copyright (C) 2018 Intel Corporation | 8 | * Copyright (C) 2018-2019 Intel Corporation |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
@@ -2146,6 +2146,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
2146 | case NL80211_IFTYPE_AP_VLAN: | 2146 | case NL80211_IFTYPE_AP_VLAN: |
2147 | case NL80211_IFTYPE_MONITOR: | 2147 | case NL80211_IFTYPE_MONITOR: |
2148 | break; | 2148 | break; |
2149 | case NL80211_IFTYPE_ADHOC: | ||
2150 | if (sdata->vif.bss_conf.ibss_joined) | ||
2151 | WARN_ON(drv_join_ibss(local, sdata)); | ||
2152 | /* fall through */ | ||
2149 | default: | 2153 | default: |
2150 | ieee80211_reconfig_stations(sdata); | 2154 | ieee80211_reconfig_stations(sdata); |
2151 | /* fall through */ | 2155 | /* fall through */ |
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index cad48d07c818..8401cefd9f65 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig | |||
@@ -29,6 +29,7 @@ config IP_VS_IPV6 | |||
29 | bool "IPv6 support for IPVS" | 29 | bool "IPv6 support for IPVS" |
30 | depends on IPV6 = y || IP_VS = IPV6 | 30 | depends on IPV6 = y || IP_VS = IPV6 |
31 | select IP6_NF_IPTABLES | 31 | select IP6_NF_IPTABLES |
32 | select NF_DEFRAG_IPV6 | ||
32 | ---help--- | 33 | ---help--- |
33 | Add IPv6 support to IPVS. | 34 | Add IPv6 support to IPVS. |
34 | 35 | ||
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index fe9abf3cc10a..235205c93e14 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -1536,14 +1536,12 @@ ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, | |||
1536 | /* sorry, all this trouble for a no-hit :) */ | 1536 | /* sorry, all this trouble for a no-hit :) */ |
1537 | IP_VS_DBG_PKT(12, af, pp, skb, iph->off, | 1537 | IP_VS_DBG_PKT(12, af, pp, skb, iph->off, |
1538 | "ip_vs_in: packet continues traversal as normal"); | 1538 | "ip_vs_in: packet continues traversal as normal"); |
1539 | if (iph->fragoffs) { | 1539 | |
1540 | /* Fragment that couldn't be mapped to a conn entry | 1540 | /* Fragment couldn't be mapped to a conn entry */ |
1541 | * is missing module nf_defrag_ipv6 | 1541 | if (iph->fragoffs) |
1542 | */ | ||
1543 | IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n"); | ||
1544 | IP_VS_DBG_PKT(7, af, pp, skb, iph->off, | 1542 | IP_VS_DBG_PKT(7, af, pp, skb, iph->off, |
1545 | "unhandled fragment"); | 1543 | "unhandled fragment"); |
1546 | } | 1544 | |
1547 | *verdict = NF_ACCEPT; | 1545 | *verdict = NF_ACCEPT; |
1548 | return 0; | 1546 | return 0; |
1549 | } | 1547 | } |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 7d6318664eb2..ac8d848d7624 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #ifdef CONFIG_IP_VS_IPV6 | 43 | #ifdef CONFIG_IP_VS_IPV6 |
44 | #include <net/ipv6.h> | 44 | #include <net/ipv6.h> |
45 | #include <net/ip6_route.h> | 45 | #include <net/ip6_route.h> |
46 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> | ||
46 | #endif | 47 | #endif |
47 | #include <net/route.h> | 48 | #include <net/route.h> |
48 | #include <net/sock.h> | 49 | #include <net/sock.h> |
@@ -900,11 +901,17 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, | |||
900 | 901 | ||
901 | #ifdef CONFIG_IP_VS_IPV6 | 902 | #ifdef CONFIG_IP_VS_IPV6 |
902 | if (udest->af == AF_INET6) { | 903 | if (udest->af == AF_INET6) { |
904 | int ret; | ||
905 | |||
903 | atype = ipv6_addr_type(&udest->addr.in6); | 906 | atype = ipv6_addr_type(&udest->addr.in6); |
904 | if ((!(atype & IPV6_ADDR_UNICAST) || | 907 | if ((!(atype & IPV6_ADDR_UNICAST) || |
905 | atype & IPV6_ADDR_LINKLOCAL) && | 908 | atype & IPV6_ADDR_LINKLOCAL) && |
906 | !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6)) | 909 | !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6)) |
907 | return -EINVAL; | 910 | return -EINVAL; |
911 | |||
912 | ret = nf_defrag_ipv6_enable(svc->ipvs->net); | ||
913 | if (ret) | ||
914 | return ret; | ||
908 | } else | 915 | } else |
909 | #endif | 916 | #endif |
910 | { | 917 | { |
@@ -1228,6 +1235,10 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, | |||
1228 | ret = -EINVAL; | 1235 | ret = -EINVAL; |
1229 | goto out_err; | 1236 | goto out_err; |
1230 | } | 1237 | } |
1238 | |||
1239 | ret = nf_defrag_ipv6_enable(ipvs->net); | ||
1240 | if (ret) | ||
1241 | goto out_err; | ||
1231 | } | 1242 | } |
1232 | #endif | 1243 | #endif |
1233 | 1244 | ||
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 741b533148ba..db4d46332e86 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, | |||
1007 | } | 1007 | } |
1008 | 1008 | ||
1009 | if (nf_ct_key_equal(h, tuple, zone, net)) { | 1009 | if (nf_ct_key_equal(h, tuple, zone, net)) { |
1010 | /* Tuple is taken already, so caller will need to find | ||
1011 | * a new source port to use. | ||
1012 | * | ||
1013 | * Only exception: | ||
1014 | * If the *original tuples* are identical, then both | ||
1015 | * conntracks refer to the same flow. | ||
1016 | * This is a rare situation, it can occur e.g. when | ||
1017 | * more than one UDP packet is sent from same socket | ||
1018 | * in different threads. | ||
1019 | * | ||
1020 | * Let nf_ct_resolve_clash() deal with this later. | ||
1021 | */ | ||
1022 | if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | ||
1023 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) | ||
1024 | continue; | ||
1025 | |||
1010 | NF_CT_STAT_INC_ATOMIC(net, found); | 1026 | NF_CT_STAT_INC_ATOMIC(net, found); |
1011 | rcu_read_unlock(); | 1027 | rcu_read_unlock(); |
1012 | return 1; | 1028 | return 1; |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index fb07f6cfc719..4893f248dfdc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -116,6 +116,23 @@ static void nft_trans_destroy(struct nft_trans *trans) | |||
116 | kfree(trans); | 116 | kfree(trans); |
117 | } | 117 | } |
118 | 118 | ||
119 | static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set) | ||
120 | { | ||
121 | struct net *net = ctx->net; | ||
122 | struct nft_trans *trans; | ||
123 | |||
124 | if (!nft_set_is_anonymous(set)) | ||
125 | return; | ||
126 | |||
127 | list_for_each_entry_reverse(trans, &net->nft.commit_list, list) { | ||
128 | if (trans->msg_type == NFT_MSG_NEWSET && | ||
129 | nft_trans_set(trans) == set) { | ||
130 | nft_trans_set_bound(trans) = true; | ||
131 | break; | ||
132 | } | ||
133 | } | ||
134 | } | ||
135 | |||
119 | static int nf_tables_register_hook(struct net *net, | 136 | static int nf_tables_register_hook(struct net *net, |
120 | const struct nft_table *table, | 137 | const struct nft_table *table, |
121 | struct nft_chain *chain) | 138 | struct nft_chain *chain) |
@@ -211,18 +228,6 @@ static int nft_delchain(struct nft_ctx *ctx) | |||
211 | return err; | 228 | return err; |
212 | } | 229 | } |
213 | 230 | ||
214 | /* either expr ops provide both activate/deactivate, or neither */ | ||
215 | static bool nft_expr_check_ops(const struct nft_expr_ops *ops) | ||
216 | { | ||
217 | if (!ops) | ||
218 | return true; | ||
219 | |||
220 | if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate))) | ||
221 | return false; | ||
222 | |||
223 | return true; | ||
224 | } | ||
225 | |||
226 | static void nft_rule_expr_activate(const struct nft_ctx *ctx, | 231 | static void nft_rule_expr_activate(const struct nft_ctx *ctx, |
227 | struct nft_rule *rule) | 232 | struct nft_rule *rule) |
228 | { | 233 | { |
@@ -238,14 +243,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx, | |||
238 | } | 243 | } |
239 | 244 | ||
240 | static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, | 245 | static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, |
241 | struct nft_rule *rule) | 246 | struct nft_rule *rule, |
247 | enum nft_trans_phase phase) | ||
242 | { | 248 | { |
243 | struct nft_expr *expr; | 249 | struct nft_expr *expr; |
244 | 250 | ||
245 | expr = nft_expr_first(rule); | 251 | expr = nft_expr_first(rule); |
246 | while (expr != nft_expr_last(rule) && expr->ops) { | 252 | while (expr != nft_expr_last(rule) && expr->ops) { |
247 | if (expr->ops->deactivate) | 253 | if (expr->ops->deactivate) |
248 | expr->ops->deactivate(ctx, expr); | 254 | expr->ops->deactivate(ctx, expr, phase); |
249 | 255 | ||
250 | expr = nft_expr_next(expr); | 256 | expr = nft_expr_next(expr); |
251 | } | 257 | } |
@@ -296,7 +302,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) | |||
296 | nft_trans_destroy(trans); | 302 | nft_trans_destroy(trans); |
297 | return err; | 303 | return err; |
298 | } | 304 | } |
299 | nft_rule_expr_deactivate(ctx, rule); | 305 | nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE); |
300 | 306 | ||
301 | return 0; | 307 | return 0; |
302 | } | 308 | } |
@@ -307,6 +313,9 @@ static int nft_delrule_by_chain(struct nft_ctx *ctx) | |||
307 | int err; | 313 | int err; |
308 | 314 | ||
309 | list_for_each_entry(rule, &ctx->chain->rules, list) { | 315 | list_for_each_entry(rule, &ctx->chain->rules, list) { |
316 | if (!nft_is_active_next(ctx->net, rule)) | ||
317 | continue; | ||
318 | |||
310 | err = nft_delrule(ctx, rule); | 319 | err = nft_delrule(ctx, rule); |
311 | if (err < 0) | 320 | if (err < 0) |
312 | return err; | 321 | return err; |
@@ -1929,9 +1938,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk, | |||
1929 | */ | 1938 | */ |
1930 | int nft_register_expr(struct nft_expr_type *type) | 1939 | int nft_register_expr(struct nft_expr_type *type) |
1931 | { | 1940 | { |
1932 | if (!nft_expr_check_ops(type->ops)) | ||
1933 | return -EINVAL; | ||
1934 | |||
1935 | nfnl_lock(NFNL_SUBSYS_NFTABLES); | 1941 | nfnl_lock(NFNL_SUBSYS_NFTABLES); |
1936 | if (type->family == NFPROTO_UNSPEC) | 1942 | if (type->family == NFPROTO_UNSPEC) |
1937 | list_add_tail_rcu(&type->list, &nf_tables_expressions); | 1943 | list_add_tail_rcu(&type->list, &nf_tables_expressions); |
@@ -2079,10 +2085,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, | |||
2079 | err = PTR_ERR(ops); | 2085 | err = PTR_ERR(ops); |
2080 | goto err1; | 2086 | goto err1; |
2081 | } | 2087 | } |
2082 | if (!nft_expr_check_ops(ops)) { | ||
2083 | err = -EINVAL; | ||
2084 | goto err1; | ||
2085 | } | ||
2086 | } else | 2088 | } else |
2087 | ops = type->ops; | 2089 | ops = type->ops; |
2088 | 2090 | ||
@@ -2511,7 +2513,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, | |||
2511 | static void nf_tables_rule_release(const struct nft_ctx *ctx, | 2513 | static void nf_tables_rule_release(const struct nft_ctx *ctx, |
2512 | struct nft_rule *rule) | 2514 | struct nft_rule *rule) |
2513 | { | 2515 | { |
2514 | nft_rule_expr_deactivate(ctx, rule); | 2516 | nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE); |
2515 | nf_tables_rule_destroy(ctx, rule); | 2517 | nf_tables_rule_destroy(ctx, rule); |
2516 | } | 2518 | } |
2517 | 2519 | ||
@@ -3708,39 +3710,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | |||
3708 | bind: | 3710 | bind: |
3709 | binding->chain = ctx->chain; | 3711 | binding->chain = ctx->chain; |
3710 | list_add_tail_rcu(&binding->list, &set->bindings); | 3712 | list_add_tail_rcu(&binding->list, &set->bindings); |
3713 | nft_set_trans_bind(ctx, set); | ||
3714 | |||
3711 | return 0; | 3715 | return 0; |
3712 | } | 3716 | } |
3713 | EXPORT_SYMBOL_GPL(nf_tables_bind_set); | 3717 | EXPORT_SYMBOL_GPL(nf_tables_bind_set); |
3714 | 3718 | ||
3715 | void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
3716 | struct nft_set_binding *binding) | ||
3717 | { | ||
3718 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && | ||
3719 | nft_is_active(ctx->net, set)) | ||
3720 | list_add_tail_rcu(&set->list, &ctx->table->sets); | ||
3721 | |||
3722 | list_add_tail_rcu(&binding->list, &set->bindings); | ||
3723 | } | ||
3724 | EXPORT_SYMBOL_GPL(nf_tables_rebind_set); | ||
3725 | |||
3726 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, | 3719 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, |
3727 | struct nft_set_binding *binding) | 3720 | struct nft_set_binding *binding, bool event) |
3728 | { | 3721 | { |
3729 | list_del_rcu(&binding->list); | 3722 | list_del_rcu(&binding->list); |
3730 | 3723 | ||
3731 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && | 3724 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) { |
3732 | nft_is_active(ctx->net, set)) | ||
3733 | list_del_rcu(&set->list); | 3725 | list_del_rcu(&set->list); |
3726 | if (event) | ||
3727 | nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, | ||
3728 | GFP_KERNEL); | ||
3729 | } | ||
3734 | } | 3730 | } |
3735 | EXPORT_SYMBOL_GPL(nf_tables_unbind_set); | 3731 | EXPORT_SYMBOL_GPL(nf_tables_unbind_set); |
3736 | 3732 | ||
3737 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) | 3733 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) |
3738 | { | 3734 | { |
3739 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && | 3735 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) |
3740 | nft_is_active(ctx->net, set)) { | ||
3741 | nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); | ||
3742 | nft_set_destroy(set); | 3736 | nft_set_destroy(set); |
3743 | } | ||
3744 | } | 3737 | } |
3745 | EXPORT_SYMBOL_GPL(nf_tables_destroy_set); | 3738 | EXPORT_SYMBOL_GPL(nf_tables_destroy_set); |
3746 | 3739 | ||
@@ -6535,6 +6528,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) | |||
6535 | nf_tables_rule_notify(&trans->ctx, | 6528 | nf_tables_rule_notify(&trans->ctx, |
6536 | nft_trans_rule(trans), | 6529 | nft_trans_rule(trans), |
6537 | NFT_MSG_DELRULE); | 6530 | NFT_MSG_DELRULE); |
6531 | nft_rule_expr_deactivate(&trans->ctx, | ||
6532 | nft_trans_rule(trans), | ||
6533 | NFT_TRANS_COMMIT); | ||
6538 | break; | 6534 | break; |
6539 | case NFT_MSG_NEWSET: | 6535 | case NFT_MSG_NEWSET: |
6540 | nft_clear(net, nft_trans_set(trans)); | 6536 | nft_clear(net, nft_trans_set(trans)); |
@@ -6621,7 +6617,8 @@ static void nf_tables_abort_release(struct nft_trans *trans) | |||
6621 | nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); | 6617 | nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); |
6622 | break; | 6618 | break; |
6623 | case NFT_MSG_NEWSET: | 6619 | case NFT_MSG_NEWSET: |
6624 | nft_set_destroy(nft_trans_set(trans)); | 6620 | if (!nft_trans_set_bound(trans)) |
6621 | nft_set_destroy(nft_trans_set(trans)); | ||
6625 | break; | 6622 | break; |
6626 | case NFT_MSG_NEWSETELEM: | 6623 | case NFT_MSG_NEWSETELEM: |
6627 | nft_set_elem_destroy(nft_trans_elem_set(trans), | 6624 | nft_set_elem_destroy(nft_trans_elem_set(trans), |
@@ -6682,7 +6679,9 @@ static int __nf_tables_abort(struct net *net) | |||
6682 | case NFT_MSG_NEWRULE: | 6679 | case NFT_MSG_NEWRULE: |
6683 | trans->ctx.chain->use--; | 6680 | trans->ctx.chain->use--; |
6684 | list_del_rcu(&nft_trans_rule(trans)->list); | 6681 | list_del_rcu(&nft_trans_rule(trans)->list); |
6685 | nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans)); | 6682 | nft_rule_expr_deactivate(&trans->ctx, |
6683 | nft_trans_rule(trans), | ||
6684 | NFT_TRANS_ABORT); | ||
6686 | break; | 6685 | break; |
6687 | case NFT_MSG_DELRULE: | 6686 | case NFT_MSG_DELRULE: |
6688 | trans->ctx.chain->use++; | 6687 | trans->ctx.chain->use++; |
@@ -6692,7 +6691,8 @@ static int __nf_tables_abort(struct net *net) | |||
6692 | break; | 6691 | break; |
6693 | case NFT_MSG_NEWSET: | 6692 | case NFT_MSG_NEWSET: |
6694 | trans->ctx.table->use--; | 6693 | trans->ctx.table->use--; |
6695 | list_del_rcu(&nft_trans_set(trans)->list); | 6694 | if (!nft_trans_set_bound(trans)) |
6695 | list_del_rcu(&nft_trans_set(trans)->list); | ||
6696 | break; | 6696 | break; |
6697 | case NFT_MSG_DELSET: | 6697 | case NFT_MSG_DELSET: |
6698 | trans->ctx.table->use++; | 6698 | trans->ctx.table->use++; |
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 5eb269428832..0a4bad55a8aa 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -61,6 +61,21 @@ static struct nft_compat_net *nft_compat_pernet(struct net *net) | |||
61 | return net_generic(net, nft_compat_net_id); | 61 | return net_generic(net, nft_compat_net_id); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void nft_xt_get(struct nft_xt *xt) | ||
65 | { | ||
66 | /* refcount_inc() warns on 0 -> 1 transition, but we can't | ||
67 | * init the reference count to 1 in .select_ops -- we can't | ||
68 | * undo such an increase when another expression inside the same | ||
69 | * rule fails afterwards. | ||
70 | */ | ||
71 | if (xt->listcnt == 0) | ||
72 | refcount_set(&xt->refcnt, 1); | ||
73 | else | ||
74 | refcount_inc(&xt->refcnt); | ||
75 | |||
76 | xt->listcnt++; | ||
77 | } | ||
78 | |||
64 | static bool nft_xt_put(struct nft_xt *xt) | 79 | static bool nft_xt_put(struct nft_xt *xt) |
65 | { | 80 | { |
66 | if (refcount_dec_and_test(&xt->refcnt)) { | 81 | if (refcount_dec_and_test(&xt->refcnt)) { |
@@ -291,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
291 | return -EINVAL; | 306 | return -EINVAL; |
292 | 307 | ||
293 | nft_xt = container_of(expr->ops, struct nft_xt, ops); | 308 | nft_xt = container_of(expr->ops, struct nft_xt, ops); |
294 | refcount_inc(&nft_xt->refcnt); | 309 | nft_xt_get(nft_xt); |
295 | return 0; | 310 | return 0; |
296 | } | 311 | } |
297 | 312 | ||
@@ -300,6 +315,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
300 | { | 315 | { |
301 | struct xt_target *target = expr->ops->data; | 316 | struct xt_target *target = expr->ops->data; |
302 | void *info = nft_expr_priv(expr); | 317 | void *info = nft_expr_priv(expr); |
318 | struct module *me = target->me; | ||
303 | struct xt_tgdtor_param par; | 319 | struct xt_tgdtor_param par; |
304 | 320 | ||
305 | par.net = ctx->net; | 321 | par.net = ctx->net; |
@@ -310,7 +326,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
310 | par.target->destroy(&par); | 326 | par.target->destroy(&par); |
311 | 327 | ||
312 | if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) | 328 | if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) |
313 | module_put(target->me); | 329 | module_put(me); |
314 | } | 330 | } |
315 | 331 | ||
316 | static int nft_extension_dump_info(struct sk_buff *skb, int attr, | 332 | static int nft_extension_dump_info(struct sk_buff *skb, int attr, |
@@ -504,7 +520,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
504 | return ret; | 520 | return ret; |
505 | 521 | ||
506 | nft_xt = container_of(expr->ops, struct nft_xt, ops); | 522 | nft_xt = container_of(expr->ops, struct nft_xt, ops); |
507 | refcount_inc(&nft_xt->refcnt); | 523 | nft_xt_get(nft_xt); |
508 | return 0; | 524 | return 0; |
509 | } | 525 | } |
510 | 526 | ||
@@ -558,41 +574,16 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
558 | __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); | 574 | __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); |
559 | } | 575 | } |
560 | 576 | ||
561 | static void nft_compat_activate(const struct nft_ctx *ctx, | ||
562 | const struct nft_expr *expr, | ||
563 | struct list_head *h) | ||
564 | { | ||
565 | struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); | ||
566 | |||
567 | if (xt->listcnt == 0) | ||
568 | list_add(&xt->head, h); | ||
569 | |||
570 | xt->listcnt++; | ||
571 | } | ||
572 | |||
573 | static void nft_compat_activate_mt(const struct nft_ctx *ctx, | ||
574 | const struct nft_expr *expr) | ||
575 | { | ||
576 | struct nft_compat_net *cn = nft_compat_pernet(ctx->net); | ||
577 | |||
578 | nft_compat_activate(ctx, expr, &cn->nft_match_list); | ||
579 | } | ||
580 | |||
581 | static void nft_compat_activate_tg(const struct nft_ctx *ctx, | ||
582 | const struct nft_expr *expr) | ||
583 | { | ||
584 | struct nft_compat_net *cn = nft_compat_pernet(ctx->net); | ||
585 | |||
586 | nft_compat_activate(ctx, expr, &cn->nft_target_list); | ||
587 | } | ||
588 | |||
589 | static void nft_compat_deactivate(const struct nft_ctx *ctx, | 577 | static void nft_compat_deactivate(const struct nft_ctx *ctx, |
590 | const struct nft_expr *expr) | 578 | const struct nft_expr *expr, |
579 | enum nft_trans_phase phase) | ||
591 | { | 580 | { |
592 | struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); | 581 | struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); |
593 | 582 | ||
594 | if (--xt->listcnt == 0) | 583 | if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) { |
595 | list_del_init(&xt->head); | 584 | if (--xt->listcnt == 0) |
585 | list_del_init(&xt->head); | ||
586 | } | ||
596 | } | 587 | } |
597 | 588 | ||
598 | static void | 589 | static void |
@@ -848,7 +839,6 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
848 | nft_match->ops.eval = nft_match_eval; | 839 | nft_match->ops.eval = nft_match_eval; |
849 | nft_match->ops.init = nft_match_init; | 840 | nft_match->ops.init = nft_match_init; |
850 | nft_match->ops.destroy = nft_match_destroy; | 841 | nft_match->ops.destroy = nft_match_destroy; |
851 | nft_match->ops.activate = nft_compat_activate_mt; | ||
852 | nft_match->ops.deactivate = nft_compat_deactivate; | 842 | nft_match->ops.deactivate = nft_compat_deactivate; |
853 | nft_match->ops.dump = nft_match_dump; | 843 | nft_match->ops.dump = nft_match_dump; |
854 | nft_match->ops.validate = nft_match_validate; | 844 | nft_match->ops.validate = nft_match_validate; |
@@ -866,7 +856,7 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
866 | 856 | ||
867 | nft_match->ops.size = matchsize; | 857 | nft_match->ops.size = matchsize; |
868 | 858 | ||
869 | nft_match->listcnt = 1; | 859 | nft_match->listcnt = 0; |
870 | list_add(&nft_match->head, &cn->nft_match_list); | 860 | list_add(&nft_match->head, &cn->nft_match_list); |
871 | 861 | ||
872 | return &nft_match->ops; | 862 | return &nft_match->ops; |
@@ -953,7 +943,6 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
953 | nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); | 943 | nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); |
954 | nft_target->ops.init = nft_target_init; | 944 | nft_target->ops.init = nft_target_init; |
955 | nft_target->ops.destroy = nft_target_destroy; | 945 | nft_target->ops.destroy = nft_target_destroy; |
956 | nft_target->ops.activate = nft_compat_activate_tg; | ||
957 | nft_target->ops.deactivate = nft_compat_deactivate; | 946 | nft_target->ops.deactivate = nft_compat_deactivate; |
958 | nft_target->ops.dump = nft_target_dump; | 947 | nft_target->ops.dump = nft_target_dump; |
959 | nft_target->ops.validate = nft_target_validate; | 948 | nft_target->ops.validate = nft_target_validate; |
@@ -964,7 +953,7 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
964 | else | 953 | else |
965 | nft_target->ops.eval = nft_target_eval_xt; | 954 | nft_target->ops.eval = nft_target_eval_xt; |
966 | 955 | ||
967 | nft_target->listcnt = 1; | 956 | nft_target->listcnt = 0; |
968 | list_add(&nft_target->head, &cn->nft_target_list); | 957 | list_add(&nft_target->head, &cn->nft_target_list); |
969 | 958 | ||
970 | return &nft_target->ops; | 959 | return &nft_target->ops; |
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 07d4efd3d851..f1172f99752b 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
@@ -235,20 +235,17 @@ err1: | |||
235 | return err; | 235 | return err; |
236 | } | 236 | } |
237 | 237 | ||
238 | static void nft_dynset_activate(const struct nft_ctx *ctx, | ||
239 | const struct nft_expr *expr) | ||
240 | { | ||
241 | struct nft_dynset *priv = nft_expr_priv(expr); | ||
242 | |||
243 | nf_tables_rebind_set(ctx, priv->set, &priv->binding); | ||
244 | } | ||
245 | |||
246 | static void nft_dynset_deactivate(const struct nft_ctx *ctx, | 238 | static void nft_dynset_deactivate(const struct nft_ctx *ctx, |
247 | const struct nft_expr *expr) | 239 | const struct nft_expr *expr, |
240 | enum nft_trans_phase phase) | ||
248 | { | 241 | { |
249 | struct nft_dynset *priv = nft_expr_priv(expr); | 242 | struct nft_dynset *priv = nft_expr_priv(expr); |
250 | 243 | ||
251 | nf_tables_unbind_set(ctx, priv->set, &priv->binding); | 244 | if (phase == NFT_TRANS_PREPARE) |
245 | return; | ||
246 | |||
247 | nf_tables_unbind_set(ctx, priv->set, &priv->binding, | ||
248 | phase == NFT_TRANS_COMMIT); | ||
252 | } | 249 | } |
253 | 250 | ||
254 | static void nft_dynset_destroy(const struct nft_ctx *ctx, | 251 | static void nft_dynset_destroy(const struct nft_ctx *ctx, |
@@ -296,7 +293,6 @@ static const struct nft_expr_ops nft_dynset_ops = { | |||
296 | .eval = nft_dynset_eval, | 293 | .eval = nft_dynset_eval, |
297 | .init = nft_dynset_init, | 294 | .init = nft_dynset_init, |
298 | .destroy = nft_dynset_destroy, | 295 | .destroy = nft_dynset_destroy, |
299 | .activate = nft_dynset_activate, | ||
300 | .deactivate = nft_dynset_deactivate, | 296 | .deactivate = nft_dynset_deactivate, |
301 | .dump = nft_dynset_dump, | 297 | .dump = nft_dynset_dump, |
302 | }; | 298 | }; |
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 0777a93211e2..3f6d1d2a6281 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c | |||
@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx, | |||
72 | } | 72 | } |
73 | 73 | ||
74 | static void nft_immediate_deactivate(const struct nft_ctx *ctx, | 74 | static void nft_immediate_deactivate(const struct nft_ctx *ctx, |
75 | const struct nft_expr *expr) | 75 | const struct nft_expr *expr, |
76 | enum nft_trans_phase phase) | ||
76 | { | 77 | { |
77 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); | 78 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); |
78 | 79 | ||
80 | if (phase == NFT_TRANS_COMMIT) | ||
81 | return; | ||
82 | |||
79 | return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); | 83 | return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); |
80 | } | 84 | } |
81 | 85 | ||
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index 227b2b15a19c..14496da5141d 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c | |||
@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx, | |||
121 | return 0; | 121 | return 0; |
122 | } | 122 | } |
123 | 123 | ||
124 | static void nft_lookup_activate(const struct nft_ctx *ctx, | ||
125 | const struct nft_expr *expr) | ||
126 | { | ||
127 | struct nft_lookup *priv = nft_expr_priv(expr); | ||
128 | |||
129 | nf_tables_rebind_set(ctx, priv->set, &priv->binding); | ||
130 | } | ||
131 | |||
132 | static void nft_lookup_deactivate(const struct nft_ctx *ctx, | 124 | static void nft_lookup_deactivate(const struct nft_ctx *ctx, |
133 | const struct nft_expr *expr) | 125 | const struct nft_expr *expr, |
126 | enum nft_trans_phase phase) | ||
134 | { | 127 | { |
135 | struct nft_lookup *priv = nft_expr_priv(expr); | 128 | struct nft_lookup *priv = nft_expr_priv(expr); |
136 | 129 | ||
137 | nf_tables_unbind_set(ctx, priv->set, &priv->binding); | 130 | if (phase == NFT_TRANS_PREPARE) |
131 | return; | ||
132 | |||
133 | nf_tables_unbind_set(ctx, priv->set, &priv->binding, | ||
134 | phase == NFT_TRANS_COMMIT); | ||
138 | } | 135 | } |
139 | 136 | ||
140 | static void nft_lookup_destroy(const struct nft_ctx *ctx, | 137 | static void nft_lookup_destroy(const struct nft_ctx *ctx, |
@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = { | |||
225 | .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), | 222 | .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), |
226 | .eval = nft_lookup_eval, | 223 | .eval = nft_lookup_eval, |
227 | .init = nft_lookup_init, | 224 | .init = nft_lookup_init, |
228 | .activate = nft_lookup_activate, | ||
229 | .deactivate = nft_lookup_deactivate, | 225 | .deactivate = nft_lookup_deactivate, |
230 | .destroy = nft_lookup_destroy, | 226 | .destroy = nft_lookup_destroy, |
231 | .dump = nft_lookup_dump, | 227 | .dump = nft_lookup_dump, |
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index a3185ca2a3a9..ae178e914486 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c | |||
@@ -155,20 +155,17 @@ nla_put_failure: | |||
155 | return -1; | 155 | return -1; |
156 | } | 156 | } |
157 | 157 | ||
158 | static void nft_objref_map_activate(const struct nft_ctx *ctx, | ||
159 | const struct nft_expr *expr) | ||
160 | { | ||
161 | struct nft_objref_map *priv = nft_expr_priv(expr); | ||
162 | |||
163 | nf_tables_rebind_set(ctx, priv->set, &priv->binding); | ||
164 | } | ||
165 | |||
166 | static void nft_objref_map_deactivate(const struct nft_ctx *ctx, | 158 | static void nft_objref_map_deactivate(const struct nft_ctx *ctx, |
167 | const struct nft_expr *expr) | 159 | const struct nft_expr *expr, |
160 | enum nft_trans_phase phase) | ||
168 | { | 161 | { |
169 | struct nft_objref_map *priv = nft_expr_priv(expr); | 162 | struct nft_objref_map *priv = nft_expr_priv(expr); |
170 | 163 | ||
171 | nf_tables_unbind_set(ctx, priv->set, &priv->binding); | 164 | if (phase == NFT_TRANS_PREPARE) |
165 | return; | ||
166 | |||
167 | nf_tables_unbind_set(ctx, priv->set, &priv->binding, | ||
168 | phase == NFT_TRANS_COMMIT); | ||
172 | } | 169 | } |
173 | 170 | ||
174 | static void nft_objref_map_destroy(const struct nft_ctx *ctx, | 171 | static void nft_objref_map_destroy(const struct nft_ctx *ctx, |
@@ -185,7 +182,6 @@ static const struct nft_expr_ops nft_objref_map_ops = { | |||
185 | .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), | 182 | .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), |
186 | .eval = nft_objref_map_eval, | 183 | .eval = nft_objref_map_eval, |
187 | .init = nft_objref_map_init, | 184 | .init = nft_objref_map_init, |
188 | .activate = nft_objref_map_activate, | ||
189 | .deactivate = nft_objref_map_deactivate, | 185 | .deactivate = nft_objref_map_deactivate, |
190 | .destroy = nft_objref_map_destroy, | 186 | .destroy = nft_objref_map_destroy, |
191 | .dump = nft_objref_map_dump, | 187 | .dump = nft_objref_map_dump, |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index aecadd471e1d..13e1ac333fa4 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -1899,7 +1899,7 @@ static int __init xt_init(void) | |||
1899 | seqcount_init(&per_cpu(xt_recseq, i)); | 1899 | seqcount_init(&per_cpu(xt_recseq, i)); |
1900 | } | 1900 | } |
1901 | 1901 | ||
1902 | xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); | 1902 | xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); |
1903 | if (!xt) | 1903 | if (!xt) |
1904 | return -ENOMEM; | 1904 | return -ENOMEM; |
1905 | 1905 | ||
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3b1a78906bc0..1cd1d83a4be0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -4292,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4292 | rb->frames_per_block = req->tp_block_size / req->tp_frame_size; | 4292 | rb->frames_per_block = req->tp_block_size / req->tp_frame_size; |
4293 | if (unlikely(rb->frames_per_block == 0)) | 4293 | if (unlikely(rb->frames_per_block == 0)) |
4294 | goto out; | 4294 | goto out; |
4295 | if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) | 4295 | if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr)) |
4296 | goto out; | 4296 | goto out; |
4297 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != | 4297 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != |
4298 | req->tp_frame_nr)) | 4298 | req->tp_frame_nr)) |
diff --git a/net/phonet/pep.c b/net/phonet/pep.c index 9fc76b19cd3c..db3473540303 100644 --- a/net/phonet/pep.c +++ b/net/phonet/pep.c | |||
@@ -132,7 +132,7 @@ static int pep_indicate(struct sock *sk, u8 id, u8 code, | |||
132 | ph->utid = 0; | 132 | ph->utid = 0; |
133 | ph->message_id = id; | 133 | ph->message_id = id; |
134 | ph->pipe_handle = pn->pipe_handle; | 134 | ph->pipe_handle = pn->pipe_handle; |
135 | ph->data[0] = code; | 135 | ph->error_code = code; |
136 | return pn_skb_send(sk, skb, NULL); | 136 | return pn_skb_send(sk, skb, NULL); |
137 | } | 137 | } |
138 | 138 | ||
@@ -153,7 +153,7 @@ static int pipe_handler_request(struct sock *sk, u8 id, u8 code, | |||
153 | ph->utid = id; /* whatever */ | 153 | ph->utid = id; /* whatever */ |
154 | ph->message_id = id; | 154 | ph->message_id = id; |
155 | ph->pipe_handle = pn->pipe_handle; | 155 | ph->pipe_handle = pn->pipe_handle; |
156 | ph->data[0] = code; | 156 | ph->error_code = code; |
157 | return pn_skb_send(sk, skb, NULL); | 157 | return pn_skb_send(sk, skb, NULL); |
158 | } | 158 | } |
159 | 159 | ||
@@ -208,7 +208,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, | |||
208 | struct pnpipehdr *ph; | 208 | struct pnpipehdr *ph; |
209 | struct sockaddr_pn dst; | 209 | struct sockaddr_pn dst; |
210 | u8 data[4] = { | 210 | u8 data[4] = { |
211 | oph->data[0], /* PEP type */ | 211 | oph->pep_type, /* PEP type */ |
212 | code, /* error code, at an unusual offset */ | 212 | code, /* error code, at an unusual offset */ |
213 | PAD, PAD, | 213 | PAD, PAD, |
214 | }; | 214 | }; |
@@ -221,7 +221,7 @@ static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code, | |||
221 | ph->utid = oph->utid; | 221 | ph->utid = oph->utid; |
222 | ph->message_id = PNS_PEP_CTRL_RESP; | 222 | ph->message_id = PNS_PEP_CTRL_RESP; |
223 | ph->pipe_handle = oph->pipe_handle; | 223 | ph->pipe_handle = oph->pipe_handle; |
224 | ph->data[0] = oph->data[1]; /* CTRL id */ | 224 | ph->data0 = oph->data[0]; /* CTRL id */ |
225 | 225 | ||
226 | pn_skb_get_src_sockaddr(oskb, &dst); | 226 | pn_skb_get_src_sockaddr(oskb, &dst); |
227 | return pn_skb_send(sk, skb, &dst); | 227 | return pn_skb_send(sk, skb, &dst); |
@@ -272,17 +272,17 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) | |||
272 | return -EINVAL; | 272 | return -EINVAL; |
273 | 273 | ||
274 | hdr = pnp_hdr(skb); | 274 | hdr = pnp_hdr(skb); |
275 | if (hdr->data[0] != PN_PEP_TYPE_COMMON) { | 275 | if (hdr->pep_type != PN_PEP_TYPE_COMMON) { |
276 | net_dbg_ratelimited("Phonet unknown PEP type: %u\n", | 276 | net_dbg_ratelimited("Phonet unknown PEP type: %u\n", |
277 | (unsigned int)hdr->data[0]); | 277 | (unsigned int)hdr->pep_type); |
278 | return -EOPNOTSUPP; | 278 | return -EOPNOTSUPP; |
279 | } | 279 | } |
280 | 280 | ||
281 | switch (hdr->data[1]) { | 281 | switch (hdr->data[0]) { |
282 | case PN_PEP_IND_FLOW_CONTROL: | 282 | case PN_PEP_IND_FLOW_CONTROL: |
283 | switch (pn->tx_fc) { | 283 | switch (pn->tx_fc) { |
284 | case PN_LEGACY_FLOW_CONTROL: | 284 | case PN_LEGACY_FLOW_CONTROL: |
285 | switch (hdr->data[4]) { | 285 | switch (hdr->data[3]) { |
286 | case PEP_IND_BUSY: | 286 | case PEP_IND_BUSY: |
287 | atomic_set(&pn->tx_credits, 0); | 287 | atomic_set(&pn->tx_credits, 0); |
288 | break; | 288 | break; |
@@ -292,7 +292,7 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) | |||
292 | } | 292 | } |
293 | break; | 293 | break; |
294 | case PN_ONE_CREDIT_FLOW_CONTROL: | 294 | case PN_ONE_CREDIT_FLOW_CONTROL: |
295 | if (hdr->data[4] == PEP_IND_READY) | 295 | if (hdr->data[3] == PEP_IND_READY) |
296 | atomic_set(&pn->tx_credits, wake = 1); | 296 | atomic_set(&pn->tx_credits, wake = 1); |
297 | break; | 297 | break; |
298 | } | 298 | } |
@@ -301,12 +301,12 @@ static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb) | |||
301 | case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: | 301 | case PN_PEP_IND_ID_MCFC_GRANT_CREDITS: |
302 | if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) | 302 | if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL) |
303 | break; | 303 | break; |
304 | atomic_add(wake = hdr->data[4], &pn->tx_credits); | 304 | atomic_add(wake = hdr->data[3], &pn->tx_credits); |
305 | break; | 305 | break; |
306 | 306 | ||
307 | default: | 307 | default: |
308 | net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", | 308 | net_dbg_ratelimited("Phonet unknown PEP indication: %u\n", |
309 | (unsigned int)hdr->data[1]); | 309 | (unsigned int)hdr->data[0]); |
310 | return -EOPNOTSUPP; | 310 | return -EOPNOTSUPP; |
311 | } | 311 | } |
312 | if (wake) | 312 | if (wake) |
@@ -318,7 +318,7 @@ static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb) | |||
318 | { | 318 | { |
319 | struct pep_sock *pn = pep_sk(sk); | 319 | struct pep_sock *pn = pep_sk(sk); |
320 | struct pnpipehdr *hdr = pnp_hdr(skb); | 320 | struct pnpipehdr *hdr = pnp_hdr(skb); |
321 | u8 n_sb = hdr->data[0]; | 321 | u8 n_sb = hdr->data0; |
322 | 322 | ||
323 | pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; | 323 | pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL; |
324 | __skb_pull(skb, sizeof(*hdr)); | 324 | __skb_pull(skb, sizeof(*hdr)); |
@@ -506,7 +506,7 @@ static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb) | |||
506 | return -ECONNREFUSED; | 506 | return -ECONNREFUSED; |
507 | 507 | ||
508 | /* Parse sub-blocks */ | 508 | /* Parse sub-blocks */ |
509 | n_sb = hdr->data[4]; | 509 | n_sb = hdr->data[3]; |
510 | while (n_sb > 0) { | 510 | while (n_sb > 0) { |
511 | u8 type, buf[6], len = sizeof(buf); | 511 | u8 type, buf[6], len = sizeof(buf); |
512 | const u8 *data = pep_get_sb(skb, &type, &len, buf); | 512 | const u8 *data = pep_get_sb(skb, &type, &len, buf); |
@@ -739,7 +739,7 @@ static int pipe_do_remove(struct sock *sk) | |||
739 | ph->utid = 0; | 739 | ph->utid = 0; |
740 | ph->message_id = PNS_PIPE_REMOVE_REQ; | 740 | ph->message_id = PNS_PIPE_REMOVE_REQ; |
741 | ph->pipe_handle = pn->pipe_handle; | 741 | ph->pipe_handle = pn->pipe_handle; |
742 | ph->data[0] = PAD; | 742 | ph->data0 = PAD; |
743 | return pn_skb_send(sk, skb, NULL); | 743 | return pn_skb_send(sk, skb, NULL); |
744 | } | 744 | } |
745 | 745 | ||
@@ -817,7 +817,7 @@ static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp, | |||
817 | peer_type = hdr->other_pep_type << 8; | 817 | peer_type = hdr->other_pep_type << 8; |
818 | 818 | ||
819 | /* Parse sub-blocks (options) */ | 819 | /* Parse sub-blocks (options) */ |
820 | n_sb = hdr->data[4]; | 820 | n_sb = hdr->data[3]; |
821 | while (n_sb > 0) { | 821 | while (n_sb > 0) { |
822 | u8 type, buf[1], len = sizeof(buf); | 822 | u8 type, buf[1], len = sizeof(buf); |
823 | const u8 *data = pep_get_sb(skb, &type, &len, buf); | 823 | const u8 *data = pep_get_sb(skb, &type, &len, buf); |
@@ -1109,7 +1109,7 @@ static int pipe_skb_send(struct sock *sk, struct sk_buff *skb) | |||
1109 | ph->utid = 0; | 1109 | ph->utid = 0; |
1110 | if (pn->aligned) { | 1110 | if (pn->aligned) { |
1111 | ph->message_id = PNS_PIPE_ALIGNED_DATA; | 1111 | ph->message_id = PNS_PIPE_ALIGNED_DATA; |
1112 | ph->data[0] = 0; /* padding */ | 1112 | ph->data0 = 0; /* padding */ |
1113 | } else | 1113 | } else |
1114 | ph->message_id = PNS_PIPE_DATA; | 1114 | ph->message_id = PNS_PIPE_DATA; |
1115 | ph->pipe_handle = pn->pipe_handle; | 1115 | ph->pipe_handle = pn->pipe_handle; |
diff --git a/net/rds/bind.c b/net/rds/bind.c index 762d2c6788a3..17c9d9f0c848 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c | |||
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port, | |||
78 | __rds_create_bind_key(key, addr, port, scope_id); | 78 | __rds_create_bind_key(key, addr, port, scope_id); |
79 | rcu_read_lock(); | 79 | rcu_read_lock(); |
80 | rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); | 80 | rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); |
81 | if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) | 81 | if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) || |
82 | rds_sock_addref(rs); | 82 | !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt))) |
83 | else | ||
84 | rs = NULL; | 83 | rs = NULL; |
84 | |||
85 | rcu_read_unlock(); | 85 | rcu_read_unlock(); |
86 | 86 | ||
87 | rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, | 87 | rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, |
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index eaf19ebaa964..3f7bb11f3290 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c | |||
@@ -596,6 +596,7 @@ error_requeue_call: | |||
596 | } | 596 | } |
597 | error_no_call: | 597 | error_no_call: |
598 | release_sock(&rx->sk); | 598 | release_sock(&rx->sk); |
599 | error_trace: | ||
599 | trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); | 600 | trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); |
600 | return ret; | 601 | return ret; |
601 | 602 | ||
@@ -604,7 +605,7 @@ wait_interrupted: | |||
604 | wait_error: | 605 | wait_error: |
605 | finish_wait(sk_sleep(&rx->sk), &wait); | 606 | finish_wait(sk_sleep(&rx->sk), &wait); |
606 | call = NULL; | 607 | call = NULL; |
607 | goto error_no_call; | 608 | goto error_trace; |
608 | } | 609 | } |
609 | 610 | ||
610 | /** | 611 | /** |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index f6aa57fbbbaf..12ca9d13db83 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -1371,7 +1371,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
1371 | if (!tc_skip_hw(fnew->flags)) { | 1371 | if (!tc_skip_hw(fnew->flags)) { |
1372 | err = fl_hw_replace_filter(tp, fnew, extack); | 1372 | err = fl_hw_replace_filter(tp, fnew, extack); |
1373 | if (err) | 1373 | if (err) |
1374 | goto errout_mask; | 1374 | goto errout_mask_ht; |
1375 | } | 1375 | } |
1376 | 1376 | ||
1377 | if (!tc_in_hw(fnew->flags)) | 1377 | if (!tc_in_hw(fnew->flags)) |
@@ -1401,6 +1401,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
1401 | kfree(mask); | 1401 | kfree(mask); |
1402 | return 0; | 1402 | return 0; |
1403 | 1403 | ||
1404 | errout_mask_ht: | ||
1405 | rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, | ||
1406 | fnew->mask->filter_ht_params); | ||
1407 | |||
1404 | errout_mask: | 1408 | errout_mask: |
1405 | fl_mask_put(head, fnew->mask, false); | 1409 | fl_mask_put(head, fnew->mask, false); |
1406 | 1410 | ||
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 9ccc93f257db..38bb882bb958 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c | |||
@@ -48,7 +48,7 @@ struct tcindex_data { | |||
48 | u32 hash; /* hash table size; 0 if undefined */ | 48 | u32 hash; /* hash table size; 0 if undefined */ |
49 | u32 alloc_hash; /* allocated size */ | 49 | u32 alloc_hash; /* allocated size */ |
50 | u32 fall_through; /* 0: only classify if explicit match */ | 50 | u32 fall_through; /* 0: only classify if explicit match */ |
51 | struct rcu_head rcu; | 51 | struct rcu_work rwork; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) | 54 | static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) |
@@ -221,17 +221,11 @@ found: | |||
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
223 | 223 | ||
224 | static int tcindex_destroy_element(struct tcf_proto *tp, | 224 | static void tcindex_destroy_work(struct work_struct *work) |
225 | void *arg, struct tcf_walker *walker) | ||
226 | { | ||
227 | bool last; | ||
228 | |||
229 | return tcindex_delete(tp, arg, &last, NULL); | ||
230 | } | ||
231 | |||
232 | static void __tcindex_destroy(struct rcu_head *head) | ||
233 | { | 225 | { |
234 | struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); | 226 | struct tcindex_data *p = container_of(to_rcu_work(work), |
227 | struct tcindex_data, | ||
228 | rwork); | ||
235 | 229 | ||
236 | kfree(p->perfect); | 230 | kfree(p->perfect); |
237 | kfree(p->h); | 231 | kfree(p->h); |
@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r) | |||
258 | return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); | 252 | return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); |
259 | } | 253 | } |
260 | 254 | ||
261 | static void __tcindex_partial_destroy(struct rcu_head *head) | 255 | static void tcindex_partial_destroy_work(struct work_struct *work) |
262 | { | 256 | { |
263 | struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); | 257 | struct tcindex_data *p = container_of(to_rcu_work(work), |
258 | struct tcindex_data, | ||
259 | rwork); | ||
264 | 260 | ||
265 | kfree(p->perfect); | 261 | kfree(p->perfect); |
266 | kfree(p); | 262 | kfree(p); |
@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp) | |||
275 | kfree(cp->perfect); | 271 | kfree(cp->perfect); |
276 | } | 272 | } |
277 | 273 | ||
278 | static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) | 274 | static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp) |
279 | { | 275 | { |
280 | int i, err = 0; | 276 | int i, err = 0; |
281 | 277 | ||
@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) | |||
289 | TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); | 285 | TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); |
290 | if (err < 0) | 286 | if (err < 0) |
291 | goto errout; | 287 | goto errout; |
288 | #ifdef CONFIG_NET_CLS_ACT | ||
289 | cp->perfect[i].exts.net = net; | ||
290 | #endif | ||
292 | } | 291 | } |
293 | 292 | ||
294 | return 0; | 293 | return 0; |
@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
305 | struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) | 304 | struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) |
306 | { | 305 | { |
307 | struct tcindex_filter_result new_filter_result, *old_r = r; | 306 | struct tcindex_filter_result new_filter_result, *old_r = r; |
308 | struct tcindex_filter_result cr; | ||
309 | struct tcindex_data *cp = NULL, *oldp; | 307 | struct tcindex_data *cp = NULL, *oldp; |
310 | struct tcindex_filter *f = NULL; /* make gcc behave */ | 308 | struct tcindex_filter *f = NULL; /* make gcc behave */ |
309 | struct tcf_result cr = {}; | ||
311 | int err, balloc = 0; | 310 | int err, balloc = 0; |
312 | struct tcf_exts e; | 311 | struct tcf_exts e; |
313 | 312 | ||
@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
337 | if (p->perfect) { | 336 | if (p->perfect) { |
338 | int i; | 337 | int i; |
339 | 338 | ||
340 | if (tcindex_alloc_perfect_hash(cp) < 0) | 339 | if (tcindex_alloc_perfect_hash(net, cp) < 0) |
341 | goto errout; | 340 | goto errout; |
342 | for (i = 0; i < cp->hash; i++) | 341 | for (i = 0; i < cp->hash; i++) |
343 | cp->perfect[i].res = p->perfect[i].res; | 342 | cp->perfect[i].res = p->perfect[i].res; |
@@ -348,11 +347,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
348 | err = tcindex_filter_result_init(&new_filter_result); | 347 | err = tcindex_filter_result_init(&new_filter_result); |
349 | if (err < 0) | 348 | if (err < 0) |
350 | goto errout1; | 349 | goto errout1; |
351 | err = tcindex_filter_result_init(&cr); | ||
352 | if (err < 0) | ||
353 | goto errout1; | ||
354 | if (old_r) | 350 | if (old_r) |
355 | cr.res = r->res; | 351 | cr = r->res; |
356 | 352 | ||
357 | if (tb[TCA_TCINDEX_HASH]) | 353 | if (tb[TCA_TCINDEX_HASH]) |
358 | cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); | 354 | cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); |
@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
406 | err = -ENOMEM; | 402 | err = -ENOMEM; |
407 | if (!cp->perfect && !cp->h) { | 403 | if (!cp->perfect && !cp->h) { |
408 | if (valid_perfect_hash(cp)) { | 404 | if (valid_perfect_hash(cp)) { |
409 | if (tcindex_alloc_perfect_hash(cp) < 0) | 405 | if (tcindex_alloc_perfect_hash(net, cp) < 0) |
410 | goto errout_alloc; | 406 | goto errout_alloc; |
411 | balloc = 1; | 407 | balloc = 1; |
412 | } else { | 408 | } else { |
@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
443 | } | 439 | } |
444 | 440 | ||
445 | if (tb[TCA_TCINDEX_CLASSID]) { | 441 | if (tb[TCA_TCINDEX_CLASSID]) { |
446 | cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); | 442 | cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); |
447 | tcf_bind_filter(tp, &cr.res, base); | 443 | tcf_bind_filter(tp, &cr, base); |
448 | } | 444 | } |
449 | 445 | ||
450 | if (old_r && old_r != r) { | 446 | if (old_r && old_r != r) { |
@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
456 | } | 452 | } |
457 | 453 | ||
458 | oldp = p; | 454 | oldp = p; |
459 | r->res = cr.res; | 455 | r->res = cr; |
460 | tcf_exts_change(&r->exts, &e); | 456 | tcf_exts_change(&r->exts, &e); |
461 | 457 | ||
462 | rcu_assign_pointer(tp->root, cp); | 458 | rcu_assign_pointer(tp->root, cp); |
@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
475 | ; /* nothing */ | 471 | ; /* nothing */ |
476 | 472 | ||
477 | rcu_assign_pointer(*fp, f); | 473 | rcu_assign_pointer(*fp, f); |
474 | } else { | ||
475 | tcf_exts_destroy(&new_filter_result.exts); | ||
478 | } | 476 | } |
479 | 477 | ||
480 | if (oldp) | 478 | if (oldp) |
481 | call_rcu(&oldp->rcu, __tcindex_partial_destroy); | 479 | tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work); |
482 | return 0; | 480 | return 0; |
483 | 481 | ||
484 | errout_alloc: | 482 | errout_alloc: |
@@ -487,7 +485,6 @@ errout_alloc: | |||
487 | else if (balloc == 2) | 485 | else if (balloc == 2) |
488 | kfree(cp->h); | 486 | kfree(cp->h); |
489 | errout1: | 487 | errout1: |
490 | tcf_exts_destroy(&cr.exts); | ||
491 | tcf_exts_destroy(&new_filter_result.exts); | 488 | tcf_exts_destroy(&new_filter_result.exts); |
492 | errout: | 489 | errout: |
493 | kfree(cp); | 490 | kfree(cp); |
@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp, | |||
562 | struct netlink_ext_ack *extack) | 559 | struct netlink_ext_ack *extack) |
563 | { | 560 | { |
564 | struct tcindex_data *p = rtnl_dereference(tp->root); | 561 | struct tcindex_data *p = rtnl_dereference(tp->root); |
565 | struct tcf_walker walker; | 562 | int i; |
566 | 563 | ||
567 | pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); | 564 | pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); |
568 | walker.count = 0; | ||
569 | walker.skip = 0; | ||
570 | walker.fn = tcindex_destroy_element; | ||
571 | tcindex_walk(tp, &walker); | ||
572 | 565 | ||
573 | call_rcu(&p->rcu, __tcindex_destroy); | 566 | if (p->perfect) { |
567 | for (i = 0; i < p->hash; i++) { | ||
568 | struct tcindex_filter_result *r = p->perfect + i; | ||
569 | |||
570 | tcf_unbind_filter(tp, &r->res); | ||
571 | if (tcf_exts_get_net(&r->exts)) | ||
572 | tcf_queue_work(&r->rwork, | ||
573 | tcindex_destroy_rexts_work); | ||
574 | else | ||
575 | __tcindex_destroy_rexts(r); | ||
576 | } | ||
577 | } | ||
578 | |||
579 | for (i = 0; p->h && i < p->hash; i++) { | ||
580 | struct tcindex_filter *f, *next; | ||
581 | bool last; | ||
582 | |||
583 | for (f = rtnl_dereference(p->h[i]); f; f = next) { | ||
584 | next = rtnl_dereference(f->next); | ||
585 | tcindex_delete(tp, &f->result, &last, NULL); | ||
586 | } | ||
587 | } | ||
588 | |||
589 | tcf_queue_work(&p->rwork, tcindex_destroy_work); | ||
574 | } | 590 | } |
575 | 591 | ||
576 | 592 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 66ba2ce2320f..968a85fe4d4a 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -500,7 +500,7 @@ static void dev_watchdog_down(struct net_device *dev) | |||
500 | * netif_carrier_on - set carrier | 500 | * netif_carrier_on - set carrier |
501 | * @dev: network device | 501 | * @dev: network device |
502 | * | 502 | * |
503 | * Device has detected that carrier. | 503 | * Device has detected acquisition of carrier. |
504 | */ | 504 | */ |
505 | void netif_carrier_on(struct net_device *dev) | 505 | void netif_carrier_on(struct net_device *dev) |
506 | { | 506 | { |
diff --git a/net/sctp/diag.c b/net/sctp/diag.c index 078f01a8d582..435847d98b51 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c | |||
@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc) | |||
256 | + nla_total_size(1) /* INET_DIAG_TOS */ | 256 | + nla_total_size(1) /* INET_DIAG_TOS */ |
257 | + nla_total_size(1) /* INET_DIAG_TCLASS */ | 257 | + nla_total_size(1) /* INET_DIAG_TCLASS */ |
258 | + nla_total_size(4) /* INET_DIAG_MARK */ | 258 | + nla_total_size(4) /* INET_DIAG_MARK */ |
259 | + nla_total_size(4) /* INET_DIAG_CLASS_ID */ | ||
259 | + nla_total_size(addrlen * asoc->peer.transport_count) | 260 | + nla_total_size(addrlen * asoc->peer.transport_count) |
260 | + nla_total_size(addrlen * addrcnt) | 261 | + nla_total_size(addrlen * addrcnt) |
261 | + nla_total_size(sizeof(struct inet_diag_meminfo)) | 262 | + nla_total_size(sizeof(struct inet_diag_meminfo)) |
diff --git a/net/sctp/offload.c b/net/sctp/offload.c index 123e9f2dc226..edfcf16e704c 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c | |||
@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb) | |||
36 | { | 36 | { |
37 | skb->ip_summed = CHECKSUM_NONE; | 37 | skb->ip_summed = CHECKSUM_NONE; |
38 | skb->csum_not_inet = 0; | 38 | skb->csum_not_inet = 0; |
39 | gso_reset_checksum(skb, ~0); | ||
39 | return sctp_compute_cksum(skb, skb_transport_offset(skb)); | 40 | return sctp_compute_cksum(skb, skb_transport_offset(skb)); |
40 | } | 41 | } |
41 | 42 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f93c3cf9e567..65d6d04546ae 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -2027,7 +2027,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
2027 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; | 2027 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
2028 | struct sctp_transport *transport = NULL; | 2028 | struct sctp_transport *transport = NULL; |
2029 | struct sctp_sndrcvinfo _sinfo, *sinfo; | 2029 | struct sctp_sndrcvinfo _sinfo, *sinfo; |
2030 | struct sctp_association *asoc; | 2030 | struct sctp_association *asoc, *tmp; |
2031 | struct sctp_cmsgs cmsgs; | 2031 | struct sctp_cmsgs cmsgs; |
2032 | union sctp_addr *daddr; | 2032 | union sctp_addr *daddr; |
2033 | bool new = false; | 2033 | bool new = false; |
@@ -2053,7 +2053,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
2053 | 2053 | ||
2054 | /* SCTP_SENDALL process */ | 2054 | /* SCTP_SENDALL process */ |
2055 | if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) { | 2055 | if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) { |
2056 | list_for_each_entry(asoc, &ep->asocs, asocs) { | 2056 | list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) { |
2057 | err = sctp_sendmsg_check_sflags(asoc, sflags, msg, | 2057 | err = sctp_sendmsg_check_sflags(asoc, sflags, msg, |
2058 | msg_len); | 2058 | msg_len); |
2059 | if (err == 0) | 2059 | if (err == 0) |
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 80e0ae5534ec..2936ed17bf9e 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count) | |||
84 | } | 84 | } |
85 | } | 85 | } |
86 | 86 | ||
87 | static size_t fa_index(struct flex_array *fa, void *elem, size_t count) | ||
88 | { | ||
89 | size_t index = 0; | ||
90 | |||
91 | while (count--) { | ||
92 | if (elem == flex_array_get(fa, index)) | ||
93 | break; | ||
94 | index++; | ||
95 | } | ||
96 | |||
97 | return index; | ||
98 | } | ||
99 | |||
87 | /* Migrates chunks from stream queues to new stream queues if needed, | 100 | /* Migrates chunks from stream queues to new stream queues if needed, |
88 | * but not across associations. Also, removes those chunks to streams | 101 | * but not across associations. Also, removes those chunks to streams |
89 | * higher than the new max. | 102 | * higher than the new max. |
@@ -131,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream, | |||
131 | } | 144 | } |
132 | } | 145 | } |
133 | 146 | ||
134 | for (i = outcnt; i < stream->outcnt; i++) | 147 | for (i = outcnt; i < stream->outcnt; i++) { |
135 | kfree(SCTP_SO(stream, i)->ext); | 148 | kfree(SCTP_SO(stream, i)->ext); |
149 | SCTP_SO(stream, i)->ext = NULL; | ||
150 | } | ||
136 | } | 151 | } |
137 | 152 | ||
138 | static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, | 153 | static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, |
@@ -147,6 +162,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, | |||
147 | 162 | ||
148 | if (stream->out) { | 163 | if (stream->out) { |
149 | fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); | 164 | fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); |
165 | if (stream->out_curr) { | ||
166 | size_t index = fa_index(stream->out, stream->out_curr, | ||
167 | stream->outcnt); | ||
168 | |||
169 | BUG_ON(index == stream->outcnt); | ||
170 | stream->out_curr = flex_array_get(out, index); | ||
171 | } | ||
150 | fa_free(stream->out); | 172 | fa_free(stream->out); |
151 | } | 173 | } |
152 | 174 | ||
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index 033696e6f74f..ad158d311ffa 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -207,7 +207,8 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport) | |||
207 | 207 | ||
208 | /* When a data chunk is sent, reset the heartbeat interval. */ | 208 | /* When a data chunk is sent, reset the heartbeat interval. */ |
209 | expires = jiffies + sctp_transport_timeout(transport); | 209 | expires = jiffies + sctp_transport_timeout(transport); |
210 | if (time_before(transport->hb_timer.expires, expires) && | 210 | if ((time_before(transport->hb_timer.expires, expires) || |
211 | !timer_pending(&transport->hb_timer)) && | ||
211 | !mod_timer(&transport->hb_timer, | 212 | !mod_timer(&transport->hb_timer, |
212 | expires + prandom_u32_max(transport->rto))) | 213 | expires + prandom_u32_max(transport->rto))) |
213 | sctp_transport_hold(transport); | 214 | sctp_transport_hold(transport); |
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index c4e56602e0c6..b04a813fc865 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -1505,6 +1505,11 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
1505 | 1505 | ||
1506 | smc = smc_sk(sk); | 1506 | smc = smc_sk(sk); |
1507 | lock_sock(sk); | 1507 | lock_sock(sk); |
1508 | if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { | ||
1509 | /* socket was connected before, no more data to read */ | ||
1510 | rc = 0; | ||
1511 | goto out; | ||
1512 | } | ||
1508 | if ((sk->sk_state == SMC_INIT) || | 1513 | if ((sk->sk_state == SMC_INIT) || |
1509 | (sk->sk_state == SMC_LISTEN) || | 1514 | (sk->sk_state == SMC_LISTEN) || |
1510 | (sk->sk_state == SMC_CLOSED)) | 1515 | (sk->sk_state == SMC_CLOSED)) |
@@ -1840,7 +1845,11 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, | |||
1840 | 1845 | ||
1841 | smc = smc_sk(sk); | 1846 | smc = smc_sk(sk); |
1842 | lock_sock(sk); | 1847 | lock_sock(sk); |
1843 | 1848 | if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { | |
1849 | /* socket was connected before, no more data to read */ | ||
1850 | rc = 0; | ||
1851 | goto out; | ||
1852 | } | ||
1844 | if (sk->sk_state == SMC_INIT || | 1853 | if (sk->sk_state == SMC_INIT || |
1845 | sk->sk_state == SMC_LISTEN || | 1854 | sk->sk_state == SMC_LISTEN || |
1846 | sk->sk_state == SMC_CLOSED) | 1855 | sk->sk_state == SMC_CLOSED) |
diff --git a/net/smc/smc.h b/net/smc/smc.h index 5721416d0605..adbdf195eb08 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h | |||
@@ -113,9 +113,9 @@ struct smc_host_cdc_msg { /* Connection Data Control message */ | |||
113 | } __aligned(8); | 113 | } __aligned(8); |
114 | 114 | ||
115 | enum smc_urg_state { | 115 | enum smc_urg_state { |
116 | SMC_URG_VALID, /* data present */ | 116 | SMC_URG_VALID = 1, /* data present */ |
117 | SMC_URG_NOTYET, /* data pending */ | 117 | SMC_URG_NOTYET = 2, /* data pending */ |
118 | SMC_URG_READ /* data was already read */ | 118 | SMC_URG_READ = 3, /* data was already read */ |
119 | }; | 119 | }; |
120 | 120 | ||
121 | struct smc_connection { | 121 | struct smc_connection { |
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index db83332ac1c8..fb07ad8d69a6 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c | |||
@@ -21,13 +21,6 @@ | |||
21 | 21 | ||
22 | /********************************** send *************************************/ | 22 | /********************************** send *************************************/ |
23 | 23 | ||
24 | struct smc_cdc_tx_pend { | ||
25 | struct smc_connection *conn; /* socket connection */ | ||
26 | union smc_host_cursor cursor; /* tx sndbuf cursor sent */ | ||
27 | union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ | ||
28 | u16 ctrl_seq; /* conn. tx sequence # */ | ||
29 | }; | ||
30 | |||
31 | /* handler for send/transmission completion of a CDC msg */ | 24 | /* handler for send/transmission completion of a CDC msg */ |
32 | static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, | 25 | static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, |
33 | struct smc_link *link, | 26 | struct smc_link *link, |
@@ -61,12 +54,14 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, | |||
61 | 54 | ||
62 | int smc_cdc_get_free_slot(struct smc_connection *conn, | 55 | int smc_cdc_get_free_slot(struct smc_connection *conn, |
63 | struct smc_wr_buf **wr_buf, | 56 | struct smc_wr_buf **wr_buf, |
57 | struct smc_rdma_wr **wr_rdma_buf, | ||
64 | struct smc_cdc_tx_pend **pend) | 58 | struct smc_cdc_tx_pend **pend) |
65 | { | 59 | { |
66 | struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; | 60 | struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; |
67 | int rc; | 61 | int rc; |
68 | 62 | ||
69 | rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, | 63 | rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, |
64 | wr_rdma_buf, | ||
70 | (struct smc_wr_tx_pend_priv **)pend); | 65 | (struct smc_wr_tx_pend_priv **)pend); |
71 | if (!conn->alert_token_local) | 66 | if (!conn->alert_token_local) |
72 | /* abnormal termination */ | 67 | /* abnormal termination */ |
@@ -96,6 +91,7 @@ int smc_cdc_msg_send(struct smc_connection *conn, | |||
96 | struct smc_wr_buf *wr_buf, | 91 | struct smc_wr_buf *wr_buf, |
97 | struct smc_cdc_tx_pend *pend) | 92 | struct smc_cdc_tx_pend *pend) |
98 | { | 93 | { |
94 | union smc_host_cursor cfed; | ||
99 | struct smc_link *link; | 95 | struct smc_link *link; |
100 | int rc; | 96 | int rc; |
101 | 97 | ||
@@ -105,12 +101,10 @@ int smc_cdc_msg_send(struct smc_connection *conn, | |||
105 | 101 | ||
106 | conn->tx_cdc_seq++; | 102 | conn->tx_cdc_seq++; |
107 | conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; | 103 | conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; |
108 | smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, | 104 | smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed); |
109 | &conn->local_tx_ctrl, conn); | ||
110 | rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); | 105 | rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); |
111 | if (!rc) | 106 | if (!rc) |
112 | smc_curs_copy(&conn->rx_curs_confirmed, | 107 | smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); |
113 | &conn->local_tx_ctrl.cons, conn); | ||
114 | 108 | ||
115 | return rc; | 109 | return rc; |
116 | } | 110 | } |
@@ -121,11 +115,14 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn) | |||
121 | struct smc_wr_buf *wr_buf; | 115 | struct smc_wr_buf *wr_buf; |
122 | int rc; | 116 | int rc; |
123 | 117 | ||
124 | rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); | 118 | rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend); |
125 | if (rc) | 119 | if (rc) |
126 | return rc; | 120 | return rc; |
127 | 121 | ||
128 | return smc_cdc_msg_send(conn, wr_buf, pend); | 122 | spin_lock_bh(&conn->send_lock); |
123 | rc = smc_cdc_msg_send(conn, wr_buf, pend); | ||
124 | spin_unlock_bh(&conn->send_lock); | ||
125 | return rc; | ||
129 | } | 126 | } |
130 | 127 | ||
131 | int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) | 128 | int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) |
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index b5bfe38c7f9b..f1cdde9d4b89 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h | |||
@@ -160,7 +160,9 @@ static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt, | |||
160 | #endif | 160 | #endif |
161 | } | 161 | } |
162 | 162 | ||
163 | /* calculate cursor difference between old and new, where old <= new */ | 163 | /* calculate cursor difference between old and new, where old <= new and |
164 | * difference cannot exceed size | ||
165 | */ | ||
164 | static inline int smc_curs_diff(unsigned int size, | 166 | static inline int smc_curs_diff(unsigned int size, |
165 | union smc_host_cursor *old, | 167 | union smc_host_cursor *old, |
166 | union smc_host_cursor *new) | 168 | union smc_host_cursor *new) |
@@ -185,28 +187,51 @@ static inline int smc_curs_comp(unsigned int size, | |||
185 | return smc_curs_diff(size, old, new); | 187 | return smc_curs_diff(size, old, new); |
186 | } | 188 | } |
187 | 189 | ||
190 | /* calculate cursor difference between old and new, where old <= new and | ||
191 | * difference may exceed size | ||
192 | */ | ||
193 | static inline int smc_curs_diff_large(unsigned int size, | ||
194 | union smc_host_cursor *old, | ||
195 | union smc_host_cursor *new) | ||
196 | { | ||
197 | if (old->wrap < new->wrap) | ||
198 | return min_t(int, | ||
199 | (size - old->count) + new->count + | ||
200 | (new->wrap - old->wrap - 1) * size, | ||
201 | size); | ||
202 | |||
203 | if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */ | ||
204 | return min_t(int, | ||
205 | (size - old->count) + new->count + | ||
206 | (new->wrap + 0xffff - old->wrap) * size, | ||
207 | size); | ||
208 | |||
209 | return max_t(int, 0, (new->count - old->count)); | ||
210 | } | ||
211 | |||
188 | static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, | 212 | static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, |
189 | union smc_host_cursor *local, | 213 | union smc_host_cursor *local, |
214 | union smc_host_cursor *save, | ||
190 | struct smc_connection *conn) | 215 | struct smc_connection *conn) |
191 | { | 216 | { |
192 | union smc_host_cursor temp; | 217 | smc_curs_copy(save, local, conn); |
193 | 218 | peer->count = htonl(save->count); | |
194 | smc_curs_copy(&temp, local, conn); | 219 | peer->wrap = htons(save->wrap); |
195 | peer->count = htonl(temp.count); | ||
196 | peer->wrap = htons(temp.wrap); | ||
197 | /* peer->reserved = htons(0); must be ensured by caller */ | 220 | /* peer->reserved = htons(0); must be ensured by caller */ |
198 | } | 221 | } |
199 | 222 | ||
200 | static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer, | 223 | static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer, |
201 | struct smc_host_cdc_msg *local, | 224 | struct smc_connection *conn, |
202 | struct smc_connection *conn) | 225 | union smc_host_cursor *save) |
203 | { | 226 | { |
227 | struct smc_host_cdc_msg *local = &conn->local_tx_ctrl; | ||
228 | |||
204 | peer->common.type = local->common.type; | 229 | peer->common.type = local->common.type; |
205 | peer->len = local->len; | 230 | peer->len = local->len; |
206 | peer->seqno = htons(local->seqno); | 231 | peer->seqno = htons(local->seqno); |
207 | peer->token = htonl(local->token); | 232 | peer->token = htonl(local->token); |
208 | smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn); | 233 | smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn); |
209 | smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn); | 234 | smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn); |
210 | peer->prod_flags = local->prod_flags; | 235 | peer->prod_flags = local->prod_flags; |
211 | peer->conn_state_flags = local->conn_state_flags; | 236 | peer->conn_state_flags = local->conn_state_flags; |
212 | } | 237 | } |
@@ -270,10 +295,16 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, | |||
270 | smcr_cdc_msg_to_host(local, peer, conn); | 295 | smcr_cdc_msg_to_host(local, peer, conn); |
271 | } | 296 | } |
272 | 297 | ||
273 | struct smc_cdc_tx_pend; | 298 | struct smc_cdc_tx_pend { |
299 | struct smc_connection *conn; /* socket connection */ | ||
300 | union smc_host_cursor cursor; /* tx sndbuf cursor sent */ | ||
301 | union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ | ||
302 | u16 ctrl_seq; /* conn. tx sequence # */ | ||
303 | }; | ||
274 | 304 | ||
275 | int smc_cdc_get_free_slot(struct smc_connection *conn, | 305 | int smc_cdc_get_free_slot(struct smc_connection *conn, |
276 | struct smc_wr_buf **wr_buf, | 306 | struct smc_wr_buf **wr_buf, |
307 | struct smc_rdma_wr **wr_rdma_buf, | ||
277 | struct smc_cdc_tx_pend **pend); | 308 | struct smc_cdc_tx_pend **pend); |
278 | void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); | 309 | void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); |
279 | int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, | 310 | int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, |
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 776e9dfc915d..d53fd588d1f5 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c | |||
@@ -378,7 +378,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info) | |||
378 | vec.iov_len = sizeof(struct smc_clc_msg_decline); | 378 | vec.iov_len = sizeof(struct smc_clc_msg_decline); |
379 | len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, | 379 | len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, |
380 | sizeof(struct smc_clc_msg_decline)); | 380 | sizeof(struct smc_clc_msg_decline)); |
381 | if (len < sizeof(struct smc_clc_msg_decline)) | 381 | if (len < 0 || len < sizeof(struct smc_clc_msg_decline)) |
382 | len = -EPROTO; | 382 | len = -EPROTO; |
383 | return len > 0 ? 0 : len; | 383 | return len > 0 ? 0 : len; |
384 | } | 384 | } |
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index ea2b87f29469..e39cadda1bf5 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c | |||
@@ -345,14 +345,7 @@ static void smc_close_passive_work(struct work_struct *work) | |||
345 | 345 | ||
346 | switch (sk->sk_state) { | 346 | switch (sk->sk_state) { |
347 | case SMC_INIT: | 347 | case SMC_INIT: |
348 | if (atomic_read(&conn->bytes_to_rcv) || | 348 | sk->sk_state = SMC_APPCLOSEWAIT1; |
349 | (rxflags->peer_done_writing && | ||
350 | !smc_cdc_rxed_any_close(conn))) { | ||
351 | sk->sk_state = SMC_APPCLOSEWAIT1; | ||
352 | } else { | ||
353 | sk->sk_state = SMC_CLOSED; | ||
354 | sock_put(sk); /* passive closing */ | ||
355 | } | ||
356 | break; | 349 | break; |
357 | case SMC_ACTIVE: | 350 | case SMC_ACTIVE: |
358 | sk->sk_state = SMC_APPCLOSEWAIT1; | 351 | sk->sk_state = SMC_APPCLOSEWAIT1; |
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 35c1cdc93e1c..aa1c551cee81 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c | |||
@@ -128,6 +128,8 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn) | |||
128 | { | 128 | { |
129 | struct smc_link_group *lgr = conn->lgr; | 129 | struct smc_link_group *lgr = conn->lgr; |
130 | 130 | ||
131 | if (!lgr) | ||
132 | return; | ||
131 | write_lock_bh(&lgr->conns_lock); | 133 | write_lock_bh(&lgr->conns_lock); |
132 | if (conn->alert_token_local) { | 134 | if (conn->alert_token_local) { |
133 | __smc_lgr_unregister_conn(conn); | 135 | __smc_lgr_unregister_conn(conn); |
@@ -300,13 +302,13 @@ static void smc_buf_unuse(struct smc_connection *conn, | |||
300 | conn->sndbuf_desc->used = 0; | 302 | conn->sndbuf_desc->used = 0; |
301 | if (conn->rmb_desc) { | 303 | if (conn->rmb_desc) { |
302 | if (!conn->rmb_desc->regerr) { | 304 | if (!conn->rmb_desc->regerr) { |
303 | conn->rmb_desc->used = 0; | ||
304 | if (!lgr->is_smcd) { | 305 | if (!lgr->is_smcd) { |
305 | /* unregister rmb with peer */ | 306 | /* unregister rmb with peer */ |
306 | smc_llc_do_delete_rkey( | 307 | smc_llc_do_delete_rkey( |
307 | &lgr->lnk[SMC_SINGLE_LINK], | 308 | &lgr->lnk[SMC_SINGLE_LINK], |
308 | conn->rmb_desc); | 309 | conn->rmb_desc); |
309 | } | 310 | } |
311 | conn->rmb_desc->used = 0; | ||
310 | } else { | 312 | } else { |
311 | /* buf registration failed, reuse not possible */ | 313 | /* buf registration failed, reuse not possible */ |
312 | write_lock_bh(&lgr->rmbs_lock); | 314 | write_lock_bh(&lgr->rmbs_lock); |
@@ -628,6 +630,8 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, | |||
628 | local_contact = SMC_REUSE_CONTACT; | 630 | local_contact = SMC_REUSE_CONTACT; |
629 | conn->lgr = lgr; | 631 | conn->lgr = lgr; |
630 | smc_lgr_register_conn(conn); /* add smc conn to lgr */ | 632 | smc_lgr_register_conn(conn); /* add smc conn to lgr */ |
633 | if (delayed_work_pending(&lgr->free_work)) | ||
634 | cancel_delayed_work(&lgr->free_work); | ||
631 | write_unlock_bh(&lgr->conns_lock); | 635 | write_unlock_bh(&lgr->conns_lock); |
632 | break; | 636 | break; |
633 | } | 637 | } |
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index b00287989a3d..8806d2afa6ed 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h | |||
@@ -52,6 +52,24 @@ enum smc_wr_reg_state { | |||
52 | FAILED /* ib_wr_reg_mr response: failure */ | 52 | FAILED /* ib_wr_reg_mr response: failure */ |
53 | }; | 53 | }; |
54 | 54 | ||
55 | struct smc_rdma_sge { /* sges for RDMA writes */ | ||
56 | struct ib_sge wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE]; | ||
57 | }; | ||
58 | |||
59 | #define SMC_MAX_RDMA_WRITES 2 /* max. # of RDMA writes per | ||
60 | * message send | ||
61 | */ | ||
62 | |||
63 | struct smc_rdma_sges { /* sges per message send */ | ||
64 | struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES]; | ||
65 | }; | ||
66 | |||
67 | struct smc_rdma_wr { /* work requests per message | ||
68 | * send | ||
69 | */ | ||
70 | struct ib_rdma_wr wr_tx_rdma[SMC_MAX_RDMA_WRITES]; | ||
71 | }; | ||
72 | |||
55 | struct smc_link { | 73 | struct smc_link { |
56 | struct smc_ib_device *smcibdev; /* ib-device */ | 74 | struct smc_ib_device *smcibdev; /* ib-device */ |
57 | u8 ibport; /* port - values 1 | 2 */ | 75 | u8 ibport; /* port - values 1 | 2 */ |
@@ -64,6 +82,8 @@ struct smc_link { | |||
64 | struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ | 82 | struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ |
65 | struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ | 83 | struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ |
66 | struct ib_sge *wr_tx_sges; /* WR send gather meta data */ | 84 | struct ib_sge *wr_tx_sges; /* WR send gather meta data */ |
85 | struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/ | ||
86 | struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */ | ||
67 | struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ | 87 | struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ |
68 | /* above four vectors have wr_tx_cnt elements and use the same index */ | 88 | /* above four vectors have wr_tx_cnt elements and use the same index */ |
69 | dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ | 89 | dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ |
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index e519ef29c0ff..76487a16934e 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c | |||
@@ -289,8 +289,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk) | |||
289 | 289 | ||
290 | static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) | 290 | static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) |
291 | { | 291 | { |
292 | struct smc_ib_device *smcibdev = | 292 | struct smc_link *lnk = (struct smc_link *)priv; |
293 | (struct smc_ib_device *)ibevent->device; | 293 | struct smc_ib_device *smcibdev = lnk->smcibdev; |
294 | u8 port_idx; | 294 | u8 port_idx; |
295 | 295 | ||
296 | switch (ibevent->event) { | 296 | switch (ibevent->event) { |
@@ -298,7 +298,7 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) | |||
298 | case IB_EVENT_GID_CHANGE: | 298 | case IB_EVENT_GID_CHANGE: |
299 | case IB_EVENT_PORT_ERR: | 299 | case IB_EVENT_PORT_ERR: |
300 | case IB_EVENT_QP_ACCESS_ERR: | 300 | case IB_EVENT_QP_ACCESS_ERR: |
301 | port_idx = ibevent->element.port_num - 1; | 301 | port_idx = ibevent->element.qp->port - 1; |
302 | set_bit(port_idx, &smcibdev->port_event_mask); | 302 | set_bit(port_idx, &smcibdev->port_event_mask); |
303 | schedule_work(&smcibdev->port_event_work); | 303 | schedule_work(&smcibdev->port_event_work); |
304 | break; | 304 | break; |
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index a6d3623d06f4..4fd60c522802 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c | |||
@@ -166,7 +166,8 @@ static int smc_llc_add_pending_send(struct smc_link *link, | |||
166 | { | 166 | { |
167 | int rc; | 167 | int rc; |
168 | 168 | ||
169 | rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend); | 169 | rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL, |
170 | pend); | ||
170 | if (rc < 0) | 171 | if (rc < 0) |
171 | return rc; | 172 | return rc; |
172 | BUILD_BUG_ON_MSG( | 173 | BUILD_BUG_ON_MSG( |
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 7cb3e4f07c10..632c3109dee5 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c | |||
@@ -27,7 +27,7 @@ | |||
27 | static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { | 27 | static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { |
28 | [SMC_PNETID_NAME] = { | 28 | [SMC_PNETID_NAME] = { |
29 | .type = NLA_NUL_STRING, | 29 | .type = NLA_NUL_STRING, |
30 | .len = SMC_MAX_PNETID_LEN - 1 | 30 | .len = SMC_MAX_PNETID_LEN |
31 | }, | 31 | }, |
32 | [SMC_PNETID_ETHNAME] = { | 32 | [SMC_PNETID_ETHNAME] = { |
33 | .type = NLA_NUL_STRING, | 33 | .type = NLA_NUL_STRING, |
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index d8366ed51757..f93f3580c100 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c | |||
@@ -165,12 +165,11 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) | |||
165 | conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; | 165 | conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; |
166 | 166 | ||
167 | if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { | 167 | if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { |
168 | if (send_done) | ||
169 | return send_done; | ||
168 | rc = smc_tx_wait(smc, msg->msg_flags); | 170 | rc = smc_tx_wait(smc, msg->msg_flags); |
169 | if (rc) { | 171 | if (rc) |
170 | if (send_done) | ||
171 | return send_done; | ||
172 | goto out_err; | 172 | goto out_err; |
173 | } | ||
174 | continue; | 173 | continue; |
175 | } | 174 | } |
176 | 175 | ||
@@ -267,27 +266,23 @@ int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len, | |||
267 | 266 | ||
268 | /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ | 267 | /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ |
269 | static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, | 268 | static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, |
270 | int num_sges, struct ib_sge sges[]) | 269 | int num_sges, struct ib_rdma_wr *rdma_wr) |
271 | { | 270 | { |
272 | struct smc_link_group *lgr = conn->lgr; | 271 | struct smc_link_group *lgr = conn->lgr; |
273 | struct ib_rdma_wr rdma_wr; | ||
274 | struct smc_link *link; | 272 | struct smc_link *link; |
275 | int rc; | 273 | int rc; |
276 | 274 | ||
277 | memset(&rdma_wr, 0, sizeof(rdma_wr)); | ||
278 | link = &lgr->lnk[SMC_SINGLE_LINK]; | 275 | link = &lgr->lnk[SMC_SINGLE_LINK]; |
279 | rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link); | 276 | rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link); |
280 | rdma_wr.wr.sg_list = sges; | 277 | rdma_wr->wr.num_sge = num_sges; |
281 | rdma_wr.wr.num_sge = num_sges; | 278 | rdma_wr->remote_addr = |
282 | rdma_wr.wr.opcode = IB_WR_RDMA_WRITE; | ||
283 | rdma_wr.remote_addr = | ||
284 | lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + | 279 | lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + |
285 | /* RMBE within RMB */ | 280 | /* RMBE within RMB */ |
286 | conn->tx_off + | 281 | conn->tx_off + |
287 | /* offset within RMBE */ | 282 | /* offset within RMBE */ |
288 | peer_rmbe_offset; | 283 | peer_rmbe_offset; |
289 | rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; | 284 | rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; |
290 | rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL); | 285 | rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL); |
291 | if (rc) { | 286 | if (rc) { |
292 | conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; | 287 | conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; |
293 | smc_lgr_terminate(lgr); | 288 | smc_lgr_terminate(lgr); |
@@ -314,24 +309,25 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn, | |||
314 | /* SMC-R helper for smc_tx_rdma_writes() */ | 309 | /* SMC-R helper for smc_tx_rdma_writes() */ |
315 | static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, | 310 | static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, |
316 | size_t src_off, size_t src_len, | 311 | size_t src_off, size_t src_len, |
317 | size_t dst_off, size_t dst_len) | 312 | size_t dst_off, size_t dst_len, |
313 | struct smc_rdma_wr *wr_rdma_buf) | ||
318 | { | 314 | { |
319 | dma_addr_t dma_addr = | 315 | dma_addr_t dma_addr = |
320 | sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); | 316 | sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); |
321 | struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; | ||
322 | int src_len_sum = src_len, dst_len_sum = dst_len; | 317 | int src_len_sum = src_len, dst_len_sum = dst_len; |
323 | struct ib_sge sges[SMC_IB_MAX_SEND_SGE]; | ||
324 | int sent_count = src_off; | 318 | int sent_count = src_off; |
325 | int srcchunk, dstchunk; | 319 | int srcchunk, dstchunk; |
326 | int num_sges; | 320 | int num_sges; |
327 | int rc; | 321 | int rc; |
328 | 322 | ||
329 | for (dstchunk = 0; dstchunk < 2; dstchunk++) { | 323 | for (dstchunk = 0; dstchunk < 2; dstchunk++) { |
324 | struct ib_sge *sge = | ||
325 | wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list; | ||
326 | |||
330 | num_sges = 0; | 327 | num_sges = 0; |
331 | for (srcchunk = 0; srcchunk < 2; srcchunk++) { | 328 | for (srcchunk = 0; srcchunk < 2; srcchunk++) { |
332 | sges[srcchunk].addr = dma_addr + src_off; | 329 | sge[srcchunk].addr = dma_addr + src_off; |
333 | sges[srcchunk].length = src_len; | 330 | sge[srcchunk].length = src_len; |
334 | sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; | ||
335 | num_sges++; | 331 | num_sges++; |
336 | 332 | ||
337 | src_off += src_len; | 333 | src_off += src_len; |
@@ -344,7 +340,8 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, | |||
344 | src_len = dst_len - src_len; /* remainder */ | 340 | src_len = dst_len - src_len; /* remainder */ |
345 | src_len_sum += src_len; | 341 | src_len_sum += src_len; |
346 | } | 342 | } |
347 | rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); | 343 | rc = smc_tx_rdma_write(conn, dst_off, num_sges, |
344 | &wr_rdma_buf->wr_tx_rdma[dstchunk]); | ||
348 | if (rc) | 345 | if (rc) |
349 | return rc; | 346 | return rc; |
350 | if (dst_len_sum == len) | 347 | if (dst_len_sum == len) |
@@ -403,7 +400,8 @@ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len, | |||
403 | /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; | 400 | /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; |
404 | * usable snd_wnd as max transmit | 401 | * usable snd_wnd as max transmit |
405 | */ | 402 | */ |
406 | static int smc_tx_rdma_writes(struct smc_connection *conn) | 403 | static int smc_tx_rdma_writes(struct smc_connection *conn, |
404 | struct smc_rdma_wr *wr_rdma_buf) | ||
407 | { | 405 | { |
408 | size_t len, src_len, dst_off, dst_len; /* current chunk values */ | 406 | size_t len, src_len, dst_off, dst_len; /* current chunk values */ |
409 | union smc_host_cursor sent, prep, prod, cons; | 407 | union smc_host_cursor sent, prep, prod, cons; |
@@ -464,7 +462,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) | |||
464 | dst_off, dst_len); | 462 | dst_off, dst_len); |
465 | else | 463 | else |
466 | rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, | 464 | rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, |
467 | dst_off, dst_len); | 465 | dst_off, dst_len, wr_rdma_buf); |
468 | if (rc) | 466 | if (rc) |
469 | return rc; | 467 | return rc; |
470 | 468 | ||
@@ -485,31 +483,30 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) | |||
485 | static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) | 483 | static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) |
486 | { | 484 | { |
487 | struct smc_cdc_producer_flags *pflags; | 485 | struct smc_cdc_producer_flags *pflags; |
486 | struct smc_rdma_wr *wr_rdma_buf; | ||
488 | struct smc_cdc_tx_pend *pend; | 487 | struct smc_cdc_tx_pend *pend; |
489 | struct smc_wr_buf *wr_buf; | 488 | struct smc_wr_buf *wr_buf; |
490 | int rc; | 489 | int rc; |
491 | 490 | ||
492 | spin_lock_bh(&conn->send_lock); | 491 | rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend); |
493 | rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); | ||
494 | if (rc < 0) { | 492 | if (rc < 0) { |
495 | if (rc == -EBUSY) { | 493 | if (rc == -EBUSY) { |
496 | struct smc_sock *smc = | 494 | struct smc_sock *smc = |
497 | container_of(conn, struct smc_sock, conn); | 495 | container_of(conn, struct smc_sock, conn); |
498 | 496 | ||
499 | if (smc->sk.sk_err == ECONNABORTED) { | 497 | if (smc->sk.sk_err == ECONNABORTED) |
500 | rc = sock_error(&smc->sk); | 498 | return sock_error(&smc->sk); |
501 | goto out_unlock; | ||
502 | } | ||
503 | rc = 0; | 499 | rc = 0; |
504 | if (conn->alert_token_local) /* connection healthy */ | 500 | if (conn->alert_token_local) /* connection healthy */ |
505 | mod_delayed_work(system_wq, &conn->tx_work, | 501 | mod_delayed_work(system_wq, &conn->tx_work, |
506 | SMC_TX_WORK_DELAY); | 502 | SMC_TX_WORK_DELAY); |
507 | } | 503 | } |
508 | goto out_unlock; | 504 | return rc; |
509 | } | 505 | } |
510 | 506 | ||
507 | spin_lock_bh(&conn->send_lock); | ||
511 | if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { | 508 | if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { |
512 | rc = smc_tx_rdma_writes(conn); | 509 | rc = smc_tx_rdma_writes(conn, wr_rdma_buf); |
513 | if (rc) { | 510 | if (rc) { |
514 | smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], | 511 | smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], |
515 | (struct smc_wr_tx_pend_priv *)pend); | 512 | (struct smc_wr_tx_pend_priv *)pend); |
@@ -536,7 +533,7 @@ static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn) | |||
536 | 533 | ||
537 | spin_lock_bh(&conn->send_lock); | 534 | spin_lock_bh(&conn->send_lock); |
538 | if (!pflags->urg_data_present) | 535 | if (!pflags->urg_data_present) |
539 | rc = smc_tx_rdma_writes(conn); | 536 | rc = smc_tx_rdma_writes(conn, NULL); |
540 | if (!rc) | 537 | if (!rc) |
541 | rc = smcd_cdc_msg_send(conn); | 538 | rc = smcd_cdc_msg_send(conn); |
542 | 539 | ||
@@ -598,7 +595,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force) | |||
598 | if (to_confirm > conn->rmbe_update_limit) { | 595 | if (to_confirm > conn->rmbe_update_limit) { |
599 | smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); | 596 | smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); |
600 | sender_free = conn->rmb_desc->len - | 597 | sender_free = conn->rmb_desc->len - |
601 | smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); | 598 | smc_curs_diff_large(conn->rmb_desc->len, |
599 | &cfed, &prod); | ||
602 | } | 600 | } |
603 | 601 | ||
604 | if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || | 602 | if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || |
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index c2694750a6a8..253aa75dc2b6 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c | |||
@@ -160,6 +160,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) | |||
160 | * @link: Pointer to smc_link used to later send the message. | 160 | * @link: Pointer to smc_link used to later send the message. |
161 | * @handler: Send completion handler function pointer. | 161 | * @handler: Send completion handler function pointer. |
162 | * @wr_buf: Out value returns pointer to message buffer. | 162 | * @wr_buf: Out value returns pointer to message buffer. |
163 | * @wr_rdma_buf: Out value returns pointer to rdma work request. | ||
163 | * @wr_pend_priv: Out value returns pointer serving as handler context. | 164 | * @wr_pend_priv: Out value returns pointer serving as handler context. |
164 | * | 165 | * |
165 | * Return: 0 on success, or -errno on error. | 166 | * Return: 0 on success, or -errno on error. |
@@ -167,6 +168,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) | |||
167 | int smc_wr_tx_get_free_slot(struct smc_link *link, | 168 | int smc_wr_tx_get_free_slot(struct smc_link *link, |
168 | smc_wr_tx_handler handler, | 169 | smc_wr_tx_handler handler, |
169 | struct smc_wr_buf **wr_buf, | 170 | struct smc_wr_buf **wr_buf, |
171 | struct smc_rdma_wr **wr_rdma_buf, | ||
170 | struct smc_wr_tx_pend_priv **wr_pend_priv) | 172 | struct smc_wr_tx_pend_priv **wr_pend_priv) |
171 | { | 173 | { |
172 | struct smc_wr_tx_pend *wr_pend; | 174 | struct smc_wr_tx_pend *wr_pend; |
@@ -204,6 +206,8 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, | |||
204 | wr_ib = &link->wr_tx_ibs[idx]; | 206 | wr_ib = &link->wr_tx_ibs[idx]; |
205 | wr_ib->wr_id = wr_id; | 207 | wr_ib->wr_id = wr_id; |
206 | *wr_buf = &link->wr_tx_bufs[idx]; | 208 | *wr_buf = &link->wr_tx_bufs[idx]; |
209 | if (wr_rdma_buf) | ||
210 | *wr_rdma_buf = &link->wr_tx_rdmas[idx]; | ||
207 | *wr_pend_priv = &wr_pend->priv; | 211 | *wr_pend_priv = &wr_pend->priv; |
208 | return 0; | 212 | return 0; |
209 | } | 213 | } |
@@ -218,10 +222,10 @@ int smc_wr_tx_put_slot(struct smc_link *link, | |||
218 | u32 idx = pend->idx; | 222 | u32 idx = pend->idx; |
219 | 223 | ||
220 | /* clear the full struct smc_wr_tx_pend including .priv */ | 224 | /* clear the full struct smc_wr_tx_pend including .priv */ |
221 | memset(&link->wr_tx_pends[pend->idx], 0, | 225 | memset(&link->wr_tx_pends[idx], 0, |
222 | sizeof(link->wr_tx_pends[pend->idx])); | 226 | sizeof(link->wr_tx_pends[idx])); |
223 | memset(&link->wr_tx_bufs[pend->idx], 0, | 227 | memset(&link->wr_tx_bufs[idx], 0, |
224 | sizeof(link->wr_tx_bufs[pend->idx])); | 228 | sizeof(link->wr_tx_bufs[idx])); |
225 | test_and_clear_bit(idx, link->wr_tx_mask); | 229 | test_and_clear_bit(idx, link->wr_tx_mask); |
226 | return 1; | 230 | return 1; |
227 | } | 231 | } |
@@ -465,12 +469,26 @@ static void smc_wr_init_sge(struct smc_link *lnk) | |||
465 | lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; | 469 | lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; |
466 | lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; | 470 | lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; |
467 | lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; | 471 | lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; |
472 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey = | ||
473 | lnk->roce_pd->local_dma_lkey; | ||
474 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey = | ||
475 | lnk->roce_pd->local_dma_lkey; | ||
476 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey = | ||
477 | lnk->roce_pd->local_dma_lkey; | ||
478 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey = | ||
479 | lnk->roce_pd->local_dma_lkey; | ||
468 | lnk->wr_tx_ibs[i].next = NULL; | 480 | lnk->wr_tx_ibs[i].next = NULL; |
469 | lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; | 481 | lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; |
470 | lnk->wr_tx_ibs[i].num_sge = 1; | 482 | lnk->wr_tx_ibs[i].num_sge = 1; |
471 | lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; | 483 | lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; |
472 | lnk->wr_tx_ibs[i].send_flags = | 484 | lnk->wr_tx_ibs[i].send_flags = |
473 | IB_SEND_SIGNALED | IB_SEND_SOLICITED; | 485 | IB_SEND_SIGNALED | IB_SEND_SOLICITED; |
486 | lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE; | ||
487 | lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE; | ||
488 | lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list = | ||
489 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge; | ||
490 | lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list = | ||
491 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge; | ||
474 | } | 492 | } |
475 | for (i = 0; i < lnk->wr_rx_cnt; i++) { | 493 | for (i = 0; i < lnk->wr_rx_cnt; i++) { |
476 | lnk->wr_rx_sges[i].addr = | 494 | lnk->wr_rx_sges[i].addr = |
@@ -521,8 +539,12 @@ void smc_wr_free_link_mem(struct smc_link *lnk) | |||
521 | lnk->wr_tx_mask = NULL; | 539 | lnk->wr_tx_mask = NULL; |
522 | kfree(lnk->wr_tx_sges); | 540 | kfree(lnk->wr_tx_sges); |
523 | lnk->wr_tx_sges = NULL; | 541 | lnk->wr_tx_sges = NULL; |
542 | kfree(lnk->wr_tx_rdma_sges); | ||
543 | lnk->wr_tx_rdma_sges = NULL; | ||
524 | kfree(lnk->wr_rx_sges); | 544 | kfree(lnk->wr_rx_sges); |
525 | lnk->wr_rx_sges = NULL; | 545 | lnk->wr_rx_sges = NULL; |
546 | kfree(lnk->wr_tx_rdmas); | ||
547 | lnk->wr_tx_rdmas = NULL; | ||
526 | kfree(lnk->wr_rx_ibs); | 548 | kfree(lnk->wr_rx_ibs); |
527 | lnk->wr_rx_ibs = NULL; | 549 | lnk->wr_rx_ibs = NULL; |
528 | kfree(lnk->wr_tx_ibs); | 550 | kfree(lnk->wr_tx_ibs); |
@@ -552,10 +574,20 @@ int smc_wr_alloc_link_mem(struct smc_link *link) | |||
552 | GFP_KERNEL); | 574 | GFP_KERNEL); |
553 | if (!link->wr_rx_ibs) | 575 | if (!link->wr_rx_ibs) |
554 | goto no_mem_wr_tx_ibs; | 576 | goto no_mem_wr_tx_ibs; |
577 | link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT, | ||
578 | sizeof(link->wr_tx_rdmas[0]), | ||
579 | GFP_KERNEL); | ||
580 | if (!link->wr_tx_rdmas) | ||
581 | goto no_mem_wr_rx_ibs; | ||
582 | link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT, | ||
583 | sizeof(link->wr_tx_rdma_sges[0]), | ||
584 | GFP_KERNEL); | ||
585 | if (!link->wr_tx_rdma_sges) | ||
586 | goto no_mem_wr_tx_rdmas; | ||
555 | link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), | 587 | link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), |
556 | GFP_KERNEL); | 588 | GFP_KERNEL); |
557 | if (!link->wr_tx_sges) | 589 | if (!link->wr_tx_sges) |
558 | goto no_mem_wr_rx_ibs; | 590 | goto no_mem_wr_tx_rdma_sges; |
559 | link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, | 591 | link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, |
560 | sizeof(link->wr_rx_sges[0]), | 592 | sizeof(link->wr_rx_sges[0]), |
561 | GFP_KERNEL); | 593 | GFP_KERNEL); |
@@ -579,6 +611,10 @@ no_mem_wr_rx_sges: | |||
579 | kfree(link->wr_rx_sges); | 611 | kfree(link->wr_rx_sges); |
580 | no_mem_wr_tx_sges: | 612 | no_mem_wr_tx_sges: |
581 | kfree(link->wr_tx_sges); | 613 | kfree(link->wr_tx_sges); |
614 | no_mem_wr_tx_rdma_sges: | ||
615 | kfree(link->wr_tx_rdma_sges); | ||
616 | no_mem_wr_tx_rdmas: | ||
617 | kfree(link->wr_tx_rdmas); | ||
582 | no_mem_wr_rx_ibs: | 618 | no_mem_wr_rx_ibs: |
583 | kfree(link->wr_rx_ibs); | 619 | kfree(link->wr_rx_ibs); |
584 | no_mem_wr_tx_ibs: | 620 | no_mem_wr_tx_ibs: |
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index 1d85bb14fd6f..09bf32fd3959 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h | |||
@@ -85,6 +85,7 @@ void smc_wr_add_dev(struct smc_ib_device *smcibdev); | |||
85 | 85 | ||
86 | int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, | 86 | int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, |
87 | struct smc_wr_buf **wr_buf, | 87 | struct smc_wr_buf **wr_buf, |
88 | struct smc_rdma_wr **wrs, | ||
88 | struct smc_wr_tx_pend_priv **wr_pend_priv); | 89 | struct smc_wr_tx_pend_priv **wr_pend_priv); |
89 | int smc_wr_tx_put_slot(struct smc_link *link, | 90 | int smc_wr_tx_put_slot(struct smc_link *link, |
90 | struct smc_wr_tx_pend_priv *wr_pend_priv); | 91 | struct smc_wr_tx_pend_priv *wr_pend_priv); |
diff --git a/net/socket.c b/net/socket.c index e89884e2197b..d80d87a395ea 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) | |||
941 | EXPORT_SYMBOL(dlci_ioctl_set); | 941 | EXPORT_SYMBOL(dlci_ioctl_set); |
942 | 942 | ||
943 | static long sock_do_ioctl(struct net *net, struct socket *sock, | 943 | static long sock_do_ioctl(struct net *net, struct socket *sock, |
944 | unsigned int cmd, unsigned long arg, | 944 | unsigned int cmd, unsigned long arg) |
945 | unsigned int ifreq_size) | ||
946 | { | 945 | { |
947 | int err; | 946 | int err; |
948 | void __user *argp = (void __user *)arg; | 947 | void __user *argp = (void __user *)arg; |
@@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock, | |||
968 | } else { | 967 | } else { |
969 | struct ifreq ifr; | 968 | struct ifreq ifr; |
970 | bool need_copyout; | 969 | bool need_copyout; |
971 | if (copy_from_user(&ifr, argp, ifreq_size)) | 970 | if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) |
972 | return -EFAULT; | 971 | return -EFAULT; |
973 | err = dev_ioctl(net, cmd, &ifr, &need_copyout); | 972 | err = dev_ioctl(net, cmd, &ifr, &need_copyout); |
974 | if (!err && need_copyout) | 973 | if (!err && need_copyout) |
975 | if (copy_to_user(argp, &ifr, ifreq_size)) | 974 | if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) |
976 | return -EFAULT; | 975 | return -EFAULT; |
977 | } | 976 | } |
978 | return err; | 977 | return err; |
@@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
1071 | err = open_related_ns(&net->ns, get_net_ns); | 1070 | err = open_related_ns(&net->ns, get_net_ns); |
1072 | break; | 1071 | break; |
1073 | default: | 1072 | default: |
1074 | err = sock_do_ioctl(net, sock, cmd, arg, | 1073 | err = sock_do_ioctl(net, sock, cmd, arg); |
1075 | sizeof(struct ifreq)); | ||
1076 | break; | 1074 | break; |
1077 | } | 1075 | } |
1078 | return err; | 1076 | return err; |
@@ -2780,8 +2778,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock, | |||
2780 | int err; | 2778 | int err; |
2781 | 2779 | ||
2782 | set_fs(KERNEL_DS); | 2780 | set_fs(KERNEL_DS); |
2783 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, | 2781 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); |
2784 | sizeof(struct compat_ifreq)); | ||
2785 | set_fs(old_fs); | 2782 | set_fs(old_fs); |
2786 | if (!err) | 2783 | if (!err) |
2787 | err = compat_put_timeval(&ktv, up); | 2784 | err = compat_put_timeval(&ktv, up); |
@@ -2797,8 +2794,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock, | |||
2797 | int err; | 2794 | int err; |
2798 | 2795 | ||
2799 | set_fs(KERNEL_DS); | 2796 | set_fs(KERNEL_DS); |
2800 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, | 2797 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); |
2801 | sizeof(struct compat_ifreq)); | ||
2802 | set_fs(old_fs); | 2798 | set_fs(old_fs); |
2803 | if (!err) | 2799 | if (!err) |
2804 | err = compat_put_timespec(&kts, up); | 2800 | err = compat_put_timespec(&kts, up); |
@@ -2994,6 +2990,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd, | |||
2994 | return dev_ioctl(net, cmd, &ifreq, NULL); | 2990 | return dev_ioctl(net, cmd, &ifreq, NULL); |
2995 | } | 2991 | } |
2996 | 2992 | ||
2993 | static int compat_ifreq_ioctl(struct net *net, struct socket *sock, | ||
2994 | unsigned int cmd, | ||
2995 | struct compat_ifreq __user *uifr32) | ||
2996 | { | ||
2997 | struct ifreq __user *uifr; | ||
2998 | int err; | ||
2999 | |||
3000 | /* Handle the fact that while struct ifreq has the same *layout* on | ||
3001 | * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data, | ||
3002 | * which are handled elsewhere, it still has different *size* due to | ||
3003 | * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit, | ||
3004 | * resulting in struct ifreq being 32 and 40 bytes respectively). | ||
3005 | * As a result, if the struct happens to be at the end of a page and | ||
3006 | * the next page isn't readable/writable, we get a fault. To prevent | ||
3007 | * that, copy back and forth to the full size. | ||
3008 | */ | ||
3009 | |||
3010 | uifr = compat_alloc_user_space(sizeof(*uifr)); | ||
3011 | if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) | ||
3012 | return -EFAULT; | ||
3013 | |||
3014 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); | ||
3015 | |||
3016 | if (!err) { | ||
3017 | switch (cmd) { | ||
3018 | case SIOCGIFFLAGS: | ||
3019 | case SIOCGIFMETRIC: | ||
3020 | case SIOCGIFMTU: | ||
3021 | case SIOCGIFMEM: | ||
3022 | case SIOCGIFHWADDR: | ||
3023 | case SIOCGIFINDEX: | ||
3024 | case SIOCGIFADDR: | ||
3025 | case SIOCGIFBRDADDR: | ||
3026 | case SIOCGIFDSTADDR: | ||
3027 | case SIOCGIFNETMASK: | ||
3028 | case SIOCGIFPFLAGS: | ||
3029 | case SIOCGIFTXQLEN: | ||
3030 | case SIOCGMIIPHY: | ||
3031 | case SIOCGMIIREG: | ||
3032 | case SIOCGIFNAME: | ||
3033 | if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) | ||
3034 | err = -EFAULT; | ||
3035 | break; | ||
3036 | } | ||
3037 | } | ||
3038 | return err; | ||
3039 | } | ||
3040 | |||
2997 | static int compat_sioc_ifmap(struct net *net, unsigned int cmd, | 3041 | static int compat_sioc_ifmap(struct net *net, unsigned int cmd, |
2998 | struct compat_ifreq __user *uifr32) | 3042 | struct compat_ifreq __user *uifr32) |
2999 | { | 3043 | { |
@@ -3109,8 +3153,7 @@ static int routing_ioctl(struct net *net, struct socket *sock, | |||
3109 | } | 3153 | } |
3110 | 3154 | ||
3111 | set_fs(KERNEL_DS); | 3155 | set_fs(KERNEL_DS); |
3112 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, | 3156 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); |
3113 | sizeof(struct compat_ifreq)); | ||
3114 | set_fs(old_fs); | 3157 | set_fs(old_fs); |
3115 | 3158 | ||
3116 | out: | 3159 | out: |
@@ -3210,21 +3253,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, | |||
3210 | case SIOCSIFTXQLEN: | 3253 | case SIOCSIFTXQLEN: |
3211 | case SIOCBRADDIF: | 3254 | case SIOCBRADDIF: |
3212 | case SIOCBRDELIF: | 3255 | case SIOCBRDELIF: |
3256 | case SIOCGIFNAME: | ||
3213 | case SIOCSIFNAME: | 3257 | case SIOCSIFNAME: |
3214 | case SIOCGMIIPHY: | 3258 | case SIOCGMIIPHY: |
3215 | case SIOCGMIIREG: | 3259 | case SIOCGMIIREG: |
3216 | case SIOCSMIIREG: | 3260 | case SIOCSMIIREG: |
3217 | case SIOCSARP: | ||
3218 | case SIOCGARP: | ||
3219 | case SIOCDARP: | ||
3220 | case SIOCATMARK: | ||
3221 | case SIOCBONDENSLAVE: | 3261 | case SIOCBONDENSLAVE: |
3222 | case SIOCBONDRELEASE: | 3262 | case SIOCBONDRELEASE: |
3223 | case SIOCBONDSETHWADDR: | 3263 | case SIOCBONDSETHWADDR: |
3224 | case SIOCBONDCHANGEACTIVE: | 3264 | case SIOCBONDCHANGEACTIVE: |
3225 | case SIOCGIFNAME: | 3265 | return compat_ifreq_ioctl(net, sock, cmd, argp); |
3226 | return sock_do_ioctl(net, sock, cmd, arg, | 3266 | |
3227 | sizeof(struct compat_ifreq)); | 3267 | case SIOCSARP: |
3268 | case SIOCGARP: | ||
3269 | case SIOCDARP: | ||
3270 | case SIOCATMARK: | ||
3271 | return sock_do_ioctl(net, sock, cmd, arg); | ||
3228 | } | 3272 | } |
3229 | 3273 | ||
3230 | return -ENOIOCTLCMD; | 3274 | return -ENOIOCTLCMD; |
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c index fb6656295204..507105127095 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c | |||
@@ -44,7 +44,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, | |||
44 | unsigned char *cksum, unsigned char *buf) | 44 | unsigned char *cksum, unsigned char *buf) |
45 | { | 45 | { |
46 | struct crypto_sync_skcipher *cipher; | 46 | struct crypto_sync_skcipher *cipher; |
47 | unsigned char plain[8]; | 47 | unsigned char *plain; |
48 | s32 code; | 48 | s32 code; |
49 | 49 | ||
50 | dprintk("RPC: %s:\n", __func__); | 50 | dprintk("RPC: %s:\n", __func__); |
@@ -52,6 +52,10 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, | |||
52 | if (IS_ERR(cipher)) | 52 | if (IS_ERR(cipher)) |
53 | return PTR_ERR(cipher); | 53 | return PTR_ERR(cipher); |
54 | 54 | ||
55 | plain = kmalloc(8, GFP_NOFS); | ||
56 | if (!plain) | ||
57 | return -ENOMEM; | ||
58 | |||
55 | plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); | 59 | plain[0] = (unsigned char) ((seqnum >> 24) & 0xff); |
56 | plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); | 60 | plain[1] = (unsigned char) ((seqnum >> 16) & 0xff); |
57 | plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); | 61 | plain[2] = (unsigned char) ((seqnum >> 8) & 0xff); |
@@ -67,6 +71,7 @@ krb5_make_rc4_seq_num(struct krb5_ctx *kctx, int direction, s32 seqnum, | |||
67 | 71 | ||
68 | code = krb5_encrypt(cipher, cksum, plain, buf, 8); | 72 | code = krb5_encrypt(cipher, cksum, plain, buf, 8); |
69 | out: | 73 | out: |
74 | kfree(plain); | ||
70 | crypto_free_sync_skcipher(cipher); | 75 | crypto_free_sync_skcipher(cipher); |
71 | return code; | 76 | return code; |
72 | } | 77 | } |
@@ -77,12 +82,17 @@ krb5_make_seq_num(struct krb5_ctx *kctx, | |||
77 | u32 seqnum, | 82 | u32 seqnum, |
78 | unsigned char *cksum, unsigned char *buf) | 83 | unsigned char *cksum, unsigned char *buf) |
79 | { | 84 | { |
80 | unsigned char plain[8]; | 85 | unsigned char *plain; |
86 | s32 code; | ||
81 | 87 | ||
82 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) | 88 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) |
83 | return krb5_make_rc4_seq_num(kctx, direction, seqnum, | 89 | return krb5_make_rc4_seq_num(kctx, direction, seqnum, |
84 | cksum, buf); | 90 | cksum, buf); |
85 | 91 | ||
92 | plain = kmalloc(8, GFP_NOFS); | ||
93 | if (!plain) | ||
94 | return -ENOMEM; | ||
95 | |||
86 | plain[0] = (unsigned char) (seqnum & 0xff); | 96 | plain[0] = (unsigned char) (seqnum & 0xff); |
87 | plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); | 97 | plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); |
88 | plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); | 98 | plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); |
@@ -93,7 +103,9 @@ krb5_make_seq_num(struct krb5_ctx *kctx, | |||
93 | plain[6] = direction; | 103 | plain[6] = direction; |
94 | plain[7] = direction; | 104 | plain[7] = direction; |
95 | 105 | ||
96 | return krb5_encrypt(key, cksum, plain, buf, 8); | 106 | code = krb5_encrypt(key, cksum, plain, buf, 8); |
107 | kfree(plain); | ||
108 | return code; | ||
97 | } | 109 | } |
98 | 110 | ||
99 | static s32 | 111 | static s32 |
@@ -101,7 +113,7 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, | |||
101 | unsigned char *buf, int *direction, s32 *seqnum) | 113 | unsigned char *buf, int *direction, s32 *seqnum) |
102 | { | 114 | { |
103 | struct crypto_sync_skcipher *cipher; | 115 | struct crypto_sync_skcipher *cipher; |
104 | unsigned char plain[8]; | 116 | unsigned char *plain; |
105 | s32 code; | 117 | s32 code; |
106 | 118 | ||
107 | dprintk("RPC: %s:\n", __func__); | 119 | dprintk("RPC: %s:\n", __func__); |
@@ -113,20 +125,28 @@ krb5_get_rc4_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, | |||
113 | if (code) | 125 | if (code) |
114 | goto out; | 126 | goto out; |
115 | 127 | ||
128 | plain = kmalloc(8, GFP_NOFS); | ||
129 | if (!plain) { | ||
130 | code = -ENOMEM; | ||
131 | goto out; | ||
132 | } | ||
133 | |||
116 | code = krb5_decrypt(cipher, cksum, buf, plain, 8); | 134 | code = krb5_decrypt(cipher, cksum, buf, plain, 8); |
117 | if (code) | 135 | if (code) |
118 | goto out; | 136 | goto out_plain; |
119 | 137 | ||
120 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) | 138 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) |
121 | || (plain[4] != plain[7])) { | 139 | || (plain[4] != plain[7])) { |
122 | code = (s32)KG_BAD_SEQ; | 140 | code = (s32)KG_BAD_SEQ; |
123 | goto out; | 141 | goto out_plain; |
124 | } | 142 | } |
125 | 143 | ||
126 | *direction = plain[4]; | 144 | *direction = plain[4]; |
127 | 145 | ||
128 | *seqnum = ((plain[0] << 24) | (plain[1] << 16) | | 146 | *seqnum = ((plain[0] << 24) | (plain[1] << 16) | |
129 | (plain[2] << 8) | (plain[3])); | 147 | (plain[2] << 8) | (plain[3])); |
148 | out_plain: | ||
149 | kfree(plain); | ||
130 | out: | 150 | out: |
131 | crypto_free_sync_skcipher(cipher); | 151 | crypto_free_sync_skcipher(cipher); |
132 | return code; | 152 | return code; |
@@ -139,7 +159,7 @@ krb5_get_seq_num(struct krb5_ctx *kctx, | |||
139 | int *direction, u32 *seqnum) | 159 | int *direction, u32 *seqnum) |
140 | { | 160 | { |
141 | s32 code; | 161 | s32 code; |
142 | unsigned char plain[8]; | 162 | unsigned char *plain; |
143 | struct crypto_sync_skcipher *key = kctx->seq; | 163 | struct crypto_sync_skcipher *key = kctx->seq; |
144 | 164 | ||
145 | dprintk("RPC: krb5_get_seq_num:\n"); | 165 | dprintk("RPC: krb5_get_seq_num:\n"); |
@@ -147,18 +167,25 @@ krb5_get_seq_num(struct krb5_ctx *kctx, | |||
147 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) | 167 | if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) |
148 | return krb5_get_rc4_seq_num(kctx, cksum, buf, | 168 | return krb5_get_rc4_seq_num(kctx, cksum, buf, |
149 | direction, seqnum); | 169 | direction, seqnum); |
170 | plain = kmalloc(8, GFP_NOFS); | ||
171 | if (!plain) | ||
172 | return -ENOMEM; | ||
150 | 173 | ||
151 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) | 174 | if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) |
152 | return code; | 175 | goto out; |
153 | 176 | ||
154 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || | 177 | if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || |
155 | (plain[4] != plain[7])) | 178 | (plain[4] != plain[7])) { |
156 | return (s32)KG_BAD_SEQ; | 179 | code = (s32)KG_BAD_SEQ; |
180 | goto out; | ||
181 | } | ||
157 | 182 | ||
158 | *direction = plain[4]; | 183 | *direction = plain[4]; |
159 | 184 | ||
160 | *seqnum = ((plain[0]) | | 185 | *seqnum = ((plain[0]) | |
161 | (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); | 186 | (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); |
162 | 187 | ||
163 | return 0; | 188 | out: |
189 | kfree(plain); | ||
190 | return code; | ||
164 | } | 191 | } |
diff --git a/net/sunrpc/debugfs.c b/net/sunrpc/debugfs.c index 45a033329cd4..19bb356230ed 100644 --- a/net/sunrpc/debugfs.c +++ b/net/sunrpc/debugfs.c | |||
@@ -146,7 +146,7 @@ rpc_clnt_debugfs_register(struct rpc_clnt *clnt) | |||
146 | rcu_read_lock(); | 146 | rcu_read_lock(); |
147 | xprt = rcu_dereference(clnt->cl_xprt); | 147 | xprt = rcu_dereference(clnt->cl_xprt); |
148 | /* no "debugfs" dentry? Don't bother with the symlink. */ | 148 | /* no "debugfs" dentry? Don't bother with the symlink. */ |
149 | if (!xprt->debugfs) { | 149 | if (IS_ERR_OR_NULL(xprt->debugfs)) { |
150 | rcu_read_unlock(); | 150 | rcu_read_unlock(); |
151 | return; | 151 | return; |
152 | } | 152 | } |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index cf51b8f9b15f..1f200119268c 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma, | |||
537 | DMA_TO_DEVICE); | 537 | DMA_TO_DEVICE); |
538 | } | 538 | } |
539 | 539 | ||
540 | /* If the xdr_buf has more elements than the device can | ||
541 | * transmit in a single RDMA Send, then the reply will | ||
542 | * have to be copied into a bounce buffer. | ||
543 | */ | ||
544 | static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma, | ||
545 | struct xdr_buf *xdr, | ||
546 | __be32 *wr_lst) | ||
547 | { | ||
548 | int elements; | ||
549 | |||
550 | /* xdr->head */ | ||
551 | elements = 1; | ||
552 | |||
553 | /* xdr->pages */ | ||
554 | if (!wr_lst) { | ||
555 | unsigned int remaining; | ||
556 | unsigned long pageoff; | ||
557 | |||
558 | pageoff = xdr->page_base & ~PAGE_MASK; | ||
559 | remaining = xdr->page_len; | ||
560 | while (remaining) { | ||
561 | ++elements; | ||
562 | remaining -= min_t(u32, PAGE_SIZE - pageoff, | ||
563 | remaining); | ||
564 | pageoff = 0; | ||
565 | } | ||
566 | } | ||
567 | |||
568 | /* xdr->tail */ | ||
569 | if (xdr->tail[0].iov_len) | ||
570 | ++elements; | ||
571 | |||
572 | /* assume 1 SGE is needed for the transport header */ | ||
573 | return elements >= rdma->sc_max_send_sges; | ||
574 | } | ||
575 | |||
576 | /* The device is not capable of sending the reply directly. | ||
577 | * Assemble the elements of @xdr into the transport header | ||
578 | * buffer. | ||
579 | */ | ||
580 | static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, | ||
581 | struct svc_rdma_send_ctxt *ctxt, | ||
582 | struct xdr_buf *xdr, __be32 *wr_lst) | ||
583 | { | ||
584 | unsigned char *dst, *tailbase; | ||
585 | unsigned int taillen; | ||
586 | |||
587 | dst = ctxt->sc_xprt_buf; | ||
588 | dst += ctxt->sc_sges[0].length; | ||
589 | |||
590 | memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len); | ||
591 | dst += xdr->head[0].iov_len; | ||
592 | |||
593 | tailbase = xdr->tail[0].iov_base; | ||
594 | taillen = xdr->tail[0].iov_len; | ||
595 | if (wr_lst) { | ||
596 | u32 xdrpad; | ||
597 | |||
598 | xdrpad = xdr_padsize(xdr->page_len); | ||
599 | if (taillen && xdrpad) { | ||
600 | tailbase += xdrpad; | ||
601 | taillen -= xdrpad; | ||
602 | } | ||
603 | } else { | ||
604 | unsigned int len, remaining; | ||
605 | unsigned long pageoff; | ||
606 | struct page **ppages; | ||
607 | |||
608 | ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); | ||
609 | pageoff = xdr->page_base & ~PAGE_MASK; | ||
610 | remaining = xdr->page_len; | ||
611 | while (remaining) { | ||
612 | len = min_t(u32, PAGE_SIZE - pageoff, remaining); | ||
613 | |||
614 | memcpy(dst, page_address(*ppages), len); | ||
615 | remaining -= len; | ||
616 | dst += len; | ||
617 | pageoff = 0; | ||
618 | } | ||
619 | } | ||
620 | |||
621 | if (taillen) | ||
622 | memcpy(dst, tailbase, taillen); | ||
623 | |||
624 | ctxt->sc_sges[0].length += xdr->len; | ||
625 | ib_dma_sync_single_for_device(rdma->sc_pd->device, | ||
626 | ctxt->sc_sges[0].addr, | ||
627 | ctxt->sc_sges[0].length, | ||
628 | DMA_TO_DEVICE); | ||
629 | |||
630 | return 0; | ||
631 | } | ||
632 | |||
540 | /* svc_rdma_map_reply_msg - Map the buffer holding RPC message | 633 | /* svc_rdma_map_reply_msg - Map the buffer holding RPC message |
541 | * @rdma: controlling transport | 634 | * @rdma: controlling transport |
542 | * @ctxt: send_ctxt for the Send WR | 635 | * @ctxt: send_ctxt for the Send WR |
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, | |||
559 | u32 xdr_pad; | 652 | u32 xdr_pad; |
560 | int ret; | 653 | int ret; |
561 | 654 | ||
562 | if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) | 655 | if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst)) |
563 | return -EIO; | 656 | return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst); |
657 | |||
658 | ++ctxt->sc_cur_sge_no; | ||
564 | ret = svc_rdma_dma_map_buf(rdma, ctxt, | 659 | ret = svc_rdma_dma_map_buf(rdma, ctxt, |
565 | xdr->head[0].iov_base, | 660 | xdr->head[0].iov_base, |
566 | xdr->head[0].iov_len); | 661 | xdr->head[0].iov_len); |
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, | |||
591 | while (remaining) { | 686 | while (remaining) { |
592 | len = min_t(u32, PAGE_SIZE - page_off, remaining); | 687 | len = min_t(u32, PAGE_SIZE - page_off, remaining); |
593 | 688 | ||
594 | if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) | 689 | ++ctxt->sc_cur_sge_no; |
595 | return -EIO; | ||
596 | ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++, | 690 | ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++, |
597 | page_off, len); | 691 | page_off, len); |
598 | if (ret < 0) | 692 | if (ret < 0) |
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, | |||
606 | len = xdr->tail[0].iov_len; | 700 | len = xdr->tail[0].iov_len; |
607 | tail: | 701 | tail: |
608 | if (len) { | 702 | if (len) { |
609 | if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) | 703 | ++ctxt->sc_cur_sge_no; |
610 | return -EIO; | ||
611 | ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); | 704 | ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); |
612 | if (ret < 0) | 705 | if (ret < 0) |
613 | return ret; | 706 | return ret; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 924c17d46903..57f86c63a463 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
419 | /* Transport header, head iovec, tail iovec */ | 419 | /* Transport header, head iovec, tail iovec */ |
420 | newxprt->sc_max_send_sges = 3; | 420 | newxprt->sc_max_send_sges = 3; |
421 | /* Add one SGE per page list entry */ | 421 | /* Add one SGE per page list entry */ |
422 | newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE; | 422 | newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1; |
423 | if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) { | 423 | if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) |
424 | pr_err("svcrdma: too few Send SGEs available (%d needed)\n", | 424 | newxprt->sc_max_send_sges = dev->attrs.max_send_sge; |
425 | newxprt->sc_max_send_sges); | ||
426 | goto errout; | ||
427 | } | ||
428 | newxprt->sc_max_req_size = svcrdma_max_req_size; | 425 | newxprt->sc_max_req_size = svcrdma_max_req_size; |
429 | newxprt->sc_max_requests = svcrdma_max_requests; | 426 | newxprt->sc_max_requests = svcrdma_max_requests; |
430 | newxprt->sc_max_bc_requests = svcrdma_max_bc_requests; | 427 | newxprt->sc_max_bc_requests = svcrdma_max_bc_requests; |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 4994e75945b8..21113bfd4eca 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -527,7 +527,8 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia, | |||
527 | 527 | ||
528 | sendcq = ib_alloc_cq(ia->ri_device, NULL, | 528 | sendcq = ib_alloc_cq(ia->ri_device, NULL, |
529 | ep->rep_attr.cap.max_send_wr + 1, | 529 | ep->rep_attr.cap.max_send_wr + 1, |
530 | 1, IB_POLL_WORKQUEUE); | 530 | ia->ri_device->num_comp_vectors > 1 ? 1 : 0, |
531 | IB_POLL_WORKQUEUE); | ||
531 | if (IS_ERR(sendcq)) { | 532 | if (IS_ERR(sendcq)) { |
532 | rc = PTR_ERR(sendcq); | 533 | rc = PTR_ERR(sendcq); |
533 | goto out1; | 534 | goto out1; |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 2792a3cae682..85ad5c0678d0 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1145,7 +1145,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, | |||
1145 | default: | 1145 | default: |
1146 | pr_warn("Dropping received illegal msg type\n"); | 1146 | pr_warn("Dropping received illegal msg type\n"); |
1147 | kfree_skb(skb); | 1147 | kfree_skb(skb); |
1148 | return false; | 1148 | return true; |
1149 | }; | 1149 | }; |
1150 | } | 1150 | } |
1151 | 1151 | ||
@@ -1425,6 +1425,10 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, | |||
1425 | l->rcv_unacked = 0; | 1425 | l->rcv_unacked = 0; |
1426 | } else { | 1426 | } else { |
1427 | /* RESET_MSG or ACTIVATE_MSG */ | 1427 | /* RESET_MSG or ACTIVATE_MSG */ |
1428 | if (mtyp == ACTIVATE_MSG) { | ||
1429 | msg_set_dest_session_valid(hdr, 1); | ||
1430 | msg_set_dest_session(hdr, l->peer_session); | ||
1431 | } | ||
1428 | msg_set_max_pkt(hdr, l->advertised_mtu); | 1432 | msg_set_max_pkt(hdr, l->advertised_mtu); |
1429 | strcpy(data, l->if_name); | 1433 | strcpy(data, l->if_name); |
1430 | msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); | 1434 | msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); |
@@ -1642,6 +1646,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, | |||
1642 | rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); | 1646 | rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); |
1643 | break; | 1647 | break; |
1644 | } | 1648 | } |
1649 | |||
1650 | /* If this endpoint was re-created while peer was ESTABLISHING | ||
1651 | * it doesn't know current session number. Force re-synch. | ||
1652 | */ | ||
1653 | if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) && | ||
1654 | l->session != msg_dest_session(hdr)) { | ||
1655 | if (less(l->session, msg_dest_session(hdr))) | ||
1656 | l->session = msg_dest_session(hdr) + 1; | ||
1657 | break; | ||
1658 | } | ||
1659 | |||
1645 | /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ | 1660 | /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ |
1646 | if (mtyp == RESET_MSG || !link_is_up(l)) | 1661 | if (mtyp == RESET_MSG || !link_is_up(l)) |
1647 | rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); | 1662 | rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); |
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index a0924956bb61..d7e4b8b93f9d 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
@@ -360,6 +360,28 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n) | |||
360 | msg_set_bits(m, 1, 0, 0xffff, n); | 360 | msg_set_bits(m, 1, 0, 0xffff, n); |
361 | } | 361 | } |
362 | 362 | ||
363 | /* Note: reusing bits in word 1 for ACTIVATE_MSG only, to re-synch | ||
364 | * link peer session number | ||
365 | */ | ||
366 | static inline bool msg_dest_session_valid(struct tipc_msg *m) | ||
367 | { | ||
368 | return msg_bits(m, 1, 16, 0x1); | ||
369 | } | ||
370 | |||
371 | static inline void msg_set_dest_session_valid(struct tipc_msg *m, bool valid) | ||
372 | { | ||
373 | msg_set_bits(m, 1, 16, 0x1, valid); | ||
374 | } | ||
375 | |||
376 | static inline u16 msg_dest_session(struct tipc_msg *m) | ||
377 | { | ||
378 | return msg_bits(m, 1, 0, 0xffff); | ||
379 | } | ||
380 | |||
381 | static inline void msg_set_dest_session(struct tipc_msg *m, u16 n) | ||
382 | { | ||
383 | msg_set_bits(m, 1, 0, 0xffff, n); | ||
384 | } | ||
363 | 385 | ||
364 | /* | 386 | /* |
365 | * Word 2 | 387 | * Word 2 |
diff --git a/net/tipc/node.c b/net/tipc/node.c index db2a6c3e0be9..2dc4919ab23c 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -830,15 +830,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) | |||
830 | tipc_node_write_lock(n); | 830 | tipc_node_write_lock(n); |
831 | if (!tipc_link_is_establishing(l)) { | 831 | if (!tipc_link_is_establishing(l)) { |
832 | __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); | 832 | __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); |
833 | if (delete) { | ||
834 | kfree(l); | ||
835 | le->link = NULL; | ||
836 | n->link_cnt--; | ||
837 | } | ||
838 | } else { | 833 | } else { |
839 | /* Defuse pending tipc_node_link_up() */ | 834 | /* Defuse pending tipc_node_link_up() */ |
835 | tipc_link_reset(l); | ||
840 | tipc_link_fsm_evt(l, LINK_RESET_EVT); | 836 | tipc_link_fsm_evt(l, LINK_RESET_EVT); |
841 | } | 837 | } |
838 | if (delete) { | ||
839 | kfree(l); | ||
840 | le->link = NULL; | ||
841 | n->link_cnt--; | ||
842 | } | ||
842 | trace_tipc_node_link_down(n, true, "node link down or deleted!"); | 843 | trace_tipc_node_link_down(n, true, "node link down or deleted!"); |
843 | tipc_node_write_unlock(n); | 844 | tipc_node_write_unlock(n); |
844 | if (delete) | 845 | if (delete) |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 1217c90a363b..684f2125fc6b 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -388,7 +388,7 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout) | |||
388 | rc_ = tipc_sk_sock_err((sock_), timeo_); \ | 388 | rc_ = tipc_sk_sock_err((sock_), timeo_); \ |
389 | if (rc_) \ | 389 | if (rc_) \ |
390 | break; \ | 390 | break; \ |
391 | prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \ | 391 | add_wait_queue(sk_sleep(sk_), &wait_); \ |
392 | release_sock(sk_); \ | 392 | release_sock(sk_); \ |
393 | *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ | 393 | *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \ |
394 | sched_annotate_sleep(); \ | 394 | sched_annotate_sleep(); \ |
@@ -1677,7 +1677,7 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk) | |||
1677 | static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) | 1677 | static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) |
1678 | { | 1678 | { |
1679 | struct sock *sk = sock->sk; | 1679 | struct sock *sk = sock->sk; |
1680 | DEFINE_WAIT(wait); | 1680 | DEFINE_WAIT_FUNC(wait, woken_wake_function); |
1681 | long timeo = *timeop; | 1681 | long timeo = *timeop; |
1682 | int err = sock_error(sk); | 1682 | int err = sock_error(sk); |
1683 | 1683 | ||
@@ -1685,15 +1685,17 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) | |||
1685 | return err; | 1685 | return err; |
1686 | 1686 | ||
1687 | for (;;) { | 1687 | for (;;) { |
1688 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
1689 | if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { | 1688 | if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { |
1690 | if (sk->sk_shutdown & RCV_SHUTDOWN) { | 1689 | if (sk->sk_shutdown & RCV_SHUTDOWN) { |
1691 | err = -ENOTCONN; | 1690 | err = -ENOTCONN; |
1692 | break; | 1691 | break; |
1693 | } | 1692 | } |
1693 | add_wait_queue(sk_sleep(sk), &wait); | ||
1694 | release_sock(sk); | 1694 | release_sock(sk); |
1695 | timeo = schedule_timeout(timeo); | 1695 | timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); |
1696 | sched_annotate_sleep(); | ||
1696 | lock_sock(sk); | 1697 | lock_sock(sk); |
1698 | remove_wait_queue(sk_sleep(sk), &wait); | ||
1697 | } | 1699 | } |
1698 | err = 0; | 1700 | err = 0; |
1699 | if (!skb_queue_empty(&sk->sk_receive_queue)) | 1701 | if (!skb_queue_empty(&sk->sk_receive_queue)) |
@@ -1709,7 +1711,6 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) | |||
1709 | if (err) | 1711 | if (err) |
1710 | break; | 1712 | break; |
1711 | } | 1713 | } |
1712 | finish_wait(sk_sleep(sk), &wait); | ||
1713 | *timeop = timeo; | 1714 | *timeop = timeo; |
1714 | return err; | 1715 | return err; |
1715 | } | 1716 | } |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 74d1eed7cbd4..a95d479caeea 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -890,7 +890,7 @@ retry: | |||
890 | addr->hash ^= sk->sk_type; | 890 | addr->hash ^= sk->sk_type; |
891 | 891 | ||
892 | __unix_remove_socket(sk); | 892 | __unix_remove_socket(sk); |
893 | u->addr = addr; | 893 | smp_store_release(&u->addr, addr); |
894 | __unix_insert_socket(&unix_socket_table[addr->hash], sk); | 894 | __unix_insert_socket(&unix_socket_table[addr->hash], sk); |
895 | spin_unlock(&unix_table_lock); | 895 | spin_unlock(&unix_table_lock); |
896 | err = 0; | 896 | err = 0; |
@@ -1060,7 +1060,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
1060 | 1060 | ||
1061 | err = 0; | 1061 | err = 0; |
1062 | __unix_remove_socket(sk); | 1062 | __unix_remove_socket(sk); |
1063 | u->addr = addr; | 1063 | smp_store_release(&u->addr, addr); |
1064 | __unix_insert_socket(list, sk); | 1064 | __unix_insert_socket(list, sk); |
1065 | 1065 | ||
1066 | out_unlock: | 1066 | out_unlock: |
@@ -1331,15 +1331,29 @@ restart: | |||
1331 | RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); | 1331 | RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq); |
1332 | otheru = unix_sk(other); | 1332 | otheru = unix_sk(other); |
1333 | 1333 | ||
1334 | /* copy address information from listening to new sock*/ | 1334 | /* copy address information from listening to new sock |
1335 | if (otheru->addr) { | 1335 | * |
1336 | refcount_inc(&otheru->addr->refcnt); | 1336 | * The contents of *(otheru->addr) and otheru->path |
1337 | newu->addr = otheru->addr; | 1337 | * are seen fully set up here, since we have found |
1338 | } | 1338 | * otheru in hash under unix_table_lock. Insertion |
1339 | * into the hash chain we'd found it in had been done | ||
1340 | * in an earlier critical area protected by unix_table_lock, | ||
1341 | * the same one where we'd set *(otheru->addr) contents, | ||
1342 | * as well as otheru->path and otheru->addr itself. | ||
1343 | * | ||
1344 | * Using smp_store_release() here to set newu->addr | ||
1345 | * is enough to make those stores, as well as stores | ||
1346 | * to newu->path visible to anyone who gets newu->addr | ||
1347 | * by smp_load_acquire(). IOW, the same warranties | ||
1348 | * as for unix_sock instances bound in unix_bind() or | ||
1349 | * in unix_autobind(). | ||
1350 | */ | ||
1339 | if (otheru->path.dentry) { | 1351 | if (otheru->path.dentry) { |
1340 | path_get(&otheru->path); | 1352 | path_get(&otheru->path); |
1341 | newu->path = otheru->path; | 1353 | newu->path = otheru->path; |
1342 | } | 1354 | } |
1355 | refcount_inc(&otheru->addr->refcnt); | ||
1356 | smp_store_release(&newu->addr, otheru->addr); | ||
1343 | 1357 | ||
1344 | /* Set credentials */ | 1358 | /* Set credentials */ |
1345 | copy_peercred(sk, other); | 1359 | copy_peercred(sk, other); |
@@ -1453,7 +1467,7 @@ out: | |||
1453 | static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) | 1467 | static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) |
1454 | { | 1468 | { |
1455 | struct sock *sk = sock->sk; | 1469 | struct sock *sk = sock->sk; |
1456 | struct unix_sock *u; | 1470 | struct unix_address *addr; |
1457 | DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); | 1471 | DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr); |
1458 | int err = 0; | 1472 | int err = 0; |
1459 | 1473 | ||
@@ -1468,19 +1482,15 @@ static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer) | |||
1468 | sock_hold(sk); | 1482 | sock_hold(sk); |
1469 | } | 1483 | } |
1470 | 1484 | ||
1471 | u = unix_sk(sk); | 1485 | addr = smp_load_acquire(&unix_sk(sk)->addr); |
1472 | unix_state_lock(sk); | 1486 | if (!addr) { |
1473 | if (!u->addr) { | ||
1474 | sunaddr->sun_family = AF_UNIX; | 1487 | sunaddr->sun_family = AF_UNIX; |
1475 | sunaddr->sun_path[0] = 0; | 1488 | sunaddr->sun_path[0] = 0; |
1476 | err = sizeof(short); | 1489 | err = sizeof(short); |
1477 | } else { | 1490 | } else { |
1478 | struct unix_address *addr = u->addr; | ||
1479 | |||
1480 | err = addr->len; | 1491 | err = addr->len; |
1481 | memcpy(sunaddr, addr->name, addr->len); | 1492 | memcpy(sunaddr, addr->name, addr->len); |
1482 | } | 1493 | } |
1483 | unix_state_unlock(sk); | ||
1484 | sock_put(sk); | 1494 | sock_put(sk); |
1485 | out: | 1495 | out: |
1486 | return err; | 1496 | return err; |
@@ -2073,11 +2083,11 @@ static int unix_seqpacket_recvmsg(struct socket *sock, struct msghdr *msg, | |||
2073 | 2083 | ||
2074 | static void unix_copy_addr(struct msghdr *msg, struct sock *sk) | 2084 | static void unix_copy_addr(struct msghdr *msg, struct sock *sk) |
2075 | { | 2085 | { |
2076 | struct unix_sock *u = unix_sk(sk); | 2086 | struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); |
2077 | 2087 | ||
2078 | if (u->addr) { | 2088 | if (addr) { |
2079 | msg->msg_namelen = u->addr->len; | 2089 | msg->msg_namelen = addr->len; |
2080 | memcpy(msg->msg_name, u->addr->name, u->addr->len); | 2090 | memcpy(msg->msg_name, addr->name, addr->len); |
2081 | } | 2091 | } |
2082 | } | 2092 | } |
2083 | 2093 | ||
@@ -2581,15 +2591,14 @@ static int unix_open_file(struct sock *sk) | |||
2581 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) | 2591 | if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) |
2582 | return -EPERM; | 2592 | return -EPERM; |
2583 | 2593 | ||
2584 | unix_state_lock(sk); | 2594 | if (!smp_load_acquire(&unix_sk(sk)->addr)) |
2595 | return -ENOENT; | ||
2596 | |||
2585 | path = unix_sk(sk)->path; | 2597 | path = unix_sk(sk)->path; |
2586 | if (!path.dentry) { | 2598 | if (!path.dentry) |
2587 | unix_state_unlock(sk); | ||
2588 | return -ENOENT; | 2599 | return -ENOENT; |
2589 | } | ||
2590 | 2600 | ||
2591 | path_get(&path); | 2601 | path_get(&path); |
2592 | unix_state_unlock(sk); | ||
2593 | 2602 | ||
2594 | fd = get_unused_fd_flags(O_CLOEXEC); | 2603 | fd = get_unused_fd_flags(O_CLOEXEC); |
2595 | if (fd < 0) | 2604 | if (fd < 0) |
@@ -2830,7 +2839,7 @@ static int unix_seq_show(struct seq_file *seq, void *v) | |||
2830 | (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING), | 2839 | (s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING), |
2831 | sock_i_ino(s)); | 2840 | sock_i_ino(s)); |
2832 | 2841 | ||
2833 | if (u->addr) { | 2842 | if (u->addr) { // under unix_table_lock here |
2834 | int i, len; | 2843 | int i, len; |
2835 | seq_putc(seq, ' '); | 2844 | seq_putc(seq, ' '); |
2836 | 2845 | ||
diff --git a/net/unix/diag.c b/net/unix/diag.c index 384c84e83462..3183d9b8ab33 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c | |||
@@ -10,7 +10,8 @@ | |||
10 | 10 | ||
11 | static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) | 11 | static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb) |
12 | { | 12 | { |
13 | struct unix_address *addr = unix_sk(sk)->addr; | 13 | /* might or might not have unix_table_lock */ |
14 | struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr); | ||
14 | 15 | ||
15 | if (!addr) | 16 | if (!addr) |
16 | return 0; | 17 | return 0; |
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 5d3cce9e8744..15eb5d3d4750 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c | |||
@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void) | |||
75 | { | 75 | { |
76 | struct virtio_vsock *vsock = virtio_vsock_get(); | 76 | struct virtio_vsock *vsock = virtio_vsock_get(); |
77 | 77 | ||
78 | if (!vsock) | ||
79 | return VMADDR_CID_ANY; | ||
80 | |||
78 | return vsock->guest_cid; | 81 | return vsock->guest_cid; |
79 | } | 82 | } |
80 | 83 | ||
@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) | |||
584 | 587 | ||
585 | virtio_vsock_update_guest_cid(vsock); | 588 | virtio_vsock_update_guest_cid(vsock); |
586 | 589 | ||
587 | ret = vsock_core_init(&virtio_transport.transport); | ||
588 | if (ret < 0) | ||
589 | goto out_vqs; | ||
590 | |||
591 | vsock->rx_buf_nr = 0; | 590 | vsock->rx_buf_nr = 0; |
592 | vsock->rx_buf_max_nr = 0; | 591 | vsock->rx_buf_max_nr = 0; |
593 | atomic_set(&vsock->queued_replies, 0); | 592 | atomic_set(&vsock->queued_replies, 0); |
@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) | |||
618 | mutex_unlock(&the_virtio_vsock_mutex); | 617 | mutex_unlock(&the_virtio_vsock_mutex); |
619 | return 0; | 618 | return 0; |
620 | 619 | ||
621 | out_vqs: | ||
622 | vsock->vdev->config->del_vqs(vsock->vdev); | ||
623 | out: | 620 | out: |
624 | kfree(vsock); | 621 | kfree(vsock); |
625 | mutex_unlock(&the_virtio_vsock_mutex); | 622 | mutex_unlock(&the_virtio_vsock_mutex); |
@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev) | |||
637 | flush_work(&vsock->event_work); | 634 | flush_work(&vsock->event_work); |
638 | flush_work(&vsock->send_pkt_work); | 635 | flush_work(&vsock->send_pkt_work); |
639 | 636 | ||
637 | /* Reset all connected sockets when the device disappear */ | ||
638 | vsock_for_each_connected_socket(virtio_vsock_reset_sock); | ||
639 | |||
640 | vdev->config->reset(vdev); | 640 | vdev->config->reset(vdev); |
641 | 641 | ||
642 | mutex_lock(&vsock->rx_lock); | 642 | mutex_lock(&vsock->rx_lock); |
@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev) | |||
669 | 669 | ||
670 | mutex_lock(&the_virtio_vsock_mutex); | 670 | mutex_lock(&the_virtio_vsock_mutex); |
671 | the_virtio_vsock = NULL; | 671 | the_virtio_vsock = NULL; |
672 | vsock_core_exit(); | ||
673 | mutex_unlock(&the_virtio_vsock_mutex); | 672 | mutex_unlock(&the_virtio_vsock_mutex); |
674 | 673 | ||
675 | vdev->config->del_vqs(vdev); | 674 | vdev->config->del_vqs(vdev); |
@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void) | |||
702 | virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); | 701 | virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); |
703 | if (!virtio_vsock_workqueue) | 702 | if (!virtio_vsock_workqueue) |
704 | return -ENOMEM; | 703 | return -ENOMEM; |
704 | |||
705 | ret = register_virtio_driver(&virtio_vsock_driver); | 705 | ret = register_virtio_driver(&virtio_vsock_driver); |
706 | if (ret) | 706 | if (ret) |
707 | destroy_workqueue(virtio_vsock_workqueue); | 707 | goto out_wq; |
708 | |||
709 | ret = vsock_core_init(&virtio_transport.transport); | ||
710 | if (ret) | ||
711 | goto out_vdr; | ||
712 | |||
713 | return 0; | ||
714 | |||
715 | out_vdr: | ||
716 | unregister_virtio_driver(&virtio_vsock_driver); | ||
717 | out_wq: | ||
718 | destroy_workqueue(virtio_vsock_workqueue); | ||
708 | return ret; | 719 | return ret; |
720 | |||
709 | } | 721 | } |
710 | 722 | ||
711 | static void __exit virtio_vsock_exit(void) | 723 | static void __exit virtio_vsock_exit(void) |
712 | { | 724 | { |
725 | vsock_core_exit(); | ||
713 | unregister_virtio_driver(&virtio_vsock_driver); | 726 | unregister_virtio_driver(&virtio_vsock_driver); |
714 | destroy_workqueue(virtio_vsock_workqueue); | 727 | destroy_workqueue(virtio_vsock_workqueue); |
715 | } | 728 | } |
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index c361ce782412..c3d5ab01fba7 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c | |||
@@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work) | |||
1651 | 1651 | ||
1652 | static void vmci_transport_destruct(struct vsock_sock *vsk) | 1652 | static void vmci_transport_destruct(struct vsock_sock *vsk) |
1653 | { | 1653 | { |
1654 | /* transport can be NULL if we hit a failure at init() time */ | ||
1655 | if (!vmci_trans(vsk)) | ||
1656 | return; | ||
1657 | |||
1654 | /* Ensure that the detach callback doesn't use the sk/vsk | 1658 | /* Ensure that the detach callback doesn't use the sk/vsk |
1655 | * we are about to destruct. | 1659 | * we are about to destruct. |
1656 | */ | 1660 | */ |
diff --git a/net/wireless/ap.c b/net/wireless/ap.c index 882d97bdc6bf..550ac9d827fe 100644 --- a/net/wireless/ap.c +++ b/net/wireless/ap.c | |||
@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, | |||
41 | cfg80211_sched_dfs_chan_update(rdev); | 41 | cfg80211_sched_dfs_chan_update(rdev); |
42 | } | 42 | } |
43 | 43 | ||
44 | schedule_work(&cfg80211_disconnect_work); | ||
45 | |||
44 | return err; | 46 | return err; |
45 | } | 47 | } |
46 | 48 | ||
diff --git a/net/wireless/core.c b/net/wireless/core.c index 623dfe5e211c..b36ad8efb5e5 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -1068,6 +1068,8 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync) | |||
1068 | 1068 | ||
1069 | ASSERT_RTNL(); | 1069 | ASSERT_RTNL(); |
1070 | 1070 | ||
1071 | flush_work(&wdev->pmsr_free_wk); | ||
1072 | |||
1071 | nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); | 1073 | nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); |
1072 | 1074 | ||
1073 | list_del_rcu(&wdev->list); | 1075 | list_del_rcu(&wdev->list); |
diff --git a/net/wireless/core.h b/net/wireless/core.h index c5d6f3418601..f6b40563dc63 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -445,6 +445,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev); | |||
445 | bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, | 445 | bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, |
446 | u32 center_freq_khz, u32 bw_khz); | 446 | u32 center_freq_khz, u32 bw_khz); |
447 | 447 | ||
448 | extern struct work_struct cfg80211_disconnect_work; | ||
449 | |||
448 | /** | 450 | /** |
449 | * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable | 451 | * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable |
450 | * @wiphy: the wiphy to validate against | 452 | * @wiphy: the wiphy to validate against |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 74150ad95823..d91a408db113 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -250,7 +250,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = { | |||
250 | [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = | 250 | [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = |
251 | NLA_POLICY_MAX(NLA_U8, 15), | 251 | NLA_POLICY_MAX(NLA_U8, 15), |
252 | [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = | 252 | [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = |
253 | NLA_POLICY_MAX(NLA_U8, 15), | 253 | NLA_POLICY_MAX(NLA_U8, 31), |
254 | [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, | 254 | [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, |
255 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, | 255 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, |
256 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, | 256 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, |
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c index de9286703280..0216ab555249 100644 --- a/net/wireless/pmsr.c +++ b/net/wireless/pmsr.c | |||
@@ -256,8 +256,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) | |||
256 | if (err) | 256 | if (err) |
257 | goto out_err; | 257 | goto out_err; |
258 | } else { | 258 | } else { |
259 | memcpy(req->mac_addr, nla_data(info->attrs[NL80211_ATTR_MAC]), | 259 | memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN); |
260 | ETH_ALEN); | ||
261 | memset(req->mac_addr_mask, 0xff, ETH_ALEN); | 260 | memset(req->mac_addr_mask, 0xff, ETH_ALEN); |
262 | } | 261 | } |
263 | 262 | ||
@@ -272,6 +271,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) | |||
272 | 271 | ||
273 | req->n_peers = count; | 272 | req->n_peers = count; |
274 | req->cookie = cfg80211_assign_cookie(rdev); | 273 | req->cookie = cfg80211_assign_cookie(rdev); |
274 | req->nl_portid = info->snd_portid; | ||
275 | 275 | ||
276 | err = rdev_start_pmsr(rdev, wdev, req); | 276 | err = rdev_start_pmsr(rdev, wdev, req); |
277 | if (err) | 277 | if (err) |
@@ -530,14 +530,14 @@ free: | |||
530 | } | 530 | } |
531 | EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); | 531 | EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); |
532 | 532 | ||
533 | void cfg80211_pmsr_free_wk(struct work_struct *work) | 533 | static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev) |
534 | { | 534 | { |
535 | struct wireless_dev *wdev = container_of(work, struct wireless_dev, | ||
536 | pmsr_free_wk); | ||
537 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); | 535 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); |
538 | struct cfg80211_pmsr_request *req, *tmp; | 536 | struct cfg80211_pmsr_request *req, *tmp; |
539 | LIST_HEAD(free_list); | 537 | LIST_HEAD(free_list); |
540 | 538 | ||
539 | lockdep_assert_held(&wdev->mtx); | ||
540 | |||
541 | spin_lock_bh(&wdev->pmsr_lock); | 541 | spin_lock_bh(&wdev->pmsr_lock); |
542 | list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { | 542 | list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { |
543 | if (req->nl_portid) | 543 | if (req->nl_portid) |
@@ -547,14 +547,22 @@ void cfg80211_pmsr_free_wk(struct work_struct *work) | |||
547 | spin_unlock_bh(&wdev->pmsr_lock); | 547 | spin_unlock_bh(&wdev->pmsr_lock); |
548 | 548 | ||
549 | list_for_each_entry_safe(req, tmp, &free_list, list) { | 549 | list_for_each_entry_safe(req, tmp, &free_list, list) { |
550 | wdev_lock(wdev); | ||
551 | rdev_abort_pmsr(rdev, wdev, req); | 550 | rdev_abort_pmsr(rdev, wdev, req); |
552 | wdev_unlock(wdev); | ||
553 | 551 | ||
554 | kfree(req); | 552 | kfree(req); |
555 | } | 553 | } |
556 | } | 554 | } |
557 | 555 | ||
556 | void cfg80211_pmsr_free_wk(struct work_struct *work) | ||
557 | { | ||
558 | struct wireless_dev *wdev = container_of(work, struct wireless_dev, | ||
559 | pmsr_free_wk); | ||
560 | |||
561 | wdev_lock(wdev); | ||
562 | cfg80211_pmsr_process_abort(wdev); | ||
563 | wdev_unlock(wdev); | ||
564 | } | ||
565 | |||
558 | void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) | 566 | void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) |
559 | { | 567 | { |
560 | struct cfg80211_pmsr_request *req; | 568 | struct cfg80211_pmsr_request *req; |
@@ -568,8 +576,8 @@ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) | |||
568 | spin_unlock_bh(&wdev->pmsr_lock); | 576 | spin_unlock_bh(&wdev->pmsr_lock); |
569 | 577 | ||
570 | if (found) | 578 | if (found) |
571 | schedule_work(&wdev->pmsr_free_wk); | 579 | cfg80211_pmsr_process_abort(wdev); |
572 | flush_work(&wdev->pmsr_free_wk); | 580 | |
573 | WARN_ON(!list_empty(&wdev->pmsr_list)); | 581 | WARN_ON(!list_empty(&wdev->pmsr_list)); |
574 | } | 582 | } |
575 | 583 | ||
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f741d8376a46..7d34cb884840 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work) | |||
667 | rtnl_unlock(); | 667 | rtnl_unlock(); |
668 | } | 668 | } |
669 | 669 | ||
670 | static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); | 670 | DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); |
671 | 671 | ||
672 | 672 | ||
673 | /* | 673 | /* |
diff --git a/net/wireless/util.c b/net/wireless/util.c index cd48cdd582c0..ec30e3732c7b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> | 5 | * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> |
6 | * Copyright 2013-2014 Intel Mobile Communications GmbH | 6 | * Copyright 2013-2014 Intel Mobile Communications GmbH |
7 | * Copyright 2017 Intel Deutschland GmbH | 7 | * Copyright 2017 Intel Deutschland GmbH |
8 | * Copyright (C) 2018 Intel Corporation | 8 | * Copyright (C) 2018-2019 Intel Corporation |
9 | */ | 9 | */ |
10 | #include <linux/export.h> | 10 | #include <linux/export.h> |
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/mpls.h> | 19 | #include <linux/mpls.h> |
20 | #include <linux/gcd.h> | 20 | #include <linux/gcd.h> |
21 | #include <linux/bitfield.h> | 21 | #include <linux/bitfield.h> |
22 | #include <linux/nospec.h> | ||
22 | #include "core.h" | 23 | #include "core.h" |
23 | #include "rdev-ops.h" | 24 | #include "rdev-ops.h" |
24 | 25 | ||
@@ -715,20 +716,25 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, | |||
715 | { | 716 | { |
716 | unsigned int dscp; | 717 | unsigned int dscp; |
717 | unsigned char vlan_priority; | 718 | unsigned char vlan_priority; |
719 | unsigned int ret; | ||
718 | 720 | ||
719 | /* skb->priority values from 256->263 are magic values to | 721 | /* skb->priority values from 256->263 are magic values to |
720 | * directly indicate a specific 802.1d priority. This is used | 722 | * directly indicate a specific 802.1d priority. This is used |
721 | * to allow 802.1d priority to be passed directly in from VLAN | 723 | * to allow 802.1d priority to be passed directly in from VLAN |
722 | * tags, etc. | 724 | * tags, etc. |
723 | */ | 725 | */ |
724 | if (skb->priority >= 256 && skb->priority <= 263) | 726 | if (skb->priority >= 256 && skb->priority <= 263) { |
725 | return skb->priority - 256; | 727 | ret = skb->priority - 256; |
728 | goto out; | ||
729 | } | ||
726 | 730 | ||
727 | if (skb_vlan_tag_present(skb)) { | 731 | if (skb_vlan_tag_present(skb)) { |
728 | vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) | 732 | vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) |
729 | >> VLAN_PRIO_SHIFT; | 733 | >> VLAN_PRIO_SHIFT; |
730 | if (vlan_priority > 0) | 734 | if (vlan_priority > 0) { |
731 | return vlan_priority; | 735 | ret = vlan_priority; |
736 | goto out; | ||
737 | } | ||
732 | } | 738 | } |
733 | 739 | ||
734 | switch (skb->protocol) { | 740 | switch (skb->protocol) { |
@@ -747,8 +753,9 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, | |||
747 | if (!mpls) | 753 | if (!mpls) |
748 | return 0; | 754 | return 0; |
749 | 755 | ||
750 | return (ntohl(mpls->entry) & MPLS_LS_TC_MASK) | 756 | ret = (ntohl(mpls->entry) & MPLS_LS_TC_MASK) |
751 | >> MPLS_LS_TC_SHIFT; | 757 | >> MPLS_LS_TC_SHIFT; |
758 | goto out; | ||
752 | } | 759 | } |
753 | case htons(ETH_P_80221): | 760 | case htons(ETH_P_80221): |
754 | /* 802.21 is always network control traffic */ | 761 | /* 802.21 is always network control traffic */ |
@@ -761,18 +768,24 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, | |||
761 | unsigned int i, tmp_dscp = dscp >> 2; | 768 | unsigned int i, tmp_dscp = dscp >> 2; |
762 | 769 | ||
763 | for (i = 0; i < qos_map->num_des; i++) { | 770 | for (i = 0; i < qos_map->num_des; i++) { |
764 | if (tmp_dscp == qos_map->dscp_exception[i].dscp) | 771 | if (tmp_dscp == qos_map->dscp_exception[i].dscp) { |
765 | return qos_map->dscp_exception[i].up; | 772 | ret = qos_map->dscp_exception[i].up; |
773 | goto out; | ||
774 | } | ||
766 | } | 775 | } |
767 | 776 | ||
768 | for (i = 0; i < 8; i++) { | 777 | for (i = 0; i < 8; i++) { |
769 | if (tmp_dscp >= qos_map->up[i].low && | 778 | if (tmp_dscp >= qos_map->up[i].low && |
770 | tmp_dscp <= qos_map->up[i].high) | 779 | tmp_dscp <= qos_map->up[i].high) { |
771 | return i; | 780 | ret = i; |
781 | goto out; | ||
782 | } | ||
772 | } | 783 | } |
773 | } | 784 | } |
774 | 785 | ||
775 | return dscp >> 5; | 786 | ret = dscp >> 5; |
787 | out: | ||
788 | return array_index_nospec(ret, IEEE80211_NUM_TIDS); | ||
776 | } | 789 | } |
777 | EXPORT_SYMBOL(cfg80211_classify8021d); | 790 | EXPORT_SYMBOL(cfg80211_classify8021d); |
778 | 791 | ||
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 5121729b8b63..eff31348e20b 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb) | |||
352 | unsigned int lci = 1; | 352 | unsigned int lci = 1; |
353 | struct sock *sk; | 353 | struct sock *sk; |
354 | 354 | ||
355 | read_lock_bh(&x25_list_lock); | 355 | while ((sk = x25_find_socket(lci, nb)) != NULL) { |
356 | |||
357 | while ((sk = __x25_find_socket(lci, nb)) != NULL) { | ||
358 | sock_put(sk); | 356 | sock_put(sk); |
359 | if (++lci == 4096) { | 357 | if (++lci == 4096) { |
360 | lci = 0; | 358 | lci = 0; |
361 | break; | 359 | break; |
362 | } | 360 | } |
361 | cond_resched(); | ||
363 | } | 362 | } |
364 | 363 | ||
365 | read_unlock_bh(&x25_list_lock); | ||
366 | return lci; | 364 | return lci; |
367 | } | 365 | } |
368 | 366 | ||
@@ -681,8 +679,7 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
681 | struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; | 679 | struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr; |
682 | int len, i, rc = 0; | 680 | int len, i, rc = 0; |
683 | 681 | ||
684 | if (!sock_flag(sk, SOCK_ZAPPED) || | 682 | if (addr_len != sizeof(struct sockaddr_x25) || |
685 | addr_len != sizeof(struct sockaddr_x25) || | ||
686 | addr->sx25_family != AF_X25) { | 683 | addr->sx25_family != AF_X25) { |
687 | rc = -EINVAL; | 684 | rc = -EINVAL; |
688 | goto out; | 685 | goto out; |
@@ -701,9 +698,13 @@ static int x25_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) | |||
701 | } | 698 | } |
702 | 699 | ||
703 | lock_sock(sk); | 700 | lock_sock(sk); |
704 | x25_sk(sk)->source_addr = addr->sx25_addr; | 701 | if (sock_flag(sk, SOCK_ZAPPED)) { |
705 | x25_insert_socket(sk); | 702 | x25_sk(sk)->source_addr = addr->sx25_addr; |
706 | sock_reset_flag(sk, SOCK_ZAPPED); | 703 | x25_insert_socket(sk); |
704 | sock_reset_flag(sk, SOCK_ZAPPED); | ||
705 | } else { | ||
706 | rc = -EINVAL; | ||
707 | } | ||
707 | release_sock(sk); | 708 | release_sock(sk); |
708 | SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); | 709 | SOCK_DEBUG(sk, "x25_bind: socket is bound\n"); |
709 | out: | 710 | out: |
diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index d4de871e7d4d..37e1fe180769 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c | |||
@@ -125,9 +125,10 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev, | |||
125 | return 0; | 125 | return 0; |
126 | 126 | ||
127 | err_unreg_umem: | 127 | err_unreg_umem: |
128 | xdp_clear_umem_at_qid(dev, queue_id); | ||
129 | if (!force_zc) | 128 | if (!force_zc) |
130 | err = 0; /* fallback to copy mode */ | 129 | err = 0; /* fallback to copy mode */ |
130 | if (err) | ||
131 | xdp_clear_umem_at_qid(dev, queue_id); | ||
131 | out_rtnl_unlock: | 132 | out_rtnl_unlock: |
132 | rtnl_unlock(); | 133 | rtnl_unlock(); |
133 | return err; | 134 | return err; |
@@ -259,10 +260,10 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem) | |||
259 | if (!umem->pgs) | 260 | if (!umem->pgs) |
260 | return -ENOMEM; | 261 | return -ENOMEM; |
261 | 262 | ||
262 | down_write(¤t->mm->mmap_sem); | 263 | down_read(¤t->mm->mmap_sem); |
263 | npgs = get_user_pages(umem->address, umem->npgs, | 264 | npgs = get_user_pages_longterm(umem->address, umem->npgs, |
264 | gup_flags, &umem->pgs[0], NULL); | 265 | gup_flags, &umem->pgs[0], NULL); |
265 | up_write(¤t->mm->mmap_sem); | 266 | up_read(¤t->mm->mmap_sem); |
266 | 267 | ||
267 | if (npgs != umem->npgs) { | 268 | if (npgs != umem->npgs) { |
268 | if (npgs >= 0) { | 269 | if (npgs >= 0) { |
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index a03268454a27..85e4fe4f18cc 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c | |||
@@ -366,7 +366,6 @@ static int xsk_release(struct socket *sock) | |||
366 | 366 | ||
367 | xskq_destroy(xs->rx); | 367 | xskq_destroy(xs->rx); |
368 | xskq_destroy(xs->tx); | 368 | xskq_destroy(xs->tx); |
369 | xdp_put_umem(xs->umem); | ||
370 | 369 | ||
371 | sock_orphan(sk); | 370 | sock_orphan(sk); |
372 | sock->sk = NULL; | 371 | sock->sk = NULL; |
@@ -669,6 +668,8 @@ static int xsk_mmap(struct file *file, struct socket *sock, | |||
669 | if (!umem) | 668 | if (!umem) |
670 | return -EINVAL; | 669 | return -EINVAL; |
671 | 670 | ||
671 | /* Matches the smp_wmb() in XDP_UMEM_REG */ | ||
672 | smp_rmb(); | ||
672 | if (offset == XDP_UMEM_PGOFF_FILL_RING) | 673 | if (offset == XDP_UMEM_PGOFF_FILL_RING) |
673 | q = READ_ONCE(umem->fq); | 674 | q = READ_ONCE(umem->fq); |
674 | else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) | 675 | else if (offset == XDP_UMEM_PGOFF_COMPLETION_RING) |
@@ -678,6 +679,8 @@ static int xsk_mmap(struct file *file, struct socket *sock, | |||
678 | if (!q) | 679 | if (!q) |
679 | return -EINVAL; | 680 | return -EINVAL; |
680 | 681 | ||
682 | /* Matches the smp_wmb() in xsk_init_queue */ | ||
683 | smp_rmb(); | ||
681 | qpg = virt_to_head_page(q->ring); | 684 | qpg = virt_to_head_page(q->ring); |
682 | if (size > (PAGE_SIZE << compound_order(qpg))) | 685 | if (size > (PAGE_SIZE << compound_order(qpg))) |
683 | return -EINVAL; | 686 | return -EINVAL; |
@@ -714,6 +717,18 @@ static const struct proto_ops xsk_proto_ops = { | |||
714 | .sendpage = sock_no_sendpage, | 717 | .sendpage = sock_no_sendpage, |
715 | }; | 718 | }; |
716 | 719 | ||
720 | static void xsk_destruct(struct sock *sk) | ||
721 | { | ||
722 | struct xdp_sock *xs = xdp_sk(sk); | ||
723 | |||
724 | if (!sock_flag(sk, SOCK_DEAD)) | ||
725 | return; | ||
726 | |||
727 | xdp_put_umem(xs->umem); | ||
728 | |||
729 | sk_refcnt_debug_dec(sk); | ||
730 | } | ||
731 | |||
717 | static int xsk_create(struct net *net, struct socket *sock, int protocol, | 732 | static int xsk_create(struct net *net, struct socket *sock, int protocol, |
718 | int kern) | 733 | int kern) |
719 | { | 734 | { |
@@ -740,6 +755,9 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, | |||
740 | 755 | ||
741 | sk->sk_family = PF_XDP; | 756 | sk->sk_family = PF_XDP; |
742 | 757 | ||
758 | sk->sk_destruct = xsk_destruct; | ||
759 | sk_refcnt_debug_inc(sk); | ||
760 | |||
743 | sock_set_flag(sk, SOCK_RCU_FREE); | 761 | sock_set_flag(sk, SOCK_RCU_FREE); |
744 | 762 | ||
745 | xs = xdp_sk(sk); | 763 | xs = xdp_sk(sk); |
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index 6be8c7df15bb..dbb3c1945b5c 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c | |||
@@ -76,10 +76,10 @@ static struct xfrm_if *xfrmi_decode_session(struct sk_buff *skb) | |||
76 | int ifindex; | 76 | int ifindex; |
77 | struct xfrm_if *xi; | 77 | struct xfrm_if *xi; |
78 | 78 | ||
79 | if (!skb->dev) | 79 | if (!secpath_exists(skb) || !skb->dev) |
80 | return NULL; | 80 | return NULL; |
81 | 81 | ||
82 | xfrmn = net_generic(dev_net(skb->dev), xfrmi_net_id); | 82 | xfrmn = net_generic(xs_net(xfrm_input_state(skb)), xfrmi_net_id); |
83 | ifindex = skb->dev->ifindex; | 83 | ifindex = skb->dev->ifindex; |
84 | 84 | ||
85 | for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { | 85 | for_each_xfrmi_rcu(xfrmn->xfrmi[0], xi) { |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index ba0a4048c846..8d1a898d0ba5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -3314,8 +3314,10 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, | |||
3314 | 3314 | ||
3315 | if (ifcb) { | 3315 | if (ifcb) { |
3316 | xi = ifcb->decode_session(skb); | 3316 | xi = ifcb->decode_session(skb); |
3317 | if (xi) | 3317 | if (xi) { |
3318 | if_id = xi->p.if_id; | 3318 | if_id = xi->p.if_id; |
3319 | net = xi->net; | ||
3320 | } | ||
3319 | } | 3321 | } |
3320 | rcu_read_unlock(); | 3322 | rcu_read_unlock(); |
3321 | 3323 | ||
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 23c92891758a..1bb971f46fc6 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -432,7 +432,7 @@ void xfrm_state_free(struct xfrm_state *x) | |||
432 | } | 432 | } |
433 | EXPORT_SYMBOL(xfrm_state_free); | 433 | EXPORT_SYMBOL(xfrm_state_free); |
434 | 434 | ||
435 | static void xfrm_state_gc_destroy(struct xfrm_state *x) | 435 | static void ___xfrm_state_destroy(struct xfrm_state *x) |
436 | { | 436 | { |
437 | tasklet_hrtimer_cancel(&x->mtimer); | 437 | tasklet_hrtimer_cancel(&x->mtimer); |
438 | del_timer_sync(&x->rtimer); | 438 | del_timer_sync(&x->rtimer); |
@@ -474,7 +474,7 @@ static void xfrm_state_gc_task(struct work_struct *work) | |||
474 | synchronize_rcu(); | 474 | synchronize_rcu(); |
475 | 475 | ||
476 | hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) | 476 | hlist_for_each_entry_safe(x, tmp, &gc_list, gclist) |
477 | xfrm_state_gc_destroy(x); | 477 | ___xfrm_state_destroy(x); |
478 | } | 478 | } |
479 | 479 | ||
480 | static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) | 480 | static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) |
@@ -598,14 +598,19 @@ struct xfrm_state *xfrm_state_alloc(struct net *net) | |||
598 | } | 598 | } |
599 | EXPORT_SYMBOL(xfrm_state_alloc); | 599 | EXPORT_SYMBOL(xfrm_state_alloc); |
600 | 600 | ||
601 | void __xfrm_state_destroy(struct xfrm_state *x) | 601 | void __xfrm_state_destroy(struct xfrm_state *x, bool sync) |
602 | { | 602 | { |
603 | WARN_ON(x->km.state != XFRM_STATE_DEAD); | 603 | WARN_ON(x->km.state != XFRM_STATE_DEAD); |
604 | 604 | ||
605 | spin_lock_bh(&xfrm_state_gc_lock); | 605 | if (sync) { |
606 | hlist_add_head(&x->gclist, &xfrm_state_gc_list); | 606 | synchronize_rcu(); |
607 | spin_unlock_bh(&xfrm_state_gc_lock); | 607 | ___xfrm_state_destroy(x); |
608 | schedule_work(&xfrm_state_gc_work); | 608 | } else { |
609 | spin_lock_bh(&xfrm_state_gc_lock); | ||
610 | hlist_add_head(&x->gclist, &xfrm_state_gc_list); | ||
611 | spin_unlock_bh(&xfrm_state_gc_lock); | ||
612 | schedule_work(&xfrm_state_gc_work); | ||
613 | } | ||
609 | } | 614 | } |
610 | EXPORT_SYMBOL(__xfrm_state_destroy); | 615 | EXPORT_SYMBOL(__xfrm_state_destroy); |
611 | 616 | ||
@@ -708,7 +713,7 @@ xfrm_dev_state_flush_secctx_check(struct net *net, struct net_device *dev, bool | |||
708 | } | 713 | } |
709 | #endif | 714 | #endif |
710 | 715 | ||
711 | int xfrm_state_flush(struct net *net, u8 proto, bool task_valid) | 716 | int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync) |
712 | { | 717 | { |
713 | int i, err = 0, cnt = 0; | 718 | int i, err = 0, cnt = 0; |
714 | 719 | ||
@@ -730,7 +735,10 @@ restart: | |||
730 | err = xfrm_state_delete(x); | 735 | err = xfrm_state_delete(x); |
731 | xfrm_audit_state_delete(x, err ? 0 : 1, | 736 | xfrm_audit_state_delete(x, err ? 0 : 1, |
732 | task_valid); | 737 | task_valid); |
733 | xfrm_state_put(x); | 738 | if (sync) |
739 | xfrm_state_put_sync(x); | ||
740 | else | ||
741 | xfrm_state_put(x); | ||
734 | if (!err) | 742 | if (!err) |
735 | cnt++; | 743 | cnt++; |
736 | 744 | ||
@@ -2215,7 +2223,7 @@ void xfrm_state_delete_tunnel(struct xfrm_state *x) | |||
2215 | if (atomic_read(&t->tunnel_users) == 2) | 2223 | if (atomic_read(&t->tunnel_users) == 2) |
2216 | xfrm_state_delete(t); | 2224 | xfrm_state_delete(t); |
2217 | atomic_dec(&t->tunnel_users); | 2225 | atomic_dec(&t->tunnel_users); |
2218 | xfrm_state_put(t); | 2226 | xfrm_state_put_sync(t); |
2219 | x->tunnel = NULL; | 2227 | x->tunnel = NULL; |
2220 | } | 2228 | } |
2221 | } | 2229 | } |
@@ -2375,8 +2383,8 @@ void xfrm_state_fini(struct net *net) | |||
2375 | unsigned int sz; | 2383 | unsigned int sz; |
2376 | 2384 | ||
2377 | flush_work(&net->xfrm.state_hash_work); | 2385 | flush_work(&net->xfrm.state_hash_work); |
2378 | xfrm_state_flush(net, IPSEC_PROTO_ANY, false); | ||
2379 | flush_work(&xfrm_state_gc_work); | 2386 | flush_work(&xfrm_state_gc_work); |
2387 | xfrm_state_flush(net, IPSEC_PROTO_ANY, false, true); | ||
2380 | 2388 | ||
2381 | WARN_ON(!list_empty(&net->xfrm.state_all)); | 2389 | WARN_ON(!list_empty(&net->xfrm.state_all)); |
2382 | 2390 | ||
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index c6d26afcf89d..a131f9ff979e 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1932,7 +1932,7 @@ static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
1932 | struct xfrm_usersa_flush *p = nlmsg_data(nlh); | 1932 | struct xfrm_usersa_flush *p = nlmsg_data(nlh); |
1933 | int err; | 1933 | int err; |
1934 | 1934 | ||
1935 | err = xfrm_state_flush(net, p->proto, true); | 1935 | err = xfrm_state_flush(net, p->proto, true, false); |
1936 | if (err) { | 1936 | if (err) { |
1937 | if (err == -ESRCH) /* empty table */ | 1937 | if (err == -ESRCH) /* empty table */ |
1938 | return 0; | 1938 | return 0; |
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c index 33e67bd1dc34..32234481ad7d 100644 --- a/samples/mei/mei-amt-version.c +++ b/samples/mei/mei-amt-version.c | |||
@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid, | |||
117 | 117 | ||
118 | me->verbose = verbose; | 118 | me->verbose = verbose; |
119 | 119 | ||
120 | me->fd = open("/dev/mei", O_RDWR); | 120 | me->fd = open("/dev/mei0", O_RDWR); |
121 | if (me->fd == -1) { | 121 | if (me->fd == -1) { |
122 | mei_err(me, "Cannot establish a handle to the Intel MEI driver\n"); | 122 | mei_err(me, "Cannot establish a handle to the Intel MEI driver\n"); |
123 | goto err; | 123 | goto err; |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 77cebad0474e..f75e7bda4889 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c | |||
@@ -118,8 +118,8 @@ static int read_symbol(FILE *in, struct sym_entry *s) | |||
118 | fprintf(stderr, "Read error or end of file.\n"); | 118 | fprintf(stderr, "Read error or end of file.\n"); |
119 | return -1; | 119 | return -1; |
120 | } | 120 | } |
121 | if (strlen(sym) > KSYM_NAME_LEN) { | 121 | if (strlen(sym) >= KSYM_NAME_LEN) { |
122 | fprintf(stderr, "Symbol %s too long for kallsyms (%zu vs %d).\n" | 122 | fprintf(stderr, "Symbol %s too long for kallsyms (%zu >= %d).\n" |
123 | "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n", | 123 | "Please increase KSYM_NAME_LEN both in kernel and kallsyms.c\n", |
124 | sym, strlen(sym), KSYM_NAME_LEN); | 124 | sym, strlen(sym), KSYM_NAME_LEN); |
125 | return -1; | 125 | return -1; |
diff --git a/security/keys/internal.h b/security/keys/internal.h index 479909b858c7..8f533c81aa8d 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h | |||
@@ -186,20 +186,9 @@ static inline int key_permission(const key_ref_t key_ref, unsigned perm) | |||
186 | return key_task_permission(key_ref, current_cred(), perm); | 186 | return key_task_permission(key_ref, current_cred(), perm); |
187 | } | 187 | } |
188 | 188 | ||
189 | /* | ||
190 | * Authorisation record for request_key(). | ||
191 | */ | ||
192 | struct request_key_auth { | ||
193 | struct key *target_key; | ||
194 | struct key *dest_keyring; | ||
195 | const struct cred *cred; | ||
196 | void *callout_info; | ||
197 | size_t callout_len; | ||
198 | pid_t pid; | ||
199 | } __randomize_layout; | ||
200 | |||
201 | extern struct key_type key_type_request_key_auth; | 189 | extern struct key_type key_type_request_key_auth; |
202 | extern struct key *request_key_auth_new(struct key *target, | 190 | extern struct key *request_key_auth_new(struct key *target, |
191 | const char *op, | ||
203 | const void *callout_info, | 192 | const void *callout_info, |
204 | size_t callout_len, | 193 | size_t callout_len, |
205 | struct key *dest_keyring); | 194 | struct key *dest_keyring); |
diff --git a/security/keys/key.c b/security/keys/key.c index 44a80d6741a1..696f1c092c50 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
@@ -265,8 +265,8 @@ struct key *key_alloc(struct key_type *type, const char *desc, | |||
265 | 265 | ||
266 | spin_lock(&user->lock); | 266 | spin_lock(&user->lock); |
267 | if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { | 267 | if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { |
268 | if (user->qnkeys + 1 >= maxkeys || | 268 | if (user->qnkeys + 1 > maxkeys || |
269 | user->qnbytes + quotalen >= maxbytes || | 269 | user->qnbytes + quotalen > maxbytes || |
270 | user->qnbytes + quotalen < user->qnbytes) | 270 | user->qnbytes + quotalen < user->qnbytes) |
271 | goto no_quota; | 271 | goto no_quota; |
272 | } | 272 | } |
@@ -297,6 +297,7 @@ struct key *key_alloc(struct key_type *type, const char *desc, | |||
297 | key->gid = gid; | 297 | key->gid = gid; |
298 | key->perm = perm; | 298 | key->perm = perm; |
299 | key->restrict_link = restrict_link; | 299 | key->restrict_link = restrict_link; |
300 | key->last_used_at = ktime_get_real_seconds(); | ||
300 | 301 | ||
301 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) | 302 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) |
302 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; | 303 | key->flags |= 1 << KEY_FLAG_IN_QUOTA; |
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index e8093d025966..7bbe03593e58 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/security.h> | 25 | #include <linux/security.h> |
26 | #include <linux/uio.h> | 26 | #include <linux/uio.h> |
27 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
28 | #include <keys/request_key_auth-type.h> | ||
28 | #include "internal.h" | 29 | #include "internal.h" |
29 | 30 | ||
30 | #define KEY_MAX_DESC_SIZE 4096 | 31 | #define KEY_MAX_DESC_SIZE 4096 |
diff --git a/security/keys/keyring.c b/security/keys/keyring.c index eadebb92986a..f81372f53dd7 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c | |||
@@ -661,9 +661,6 @@ static bool search_nested_keyrings(struct key *keyring, | |||
661 | BUG_ON((ctx->flags & STATE_CHECKS) == 0 || | 661 | BUG_ON((ctx->flags & STATE_CHECKS) == 0 || |
662 | (ctx->flags & STATE_CHECKS) == STATE_CHECKS); | 662 | (ctx->flags & STATE_CHECKS) == STATE_CHECKS); |
663 | 663 | ||
664 | if (ctx->index_key.description) | ||
665 | ctx->index_key.desc_len = strlen(ctx->index_key.description); | ||
666 | |||
667 | /* Check to see if this top-level keyring is what we are looking for | 664 | /* Check to see if this top-level keyring is what we are looking for |
668 | * and whether it is valid or not. | 665 | * and whether it is valid or not. |
669 | */ | 666 | */ |
@@ -914,6 +911,7 @@ key_ref_t keyring_search(key_ref_t keyring, | |||
914 | struct keyring_search_context ctx = { | 911 | struct keyring_search_context ctx = { |
915 | .index_key.type = type, | 912 | .index_key.type = type, |
916 | .index_key.description = description, | 913 | .index_key.description = description, |
914 | .index_key.desc_len = strlen(description), | ||
917 | .cred = current_cred(), | 915 | .cred = current_cred(), |
918 | .match_data.cmp = key_default_cmp, | 916 | .match_data.cmp = key_default_cmp, |
919 | .match_data.raw_data = description, | 917 | .match_data.raw_data = description, |
diff --git a/security/keys/proc.c b/security/keys/proc.c index d2b802072693..78ac305d715e 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c | |||
@@ -165,8 +165,7 @@ static int proc_keys_show(struct seq_file *m, void *v) | |||
165 | int rc; | 165 | int rc; |
166 | 166 | ||
167 | struct keyring_search_context ctx = { | 167 | struct keyring_search_context ctx = { |
168 | .index_key.type = key->type, | 168 | .index_key = key->index_key, |
169 | .index_key.description = key->description, | ||
170 | .cred = m->file->f_cred, | 169 | .cred = m->file->f_cred, |
171 | .match_data.cmp = lookup_user_key_possessed, | 170 | .match_data.cmp = lookup_user_key_possessed, |
172 | .match_data.raw_data = key, | 171 | .match_data.raw_data = key, |
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 02c77e928f68..0e0b9ccad2f8 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/security.h> | 19 | #include <linux/security.h> |
20 | #include <linux/user_namespace.h> | 20 | #include <linux/user_namespace.h> |
21 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
22 | #include <keys/request_key_auth-type.h> | ||
22 | #include "internal.h" | 23 | #include "internal.h" |
23 | 24 | ||
24 | /* Session keyring create vs join semaphore */ | 25 | /* Session keyring create vs join semaphore */ |
diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 301f0e300dbd..7a0c6b666ff0 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c | |||
@@ -18,31 +18,30 @@ | |||
18 | #include <linux/keyctl.h> | 18 | #include <linux/keyctl.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include "internal.h" | 20 | #include "internal.h" |
21 | #include <keys/request_key_auth-type.h> | ||
21 | 22 | ||
22 | #define key_negative_timeout 60 /* default timeout on a negative key's existence */ | 23 | #define key_negative_timeout 60 /* default timeout on a negative key's existence */ |
23 | 24 | ||
24 | /** | 25 | /** |
25 | * complete_request_key - Complete the construction of a key. | 26 | * complete_request_key - Complete the construction of a key. |
26 | * @cons: The key construction record. | 27 | * @auth_key: The authorisation key. |
27 | * @error: The success or failute of the construction. | 28 | * @error: The success or failute of the construction. |
28 | * | 29 | * |
29 | * Complete the attempt to construct a key. The key will be negated | 30 | * Complete the attempt to construct a key. The key will be negated |
30 | * if an error is indicated. The authorisation key will be revoked | 31 | * if an error is indicated. The authorisation key will be revoked |
31 | * unconditionally. | 32 | * unconditionally. |
32 | */ | 33 | */ |
33 | void complete_request_key(struct key_construction *cons, int error) | 34 | void complete_request_key(struct key *authkey, int error) |
34 | { | 35 | { |
35 | kenter("{%d,%d},%d", cons->key->serial, cons->authkey->serial, error); | 36 | struct request_key_auth *rka = get_request_key_auth(authkey); |
37 | struct key *key = rka->target_key; | ||
38 | |||
39 | kenter("%d{%d},%d", authkey->serial, key->serial, error); | ||
36 | 40 | ||
37 | if (error < 0) | 41 | if (error < 0) |
38 | key_negate_and_link(cons->key, key_negative_timeout, NULL, | 42 | key_negate_and_link(key, key_negative_timeout, NULL, authkey); |
39 | cons->authkey); | ||
40 | else | 43 | else |
41 | key_revoke(cons->authkey); | 44 | key_revoke(authkey); |
42 | |||
43 | key_put(cons->key); | ||
44 | key_put(cons->authkey); | ||
45 | kfree(cons); | ||
46 | } | 45 | } |
47 | EXPORT_SYMBOL(complete_request_key); | 46 | EXPORT_SYMBOL(complete_request_key); |
48 | 47 | ||
@@ -91,21 +90,19 @@ static int call_usermodehelper_keys(const char *path, char **argv, char **envp, | |||
91 | * Request userspace finish the construction of a key | 90 | * Request userspace finish the construction of a key |
92 | * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" | 91 | * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" |
93 | */ | 92 | */ |
94 | static int call_sbin_request_key(struct key_construction *cons, | 93 | static int call_sbin_request_key(struct key *authkey, void *aux) |
95 | const char *op, | ||
96 | void *aux) | ||
97 | { | 94 | { |
98 | static char const request_key[] = "/sbin/request-key"; | 95 | static char const request_key[] = "/sbin/request-key"; |
96 | struct request_key_auth *rka = get_request_key_auth(authkey); | ||
99 | const struct cred *cred = current_cred(); | 97 | const struct cred *cred = current_cred(); |
100 | key_serial_t prkey, sskey; | 98 | key_serial_t prkey, sskey; |
101 | struct key *key = cons->key, *authkey = cons->authkey, *keyring, | 99 | struct key *key = rka->target_key, *keyring, *session; |
102 | *session; | ||
103 | char *argv[9], *envp[3], uid_str[12], gid_str[12]; | 100 | char *argv[9], *envp[3], uid_str[12], gid_str[12]; |
104 | char key_str[12], keyring_str[3][12]; | 101 | char key_str[12], keyring_str[3][12]; |
105 | char desc[20]; | 102 | char desc[20]; |
106 | int ret, i; | 103 | int ret, i; |
107 | 104 | ||
108 | kenter("{%d},{%d},%s", key->serial, authkey->serial, op); | 105 | kenter("{%d},{%d},%s", key->serial, authkey->serial, rka->op); |
109 | 106 | ||
110 | ret = install_user_keyrings(); | 107 | ret = install_user_keyrings(); |
111 | if (ret < 0) | 108 | if (ret < 0) |
@@ -163,7 +160,7 @@ static int call_sbin_request_key(struct key_construction *cons, | |||
163 | /* set up the argument list */ | 160 | /* set up the argument list */ |
164 | i = 0; | 161 | i = 0; |
165 | argv[i++] = (char *)request_key; | 162 | argv[i++] = (char *)request_key; |
166 | argv[i++] = (char *) op; | 163 | argv[i++] = (char *)rka->op; |
167 | argv[i++] = key_str; | 164 | argv[i++] = key_str; |
168 | argv[i++] = uid_str; | 165 | argv[i++] = uid_str; |
169 | argv[i++] = gid_str; | 166 | argv[i++] = gid_str; |
@@ -191,7 +188,7 @@ error_link: | |||
191 | key_put(keyring); | 188 | key_put(keyring); |
192 | 189 | ||
193 | error_alloc: | 190 | error_alloc: |
194 | complete_request_key(cons, ret); | 191 | complete_request_key(authkey, ret); |
195 | kleave(" = %d", ret); | 192 | kleave(" = %d", ret); |
196 | return ret; | 193 | return ret; |
197 | } | 194 | } |
@@ -205,42 +202,31 @@ static int construct_key(struct key *key, const void *callout_info, | |||
205 | size_t callout_len, void *aux, | 202 | size_t callout_len, void *aux, |
206 | struct key *dest_keyring) | 203 | struct key *dest_keyring) |
207 | { | 204 | { |
208 | struct key_construction *cons; | ||
209 | request_key_actor_t actor; | 205 | request_key_actor_t actor; |
210 | struct key *authkey; | 206 | struct key *authkey; |
211 | int ret; | 207 | int ret; |
212 | 208 | ||
213 | kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); | 209 | kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); |
214 | 210 | ||
215 | cons = kmalloc(sizeof(*cons), GFP_KERNEL); | ||
216 | if (!cons) | ||
217 | return -ENOMEM; | ||
218 | |||
219 | /* allocate an authorisation key */ | 211 | /* allocate an authorisation key */ |
220 | authkey = request_key_auth_new(key, callout_info, callout_len, | 212 | authkey = request_key_auth_new(key, "create", callout_info, callout_len, |
221 | dest_keyring); | 213 | dest_keyring); |
222 | if (IS_ERR(authkey)) { | 214 | if (IS_ERR(authkey)) |
223 | kfree(cons); | 215 | return PTR_ERR(authkey); |
224 | ret = PTR_ERR(authkey); | ||
225 | authkey = NULL; | ||
226 | } else { | ||
227 | cons->authkey = key_get(authkey); | ||
228 | cons->key = key_get(key); | ||
229 | 216 | ||
230 | /* make the call */ | 217 | /* Make the call */ |
231 | actor = call_sbin_request_key; | 218 | actor = call_sbin_request_key; |
232 | if (key->type->request_key) | 219 | if (key->type->request_key) |
233 | actor = key->type->request_key; | 220 | actor = key->type->request_key; |
234 | 221 | ||
235 | ret = actor(cons, "create", aux); | 222 | ret = actor(authkey, aux); |
236 | 223 | ||
237 | /* check that the actor called complete_request_key() prior to | 224 | /* check that the actor called complete_request_key() prior to |
238 | * returning an error */ | 225 | * returning an error */ |
239 | WARN_ON(ret < 0 && | 226 | WARN_ON(ret < 0 && |
240 | !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); | 227 | !test_bit(KEY_FLAG_REVOKED, &authkey->flags)); |
241 | key_put(authkey); | ||
242 | } | ||
243 | 228 | ||
229 | key_put(authkey); | ||
244 | kleave(" = %d", ret); | 230 | kleave(" = %d", ret); |
245 | return ret; | 231 | return ret; |
246 | } | 232 | } |
@@ -275,7 +261,7 @@ static int construct_get_dest_keyring(struct key **_dest_keyring) | |||
275 | if (cred->request_key_auth) { | 261 | if (cred->request_key_auth) { |
276 | authkey = cred->request_key_auth; | 262 | authkey = cred->request_key_auth; |
277 | down_read(&authkey->sem); | 263 | down_read(&authkey->sem); |
278 | rka = authkey->payload.data[0]; | 264 | rka = get_request_key_auth(authkey); |
279 | if (!test_bit(KEY_FLAG_REVOKED, | 265 | if (!test_bit(KEY_FLAG_REVOKED, |
280 | &authkey->flags)) | 266 | &authkey->flags)) |
281 | dest_keyring = | 267 | dest_keyring = |
@@ -545,6 +531,7 @@ struct key *request_key_and_link(struct key_type *type, | |||
545 | struct keyring_search_context ctx = { | 531 | struct keyring_search_context ctx = { |
546 | .index_key.type = type, | 532 | .index_key.type = type, |
547 | .index_key.description = description, | 533 | .index_key.description = description, |
534 | .index_key.desc_len = strlen(description), | ||
548 | .cred = current_cred(), | 535 | .cred = current_cred(), |
549 | .match_data.cmp = key_default_cmp, | 536 | .match_data.cmp = key_default_cmp, |
550 | .match_data.raw_data = description, | 537 | .match_data.raw_data = description, |
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 87ea2f54dedc..bda6201c6c45 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/uaccess.h> | 18 | #include <linux/uaccess.h> |
19 | #include "internal.h" | 19 | #include "internal.h" |
20 | #include <keys/user-type.h> | 20 | #include <keys/request_key_auth-type.h> |
21 | 21 | ||
22 | static int request_key_auth_preparse(struct key_preparsed_payload *); | 22 | static int request_key_auth_preparse(struct key_preparsed_payload *); |
23 | static void request_key_auth_free_preparse(struct key_preparsed_payload *); | 23 | static void request_key_auth_free_preparse(struct key_preparsed_payload *); |
@@ -68,7 +68,7 @@ static int request_key_auth_instantiate(struct key *key, | |||
68 | static void request_key_auth_describe(const struct key *key, | 68 | static void request_key_auth_describe(const struct key *key, |
69 | struct seq_file *m) | 69 | struct seq_file *m) |
70 | { | 70 | { |
71 | struct request_key_auth *rka = key->payload.data[0]; | 71 | struct request_key_auth *rka = get_request_key_auth(key); |
72 | 72 | ||
73 | seq_puts(m, "key:"); | 73 | seq_puts(m, "key:"); |
74 | seq_puts(m, key->description); | 74 | seq_puts(m, key->description); |
@@ -83,7 +83,7 @@ static void request_key_auth_describe(const struct key *key, | |||
83 | static long request_key_auth_read(const struct key *key, | 83 | static long request_key_auth_read(const struct key *key, |
84 | char __user *buffer, size_t buflen) | 84 | char __user *buffer, size_t buflen) |
85 | { | 85 | { |
86 | struct request_key_auth *rka = key->payload.data[0]; | 86 | struct request_key_auth *rka = get_request_key_auth(key); |
87 | size_t datalen; | 87 | size_t datalen; |
88 | long ret; | 88 | long ret; |
89 | 89 | ||
@@ -109,7 +109,7 @@ static long request_key_auth_read(const struct key *key, | |||
109 | */ | 109 | */ |
110 | static void request_key_auth_revoke(struct key *key) | 110 | static void request_key_auth_revoke(struct key *key) |
111 | { | 111 | { |
112 | struct request_key_auth *rka = key->payload.data[0]; | 112 | struct request_key_auth *rka = get_request_key_auth(key); |
113 | 113 | ||
114 | kenter("{%d}", key->serial); | 114 | kenter("{%d}", key->serial); |
115 | 115 | ||
@@ -136,7 +136,7 @@ static void free_request_key_auth(struct request_key_auth *rka) | |||
136 | */ | 136 | */ |
137 | static void request_key_auth_destroy(struct key *key) | 137 | static void request_key_auth_destroy(struct key *key) |
138 | { | 138 | { |
139 | struct request_key_auth *rka = key->payload.data[0]; | 139 | struct request_key_auth *rka = get_request_key_auth(key); |
140 | 140 | ||
141 | kenter("{%d}", key->serial); | 141 | kenter("{%d}", key->serial); |
142 | 142 | ||
@@ -147,8 +147,9 @@ static void request_key_auth_destroy(struct key *key) | |||
147 | * Create an authorisation token for /sbin/request-key or whoever to gain | 147 | * Create an authorisation token for /sbin/request-key or whoever to gain |
148 | * access to the caller's security data. | 148 | * access to the caller's security data. |
149 | */ | 149 | */ |
150 | struct key *request_key_auth_new(struct key *target, const void *callout_info, | 150 | struct key *request_key_auth_new(struct key *target, const char *op, |
151 | size_t callout_len, struct key *dest_keyring) | 151 | const void *callout_info, size_t callout_len, |
152 | struct key *dest_keyring) | ||
152 | { | 153 | { |
153 | struct request_key_auth *rka, *irka; | 154 | struct request_key_auth *rka, *irka; |
154 | const struct cred *cred = current->cred; | 155 | const struct cred *cred = current->cred; |
@@ -166,6 +167,7 @@ struct key *request_key_auth_new(struct key *target, const void *callout_info, | |||
166 | if (!rka->callout_info) | 167 | if (!rka->callout_info) |
167 | goto error_free_rka; | 168 | goto error_free_rka; |
168 | rka->callout_len = callout_len; | 169 | rka->callout_len = callout_len; |
170 | strlcpy(rka->op, op, sizeof(rka->op)); | ||
169 | 171 | ||
170 | /* see if the calling process is already servicing the key request of | 172 | /* see if the calling process is already servicing the key request of |
171 | * another process */ | 173 | * another process */ |
@@ -245,7 +247,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id) | |||
245 | struct key *authkey; | 247 | struct key *authkey; |
246 | key_ref_t authkey_ref; | 248 | key_ref_t authkey_ref; |
247 | 249 | ||
248 | sprintf(description, "%x", target_id); | 250 | ctx.index_key.desc_len = sprintf(description, "%x", target_id); |
249 | 251 | ||
250 | authkey_ref = search_process_keyrings(&ctx); | 252 | authkey_ref = search_process_keyrings(&ctx); |
251 | 253 | ||
diff --git a/security/lsm_audit.c b/security/lsm_audit.c index f84001019356..33028c098ef3 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c | |||
@@ -321,6 +321,7 @@ static void dump_common_audit_data(struct audit_buffer *ab, | |||
321 | if (a->u.net->sk) { | 321 | if (a->u.net->sk) { |
322 | struct sock *sk = a->u.net->sk; | 322 | struct sock *sk = a->u.net->sk; |
323 | struct unix_sock *u; | 323 | struct unix_sock *u; |
324 | struct unix_address *addr; | ||
324 | int len = 0; | 325 | int len = 0; |
325 | char *p = NULL; | 326 | char *p = NULL; |
326 | 327 | ||
@@ -351,14 +352,15 @@ static void dump_common_audit_data(struct audit_buffer *ab, | |||
351 | #endif | 352 | #endif |
352 | case AF_UNIX: | 353 | case AF_UNIX: |
353 | u = unix_sk(sk); | 354 | u = unix_sk(sk); |
355 | addr = smp_load_acquire(&u->addr); | ||
356 | if (!addr) | ||
357 | break; | ||
354 | if (u->path.dentry) { | 358 | if (u->path.dentry) { |
355 | audit_log_d_path(ab, " path=", &u->path); | 359 | audit_log_d_path(ab, " path=", &u->path); |
356 | break; | 360 | break; |
357 | } | 361 | } |
358 | if (!u->addr) | 362 | len = addr->len-sizeof(short); |
359 | break; | 363 | p = &addr->name->sun_path[0]; |
360 | len = u->addr->len-sizeof(short); | ||
361 | p = &u->addr->name->sun_path[0]; | ||
362 | audit_log_format(ab, " path="); | 364 | audit_log_format(ab, " path="); |
363 | if (*p) | 365 | if (*p) |
364 | audit_log_untrustedstring(ab, p); | 366 | audit_log_untrustedstring(ab, p); |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 6c99fa8ac5fa..6c0b30391ba9 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -2112,13 +2112,6 @@ int pcm_lib_apply_appl_ptr(struct snd_pcm_substream *substream, | |||
2112 | return 0; | 2112 | return 0; |
2113 | } | 2113 | } |
2114 | 2114 | ||
2115 | /* allow waiting for a capture stream that hasn't been started */ | ||
2116 | #if IS_ENABLED(CONFIG_SND_PCM_OSS) | ||
2117 | #define wait_capture_start(substream) ((substream)->oss.oss) | ||
2118 | #else | ||
2119 | #define wait_capture_start(substream) false | ||
2120 | #endif | ||
2121 | |||
2122 | /* the common loop for read/write data */ | 2115 | /* the common loop for read/write data */ |
2123 | snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, | 2116 | snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, |
2124 | void *data, bool interleaved, | 2117 | void *data, bool interleaved, |
@@ -2184,16 +2177,11 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, | |||
2184 | snd_pcm_update_hw_ptr(substream); | 2177 | snd_pcm_update_hw_ptr(substream); |
2185 | 2178 | ||
2186 | if (!is_playback && | 2179 | if (!is_playback && |
2187 | runtime->status->state == SNDRV_PCM_STATE_PREPARED) { | 2180 | runtime->status->state == SNDRV_PCM_STATE_PREPARED && |
2188 | if (size >= runtime->start_threshold) { | 2181 | size >= runtime->start_threshold) { |
2189 | err = snd_pcm_start(substream); | 2182 | err = snd_pcm_start(substream); |
2190 | if (err < 0) | 2183 | if (err < 0) |
2191 | goto _end_unlock; | ||
2192 | } else if (!wait_capture_start(substream)) { | ||
2193 | /* nothing to do */ | ||
2194 | err = 0; | ||
2195 | goto _end_unlock; | 2184 | goto _end_unlock; |
2196 | } | ||
2197 | } | 2185 | } |
2198 | 2186 | ||
2199 | avail = snd_pcm_avail(substream); | 2187 | avail = snd_pcm_avail(substream); |
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c index 9174f1b3a987..1ec706ced75c 100644 --- a/sound/pci/hda/hda_bind.c +++ b/sound/pci/hda/hda_bind.c | |||
@@ -115,7 +115,8 @@ static int hda_codec_driver_probe(struct device *dev) | |||
115 | err = snd_hda_codec_build_controls(codec); | 115 | err = snd_hda_codec_build_controls(codec); |
116 | if (err < 0) | 116 | if (err < 0) |
117 | goto error_module; | 117 | goto error_module; |
118 | if (codec->card->registered) { | 118 | /* only register after the bus probe finished; otherwise it's racy */ |
119 | if (!codec->bus->bus_probing && codec->card->registered) { | ||
119 | err = snd_card_register(codec->card); | 120 | err = snd_card_register(codec->card); |
120 | if (err < 0) | 121 | if (err < 0) |
121 | goto error_module; | 122 | goto error_module; |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index e784130ea4e0..e5c49003e75f 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2185,6 +2185,7 @@ static int azx_probe_continue(struct azx *chip) | |||
2185 | int dev = chip->dev_index; | 2185 | int dev = chip->dev_index; |
2186 | int err; | 2186 | int err; |
2187 | 2187 | ||
2188 | to_hda_bus(bus)->bus_probing = 1; | ||
2188 | hda->probe_continued = 1; | 2189 | hda->probe_continued = 1; |
2189 | 2190 | ||
2190 | /* bind with i915 if needed */ | 2191 | /* bind with i915 if needed */ |
@@ -2269,6 +2270,7 @@ out_free: | |||
2269 | if (err < 0) | 2270 | if (err < 0) |
2270 | hda->init_failed = 1; | 2271 | hda->init_failed = 1; |
2271 | complete_all(&hda->probe_wait); | 2272 | complete_all(&hda->probe_wait); |
2273 | to_hda_bus(bus)->bus_probing = 0; | ||
2272 | return err; | 2274 | return err; |
2273 | } | 2275 | } |
2274 | 2276 | ||
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index e5bdbc245682..29882bda7632 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c | |||
@@ -8451,8 +8451,10 @@ static void ca0132_free(struct hda_codec *codec) | |||
8451 | ca0132_exit_chip(codec); | 8451 | ca0132_exit_chip(codec); |
8452 | 8452 | ||
8453 | snd_hda_power_down(codec); | 8453 | snd_hda_power_down(codec); |
8454 | if (IS_ENABLED(CONFIG_PCI) && spec->mem_base) | 8454 | #ifdef CONFIG_PCI |
8455 | if (spec->mem_base) | ||
8455 | pci_iounmap(codec->bus->pci, spec->mem_base); | 8456 | pci_iounmap(codec->bus->pci, spec->mem_base); |
8457 | #endif | ||
8456 | kfree(spec->spec_init_verbs); | 8458 | kfree(spec->spec_init_verbs); |
8457 | kfree(codec->spec); | 8459 | kfree(codec->spec); |
8458 | } | 8460 | } |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 152f54137082..a4ee7656d9ee 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -924,6 +924,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { | |||
924 | SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), | 924 | SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), |
925 | SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), | 925 | SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), |
926 | SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), | 926 | SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), |
927 | SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK), | ||
927 | SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), | 928 | SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), |
928 | SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), | 929 | SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), |
929 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), | 930 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 4139aced63f8..1ffa36e987b4 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -515,6 +515,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type) | |||
515 | } | 515 | } |
516 | } | 516 | } |
517 | 517 | ||
518 | /* get a primary headphone pin if available */ | ||
519 | static hda_nid_t alc_get_hp_pin(struct alc_spec *spec) | ||
520 | { | ||
521 | if (spec->gen.autocfg.hp_pins[0]) | ||
522 | return spec->gen.autocfg.hp_pins[0]; | ||
523 | if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT) | ||
524 | return spec->gen.autocfg.line_out_pins[0]; | ||
525 | return 0; | ||
526 | } | ||
518 | 527 | ||
519 | /* | 528 | /* |
520 | * Realtek SSID verification | 529 | * Realtek SSID verification |
@@ -725,9 +734,7 @@ do_sku: | |||
725 | * 15 : 1 --> enable the function "Mute internal speaker | 734 | * 15 : 1 --> enable the function "Mute internal speaker |
726 | * when the external headphone out jack is plugged" | 735 | * when the external headphone out jack is plugged" |
727 | */ | 736 | */ |
728 | if (!spec->gen.autocfg.hp_pins[0] && | 737 | if (!alc_get_hp_pin(spec)) { |
729 | !(spec->gen.autocfg.line_out_pins[0] && | ||
730 | spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) { | ||
731 | hda_nid_t nid; | 738 | hda_nid_t nid; |
732 | tmp = (ass >> 11) & 0x3; /* HP to chassis */ | 739 | tmp = (ass >> 11) & 0x3; /* HP to chassis */ |
733 | nid = ports[tmp]; | 740 | nid = ports[tmp]; |
@@ -1848,6 +1855,8 @@ enum { | |||
1848 | ALC887_FIXUP_BASS_CHMAP, | 1855 | ALC887_FIXUP_BASS_CHMAP, |
1849 | ALC1220_FIXUP_GB_DUAL_CODECS, | 1856 | ALC1220_FIXUP_GB_DUAL_CODECS, |
1850 | ALC1220_FIXUP_CLEVO_P950, | 1857 | ALC1220_FIXUP_CLEVO_P950, |
1858 | ALC1220_FIXUP_SYSTEM76_ORYP5, | ||
1859 | ALC1220_FIXUP_SYSTEM76_ORYP5_PINS, | ||
1851 | }; | 1860 | }; |
1852 | 1861 | ||
1853 | static void alc889_fixup_coef(struct hda_codec *codec, | 1862 | static void alc889_fixup_coef(struct hda_codec *codec, |
@@ -2049,6 +2058,17 @@ static void alc1220_fixup_clevo_p950(struct hda_codec *codec, | |||
2049 | snd_hda_override_conn_list(codec, 0x1b, 1, conn1); | 2058 | snd_hda_override_conn_list(codec, 0x1b, 1, conn1); |
2050 | } | 2059 | } |
2051 | 2060 | ||
2061 | static void alc_fixup_headset_mode_no_hp_mic(struct hda_codec *codec, | ||
2062 | const struct hda_fixup *fix, int action); | ||
2063 | |||
2064 | static void alc1220_fixup_system76_oryp5(struct hda_codec *codec, | ||
2065 | const struct hda_fixup *fix, | ||
2066 | int action) | ||
2067 | { | ||
2068 | alc1220_fixup_clevo_p950(codec, fix, action); | ||
2069 | alc_fixup_headset_mode_no_hp_mic(codec, fix, action); | ||
2070 | } | ||
2071 | |||
2052 | static const struct hda_fixup alc882_fixups[] = { | 2072 | static const struct hda_fixup alc882_fixups[] = { |
2053 | [ALC882_FIXUP_ABIT_AW9D_MAX] = { | 2073 | [ALC882_FIXUP_ABIT_AW9D_MAX] = { |
2054 | .type = HDA_FIXUP_PINS, | 2074 | .type = HDA_FIXUP_PINS, |
@@ -2293,6 +2313,19 @@ static const struct hda_fixup alc882_fixups[] = { | |||
2293 | .type = HDA_FIXUP_FUNC, | 2313 | .type = HDA_FIXUP_FUNC, |
2294 | .v.func = alc1220_fixup_clevo_p950, | 2314 | .v.func = alc1220_fixup_clevo_p950, |
2295 | }, | 2315 | }, |
2316 | [ALC1220_FIXUP_SYSTEM76_ORYP5] = { | ||
2317 | .type = HDA_FIXUP_FUNC, | ||
2318 | .v.func = alc1220_fixup_system76_oryp5, | ||
2319 | }, | ||
2320 | [ALC1220_FIXUP_SYSTEM76_ORYP5_PINS] = { | ||
2321 | .type = HDA_FIXUP_PINS, | ||
2322 | .v.pins = (const struct hda_pintbl[]) { | ||
2323 | { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ | ||
2324 | {} | ||
2325 | }, | ||
2326 | .chained = true, | ||
2327 | .chain_id = ALC1220_FIXUP_SYSTEM76_ORYP5, | ||
2328 | }, | ||
2296 | }; | 2329 | }; |
2297 | 2330 | ||
2298 | static const struct snd_pci_quirk alc882_fixup_tbl[] = { | 2331 | static const struct snd_pci_quirk alc882_fixup_tbl[] = { |
@@ -2369,6 +2402,8 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { | |||
2369 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), | 2402 | SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950), |
2370 | SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), | 2403 | SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950), |
2371 | SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), | 2404 | SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950), |
2405 | SND_PCI_QUIRK(0x1558, 0x96e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), | ||
2406 | SND_PCI_QUIRK(0x1558, 0x97e1, "System76 Oryx Pro (oryp5)", ALC1220_FIXUP_SYSTEM76_ORYP5_PINS), | ||
2372 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), | 2407 | SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD), |
2373 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), | 2408 | SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD), |
2374 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), | 2409 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Y530", ALC882_FIXUP_LENOVO_Y530), |
@@ -2959,7 +2994,7 @@ static void alc282_restore_default_value(struct hda_codec *codec) | |||
2959 | static void alc282_init(struct hda_codec *codec) | 2994 | static void alc282_init(struct hda_codec *codec) |
2960 | { | 2995 | { |
2961 | struct alc_spec *spec = codec->spec; | 2996 | struct alc_spec *spec = codec->spec; |
2962 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 2997 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
2963 | bool hp_pin_sense; | 2998 | bool hp_pin_sense; |
2964 | int coef78; | 2999 | int coef78; |
2965 | 3000 | ||
@@ -2996,7 +3031,7 @@ static void alc282_init(struct hda_codec *codec) | |||
2996 | static void alc282_shutup(struct hda_codec *codec) | 3031 | static void alc282_shutup(struct hda_codec *codec) |
2997 | { | 3032 | { |
2998 | struct alc_spec *spec = codec->spec; | 3033 | struct alc_spec *spec = codec->spec; |
2999 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3034 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3000 | bool hp_pin_sense; | 3035 | bool hp_pin_sense; |
3001 | int coef78; | 3036 | int coef78; |
3002 | 3037 | ||
@@ -3074,14 +3109,9 @@ static void alc283_restore_default_value(struct hda_codec *codec) | |||
3074 | static void alc283_init(struct hda_codec *codec) | 3109 | static void alc283_init(struct hda_codec *codec) |
3075 | { | 3110 | { |
3076 | struct alc_spec *spec = codec->spec; | 3111 | struct alc_spec *spec = codec->spec; |
3077 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3112 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3078 | bool hp_pin_sense; | 3113 | bool hp_pin_sense; |
3079 | 3114 | ||
3080 | if (!spec->gen.autocfg.hp_outs) { | ||
3081 | if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT) | ||
3082 | hp_pin = spec->gen.autocfg.line_out_pins[0]; | ||
3083 | } | ||
3084 | |||
3085 | alc283_restore_default_value(codec); | 3115 | alc283_restore_default_value(codec); |
3086 | 3116 | ||
3087 | if (!hp_pin) | 3117 | if (!hp_pin) |
@@ -3115,14 +3145,9 @@ static void alc283_init(struct hda_codec *codec) | |||
3115 | static void alc283_shutup(struct hda_codec *codec) | 3145 | static void alc283_shutup(struct hda_codec *codec) |
3116 | { | 3146 | { |
3117 | struct alc_spec *spec = codec->spec; | 3147 | struct alc_spec *spec = codec->spec; |
3118 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3148 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3119 | bool hp_pin_sense; | 3149 | bool hp_pin_sense; |
3120 | 3150 | ||
3121 | if (!spec->gen.autocfg.hp_outs) { | ||
3122 | if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT) | ||
3123 | hp_pin = spec->gen.autocfg.line_out_pins[0]; | ||
3124 | } | ||
3125 | |||
3126 | if (!hp_pin) { | 3151 | if (!hp_pin) { |
3127 | alc269_shutup(codec); | 3152 | alc269_shutup(codec); |
3128 | return; | 3153 | return; |
@@ -3156,7 +3181,7 @@ static void alc283_shutup(struct hda_codec *codec) | |||
3156 | static void alc256_init(struct hda_codec *codec) | 3181 | static void alc256_init(struct hda_codec *codec) |
3157 | { | 3182 | { |
3158 | struct alc_spec *spec = codec->spec; | 3183 | struct alc_spec *spec = codec->spec; |
3159 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3184 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3160 | bool hp_pin_sense; | 3185 | bool hp_pin_sense; |
3161 | 3186 | ||
3162 | if (!hp_pin) | 3187 | if (!hp_pin) |
@@ -3192,7 +3217,7 @@ static void alc256_init(struct hda_codec *codec) | |||
3192 | static void alc256_shutup(struct hda_codec *codec) | 3217 | static void alc256_shutup(struct hda_codec *codec) |
3193 | { | 3218 | { |
3194 | struct alc_spec *spec = codec->spec; | 3219 | struct alc_spec *spec = codec->spec; |
3195 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3220 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3196 | bool hp_pin_sense; | 3221 | bool hp_pin_sense; |
3197 | 3222 | ||
3198 | if (!hp_pin) { | 3223 | if (!hp_pin) { |
@@ -3228,7 +3253,7 @@ static void alc256_shutup(struct hda_codec *codec) | |||
3228 | static void alc225_init(struct hda_codec *codec) | 3253 | static void alc225_init(struct hda_codec *codec) |
3229 | { | 3254 | { |
3230 | struct alc_spec *spec = codec->spec; | 3255 | struct alc_spec *spec = codec->spec; |
3231 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3256 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3232 | bool hp1_pin_sense, hp2_pin_sense; | 3257 | bool hp1_pin_sense, hp2_pin_sense; |
3233 | 3258 | ||
3234 | if (!hp_pin) | 3259 | if (!hp_pin) |
@@ -3271,7 +3296,7 @@ static void alc225_init(struct hda_codec *codec) | |||
3271 | static void alc225_shutup(struct hda_codec *codec) | 3296 | static void alc225_shutup(struct hda_codec *codec) |
3272 | { | 3297 | { |
3273 | struct alc_spec *spec = codec->spec; | 3298 | struct alc_spec *spec = codec->spec; |
3274 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3299 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3275 | bool hp1_pin_sense, hp2_pin_sense; | 3300 | bool hp1_pin_sense, hp2_pin_sense; |
3276 | 3301 | ||
3277 | if (!hp_pin) { | 3302 | if (!hp_pin) { |
@@ -3315,7 +3340,7 @@ static void alc225_shutup(struct hda_codec *codec) | |||
3315 | static void alc_default_init(struct hda_codec *codec) | 3340 | static void alc_default_init(struct hda_codec *codec) |
3316 | { | 3341 | { |
3317 | struct alc_spec *spec = codec->spec; | 3342 | struct alc_spec *spec = codec->spec; |
3318 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3343 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3319 | bool hp_pin_sense; | 3344 | bool hp_pin_sense; |
3320 | 3345 | ||
3321 | if (!hp_pin) | 3346 | if (!hp_pin) |
@@ -3344,7 +3369,7 @@ static void alc_default_init(struct hda_codec *codec) | |||
3344 | static void alc_default_shutup(struct hda_codec *codec) | 3369 | static void alc_default_shutup(struct hda_codec *codec) |
3345 | { | 3370 | { |
3346 | struct alc_spec *spec = codec->spec; | 3371 | struct alc_spec *spec = codec->spec; |
3347 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3372 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3348 | bool hp_pin_sense; | 3373 | bool hp_pin_sense; |
3349 | 3374 | ||
3350 | if (!hp_pin) { | 3375 | if (!hp_pin) { |
@@ -3376,7 +3401,7 @@ static void alc_default_shutup(struct hda_codec *codec) | |||
3376 | static void alc294_hp_init(struct hda_codec *codec) | 3401 | static void alc294_hp_init(struct hda_codec *codec) |
3377 | { | 3402 | { |
3378 | struct alc_spec *spec = codec->spec; | 3403 | struct alc_spec *spec = codec->spec; |
3379 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3404 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3380 | int i, val; | 3405 | int i, val; |
3381 | 3406 | ||
3382 | if (!hp_pin) | 3407 | if (!hp_pin) |
@@ -4780,7 +4805,7 @@ static void alc_update_headset_mode(struct hda_codec *codec) | |||
4780 | struct alc_spec *spec = codec->spec; | 4805 | struct alc_spec *spec = codec->spec; |
4781 | 4806 | ||
4782 | hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]]; | 4807 | hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]]; |
4783 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 4808 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
4784 | 4809 | ||
4785 | int new_headset_mode; | 4810 | int new_headset_mode; |
4786 | 4811 | ||
@@ -5059,7 +5084,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec, | |||
5059 | static void alc_shutup_dell_xps13(struct hda_codec *codec) | 5084 | static void alc_shutup_dell_xps13(struct hda_codec *codec) |
5060 | { | 5085 | { |
5061 | struct alc_spec *spec = codec->spec; | 5086 | struct alc_spec *spec = codec->spec; |
5062 | int hp_pin = spec->gen.autocfg.hp_pins[0]; | 5087 | int hp_pin = alc_get_hp_pin(spec); |
5063 | 5088 | ||
5064 | /* Prevent pop noises when headphones are plugged in */ | 5089 | /* Prevent pop noises when headphones are plugged in */ |
5065 | snd_hda_codec_write(codec, hp_pin, 0, | 5090 | snd_hda_codec_write(codec, hp_pin, 0, |
@@ -5152,7 +5177,7 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec, | |||
5152 | 5177 | ||
5153 | if (action == HDA_FIXUP_ACT_PROBE) { | 5178 | if (action == HDA_FIXUP_ACT_PROBE) { |
5154 | int mic_pin = find_ext_mic_pin(codec); | 5179 | int mic_pin = find_ext_mic_pin(codec); |
5155 | int hp_pin = spec->gen.autocfg.hp_pins[0]; | 5180 | int hp_pin = alc_get_hp_pin(spec); |
5156 | 5181 | ||
5157 | if (snd_BUG_ON(!mic_pin || !hp_pin)) | 5182 | if (snd_BUG_ON(!mic_pin || !hp_pin)) |
5158 | return; | 5183 | return; |
@@ -5634,6 +5659,8 @@ enum { | |||
5634 | ALC294_FIXUP_ASUS_HEADSET_MIC, | 5659 | ALC294_FIXUP_ASUS_HEADSET_MIC, |
5635 | ALC294_FIXUP_ASUS_SPK, | 5660 | ALC294_FIXUP_ASUS_SPK, |
5636 | ALC225_FIXUP_HEADSET_JACK, | 5661 | ALC225_FIXUP_HEADSET_JACK, |
5662 | ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE, | ||
5663 | ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, | ||
5637 | }; | 5664 | }; |
5638 | 5665 | ||
5639 | static const struct hda_fixup alc269_fixups[] = { | 5666 | static const struct hda_fixup alc269_fixups[] = { |
@@ -6580,6 +6607,26 @@ static const struct hda_fixup alc269_fixups[] = { | |||
6580 | .type = HDA_FIXUP_FUNC, | 6607 | .type = HDA_FIXUP_FUNC, |
6581 | .v.func = alc_fixup_headset_jack, | 6608 | .v.func = alc_fixup_headset_jack, |
6582 | }, | 6609 | }, |
6610 | [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = { | ||
6611 | .type = HDA_FIXUP_PINS, | ||
6612 | .v.pins = (const struct hda_pintbl[]) { | ||
6613 | { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */ | ||
6614 | { } | ||
6615 | }, | ||
6616 | .chained = true, | ||
6617 | .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC | ||
6618 | }, | ||
6619 | [ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE] = { | ||
6620 | .type = HDA_FIXUP_VERBS, | ||
6621 | .v.verbs = (const struct hda_verb[]) { | ||
6622 | /* Disable PCBEEP-IN passthrough */ | ||
6623 | { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 }, | ||
6624 | { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 }, | ||
6625 | { } | ||
6626 | }, | ||
6627 | .chained = true, | ||
6628 | .chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE | ||
6629 | }, | ||
6583 | }; | 6630 | }; |
6584 | 6631 | ||
6585 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { | 6632 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
@@ -6758,6 +6805,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
6758 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), | 6805 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), |
6759 | SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), | 6806 | SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), |
6760 | SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), | 6807 | SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), |
6808 | SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), | ||
6761 | SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), | 6809 | SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), |
6762 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), | 6810 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), |
6763 | SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), | 6811 | SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), |
@@ -7264,7 +7312,7 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
7264 | {0x12, 0x90a60130}, | 7312 | {0x12, 0x90a60130}, |
7265 | {0x19, 0x03a11020}, | 7313 | {0x19, 0x03a11020}, |
7266 | {0x21, 0x0321101f}), | 7314 | {0x21, 0x0321101f}), |
7267 | SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_HEADPHONE_NOISE, | 7315 | SND_HDA_PIN_QUIRK(0x10ec0285, 0x17aa, "Lenovo", ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE, |
7268 | {0x12, 0x90a60130}, | 7316 | {0x12, 0x90a60130}, |
7269 | {0x14, 0x90170110}, | 7317 | {0x14, 0x90170110}, |
7270 | {0x19, 0x04a11040}, | 7318 | {0x19, 0x04a11040}, |
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index d00734d31e04..e5b6769b9797 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c | |||
@@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev) | |||
795 | if (hcd->spdif) | 795 | if (hcd->spdif) |
796 | hcp->daidrv[i] = hdmi_spdif_dai; | 796 | hcp->daidrv[i] = hdmi_spdif_dai; |
797 | 797 | ||
798 | dev_set_drvdata(dev, hcp); | ||
799 | |||
798 | ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv, | 800 | ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv, |
799 | dai_count); | 801 | dai_count); |
800 | if (ret) { | 802 | if (ret) { |
@@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev) | |||
802 | __func__, ret); | 804 | __func__, ret); |
803 | return ret; | 805 | return ret; |
804 | } | 806 | } |
805 | |||
806 | dev_set_drvdata(dev, hcp); | ||
807 | return 0; | 807 | return 0; |
808 | } | 808 | } |
809 | 809 | ||
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 89c43b26c379..a9b91bcfcc09 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c | |||
@@ -1778,7 +1778,9 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = { | |||
1778 | {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc}, | 1778 | {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc}, |
1779 | {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, | 1779 | {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, |
1780 | {"ADC STO1 ASRC", NULL, "AD ASRC"}, | 1780 | {"ADC STO1 ASRC", NULL, "AD ASRC"}, |
1781 | {"ADC STO1 ASRC", NULL, "DA ASRC"}, | ||
1781 | {"ADC STO1 ASRC", NULL, "CLKDET"}, | 1782 | {"ADC STO1 ASRC", NULL, "CLKDET"}, |
1783 | {"DAC STO1 ASRC", NULL, "AD ASRC"}, | ||
1782 | {"DAC STO1 ASRC", NULL, "DA ASRC"}, | 1784 | {"DAC STO1 ASRC", NULL, "DA ASRC"}, |
1783 | {"DAC STO1 ASRC", NULL, "CLKDET"}, | 1785 | {"DAC STO1 ASRC", NULL, "CLKDET"}, |
1784 | 1786 | ||
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index 37e001cf9cd1..3fe34417ec89 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c | |||
@@ -462,7 +462,7 @@ static int asoc_simple_card_parse_of(struct simple_card_data *priv) | |||
462 | conf_idx = 0; | 462 | conf_idx = 0; |
463 | node = of_get_child_by_name(top, PREFIX "dai-link"); | 463 | node = of_get_child_by_name(top, PREFIX "dai-link"); |
464 | if (!node) { | 464 | if (!node) { |
465 | node = dev->of_node; | 465 | node = of_node_get(top); |
466 | loop = 0; | 466 | loop = 0; |
467 | } | 467 | } |
468 | 468 | ||
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index d6c62aa13041..d4bde4834ce5 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c | |||
@@ -604,6 +604,7 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, | |||
604 | unsigned int fmt) | 604 | unsigned int fmt) |
605 | { | 605 | { |
606 | struct i2s_dai *i2s = to_info(dai); | 606 | struct i2s_dai *i2s = to_info(dai); |
607 | struct i2s_dai *other = get_other_dai(i2s); | ||
607 | int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave; | 608 | int lrp_shift, sdf_shift, sdf_mask, lrp_rlow, mod_slave; |
608 | u32 mod, tmp = 0; | 609 | u32 mod, tmp = 0; |
609 | unsigned long flags; | 610 | unsigned long flags; |
@@ -661,7 +662,8 @@ static int i2s_set_fmt(struct snd_soc_dai *dai, | |||
661 | * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any | 662 | * CLK_I2S_RCLK_SRC clock is not exposed so we ensure any |
662 | * clock configuration assigned in DT is not overwritten. | 663 | * clock configuration assigned in DT is not overwritten. |
663 | */ | 664 | */ |
664 | if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL) | 665 | if (i2s->rclk_srcrate == 0 && i2s->clk_data.clks == NULL && |
666 | other->clk_data.clks == NULL) | ||
665 | i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, | 667 | i2s_set_sysclk(dai, SAMSUNG_I2S_RCLKSRC_0, |
666 | 0, SND_SOC_CLOCK_IN); | 668 | 0, SND_SOC_CLOCK_IN); |
667 | break; | 669 | break; |
@@ -699,7 +701,9 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, | |||
699 | struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) | 701 | struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) |
700 | { | 702 | { |
701 | struct i2s_dai *i2s = to_info(dai); | 703 | struct i2s_dai *i2s = to_info(dai); |
704 | struct i2s_dai *other = get_other_dai(i2s); | ||
702 | u32 mod, mask = 0, val = 0; | 705 | u32 mod, mask = 0, val = 0; |
706 | struct clk *rclksrc; | ||
703 | unsigned long flags; | 707 | unsigned long flags; |
704 | 708 | ||
705 | WARN_ON(!pm_runtime_active(dai->dev)); | 709 | WARN_ON(!pm_runtime_active(dai->dev)); |
@@ -782,6 +786,13 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, | |||
782 | 786 | ||
783 | i2s->frmclk = params_rate(params); | 787 | i2s->frmclk = params_rate(params); |
784 | 788 | ||
789 | rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC]; | ||
790 | if (!rclksrc || IS_ERR(rclksrc)) | ||
791 | rclksrc = other->clk_table[CLK_I2S_RCLK_SRC]; | ||
792 | |||
793 | if (rclksrc && !IS_ERR(rclksrc)) | ||
794 | i2s->rclk_srcrate = clk_get_rate(rclksrc); | ||
795 | |||
785 | return 0; | 796 | return 0; |
786 | } | 797 | } |
787 | 798 | ||
@@ -886,11 +897,6 @@ static int config_setup(struct i2s_dai *i2s) | |||
886 | return 0; | 897 | return 0; |
887 | 898 | ||
888 | if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { | 899 | if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { |
889 | struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC]; | ||
890 | |||
891 | if (rclksrc && !IS_ERR(rclksrc)) | ||
892 | i2s->rclk_srcrate = clk_get_rate(rclksrc); | ||
893 | |||
894 | psr = i2s->rclk_srcrate / i2s->frmclk / rfs; | 900 | psr = i2s->rclk_srcrate / i2s->frmclk / rfs; |
895 | writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); | 901 | writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); |
896 | dev_dbg(&i2s->pdev->dev, | 902 | dev_dbg(&i2s->pdev->dev, |
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 59e250cc2e9d..e819e965e1db 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c | |||
@@ -1526,14 +1526,14 @@ int rsnd_kctrl_new(struct rsnd_mod *mod, | |||
1526 | int ret; | 1526 | int ret; |
1527 | 1527 | ||
1528 | /* | 1528 | /* |
1529 | * 1) Avoid duplicate register (ex. MIXer case) | 1529 | * 1) Avoid duplicate register for DVC with MIX case |
1530 | * 2) re-register if card was rebinded | 1530 | * 2) Allow duplicate register for MIX |
1531 | * 3) re-register if card was rebinded | ||
1531 | */ | 1532 | */ |
1532 | list_for_each_entry(kctrl, &card->controls, list) { | 1533 | list_for_each_entry(kctrl, &card->controls, list) { |
1533 | struct rsnd_kctrl_cfg *c = kctrl->private_data; | 1534 | struct rsnd_kctrl_cfg *c = kctrl->private_data; |
1534 | 1535 | ||
1535 | if (strcmp(kctrl->id.name, name) == 0 && | 1536 | if (c == cfg) |
1536 | c->mod == mod) | ||
1537 | return 0; | 1537 | return 0; |
1538 | } | 1538 | } |
1539 | 1539 | ||
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 45ef295743ec..f5afab631abb 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c | |||
@@ -286,7 +286,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod, | |||
286 | if (rsnd_ssi_is_multi_slave(mod, io)) | 286 | if (rsnd_ssi_is_multi_slave(mod, io)) |
287 | return 0; | 287 | return 0; |
288 | 288 | ||
289 | if (ssi->usrcnt > 1) { | 289 | if (ssi->usrcnt > 0) { |
290 | if (ssi->rate != rate) { | 290 | if (ssi->rate != rate) { |
291 | dev_err(dev, "SSI parent/child should use same rate\n"); | 291 | dev_err(dev, "SSI parent/child should use same rate\n"); |
292 | return -EINVAL; | 292 | return -EINVAL; |
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c index c5934adcfd01..c74991dd18ab 100644 --- a/sound/soc/sh/rcar/ssiu.c +++ b/sound/soc/sh/rcar/ssiu.c | |||
@@ -79,7 +79,7 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod, | |||
79 | break; | 79 | break; |
80 | case 9: | 80 | case 9: |
81 | for (i = 0; i < 4; i++) | 81 | for (i = 0; i < 4; i++) |
82 | rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << (id * 4)); | 82 | rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << 4); |
83 | break; | 83 | break; |
84 | } | 84 | } |
85 | 85 | ||
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index aae450ba4f08..50617db05c46 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -735,12 +735,17 @@ static struct snd_soc_component *soc_find_component( | |||
735 | const struct device_node *of_node, const char *name) | 735 | const struct device_node *of_node, const char *name) |
736 | { | 736 | { |
737 | struct snd_soc_component *component; | 737 | struct snd_soc_component *component; |
738 | struct device_node *component_of_node; | ||
738 | 739 | ||
739 | lockdep_assert_held(&client_mutex); | 740 | lockdep_assert_held(&client_mutex); |
740 | 741 | ||
741 | for_each_component(component) { | 742 | for_each_component(component) { |
742 | if (of_node) { | 743 | if (of_node) { |
743 | if (component->dev->of_node == of_node) | 744 | component_of_node = component->dev->of_node; |
745 | if (!component_of_node && component->dev->parent) | ||
746 | component_of_node = component->dev->parent->of_node; | ||
747 | |||
748 | if (component_of_node == of_node) | ||
744 | return component; | 749 | return component; |
745 | } else if (name && strcmp(component->name, name) == 0) { | 750 | } else if (name && strcmp(component->name, name) == 0) { |
746 | return component; | 751 | return component; |
@@ -951,7 +956,7 @@ static void soc_remove_dai(struct snd_soc_dai *dai, int order) | |||
951 | { | 956 | { |
952 | int err; | 957 | int err; |
953 | 958 | ||
954 | if (!dai || !dai->probed || | 959 | if (!dai || !dai->probed || !dai->driver || |
955 | dai->driver->remove_order != order) | 960 | dai->driver->remove_order != order) |
956 | return; | 961 | return; |
957 | 962 | ||
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 2c4c13419539..20bad755888b 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -70,12 +70,16 @@ static int dapm_up_seq[] = { | |||
70 | [snd_soc_dapm_clock_supply] = 1, | 70 | [snd_soc_dapm_clock_supply] = 1, |
71 | [snd_soc_dapm_supply] = 2, | 71 | [snd_soc_dapm_supply] = 2, |
72 | [snd_soc_dapm_micbias] = 3, | 72 | [snd_soc_dapm_micbias] = 3, |
73 | [snd_soc_dapm_vmid] = 3, | ||
73 | [snd_soc_dapm_dai_link] = 2, | 74 | [snd_soc_dapm_dai_link] = 2, |
74 | [snd_soc_dapm_dai_in] = 4, | 75 | [snd_soc_dapm_dai_in] = 4, |
75 | [snd_soc_dapm_dai_out] = 4, | 76 | [snd_soc_dapm_dai_out] = 4, |
76 | [snd_soc_dapm_aif_in] = 4, | 77 | [snd_soc_dapm_aif_in] = 4, |
77 | [snd_soc_dapm_aif_out] = 4, | 78 | [snd_soc_dapm_aif_out] = 4, |
78 | [snd_soc_dapm_mic] = 5, | 79 | [snd_soc_dapm_mic] = 5, |
80 | [snd_soc_dapm_siggen] = 5, | ||
81 | [snd_soc_dapm_input] = 5, | ||
82 | [snd_soc_dapm_output] = 5, | ||
79 | [snd_soc_dapm_mux] = 6, | 83 | [snd_soc_dapm_mux] = 6, |
80 | [snd_soc_dapm_demux] = 6, | 84 | [snd_soc_dapm_demux] = 6, |
81 | [snd_soc_dapm_dac] = 7, | 85 | [snd_soc_dapm_dac] = 7, |
@@ -83,11 +87,19 @@ static int dapm_up_seq[] = { | |||
83 | [snd_soc_dapm_mixer] = 8, | 87 | [snd_soc_dapm_mixer] = 8, |
84 | [snd_soc_dapm_mixer_named_ctl] = 8, | 88 | [snd_soc_dapm_mixer_named_ctl] = 8, |
85 | [snd_soc_dapm_pga] = 9, | 89 | [snd_soc_dapm_pga] = 9, |
90 | [snd_soc_dapm_buffer] = 9, | ||
91 | [snd_soc_dapm_scheduler] = 9, | ||
92 | [snd_soc_dapm_effect] = 9, | ||
93 | [snd_soc_dapm_src] = 9, | ||
94 | [snd_soc_dapm_asrc] = 9, | ||
95 | [snd_soc_dapm_encoder] = 9, | ||
96 | [snd_soc_dapm_decoder] = 9, | ||
86 | [snd_soc_dapm_adc] = 10, | 97 | [snd_soc_dapm_adc] = 10, |
87 | [snd_soc_dapm_out_drv] = 11, | 98 | [snd_soc_dapm_out_drv] = 11, |
88 | [snd_soc_dapm_hp] = 11, | 99 | [snd_soc_dapm_hp] = 11, |
89 | [snd_soc_dapm_spk] = 11, | 100 | [snd_soc_dapm_spk] = 11, |
90 | [snd_soc_dapm_line] = 11, | 101 | [snd_soc_dapm_line] = 11, |
102 | [snd_soc_dapm_sink] = 11, | ||
91 | [snd_soc_dapm_kcontrol] = 12, | 103 | [snd_soc_dapm_kcontrol] = 12, |
92 | [snd_soc_dapm_post] = 13, | 104 | [snd_soc_dapm_post] = 13, |
93 | }; | 105 | }; |
@@ -100,13 +112,25 @@ static int dapm_down_seq[] = { | |||
100 | [snd_soc_dapm_spk] = 3, | 112 | [snd_soc_dapm_spk] = 3, |
101 | [snd_soc_dapm_line] = 3, | 113 | [snd_soc_dapm_line] = 3, |
102 | [snd_soc_dapm_out_drv] = 3, | 114 | [snd_soc_dapm_out_drv] = 3, |
115 | [snd_soc_dapm_sink] = 3, | ||
103 | [snd_soc_dapm_pga] = 4, | 116 | [snd_soc_dapm_pga] = 4, |
117 | [snd_soc_dapm_buffer] = 4, | ||
118 | [snd_soc_dapm_scheduler] = 4, | ||
119 | [snd_soc_dapm_effect] = 4, | ||
120 | [snd_soc_dapm_src] = 4, | ||
121 | [snd_soc_dapm_asrc] = 4, | ||
122 | [snd_soc_dapm_encoder] = 4, | ||
123 | [snd_soc_dapm_decoder] = 4, | ||
104 | [snd_soc_dapm_switch] = 5, | 124 | [snd_soc_dapm_switch] = 5, |
105 | [snd_soc_dapm_mixer_named_ctl] = 5, | 125 | [snd_soc_dapm_mixer_named_ctl] = 5, |
106 | [snd_soc_dapm_mixer] = 5, | 126 | [snd_soc_dapm_mixer] = 5, |
107 | [snd_soc_dapm_dac] = 6, | 127 | [snd_soc_dapm_dac] = 6, |
108 | [snd_soc_dapm_mic] = 7, | 128 | [snd_soc_dapm_mic] = 7, |
129 | [snd_soc_dapm_siggen] = 7, | ||
130 | [snd_soc_dapm_input] = 7, | ||
131 | [snd_soc_dapm_output] = 7, | ||
109 | [snd_soc_dapm_micbias] = 8, | 132 | [snd_soc_dapm_micbias] = 8, |
133 | [snd_soc_dapm_vmid] = 8, | ||
110 | [snd_soc_dapm_mux] = 9, | 134 | [snd_soc_dapm_mux] = 9, |
111 | [snd_soc_dapm_demux] = 9, | 135 | [snd_soc_dapm_demux] = 9, |
112 | [snd_soc_dapm_aif_in] = 10, | 136 | [snd_soc_dapm_aif_in] = 10, |
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 045ef136903d..731b963b6995 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c | |||
@@ -502,6 +502,7 @@ static void remove_dai(struct snd_soc_component *comp, | |||
502 | { | 502 | { |
503 | struct snd_soc_dai_driver *dai_drv = | 503 | struct snd_soc_dai_driver *dai_drv = |
504 | container_of(dobj, struct snd_soc_dai_driver, dobj); | 504 | container_of(dobj, struct snd_soc_dai_driver, dobj); |
505 | struct snd_soc_dai *dai; | ||
505 | 506 | ||
506 | if (pass != SOC_TPLG_PASS_PCM_DAI) | 507 | if (pass != SOC_TPLG_PASS_PCM_DAI) |
507 | return; | 508 | return; |
@@ -509,6 +510,10 @@ static void remove_dai(struct snd_soc_component *comp, | |||
509 | if (dobj->ops && dobj->ops->dai_unload) | 510 | if (dobj->ops && dobj->ops->dai_unload) |
510 | dobj->ops->dai_unload(comp, dobj); | 511 | dobj->ops->dai_unload(comp, dobj); |
511 | 512 | ||
513 | list_for_each_entry(dai, &comp->dai_list, list) | ||
514 | if (dai->driver == dai_drv) | ||
515 | dai->driver = NULL; | ||
516 | |||
512 | kfree(dai_drv->name); | 517 | kfree(dai_drv->name); |
513 | list_del(&dobj->list); | 518 | list_del(&dobj->list); |
514 | kfree(dai_drv); | 519 | kfree(dai_drv); |
@@ -2482,6 +2487,7 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp, | |||
2482 | struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id) | 2487 | struct snd_soc_tplg_ops *ops, const struct firmware *fw, u32 id) |
2483 | { | 2488 | { |
2484 | struct soc_tplg tplg; | 2489 | struct soc_tplg tplg; |
2490 | int ret; | ||
2485 | 2491 | ||
2486 | /* setup parsing context */ | 2492 | /* setup parsing context */ |
2487 | memset(&tplg, 0, sizeof(tplg)); | 2493 | memset(&tplg, 0, sizeof(tplg)); |
@@ -2495,7 +2501,12 @@ int snd_soc_tplg_component_load(struct snd_soc_component *comp, | |||
2495 | tplg.bytes_ext_ops = ops->bytes_ext_ops; | 2501 | tplg.bytes_ext_ops = ops->bytes_ext_ops; |
2496 | tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count; | 2502 | tplg.bytes_ext_ops_count = ops->bytes_ext_ops_count; |
2497 | 2503 | ||
2498 | return soc_tplg_load(&tplg); | 2504 | ret = soc_tplg_load(&tplg); |
2505 | /* free the created components if fail to load topology */ | ||
2506 | if (ret) | ||
2507 | snd_soc_tplg_component_remove(comp, SND_SOC_TPLG_INDEX_ALL); | ||
2508 | |||
2509 | return ret; | ||
2499 | } | 2510 | } |
2500 | EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); | 2511 | EXPORT_SYMBOL_GPL(snd_soc_tplg_component_load); |
2501 | 2512 | ||
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 382847154227..db114f3977e0 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
@@ -314,6 +314,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum, | |||
314 | return 0; | 314 | return 0; |
315 | } | 315 | } |
316 | 316 | ||
317 | /* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk | ||
318 | * applies. Returns 1 if a quirk was found. | ||
319 | */ | ||
317 | static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, | 320 | static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, |
318 | struct usb_device *dev, | 321 | struct usb_device *dev, |
319 | struct usb_interface_descriptor *altsd, | 322 | struct usb_interface_descriptor *altsd, |
@@ -384,7 +387,7 @@ add_sync_ep: | |||
384 | 387 | ||
385 | subs->data_endpoint->sync_master = subs->sync_endpoint; | 388 | subs->data_endpoint->sync_master = subs->sync_endpoint; |
386 | 389 | ||
387 | return 0; | 390 | return 1; |
388 | } | 391 | } |
389 | 392 | ||
390 | static int set_sync_endpoint(struct snd_usb_substream *subs, | 393 | static int set_sync_endpoint(struct snd_usb_substream *subs, |
@@ -423,6 +426,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs, | |||
423 | if (err < 0) | 426 | if (err < 0) |
424 | return err; | 427 | return err; |
425 | 428 | ||
429 | /* endpoint set by quirk */ | ||
430 | if (err > 0) | ||
431 | return 0; | ||
432 | |||
426 | if (altsd->bNumEndpoints < 2) | 433 | if (altsd->bNumEndpoints < 2) |
427 | return 0; | 434 | return 0; |
428 | 435 | ||
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index bb8372833fc2..7e65fe853ee3 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -1567,6 +1567,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, | |||
1567 | case 0x20b1: /* XMOS based devices */ | 1567 | case 0x20b1: /* XMOS based devices */ |
1568 | case 0x152a: /* Thesycon devices */ | 1568 | case 0x152a: /* Thesycon devices */ |
1569 | case 0x25ce: /* Mytek devices */ | 1569 | case 0x25ce: /* Mytek devices */ |
1570 | case 0x2ab6: /* T+A devices */ | ||
1570 | if (fp->dsd_raw) | 1571 | if (fp->dsd_raw) |
1571 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; | 1572 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; |
1572 | break; | 1573 | break; |
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 897483457bf0..f7261fad45c1 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c | |||
@@ -297,10 +297,8 @@ char *get_fdinfo(int fd, const char *key) | |||
297 | snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd); | 297 | snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd); |
298 | 298 | ||
299 | fdi = fopen(path, "r"); | 299 | fdi = fopen(path, "r"); |
300 | if (!fdi) { | 300 | if (!fdi) |
301 | p_err("can't open fdinfo: %s", strerror(errno)); | ||
302 | return NULL; | 301 | return NULL; |
303 | } | ||
304 | 302 | ||
305 | while ((n = getline(&line, &line_n, fdi)) > 0) { | 303 | while ((n = getline(&line, &line_n, fdi)) > 0) { |
306 | char *value; | 304 | char *value; |
@@ -313,7 +311,6 @@ char *get_fdinfo(int fd, const char *key) | |||
313 | 311 | ||
314 | value = strchr(line, '\t'); | 312 | value = strchr(line, '\t'); |
315 | if (!value || !value[1]) { | 313 | if (!value || !value[1]) { |
316 | p_err("malformed fdinfo!?"); | ||
317 | free(line); | 314 | free(line); |
318 | return NULL; | 315 | return NULL; |
319 | } | 316 | } |
@@ -326,7 +323,6 @@ char *get_fdinfo(int fd, const char *key) | |||
326 | return line; | 323 | return line; |
327 | } | 324 | } |
328 | 325 | ||
329 | p_err("key '%s' not found in fdinfo", key); | ||
330 | free(line); | 326 | free(line); |
331 | fclose(fdi); | 327 | fclose(fdi); |
332 | return NULL; | 328 | return NULL; |
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 2037e3dc864b..1ef1ee2280a2 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c | |||
@@ -347,6 +347,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val, | |||
347 | return argv + i; | 347 | return argv + i; |
348 | } | 348 | } |
349 | 349 | ||
350 | /* on per cpu maps we must copy the provided value on all value instances */ | ||
351 | static void fill_per_cpu_value(struct bpf_map_info *info, void *value) | ||
352 | { | ||
353 | unsigned int i, n, step; | ||
354 | |||
355 | if (!map_is_per_cpu(info->type)) | ||
356 | return; | ||
357 | |||
358 | n = get_possible_cpus(); | ||
359 | step = round_up(info->value_size, 8); | ||
360 | for (i = 1; i < n; i++) | ||
361 | memcpy(value + i * step, value, info->value_size); | ||
362 | } | ||
363 | |||
350 | static int parse_elem(char **argv, struct bpf_map_info *info, | 364 | static int parse_elem(char **argv, struct bpf_map_info *info, |
351 | void *key, void *value, __u32 key_size, __u32 value_size, | 365 | void *key, void *value, __u32 key_size, __u32 value_size, |
352 | __u32 *flags, __u32 **value_fd) | 366 | __u32 *flags, __u32 **value_fd) |
@@ -426,6 +440,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info, | |||
426 | argv = parse_bytes(argv, "value", value, value_size); | 440 | argv = parse_bytes(argv, "value", value, value_size); |
427 | if (!argv) | 441 | if (!argv) |
428 | return -1; | 442 | return -1; |
443 | |||
444 | fill_per_cpu_value(info, value); | ||
429 | } | 445 | } |
430 | 446 | ||
431 | return parse_elem(argv, info, key, NULL, key_size, value_size, | 447 | return parse_elem(argv, info, key, NULL, key_size, value_size, |
@@ -497,10 +513,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) | |||
497 | jsonw_uint_field(json_wtr, "owner_prog_type", | 513 | jsonw_uint_field(json_wtr, "owner_prog_type", |
498 | prog_type); | 514 | prog_type); |
499 | } | 515 | } |
500 | if (atoi(owner_jited)) | 516 | if (owner_jited) |
501 | jsonw_bool_field(json_wtr, "owner_jited", true); | 517 | jsonw_bool_field(json_wtr, "owner_jited", |
502 | else | 518 | !!atoi(owner_jited)); |
503 | jsonw_bool_field(json_wtr, "owner_jited", false); | ||
504 | 519 | ||
505 | free(owner_prog_type); | 520 | free(owner_prog_type); |
506 | free(owner_jited); | 521 | free(owner_jited); |
@@ -553,7 +568,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) | |||
553 | char *owner_prog_type = get_fdinfo(fd, "owner_prog_type"); | 568 | char *owner_prog_type = get_fdinfo(fd, "owner_prog_type"); |
554 | char *owner_jited = get_fdinfo(fd, "owner_jited"); | 569 | char *owner_jited = get_fdinfo(fd, "owner_jited"); |
555 | 570 | ||
556 | printf("\n\t"); | 571 | if (owner_prog_type || owner_jited) |
572 | printf("\n\t"); | ||
557 | if (owner_prog_type) { | 573 | if (owner_prog_type) { |
558 | unsigned int prog_type = atoi(owner_prog_type); | 574 | unsigned int prog_type = atoi(owner_prog_type); |
559 | 575 | ||
@@ -563,10 +579,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) | |||
563 | else | 579 | else |
564 | printf("owner_prog_type %d ", prog_type); | 580 | printf("owner_prog_type %d ", prog_type); |
565 | } | 581 | } |
566 | if (atoi(owner_jited)) | 582 | if (owner_jited) |
567 | printf("owner jited"); | 583 | printf("owner%s jited", |
568 | else | 584 | atoi(owner_jited) ? "" : " not"); |
569 | printf("owner not jited"); | ||
570 | 585 | ||
571 | free(owner_prog_type); | 586 | free(owner_prog_type); |
572 | free(owner_jited); | 587 | free(owner_jited); |
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 2d1bb7d6ff51..b54ed82b9589 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c | |||
@@ -78,13 +78,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size) | |||
78 | 78 | ||
79 | static int prog_fd_by_tag(unsigned char *tag) | 79 | static int prog_fd_by_tag(unsigned char *tag) |
80 | { | 80 | { |
81 | struct bpf_prog_info info = {}; | ||
82 | __u32 len = sizeof(info); | ||
83 | unsigned int id = 0; | 81 | unsigned int id = 0; |
84 | int err; | 82 | int err; |
85 | int fd; | 83 | int fd; |
86 | 84 | ||
87 | while (true) { | 85 | while (true) { |
86 | struct bpf_prog_info info = {}; | ||
87 | __u32 len = sizeof(info); | ||
88 | |||
88 | err = bpf_prog_get_next_id(id, &id); | 89 | err = bpf_prog_get_next_id(id, &id); |
89 | if (err) { | 90 | if (err) { |
90 | p_err("%s", strerror(errno)); | 91 | p_err("%s", strerror(errno)); |
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c index 3040830d7797..84545666a09c 100644 --- a/tools/iio/iio_generic_buffer.c +++ b/tools/iio/iio_generic_buffer.c | |||
@@ -330,7 +330,7 @@ static const struct option longopts[] = { | |||
330 | 330 | ||
331 | int main(int argc, char **argv) | 331 | int main(int argc, char **argv) |
332 | { | 332 | { |
333 | unsigned long long num_loops = 2; | 333 | long long num_loops = 2; |
334 | unsigned long timedelay = 1000000; | 334 | unsigned long timedelay = 1000000; |
335 | unsigned long buf_len = 128; | 335 | unsigned long buf_len = 128; |
336 | 336 | ||
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h index fd92ce8388fc..57aaeaf8e192 100644 --- a/tools/include/uapi/asm/bitsperlong.h +++ b/tools/include/uapi/asm/bitsperlong.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #include "../../arch/ia64/include/uapi/asm/bitsperlong.h" | 15 | #include "../../arch/ia64/include/uapi/asm/bitsperlong.h" |
16 | #elif defined(__riscv) | 16 | #elif defined(__riscv) |
17 | #include "../../arch/riscv/include/uapi/asm/bitsperlong.h" | 17 | #include "../../arch/riscv/include/uapi/asm/bitsperlong.h" |
18 | #elif defined(__alpha__) | ||
19 | #include "../../arch/alpha/include/uapi/asm/bitsperlong.h" | ||
18 | #else | 20 | #else |
19 | #include <asm-generic/bitsperlong.h> | 21 | #include <asm-generic/bitsperlong.h> |
20 | #endif | 22 | #endif |
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h index f6052e70bf40..a55cb8b10165 100644 --- a/tools/include/uapi/linux/in.h +++ b/tools/include/uapi/linux/in.h | |||
@@ -268,7 +268,7 @@ struct sockaddr_in { | |||
268 | #define IN_MULTICAST(a) IN_CLASSD(a) | 268 | #define IN_MULTICAST(a) IN_CLASSD(a) |
269 | #define IN_MULTICAST_NET 0xe0000000 | 269 | #define IN_MULTICAST_NET 0xe0000000 |
270 | 270 | ||
271 | #define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) | 271 | #define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff) |
272 | #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) | 272 | #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) |
273 | 273 | ||
274 | #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) | 274 | #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) |
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt index 095aebdc5bb7..e6150f21267d 100644 --- a/tools/perf/Documentation/perf-c2c.txt +++ b/tools/perf/Documentation/perf-c2c.txt | |||
@@ -19,8 +19,11 @@ C2C stands for Cache To Cache. | |||
19 | The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows | 19 | The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows |
20 | you to track down the cacheline contentions. | 20 | you to track down the cacheline contentions. |
21 | 21 | ||
22 | The tool is based on x86's load latency and precise store facility events | 22 | On x86, the tool is based on load latency and precise store facility events |
23 | provided by Intel CPUs. These events provide: | 23 | provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling |
24 | with thresholding feature. | ||
25 | |||
26 | These events provide: | ||
24 | - memory address of the access | 27 | - memory address of the access |
25 | - type of the access (load and store details) | 28 | - type of the access (load and store details) |
26 | - latency (in cycles) of the load access | 29 | - latency (in cycles) of the load access |
@@ -46,7 +49,7 @@ RECORD OPTIONS | |||
46 | 49 | ||
47 | -l:: | 50 | -l:: |
48 | --ldlat:: | 51 | --ldlat:: |
49 | Configure mem-loads latency. | 52 | Configure mem-loads latency. (x86 only) |
50 | 53 | ||
51 | -k:: | 54 | -k:: |
52 | --all-kernel:: | 55 | --all-kernel:: |
@@ -119,11 +122,16 @@ Following perf record options are configured by default: | |||
119 | -W,-d,--phys-data,--sample-cpu | 122 | -W,-d,--phys-data,--sample-cpu |
120 | 123 | ||
121 | Unless specified otherwise with '-e' option, following events are monitored by | 124 | Unless specified otherwise with '-e' option, following events are monitored by |
122 | default: | 125 | default on x86: |
123 | 126 | ||
124 | cpu/mem-loads,ldlat=30/P | 127 | cpu/mem-loads,ldlat=30/P |
125 | cpu/mem-stores/P | 128 | cpu/mem-stores/P |
126 | 129 | ||
130 | and following on PowerPC: | ||
131 | |||
132 | cpu/mem-loads/ | ||
133 | cpu/mem-stores/ | ||
134 | |||
127 | User can pass any 'perf record' option behind '--' mark, like (to enable | 135 | User can pass any 'perf record' option behind '--' mark, like (to enable |
128 | callchains and system wide monitoring): | 136 | callchains and system wide monitoring): |
129 | 137 | ||
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt index f8d2167cf3e7..199ea0f0a6c0 100644 --- a/tools/perf/Documentation/perf-mem.txt +++ b/tools/perf/Documentation/perf-mem.txt | |||
@@ -82,7 +82,7 @@ RECORD OPTIONS | |||
82 | Be more verbose (show counter open errors, etc) | 82 | Be more verbose (show counter open errors, etc) |
83 | 83 | ||
84 | --ldlat <n>:: | 84 | --ldlat <n>:: |
85 | Specify desired latency for loads event. | 85 | Specify desired latency for loads event. (x86 only) |
86 | 86 | ||
87 | In addition, for report all perf report options are valid, and for record | 87 | In addition, for report all perf report options are valid, and for record |
88 | all perf record options. | 88 | all perf record options. |
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build index 2e6595310420..ba98bd006488 100644 --- a/tools/perf/arch/powerpc/util/Build +++ b/tools/perf/arch/powerpc/util/Build | |||
@@ -2,6 +2,7 @@ libperf-y += header.o | |||
2 | libperf-y += sym-handling.o | 2 | libperf-y += sym-handling.o |
3 | libperf-y += kvm-stat.o | 3 | libperf-y += kvm-stat.o |
4 | libperf-y += perf_regs.o | 4 | libperf-y += perf_regs.o |
5 | libperf-y += mem-events.o | ||
5 | 6 | ||
6 | libperf-$(CONFIG_DWARF) += dwarf-regs.o | 7 | libperf-$(CONFIG_DWARF) += dwarf-regs.o |
7 | libperf-$(CONFIG_DWARF) += skip-callchain-idx.o | 8 | libperf-$(CONFIG_DWARF) += skip-callchain-idx.o |
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c new file mode 100644 index 000000000000..d08311f04e95 --- /dev/null +++ b/tools/perf/arch/powerpc/util/mem-events.c | |||
@@ -0,0 +1,11 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include "mem-events.h" | ||
3 | |||
4 | /* PowerPC does not support 'ldlat' parameter. */ | ||
5 | char *perf_mem_events__name(int i) | ||
6 | { | ||
7 | if (i == PERF_MEM_EVENTS__LOAD) | ||
8 | return (char *) "cpu/mem-loads/"; | ||
9 | |||
10 | return (char *) "cpu/mem-stores/"; | ||
11 | } | ||
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ed4583128b9c..b36061cd1ab8 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c | |||
@@ -2514,19 +2514,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); | |||
2514 | 2514 | ||
2515 | static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist) | 2515 | static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist) |
2516 | { | 2516 | { |
2517 | struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname"); | 2517 | bool found = false; |
2518 | struct perf_evsel *evsel, *tmp; | ||
2519 | struct parse_events_error err = { .idx = 0, }; | ||
2520 | int ret = parse_events(evlist, "probe:vfs_getname*", &err); | ||
2518 | 2521 | ||
2519 | if (IS_ERR(evsel)) | 2522 | if (ret) |
2520 | return false; | 2523 | return false; |
2521 | 2524 | ||
2522 | if (perf_evsel__field(evsel, "pathname") == NULL) { | 2525 | evlist__for_each_entry_safe(evlist, evsel, tmp) { |
2526 | if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname")) | ||
2527 | continue; | ||
2528 | |||
2529 | if (perf_evsel__field(evsel, "pathname")) { | ||
2530 | evsel->handler = trace__vfs_getname; | ||
2531 | found = true; | ||
2532 | continue; | ||
2533 | } | ||
2534 | |||
2535 | list_del_init(&evsel->node); | ||
2536 | evsel->evlist = NULL; | ||
2523 | perf_evsel__delete(evsel); | 2537 | perf_evsel__delete(evsel); |
2524 | return false; | ||
2525 | } | 2538 | } |
2526 | 2539 | ||
2527 | evsel->handler = trace__vfs_getname; | 2540 | return found; |
2528 | perf_evlist__add(evlist, evsel); | ||
2529 | return true; | ||
2530 | } | 2541 | } |
2531 | 2542 | ||
2532 | static struct perf_evsel *perf_evsel__new_pgfault(u64 config) | 2543 | static struct perf_evsel *perf_evsel__new_pgfault(u64 config) |
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py index 44090a9a19f3..e952127e4fb0 100644 --- a/tools/perf/tests/attr.py +++ b/tools/perf/tests/attr.py | |||
@@ -1,6 +1,8 @@ | |||
1 | #! /usr/bin/python | 1 | #! /usr/bin/python |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | 3 | ||
4 | from __future__ import print_function | ||
5 | |||
4 | import os | 6 | import os |
5 | import sys | 7 | import sys |
6 | import glob | 8 | import glob |
@@ -8,7 +10,11 @@ import optparse | |||
8 | import tempfile | 10 | import tempfile |
9 | import logging | 11 | import logging |
10 | import shutil | 12 | import shutil |
11 | import ConfigParser | 13 | |
14 | try: | ||
15 | import configparser | ||
16 | except ImportError: | ||
17 | import ConfigParser as configparser | ||
12 | 18 | ||
13 | def data_equal(a, b): | 19 | def data_equal(a, b): |
14 | # Allow multiple values in assignment separated by '|' | 20 | # Allow multiple values in assignment separated by '|' |
@@ -100,20 +106,20 @@ class Event(dict): | |||
100 | def equal(self, other): | 106 | def equal(self, other): |
101 | for t in Event.terms: | 107 | for t in Event.terms: |
102 | log.debug(" [%s] %s %s" % (t, self[t], other[t])); | 108 | log.debug(" [%s] %s %s" % (t, self[t], other[t])); |
103 | if not self.has_key(t) or not other.has_key(t): | 109 | if t not in self or t not in other: |
104 | return False | 110 | return False |
105 | if not data_equal(self[t], other[t]): | 111 | if not data_equal(self[t], other[t]): |
106 | return False | 112 | return False |
107 | return True | 113 | return True |
108 | 114 | ||
109 | def optional(self): | 115 | def optional(self): |
110 | if self.has_key('optional') and self['optional'] == '1': | 116 | if 'optional' in self and self['optional'] == '1': |
111 | return True | 117 | return True |
112 | return False | 118 | return False |
113 | 119 | ||
114 | def diff(self, other): | 120 | def diff(self, other): |
115 | for t in Event.terms: | 121 | for t in Event.terms: |
116 | if not self.has_key(t) or not other.has_key(t): | 122 | if t not in self or t not in other: |
117 | continue | 123 | continue |
118 | if not data_equal(self[t], other[t]): | 124 | if not data_equal(self[t], other[t]): |
119 | log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) | 125 | log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) |
@@ -134,7 +140,7 @@ class Event(dict): | |||
134 | # - expected values assignments | 140 | # - expected values assignments |
135 | class Test(object): | 141 | class Test(object): |
136 | def __init__(self, path, options): | 142 | def __init__(self, path, options): |
137 | parser = ConfigParser.SafeConfigParser() | 143 | parser = configparser.SafeConfigParser() |
138 | parser.read(path) | 144 | parser.read(path) |
139 | 145 | ||
140 | log.warning("running '%s'" % path) | 146 | log.warning("running '%s'" % path) |
@@ -193,7 +199,7 @@ class Test(object): | |||
193 | return True | 199 | return True |
194 | 200 | ||
195 | def load_events(self, path, events): | 201 | def load_events(self, path, events): |
196 | parser_event = ConfigParser.SafeConfigParser() | 202 | parser_event = configparser.SafeConfigParser() |
197 | parser_event.read(path) | 203 | parser_event.read(path) |
198 | 204 | ||
199 | # The event record section header contains 'event' word, | 205 | # The event record section header contains 'event' word, |
@@ -207,7 +213,7 @@ class Test(object): | |||
207 | # Read parent event if there's any | 213 | # Read parent event if there's any |
208 | if (':' in section): | 214 | if (':' in section): |
209 | base = section[section.index(':') + 1:] | 215 | base = section[section.index(':') + 1:] |
210 | parser_base = ConfigParser.SafeConfigParser() | 216 | parser_base = configparser.SafeConfigParser() |
211 | parser_base.read(self.test_dir + '/' + base) | 217 | parser_base.read(self.test_dir + '/' + base) |
212 | base_items = parser_base.items('event') | 218 | base_items = parser_base.items('event') |
213 | 219 | ||
@@ -322,9 +328,9 @@ def run_tests(options): | |||
322 | for f in glob.glob(options.test_dir + '/' + options.test): | 328 | for f in glob.glob(options.test_dir + '/' + options.test): |
323 | try: | 329 | try: |
324 | Test(f, options).run() | 330 | Test(f, options).run() |
325 | except Unsup, obj: | 331 | except Unsup as obj: |
326 | log.warning("unsupp %s" % obj.getMsg()) | 332 | log.warning("unsupp %s" % obj.getMsg()) |
327 | except Notest, obj: | 333 | except Notest as obj: |
328 | log.warning("skipped %s" % obj.getMsg()) | 334 | log.warning("skipped %s" % obj.getMsg()) |
329 | 335 | ||
330 | def setup_log(verbose): | 336 | def setup_log(verbose): |
@@ -363,7 +369,7 @@ def main(): | |||
363 | parser.add_option("-p", "--perf", | 369 | parser.add_option("-p", "--perf", |
364 | action="store", type="string", dest="perf") | 370 | action="store", type="string", dest="perf") |
365 | parser.add_option("-v", "--verbose", | 371 | parser.add_option("-v", "--verbose", |
366 | action="count", dest="verbose") | 372 | default=0, action="count", dest="verbose") |
367 | 373 | ||
368 | options, args = parser.parse_args() | 374 | options, args = parser.parse_args() |
369 | if args: | 375 | if args: |
@@ -373,7 +379,7 @@ def main(): | |||
373 | setup_log(options.verbose) | 379 | setup_log(options.verbose) |
374 | 380 | ||
375 | if not options.test_dir: | 381 | if not options.test_dir: |
376 | print 'FAILED no -d option specified' | 382 | print('FAILED no -d option specified') |
377 | sys.exit(-1) | 383 | sys.exit(-1) |
378 | 384 | ||
379 | if not options.test: | 385 | if not options.test: |
@@ -382,8 +388,8 @@ def main(): | |||
382 | try: | 388 | try: |
383 | run_tests(options) | 389 | run_tests(options) |
384 | 390 | ||
385 | except Fail, obj: | 391 | except Fail as obj: |
386 | print "FAILED %s" % obj.getMsg(); | 392 | print("FAILED %s" % obj.getMsg()) |
387 | sys.exit(-1) | 393 | sys.exit(-1) |
388 | 394 | ||
389 | sys.exit(0) | 395 | sys.exit(0) |
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c index 5f8501c68da4..5cbba70bcdd0 100644 --- a/tools/perf/tests/evsel-tp-sched.c +++ b/tools/perf/tests/evsel-tp-sched.c | |||
@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, | |||
17 | return -1; | 17 | return -1; |
18 | } | 18 | } |
19 | 19 | ||
20 | is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED); | 20 | is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED); |
21 | if (should_be_signed && !is_signed) { | 21 | if (should_be_signed && !is_signed) { |
22 | pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", | 22 | pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", |
23 | evsel->name, name, is_signed, should_be_signed); | 23 | evsel->name, name, is_signed, should_be_signed); |
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp index 89512504551b..39c0004f2886 100644 --- a/tools/perf/util/c++/clang.cpp +++ b/tools/perf/util/c++/clang.cpp | |||
@@ -160,7 +160,7 @@ getBPFObjectFromModule(llvm::Module *Module) | |||
160 | } | 160 | } |
161 | PM.run(*Module); | 161 | PM.run(*Module); |
162 | 162 | ||
163 | return std::move(Buffer); | 163 | return Buffer; |
164 | } | 164 | } |
165 | 165 | ||
166 | } | 166 | } |
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index 93f74d8d3cdd..42c3e5a229d2 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c | |||
@@ -28,7 +28,7 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = { | |||
28 | static char mem_loads_name[100]; | 28 | static char mem_loads_name[100]; |
29 | static bool mem_loads_name__init; | 29 | static bool mem_loads_name__init; |
30 | 30 | ||
31 | char *perf_mem_events__name(int i) | 31 | char * __weak perf_mem_events__name(int i) |
32 | { | 32 | { |
33 | if (i == PERF_MEM_EVENTS__LOAD) { | 33 | if (i == PERF_MEM_EVENTS__LOAD) { |
34 | if (!mem_loads_name__init) { | 34 | if (!mem_loads_name__init) { |
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 66a84d5846c8..dca7dfae69ad 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c | |||
@@ -19,6 +19,20 @@ | |||
19 | #define EM_AARCH64 183 /* ARM 64 bit */ | 19 | #define EM_AARCH64 183 /* ARM 64 bit */ |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifndef ELF32_ST_VISIBILITY | ||
23 | #define ELF32_ST_VISIBILITY(o) ((o) & 0x03) | ||
24 | #endif | ||
25 | |||
26 | /* For ELF64 the definitions are the same. */ | ||
27 | #ifndef ELF64_ST_VISIBILITY | ||
28 | #define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o) | ||
29 | #endif | ||
30 | |||
31 | /* How to extract information held in the st_other field. */ | ||
32 | #ifndef GELF_ST_VISIBILITY | ||
33 | #define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val) | ||
34 | #endif | ||
35 | |||
22 | typedef Elf64_Nhdr GElf_Nhdr; | 36 | typedef Elf64_Nhdr GElf_Nhdr; |
23 | 37 | ||
24 | #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT | 38 | #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT |
@@ -87,6 +101,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym) | |||
87 | return GELF_ST_TYPE(sym->st_info); | 101 | return GELF_ST_TYPE(sym->st_info); |
88 | } | 102 | } |
89 | 103 | ||
104 | static inline uint8_t elf_sym__visibility(const GElf_Sym *sym) | ||
105 | { | ||
106 | return GELF_ST_VISIBILITY(sym->st_other); | ||
107 | } | ||
108 | |||
90 | #ifndef STT_GNU_IFUNC | 109 | #ifndef STT_GNU_IFUNC |
91 | #define STT_GNU_IFUNC 10 | 110 | #define STT_GNU_IFUNC 10 |
92 | #endif | 111 | #endif |
@@ -111,7 +130,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym) | |||
111 | return elf_sym__type(sym) == STT_NOTYPE && | 130 | return elf_sym__type(sym) == STT_NOTYPE && |
112 | sym->st_name != 0 && | 131 | sym->st_name != 0 && |
113 | sym->st_shndx != SHN_UNDEF && | 132 | sym->st_shndx != SHN_UNDEF && |
114 | sym->st_shndx != SHN_ABS; | 133 | sym->st_shndx != SHN_ABS && |
134 | elf_sym__visibility(sym) != STV_HIDDEN && | ||
135 | elf_sym__visibility(sym) != STV_INTERNAL; | ||
115 | } | 136 | } |
116 | 137 | ||
117 | static bool elf_sym__filter(GElf_Sym *sym) | 138 | static bool elf_sym__filter(GElf_Sym *sym) |
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 1a2bd15c5b6e..400ee81a3043 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
@@ -10,6 +10,7 @@ TARGETS += drivers/dma-buf | |||
10 | TARGETS += efivarfs | 10 | TARGETS += efivarfs |
11 | TARGETS += exec | 11 | TARGETS += exec |
12 | TARGETS += filesystems | 12 | TARGETS += filesystems |
13 | TARGETS += filesystems/binderfs | ||
13 | TARGETS += firmware | 14 | TARGETS += firmware |
14 | TARGETS += ftrace | 15 | TARGETS += ftrace |
15 | TARGETS += futex | 16 | TARGETS += futex |
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index 315a44fa32af..84fd6f1bf33e 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h | |||
@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void) | |||
13 | unsigned int start, end, possible_cpus = 0; | 13 | unsigned int start, end, possible_cpus = 0; |
14 | char buff[128]; | 14 | char buff[128]; |
15 | FILE *fp; | 15 | FILE *fp; |
16 | int n; | 16 | int len, n, i, j = 0; |
17 | 17 | ||
18 | fp = fopen(fcpu, "r"); | 18 | fp = fopen(fcpu, "r"); |
19 | if (!fp) { | 19 | if (!fp) { |
@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void) | |||
21 | exit(1); | 21 | exit(1); |
22 | } | 22 | } |
23 | 23 | ||
24 | while (fgets(buff, sizeof(buff), fp)) { | 24 | if (!fgets(buff, sizeof(buff), fp)) { |
25 | n = sscanf(buff, "%u-%u", &start, &end); | 25 | printf("Failed to read %s!\n", fcpu); |
26 | if (n == 0) { | 26 | exit(1); |
27 | printf("Failed to retrieve # possible CPUs!\n"); | 27 | } |
28 | exit(1); | 28 | |
29 | } else if (n == 1) { | 29 | len = strlen(buff); |
30 | end = start; | 30 | for (i = 0; i <= len; i++) { |
31 | if (buff[i] == ',' || buff[i] == '\0') { | ||
32 | buff[i] = '\0'; | ||
33 | n = sscanf(&buff[j], "%u-%u", &start, &end); | ||
34 | if (n <= 0) { | ||
35 | printf("Failed to retrieve # possible CPUs!\n"); | ||
36 | exit(1); | ||
37 | } else if (n == 1) { | ||
38 | end = start; | ||
39 | } | ||
40 | possible_cpus += end - start + 1; | ||
41 | j = i + 1; | ||
31 | } | 42 | } |
32 | possible_cpus = start == 0 ? end + 1 : 0; | ||
33 | break; | ||
34 | } | 43 | } |
44 | |||
35 | fclose(fp); | 45 | fclose(fp); |
36 | 46 | ||
37 | return possible_cpus; | 47 | return possible_cpus; |
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index a0bd04befe87..91420fa83b08 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c | |||
@@ -1881,13 +1881,12 @@ static struct btf_raw_test raw_tests[] = { | |||
1881 | }, | 1881 | }, |
1882 | 1882 | ||
1883 | { | 1883 | { |
1884 | .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)", | 1884 | .descr = "func proto (TYPEDEF=>FUNC_PROTO)", |
1885 | .raw_types = { | 1885 | .raw_types = { |
1886 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | 1886 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ |
1887 | BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ | 1887 | BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ |
1888 | BTF_CONST_ENC(4), /* [3] */ | 1888 | BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */ |
1889 | BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [4] */ | 1889 | BTF_FUNC_PROTO_ENC(0, 2), /* [4] */ |
1890 | BTF_FUNC_PROTO_ENC(0, 2), /* [5] */ | ||
1891 | BTF_FUNC_PROTO_ARG_ENC(0, 1), | 1890 | BTF_FUNC_PROTO_ARG_ENC(0, 1), |
1892 | BTF_FUNC_PROTO_ARG_ENC(0, 2), | 1891 | BTF_FUNC_PROTO_ARG_ENC(0, 2), |
1893 | BTF_END_RAW, | 1892 | BTF_END_RAW, |
@@ -1901,8 +1900,6 @@ static struct btf_raw_test raw_tests[] = { | |||
1901 | .key_type_id = 1, | 1900 | .key_type_id = 1, |
1902 | .value_type_id = 1, | 1901 | .value_type_id = 1, |
1903 | .max_entries = 4, | 1902 | .max_entries = 4, |
1904 | .btf_load_err = true, | ||
1905 | .err_str = "Invalid type_id", | ||
1906 | }, | 1903 | }, |
1907 | 1904 | ||
1908 | { | 1905 | { |
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c index 147e34cfceb7..02d7c871862a 100644 --- a/tools/testing/selftests/bpf/test_lpm_map.c +++ b/tools/testing/selftests/bpf/test_lpm_map.c | |||
@@ -474,6 +474,16 @@ static void test_lpm_delete(void) | |||
474 | assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 && | 474 | assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 && |
475 | errno == ENOENT); | 475 | errno == ENOENT); |
476 | 476 | ||
477 | key->prefixlen = 30; // unused prefix so far | ||
478 | inet_pton(AF_INET, "192.255.0.0", key->data); | ||
479 | assert(bpf_map_delete_elem(map_fd, key) == -1 && | ||
480 | errno == ENOENT); | ||
481 | |||
482 | key->prefixlen = 16; // same prefix as the root node | ||
483 | inet_pton(AF_INET, "192.255.0.0", key->data); | ||
484 | assert(bpf_map_delete_elem(map_fd, key) == -1 && | ||
485 | errno == ENOENT); | ||
486 | |||
477 | /* assert initial lookup */ | 487 | /* assert initial lookup */ |
478 | key->prefixlen = 32; | 488 | key->prefixlen = 32; |
479 | inet_pton(AF_INET, "192.168.0.1", key->data); | 489 | inet_pton(AF_INET, "192.168.0.1", key->data); |
diff --git a/tools/testing/selftests/filesystems/binderfs/.gitignore b/tools/testing/selftests/filesystems/binderfs/.gitignore new file mode 100644 index 000000000000..8a5d9bf63dd4 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/.gitignore | |||
@@ -0,0 +1 @@ | |||
binderfs_test | |||
diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile new file mode 100644 index 000000000000..58cb659b56b4 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | CFLAGS += -I../../../../../usr/include/ | ||
4 | TEST_GEN_PROGS := binderfs_test | ||
5 | |||
6 | include ../../lib.mk | ||
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c new file mode 100644 index 000000000000..8c2ed962e1c7 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c | |||
@@ -0,0 +1,275 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | #define _GNU_SOURCE | ||
4 | #include <errno.h> | ||
5 | #include <fcntl.h> | ||
6 | #include <sched.h> | ||
7 | #include <stdbool.h> | ||
8 | #include <stdio.h> | ||
9 | #include <stdlib.h> | ||
10 | #include <string.h> | ||
11 | #include <sys/ioctl.h> | ||
12 | #include <sys/mount.h> | ||
13 | #include <sys/stat.h> | ||
14 | #include <sys/types.h> | ||
15 | #include <unistd.h> | ||
16 | #include <linux/android/binder.h> | ||
17 | #include <linux/android/binderfs.h> | ||
18 | #include "../../kselftest.h" | ||
19 | |||
20 | static ssize_t write_nointr(int fd, const void *buf, size_t count) | ||
21 | { | ||
22 | ssize_t ret; | ||
23 | again: | ||
24 | ret = write(fd, buf, count); | ||
25 | if (ret < 0 && errno == EINTR) | ||
26 | goto again; | ||
27 | |||
28 | return ret; | ||
29 | } | ||
30 | |||
31 | static void write_to_file(const char *filename, const void *buf, size_t count, | ||
32 | int allowed_errno) | ||
33 | { | ||
34 | int fd, saved_errno; | ||
35 | ssize_t ret; | ||
36 | |||
37 | fd = open(filename, O_WRONLY | O_CLOEXEC); | ||
38 | if (fd < 0) | ||
39 | ksft_exit_fail_msg("%s - Failed to open file %s\n", | ||
40 | strerror(errno), filename); | ||
41 | |||
42 | ret = write_nointr(fd, buf, count); | ||
43 | if (ret < 0) { | ||
44 | if (allowed_errno && (errno == allowed_errno)) { | ||
45 | close(fd); | ||
46 | return; | ||
47 | } | ||
48 | |||
49 | goto on_error; | ||
50 | } | ||
51 | |||
52 | if ((size_t)ret != count) | ||
53 | goto on_error; | ||
54 | |||
55 | close(fd); | ||
56 | return; | ||
57 | |||
58 | on_error: | ||
59 | saved_errno = errno; | ||
60 | close(fd); | ||
61 | errno = saved_errno; | ||
62 | |||
63 | if (ret < 0) | ||
64 | ksft_exit_fail_msg("%s - Failed to write to file %s\n", | ||
65 | strerror(errno), filename); | ||
66 | |||
67 | ksft_exit_fail_msg("Failed to write to file %s\n", filename); | ||
68 | } | ||
69 | |||
70 | static void change_to_userns(void) | ||
71 | { | ||
72 | int ret; | ||
73 | uid_t uid; | ||
74 | gid_t gid; | ||
75 | /* {g,u}id_map files only allow a max of 4096 bytes written to them */ | ||
76 | char idmap[4096]; | ||
77 | |||
78 | uid = getuid(); | ||
79 | gid = getgid(); | ||
80 | |||
81 | ret = unshare(CLONE_NEWUSER); | ||
82 | if (ret < 0) | ||
83 | ksft_exit_fail_msg("%s - Failed to unshare user namespace\n", | ||
84 | strerror(errno)); | ||
85 | |||
86 | write_to_file("/proc/self/setgroups", "deny", strlen("deny"), ENOENT); | ||
87 | |||
88 | ret = snprintf(idmap, sizeof(idmap), "0 %d 1", uid); | ||
89 | if (ret < 0 || (size_t)ret >= sizeof(idmap)) | ||
90 | ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n", | ||
91 | strerror(errno)); | ||
92 | |||
93 | write_to_file("/proc/self/uid_map", idmap, strlen(idmap), 0); | ||
94 | |||
95 | ret = snprintf(idmap, sizeof(idmap), "0 %d 1", gid); | ||
96 | if (ret < 0 || (size_t)ret >= sizeof(idmap)) | ||
97 | ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n", | ||
98 | strerror(errno)); | ||
99 | |||
100 | write_to_file("/proc/self/gid_map", idmap, strlen(idmap), 0); | ||
101 | |||
102 | ret = setgid(0); | ||
103 | if (ret) | ||
104 | ksft_exit_fail_msg("%s - Failed to setgid(0)\n", | ||
105 | strerror(errno)); | ||
106 | |||
107 | ret = setuid(0); | ||
108 | if (ret) | ||
109 | ksft_exit_fail_msg("%s - Failed to setgid(0)\n", | ||
110 | strerror(errno)); | ||
111 | } | ||
112 | |||
113 | static void change_to_mountns(void) | ||
114 | { | ||
115 | int ret; | ||
116 | |||
117 | ret = unshare(CLONE_NEWNS); | ||
118 | if (ret < 0) | ||
119 | ksft_exit_fail_msg("%s - Failed to unshare mount namespace\n", | ||
120 | strerror(errno)); | ||
121 | |||
122 | ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0); | ||
123 | if (ret < 0) | ||
124 | ksft_exit_fail_msg("%s - Failed to mount / as private\n", | ||
125 | strerror(errno)); | ||
126 | } | ||
127 | |||
128 | static void rmdir_protect_errno(const char *dir) | ||
129 | { | ||
130 | int saved_errno = errno; | ||
131 | (void)rmdir(dir); | ||
132 | errno = saved_errno; | ||
133 | } | ||
134 | |||
135 | static void __do_binderfs_test(void) | ||
136 | { | ||
137 | int fd, ret, saved_errno; | ||
138 | size_t len; | ||
139 | ssize_t wret; | ||
140 | bool keep = false; | ||
141 | struct binderfs_device device = { 0 }; | ||
142 | struct binder_version version = { 0 }; | ||
143 | |||
144 | change_to_mountns(); | ||
145 | |||
146 | ret = mkdir("/dev/binderfs", 0755); | ||
147 | if (ret < 0) { | ||
148 | if (errno != EEXIST) | ||
149 | ksft_exit_fail_msg( | ||
150 | "%s - Failed to create binderfs mountpoint\n", | ||
151 | strerror(errno)); | ||
152 | |||
153 | keep = true; | ||
154 | } | ||
155 | |||
156 | ret = mount(NULL, "/dev/binderfs", "binder", 0, 0); | ||
157 | if (ret < 0) { | ||
158 | if (errno != ENODEV) | ||
159 | ksft_exit_fail_msg("%s - Failed to mount binderfs\n", | ||
160 | strerror(errno)); | ||
161 | |||
162 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
163 | ksft_exit_skip( | ||
164 | "The Android binderfs filesystem is not available\n"); | ||
165 | } | ||
166 | |||
167 | /* binderfs mount test passed */ | ||
168 | ksft_inc_pass_cnt(); | ||
169 | |||
170 | memcpy(device.name, "my-binder", strlen("my-binder")); | ||
171 | |||
172 | fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC); | ||
173 | if (fd < 0) | ||
174 | ksft_exit_fail_msg( | ||
175 | "%s - Failed to open binder-control device\n", | ||
176 | strerror(errno)); | ||
177 | |||
178 | ret = ioctl(fd, BINDER_CTL_ADD, &device); | ||
179 | saved_errno = errno; | ||
180 | close(fd); | ||
181 | errno = saved_errno; | ||
182 | if (ret < 0) { | ||
183 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
184 | ksft_exit_fail_msg( | ||
185 | "%s - Failed to allocate new binder device\n", | ||
186 | strerror(errno)); | ||
187 | } | ||
188 | |||
189 | ksft_print_msg( | ||
190 | "Allocated new binder device with major %d, minor %d, and name %s\n", | ||
191 | device.major, device.minor, device.name); | ||
192 | |||
193 | /* binder device allocation test passed */ | ||
194 | ksft_inc_pass_cnt(); | ||
195 | |||
196 | fd = open("/dev/binderfs/my-binder", O_CLOEXEC | O_RDONLY); | ||
197 | if (fd < 0) { | ||
198 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
199 | ksft_exit_fail_msg("%s - Failed to open my-binder device\n", | ||
200 | strerror(errno)); | ||
201 | } | ||
202 | |||
203 | ret = ioctl(fd, BINDER_VERSION, &version); | ||
204 | saved_errno = errno; | ||
205 | close(fd); | ||
206 | errno = saved_errno; | ||
207 | if (ret < 0) { | ||
208 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
209 | ksft_exit_fail_msg( | ||
210 | "%s - Failed to open perform BINDER_VERSION request\n", | ||
211 | strerror(errno)); | ||
212 | } | ||
213 | |||
214 | ksft_print_msg("Detected binder version: %d\n", | ||
215 | version.protocol_version); | ||
216 | |||
217 | /* binder transaction with binderfs binder device passed */ | ||
218 | ksft_inc_pass_cnt(); | ||
219 | |||
220 | ret = unlink("/dev/binderfs/my-binder"); | ||
221 | if (ret < 0) { | ||
222 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
223 | ksft_exit_fail_msg("%s - Failed to delete binder device\n", | ||
224 | strerror(errno)); | ||
225 | } | ||
226 | |||
227 | /* binder device removal passed */ | ||
228 | ksft_inc_pass_cnt(); | ||
229 | |||
230 | ret = unlink("/dev/binderfs/binder-control"); | ||
231 | if (!ret) { | ||
232 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
233 | ksft_exit_fail_msg("Managed to delete binder-control device\n"); | ||
234 | } else if (errno != EPERM) { | ||
235 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
236 | ksft_exit_fail_msg( | ||
237 | "%s - Failed to delete binder-control device but exited with unexpected error code\n", | ||
238 | strerror(errno)); | ||
239 | } | ||
240 | |||
241 | /* binder-control device removal failed as expected */ | ||
242 | ksft_inc_xfail_cnt(); | ||
243 | |||
244 | on_error: | ||
245 | ret = umount2("/dev/binderfs", MNT_DETACH); | ||
246 | keep ?: rmdir_protect_errno("/dev/binderfs"); | ||
247 | if (ret < 0) | ||
248 | ksft_exit_fail_msg("%s - Failed to unmount binderfs\n", | ||
249 | strerror(errno)); | ||
250 | |||
251 | /* binderfs unmount test passed */ | ||
252 | ksft_inc_pass_cnt(); | ||
253 | } | ||
254 | |||
255 | static void binderfs_test_privileged() | ||
256 | { | ||
257 | if (geteuid() != 0) | ||
258 | ksft_print_msg( | ||
259 | "Tests are not run as root. Skipping privileged tests\n"); | ||
260 | else | ||
261 | __do_binderfs_test(); | ||
262 | } | ||
263 | |||
264 | static void binderfs_test_unprivileged() | ||
265 | { | ||
266 | change_to_userns(); | ||
267 | __do_binderfs_test(); | ||
268 | } | ||
269 | |||
270 | int main(int argc, char *argv[]) | ||
271 | { | ||
272 | binderfs_test_privileged(); | ||
273 | binderfs_test_unprivileged(); | ||
274 | ksft_exit_pass(); | ||
275 | } | ||
diff --git a/tools/testing/selftests/filesystems/binderfs/config b/tools/testing/selftests/filesystems/binderfs/config new file mode 100644 index 000000000000..02dd6cc9cf99 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/config | |||
@@ -0,0 +1,3 @@ | |||
1 | CONFIG_ANDROID=y | ||
2 | CONFIG_ANDROID_BINDERFS=y | ||
3 | CONFIG_ANDROID_BINDER_IPC=y | ||
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh index 802b4af18729..1080ff55a788 100755 --- a/tools/testing/selftests/net/fib_tests.sh +++ b/tools/testing/selftests/net/fib_tests.sh | |||
@@ -388,6 +388,7 @@ fib_carrier_unicast_test() | |||
388 | 388 | ||
389 | set -e | 389 | set -e |
390 | $IP link set dev dummy0 carrier off | 390 | $IP link set dev dummy0 carrier off |
391 | sleep 1 | ||
391 | set +e | 392 | set +e |
392 | 393 | ||
393 | echo " Carrier down" | 394 | echo " Carrier down" |
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index 47ed6cef93fb..c9ff2b47bd1c 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | # Makefile for netfilter selftests | 2 | # Makefile for netfilter selftests |
3 | 3 | ||
4 | TEST_PROGS := nft_trans_stress.sh | 4 | TEST_PROGS := nft_trans_stress.sh nft_nat.sh |
5 | 5 | ||
6 | include ../lib.mk | 6 | include ../lib.mk |
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config index 1017313e41a8..59caa8f71cd8 100644 --- a/tools/testing/selftests/netfilter/config +++ b/tools/testing/selftests/netfilter/config | |||
@@ -1,2 +1,2 @@ | |||
1 | CONFIG_NET_NS=y | 1 | CONFIG_NET_NS=y |
2 | NF_TABLES_INET=y | 2 | CONFIG_NF_TABLES_INET=y |
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh new file mode 100755 index 000000000000..8ec76681605c --- /dev/null +++ b/tools/testing/selftests/netfilter/nft_nat.sh | |||
@@ -0,0 +1,762 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # This test is for basic NAT functionality: snat, dnat, redirect, masquerade. | ||
4 | # | ||
5 | |||
6 | # Kselftest framework requirement - SKIP code is 4. | ||
7 | ksft_skip=4 | ||
8 | ret=0 | ||
9 | |||
10 | nft --version > /dev/null 2>&1 | ||
11 | if [ $? -ne 0 ];then | ||
12 | echo "SKIP: Could not run test without nft tool" | ||
13 | exit $ksft_skip | ||
14 | fi | ||
15 | |||
16 | ip -Version > /dev/null 2>&1 | ||
17 | if [ $? -ne 0 ];then | ||
18 | echo "SKIP: Could not run test without ip tool" | ||
19 | exit $ksft_skip | ||
20 | fi | ||
21 | |||
22 | ip netns add ns0 | ||
23 | ip netns add ns1 | ||
24 | ip netns add ns2 | ||
25 | |||
26 | ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 | ||
27 | ip link add veth1 netns ns0 type veth peer name eth0 netns ns2 | ||
28 | |||
29 | ip -net ns0 link set lo up | ||
30 | ip -net ns0 link set veth0 up | ||
31 | ip -net ns0 addr add 10.0.1.1/24 dev veth0 | ||
32 | ip -net ns0 addr add dead:1::1/64 dev veth0 | ||
33 | |||
34 | ip -net ns0 link set veth1 up | ||
35 | ip -net ns0 addr add 10.0.2.1/24 dev veth1 | ||
36 | ip -net ns0 addr add dead:2::1/64 dev veth1 | ||
37 | |||
38 | for i in 1 2; do | ||
39 | ip -net ns$i link set lo up | ||
40 | ip -net ns$i link set eth0 up | ||
41 | ip -net ns$i addr add 10.0.$i.99/24 dev eth0 | ||
42 | ip -net ns$i route add default via 10.0.$i.1 | ||
43 | ip -net ns$i addr add dead:$i::99/64 dev eth0 | ||
44 | ip -net ns$i route add default via dead:$i::1 | ||
45 | done | ||
46 | |||
47 | bad_counter() | ||
48 | { | ||
49 | local ns=$1 | ||
50 | local counter=$2 | ||
51 | local expect=$3 | ||
52 | |||
53 | echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2 | ||
54 | ip netns exec $ns nft list counter inet filter $counter 1>&2 | ||
55 | } | ||
56 | |||
57 | check_counters() | ||
58 | { | ||
59 | ns=$1 | ||
60 | local lret=0 | ||
61 | |||
62 | cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84") | ||
63 | if [ $? -ne 0 ]; then | ||
64 | bad_counter $ns ns0in "packets 1 bytes 84" | ||
65 | lret=1 | ||
66 | fi | ||
67 | cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84") | ||
68 | if [ $? -ne 0 ]; then | ||
69 | bad_counter $ns ns0out "packets 1 bytes 84" | ||
70 | lret=1 | ||
71 | fi | ||
72 | |||
73 | expect="packets 1 bytes 104" | ||
74 | cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect") | ||
75 | if [ $? -ne 0 ]; then | ||
76 | bad_counter $ns ns0in6 "$expect" | ||
77 | lret=1 | ||
78 | fi | ||
79 | cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect") | ||
80 | if [ $? -ne 0 ]; then | ||
81 | bad_counter $ns ns0out6 "$expect" | ||
82 | lret=1 | ||
83 | fi | ||
84 | |||
85 | return $lret | ||
86 | } | ||
87 | |||
88 | check_ns0_counters() | ||
89 | { | ||
90 | local ns=$1 | ||
91 | local lret=0 | ||
92 | |||
93 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0") | ||
94 | if [ $? -ne 0 ]; then | ||
95 | bad_counter ns0 ns0in "packets 0 bytes 0" | ||
96 | lret=1 | ||
97 | fi | ||
98 | |||
99 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0") | ||
100 | if [ $? -ne 0 ]; then | ||
101 | bad_counter ns0 ns0in6 "packets 0 bytes 0" | ||
102 | lret=1 | ||
103 | fi | ||
104 | |||
105 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0") | ||
106 | if [ $? -ne 0 ]; then | ||
107 | bad_counter ns0 ns0out "packets 0 bytes 0" | ||
108 | lret=1 | ||
109 | fi | ||
110 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0") | ||
111 | if [ $? -ne 0 ]; then | ||
112 | bad_counter ns0 ns0out6 "packets 0 bytes 0" | ||
113 | lret=1 | ||
114 | fi | ||
115 | |||
116 | for dir in "in" "out" ; do | ||
117 | expect="packets 1 bytes 84" | ||
118 | cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect") | ||
119 | if [ $? -ne 0 ]; then | ||
120 | bad_counter ns0 $ns$dir "$expect" | ||
121 | lret=1 | ||
122 | fi | ||
123 | |||
124 | expect="packets 1 bytes 104" | ||
125 | cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect") | ||
126 | if [ $? -ne 0 ]; then | ||
127 | bad_counter ns0 $ns$dir6 "$expect" | ||
128 | lret=1 | ||
129 | fi | ||
130 | done | ||
131 | |||
132 | return $lret | ||
133 | } | ||
134 | |||
135 | reset_counters() | ||
136 | { | ||
137 | for i in 0 1 2;do | ||
138 | ip netns exec ns$i nft reset counters inet > /dev/null | ||
139 | done | ||
140 | } | ||
141 | |||
142 | test_local_dnat6() | ||
143 | { | ||
144 | local lret=0 | ||
145 | ip netns exec ns0 nft -f - <<EOF | ||
146 | table ip6 nat { | ||
147 | chain output { | ||
148 | type nat hook output priority 0; policy accept; | ||
149 | ip6 daddr dead:1::99 dnat to dead:2::99 | ||
150 | } | ||
151 | } | ||
152 | EOF | ||
153 | if [ $? -ne 0 ]; then | ||
154 | echo "SKIP: Could not add add ip6 dnat hook" | ||
155 | return $ksft_skip | ||
156 | fi | ||
157 | |||
158 | # ping netns1, expect rewrite to netns2 | ||
159 | ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null | ||
160 | if [ $? -ne 0 ]; then | ||
161 | lret=1 | ||
162 | echo "ERROR: ping6 failed" | ||
163 | return $lret | ||
164 | fi | ||
165 | |||
166 | expect="packets 0 bytes 0" | ||
167 | for dir in "in6" "out6" ; do | ||
168 | cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
169 | if [ $? -ne 0 ]; then | ||
170 | bad_counter ns0 ns1$dir "$expect" | ||
171 | lret=1 | ||
172 | fi | ||
173 | done | ||
174 | |||
175 | expect="packets 1 bytes 104" | ||
176 | for dir in "in6" "out6" ; do | ||
177 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
178 | if [ $? -ne 0 ]; then | ||
179 | bad_counter ns0 ns2$dir "$expect" | ||
180 | lret=1 | ||
181 | fi | ||
182 | done | ||
183 | |||
184 | # expect 0 count in ns1 | ||
185 | expect="packets 0 bytes 0" | ||
186 | for dir in "in6" "out6" ; do | ||
187 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
188 | if [ $? -ne 0 ]; then | ||
189 | bad_counter ns1 ns0$dir "$expect" | ||
190 | lret=1 | ||
191 | fi | ||
192 | done | ||
193 | |||
194 | # expect 1 packet in ns2 | ||
195 | expect="packets 1 bytes 104" | ||
196 | for dir in "in6" "out6" ; do | ||
197 | cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
198 | if [ $? -ne 0 ]; then | ||
199 | bad_counter ns2 ns0$dir "$expect" | ||
200 | lret=1 | ||
201 | fi | ||
202 | done | ||
203 | |||
204 | test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2" | ||
205 | ip netns exec ns0 nft flush chain ip6 nat output | ||
206 | |||
207 | return $lret | ||
208 | } | ||
209 | |||
210 | test_local_dnat() | ||
211 | { | ||
212 | local lret=0 | ||
213 | ip netns exec ns0 nft -f - <<EOF | ||
214 | table ip nat { | ||
215 | chain output { | ||
216 | type nat hook output priority 0; policy accept; | ||
217 | ip daddr 10.0.1.99 dnat to 10.0.2.99 | ||
218 | } | ||
219 | } | ||
220 | EOF | ||
221 | # ping netns1, expect rewrite to netns2 | ||
222 | ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null | ||
223 | if [ $? -ne 0 ]; then | ||
224 | lret=1 | ||
225 | echo "ERROR: ping failed" | ||
226 | return $lret | ||
227 | fi | ||
228 | |||
229 | expect="packets 0 bytes 0" | ||
230 | for dir in "in" "out" ; do | ||
231 | cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
232 | if [ $? -ne 0 ]; then | ||
233 | bad_counter ns0 ns1$dir "$expect" | ||
234 | lret=1 | ||
235 | fi | ||
236 | done | ||
237 | |||
238 | expect="packets 1 bytes 84" | ||
239 | for dir in "in" "out" ; do | ||
240 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
241 | if [ $? -ne 0 ]; then | ||
242 | bad_counter ns0 ns2$dir "$expect" | ||
243 | lret=1 | ||
244 | fi | ||
245 | done | ||
246 | |||
247 | # expect 0 count in ns1 | ||
248 | expect="packets 0 bytes 0" | ||
249 | for dir in "in" "out" ; do | ||
250 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
251 | if [ $? -ne 0 ]; then | ||
252 | bad_counter ns1 ns0$dir "$expect" | ||
253 | lret=1 | ||
254 | fi | ||
255 | done | ||
256 | |||
257 | # expect 1 packet in ns2 | ||
258 | expect="packets 1 bytes 84" | ||
259 | for dir in "in" "out" ; do | ||
260 | cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
261 | if [ $? -ne 0 ]; then | ||
262 | bad_counter ns2 ns0$dir "$expect" | ||
263 | lret=1 | ||
264 | fi | ||
265 | done | ||
266 | |||
267 | test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2" | ||
268 | |||
269 | ip netns exec ns0 nft flush chain ip nat output | ||
270 | |||
271 | reset_counters | ||
272 | ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null | ||
273 | if [ $? -ne 0 ]; then | ||
274 | lret=1 | ||
275 | echo "ERROR: ping failed" | ||
276 | return $lret | ||
277 | fi | ||
278 | |||
279 | expect="packets 1 bytes 84" | ||
280 | for dir in "in" "out" ; do | ||
281 | cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
282 | if [ $? -ne 0 ]; then | ||
283 | bad_counter ns1 ns1$dir "$expect" | ||
284 | lret=1 | ||
285 | fi | ||
286 | done | ||
287 | expect="packets 0 bytes 0" | ||
288 | for dir in "in" "out" ; do | ||
289 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
290 | if [ $? -ne 0 ]; then | ||
291 | bad_counter ns0 ns2$dir "$expect" | ||
292 | lret=1 | ||
293 | fi | ||
294 | done | ||
295 | |||
296 | # expect 1 count in ns1 | ||
297 | expect="packets 1 bytes 84" | ||
298 | for dir in "in" "out" ; do | ||
299 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
300 | if [ $? -ne 0 ]; then | ||
301 | bad_counter ns0 ns0$dir "$expect" | ||
302 | lret=1 | ||
303 | fi | ||
304 | done | ||
305 | |||
306 | # expect 0 packet in ns2 | ||
307 | expect="packets 0 bytes 0" | ||
308 | for dir in "in" "out" ; do | ||
309 | cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
310 | if [ $? -ne 0 ]; then | ||
311 | bad_counter ns2 ns2$dir "$expect" | ||
312 | lret=1 | ||
313 | fi | ||
314 | done | ||
315 | |||
316 | test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush" | ||
317 | |||
318 | return $lret | ||
319 | } | ||
320 | |||
321 | |||
322 | test_masquerade6() | ||
323 | { | ||
324 | local lret=0 | ||
325 | |||
326 | ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null | ||
327 | |||
328 | ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 | ||
329 | if [ $? -ne 0 ] ; then | ||
330 | echo "ERROR: cannot ping ns1 from ns2 via ipv6" | ||
331 | return 1 | ||
332 | lret=1 | ||
333 | fi | ||
334 | |||
335 | expect="packets 1 bytes 104" | ||
336 | for dir in "in6" "out6" ; do | ||
337 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
338 | if [ $? -ne 0 ]; then | ||
339 | bad_counter ns1 ns2$dir "$expect" | ||
340 | lret=1 | ||
341 | fi | ||
342 | |||
343 | cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
344 | if [ $? -ne 0 ]; then | ||
345 | bad_counter ns2 ns1$dir "$expect" | ||
346 | lret=1 | ||
347 | fi | ||
348 | done | ||
349 | |||
350 | reset_counters | ||
351 | |||
352 | # add masquerading rule | ||
353 | ip netns exec ns0 nft -f - <<EOF | ||
354 | table ip6 nat { | ||
355 | chain postrouting { | ||
356 | type nat hook postrouting priority 0; policy accept; | ||
357 | meta oif veth0 masquerade | ||
358 | } | ||
359 | } | ||
360 | EOF | ||
361 | ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 | ||
362 | if [ $? -ne 0 ] ; then | ||
363 | echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading" | ||
364 | lret=1 | ||
365 | fi | ||
366 | |||
367 | # ns1 should have seen packets from ns0, due to masquerade | ||
368 | expect="packets 1 bytes 104" | ||
369 | for dir in "in6" "out6" ; do | ||
370 | |||
371 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
372 | if [ $? -ne 0 ]; then | ||
373 | bad_counter ns1 ns0$dir "$expect" | ||
374 | lret=1 | ||
375 | fi | ||
376 | |||
377 | cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
378 | if [ $? -ne 0 ]; then | ||
379 | bad_counter ns2 ns1$dir "$expect" | ||
380 | lret=1 | ||
381 | fi | ||
382 | done | ||
383 | |||
384 | # ns1 should not have seen packets from ns2, due to masquerade | ||
385 | expect="packets 0 bytes 0" | ||
386 | for dir in "in6" "out6" ; do | ||
387 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
388 | if [ $? -ne 0 ]; then | ||
389 | bad_counter ns1 ns0$dir "$expect" | ||
390 | lret=1 | ||
391 | fi | ||
392 | |||
393 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
394 | if [ $? -ne 0 ]; then | ||
395 | bad_counter ns2 ns1$dir "$expect" | ||
396 | lret=1 | ||
397 | fi | ||
398 | done | ||
399 | |||
400 | ip netns exec ns0 nft flush chain ip6 nat postrouting | ||
401 | if [ $? -ne 0 ]; then | ||
402 | echo "ERROR: Could not flush ip6 nat postrouting" 1>&2 | ||
403 | lret=1 | ||
404 | fi | ||
405 | |||
406 | test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2" | ||
407 | |||
408 | return $lret | ||
409 | } | ||
410 | |||
411 | test_masquerade() | ||
412 | { | ||
413 | local lret=0 | ||
414 | |||
415 | ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null | ||
416 | ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null | ||
417 | |||
418 | ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 | ||
419 | if [ $? -ne 0 ] ; then | ||
420 | echo "ERROR: canot ping ns1 from ns2" | ||
421 | lret=1 | ||
422 | fi | ||
423 | |||
424 | expect="packets 1 bytes 84" | ||
425 | for dir in "in" "out" ; do | ||
426 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
427 | if [ $? -ne 0 ]; then | ||
428 | bad_counter ns1 ns2$dir "$expect" | ||
429 | lret=1 | ||
430 | fi | ||
431 | |||
432 | cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
433 | if [ $? -ne 0 ]; then | ||
434 | bad_counter ns2 ns1$dir "$expect" | ||
435 | lret=1 | ||
436 | fi | ||
437 | done | ||
438 | |||
439 | reset_counters | ||
440 | |||
441 | # add masquerading rule | ||
442 | ip netns exec ns0 nft -f - <<EOF | ||
443 | table ip nat { | ||
444 | chain postrouting { | ||
445 | type nat hook postrouting priority 0; policy accept; | ||
446 | meta oif veth0 masquerade | ||
447 | } | ||
448 | } | ||
449 | EOF | ||
450 | ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 | ||
451 | if [ $? -ne 0 ] ; then | ||
452 | echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading" | ||
453 | lret=1 | ||
454 | fi | ||
455 | |||
456 | # ns1 should have seen packets from ns0, due to masquerade | ||
457 | expect="packets 1 bytes 84" | ||
458 | for dir in "in" "out" ; do | ||
459 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
460 | if [ $? -ne 0 ]; then | ||
461 | bad_counter ns1 ns0$dir "$expect" | ||
462 | lret=1 | ||
463 | fi | ||
464 | |||
465 | cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
466 | if [ $? -ne 0 ]; then | ||
467 | bad_counter ns2 ns1$dir "$expect" | ||
468 | lret=1 | ||
469 | fi | ||
470 | done | ||
471 | |||
472 | # ns1 should not have seen packets from ns2, due to masquerade | ||
473 | expect="packets 0 bytes 0" | ||
474 | for dir in "in" "out" ; do | ||
475 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
476 | if [ $? -ne 0 ]; then | ||
477 | bad_counter ns1 ns0$dir "$expect" | ||
478 | lret=1 | ||
479 | fi | ||
480 | |||
481 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
482 | if [ $? -ne 0 ]; then | ||
483 | bad_counter ns2 ns1$dir "$expect" | ||
484 | lret=1 | ||
485 | fi | ||
486 | done | ||
487 | |||
488 | ip netns exec ns0 nft flush chain ip nat postrouting | ||
489 | if [ $? -ne 0 ]; then | ||
490 | echo "ERROR: Could not flush nat postrouting" 1>&2 | ||
491 | lret=1 | ||
492 | fi | ||
493 | |||
494 | test $lret -eq 0 && echo "PASS: IP masquerade for ns2" | ||
495 | |||
496 | return $lret | ||
497 | } | ||
498 | |||
499 | test_redirect6() | ||
500 | { | ||
501 | local lret=0 | ||
502 | |||
503 | ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null | ||
504 | |||
505 | ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 | ||
506 | if [ $? -ne 0 ] ; then | ||
507 | echo "ERROR: cannnot ping ns1 from ns2 via ipv6" | ||
508 | lret=1 | ||
509 | fi | ||
510 | |||
511 | expect="packets 1 bytes 104" | ||
512 | for dir in "in6" "out6" ; do | ||
513 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
514 | if [ $? -ne 0 ]; then | ||
515 | bad_counter ns1 ns2$dir "$expect" | ||
516 | lret=1 | ||
517 | fi | ||
518 | |||
519 | cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
520 | if [ $? -ne 0 ]; then | ||
521 | bad_counter ns2 ns1$dir "$expect" | ||
522 | lret=1 | ||
523 | fi | ||
524 | done | ||
525 | |||
526 | reset_counters | ||
527 | |||
528 | # add redirect rule | ||
529 | ip netns exec ns0 nft -f - <<EOF | ||
530 | table ip6 nat { | ||
531 | chain prerouting { | ||
532 | type nat hook prerouting priority 0; policy accept; | ||
533 | meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect | ||
534 | } | ||
535 | } | ||
536 | EOF | ||
537 | ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 | ||
538 | if [ $? -ne 0 ] ; then | ||
539 | echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect" | ||
540 | lret=1 | ||
541 | fi | ||
542 | |||
543 | # ns1 should have seen no packets from ns2, due to redirection | ||
544 | expect="packets 0 bytes 0" | ||
545 | for dir in "in6" "out6" ; do | ||
546 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
547 | if [ $? -ne 0 ]; then | ||
548 | bad_counter ns1 ns0$dir "$expect" | ||
549 | lret=1 | ||
550 | fi | ||
551 | done | ||
552 | |||
554 | # ns0 should have seen packets from ns2, due to redirection | ||
554 | expect="packets 1 bytes 104" | ||
555 | for dir in "in6" "out6" ; do | ||
556 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
557 | if [ $? -ne 0 ]; then | ||
558 | bad_counter ns1 ns0$dir "$expect" | ||
559 | lret=1 | ||
560 | fi | ||
561 | done | ||
562 | |||
563 | ip netns exec ns0 nft delete table ip6 nat | ||
564 | if [ $? -ne 0 ]; then | ||
565 | echo "ERROR: Could not delete ip6 nat table" 1>&2 | ||
566 | lret=1 | ||
567 | fi | ||
568 | |||
569 | test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2" | ||
570 | |||
571 | return $lret | ||
572 | } | ||
573 | |||
574 | test_redirect() | ||
575 | { | ||
576 | local lret=0 | ||
577 | |||
578 | ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null | ||
579 | ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null | ||
580 | |||
581 | ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 | ||
582 | if [ $? -ne 0 ] ; then | ||
583 | echo "ERROR: cannot ping ns1 from ns2" | ||
584 | lret=1 | ||
585 | fi | ||
586 | |||
587 | expect="packets 1 bytes 84" | ||
588 | for dir in "in" "out" ; do | ||
589 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
590 | if [ $? -ne 0 ]; then | ||
591 | bad_counter ns1 ns2$dir "$expect" | ||
592 | lret=1 | ||
593 | fi | ||
594 | |||
595 | cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
596 | if [ $? -ne 0 ]; then | ||
597 | bad_counter ns2 ns1$dir "$expect" | ||
598 | lret=1 | ||
599 | fi | ||
600 | done | ||
601 | |||
602 | reset_counters | ||
603 | |||
604 | # add redirect rule | ||
605 | ip netns exec ns0 nft -f - <<EOF | ||
606 | table ip nat { | ||
607 | chain prerouting { | ||
608 | type nat hook prerouting priority 0; policy accept; | ||
609 | meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect | ||
610 | } | ||
611 | } | ||
612 | EOF | ||
613 | ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 | ||
614 | if [ $? -ne 0 ] ; then | ||
615 | echo "ERROR: cannot ping ns1 from ns2 with active ip redirect" | ||
616 | lret=1 | ||
617 | fi | ||
618 | |||
619 | # ns1 should have seen no packets from ns2, due to redirection | ||
620 | expect="packets 0 bytes 0" | ||
621 | for dir in "in" "out" ; do | ||
622 | |||
623 | cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
624 | if [ $? -ne 0 ]; then | ||
625 | bad_counter ns1 ns0$dir "$expect" | ||
626 | lret=1 | ||
627 | fi | ||
628 | done | ||
629 | |||
630 | # ns0 should have seen packets from ns2, due to redirection | ||
631 | expect="packets 1 bytes 84" | ||
632 | for dir in "in" "out" ; do | ||
633 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
634 | if [ $? -ne 0 ]; then | ||
635 | bad_counter ns1 ns0$dir "$expect" | ||
636 | lret=1 | ||
637 | fi | ||
638 | done | ||
639 | |||
640 | ip netns exec ns0 nft delete table ip nat | ||
641 | if [ $? -ne 0 ]; then | ||
642 | echo "ERROR: Could not delete nat table" 1>&2 | ||
643 | lret=1 | ||
644 | fi | ||
645 | |||
646 | test $lret -eq 0 && echo "PASS: IP redirection for ns2" | ||
647 | |||
648 | return $lret | ||
649 | } | ||
650 | |||
651 | |||
652 | # ip netns exec ns0 ping -c 1 -q 10.0.$i.99 | ||
653 | for i in 0 1 2; do | ||
654 | ip netns exec ns$i nft -f - <<EOF | ||
655 | table inet filter { | ||
656 | counter ns0in {} | ||
657 | counter ns1in {} | ||
658 | counter ns2in {} | ||
659 | |||
660 | counter ns0out {} | ||
661 | counter ns1out {} | ||
662 | counter ns2out {} | ||
663 | |||
664 | counter ns0in6 {} | ||
665 | counter ns1in6 {} | ||
666 | counter ns2in6 {} | ||
667 | |||
668 | counter ns0out6 {} | ||
669 | counter ns1out6 {} | ||
670 | counter ns2out6 {} | ||
671 | |||
672 | map nsincounter { | ||
673 | type ipv4_addr : counter | ||
674 | elements = { 10.0.1.1 : "ns0in", | ||
675 | 10.0.2.1 : "ns0in", | ||
676 | 10.0.1.99 : "ns1in", | ||
677 | 10.0.2.99 : "ns2in" } | ||
678 | } | ||
679 | |||
680 | map nsincounter6 { | ||
681 | type ipv6_addr : counter | ||
682 | elements = { dead:1::1 : "ns0in6", | ||
683 | dead:2::1 : "ns0in6", | ||
684 | dead:1::99 : "ns1in6", | ||
685 | dead:2::99 : "ns2in6" } | ||
686 | } | ||
687 | |||
688 | map nsoutcounter { | ||
689 | type ipv4_addr : counter | ||
690 | elements = { 10.0.1.1 : "ns0out", | ||
691 | 10.0.2.1 : "ns0out", | ||
692 | 10.0.1.99: "ns1out", | ||
693 | 10.0.2.99: "ns2out" } | ||
694 | } | ||
695 | |||
696 | map nsoutcounter6 { | ||
697 | type ipv6_addr : counter | ||
698 | elements = { dead:1::1 : "ns0out6", | ||
699 | dead:2::1 : "ns0out6", | ||
700 | dead:1::99 : "ns1out6", | ||
701 | dead:2::99 : "ns2out6" } | ||
702 | } | ||
703 | |||
704 | chain input { | ||
705 | type filter hook input priority 0; policy accept; | ||
706 | counter name ip saddr map @nsincounter | ||
707 | icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6 | ||
708 | } | ||
709 | chain output { | ||
710 | type filter hook output priority 0; policy accept; | ||
711 | counter name ip daddr map @nsoutcounter | ||
712 | icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6 | ||
713 | } | ||
714 | } | ||
715 | EOF | ||
716 | done | ||
717 | |||
718 | sleep 3 | ||
719 | # test basic connectivity | ||
720 | for i in 1 2; do | ||
721 | ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null | ||
722 | if [ $? -ne 0 ];then | ||
723 | echo "ERROR: Could not reach other namespace(s)" 1>&2 | ||
724 | ret=1 | ||
725 | fi | ||
726 | |||
727 | ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null | ||
728 | if [ $? -ne 0 ];then | ||
729 | echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2 | ||
730 | ret=1 | ||
731 | fi | ||
732 | check_counters ns$i | ||
733 | if [ $? -ne 0 ]; then | ||
734 | ret=1 | ||
735 | fi | ||
736 | |||
737 | check_ns0_counters ns$i | ||
738 | if [ $? -ne 0 ]; then | ||
739 | ret=1 | ||
740 | fi | ||
741 | reset_counters | ||
742 | done | ||
743 | |||
744 | if [ $ret -eq 0 ];then | ||
745 | echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2" | ||
746 | fi | ||
747 | |||
748 | reset_counters | ||
749 | test_local_dnat | ||
750 | test_local_dnat6 | ||
751 | |||
752 | reset_counters | ||
753 | test_masquerade | ||
754 | test_masquerade6 | ||
755 | |||
756 | reset_counters | ||
757 | test_redirect | ||
758 | test_redirect6 | ||
759 | |||
760 | for i in 0 1 2; do ip netns del ns$i;done | ||
761 | |||
762 | exit $ret | ||
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile index 9050eeea5f5f..1de8bd8ccf5d 100644 --- a/tools/testing/selftests/networking/timestamping/Makefile +++ b/tools/testing/selftests/networking/timestamping/Makefile | |||
@@ -9,6 +9,3 @@ all: $(TEST_PROGS) | |||
9 | top_srcdir = ../../../../.. | 9 | top_srcdir = ../../../../.. |
10 | KSFT_KHDR_INSTALL := 1 | 10 | KSFT_KHDR_INSTALL := 1 |
11 | include ../../lib.mk | 11 | include ../../lib.mk |
12 | |||
13 | clean: | ||
14 | rm -fr $(TEST_GEN_FILES) | ||
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 9e350fd34504..9c486fad3f9f 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c | |||
@@ -626,6 +626,13 @@ static void vcpu_req_sleep(struct kvm_vcpu *vcpu) | |||
626 | /* Awaken to handle a signal, request we sleep again later. */ | 626 | /* Awaken to handle a signal, request we sleep again later. */ |
627 | kvm_make_request(KVM_REQ_SLEEP, vcpu); | 627 | kvm_make_request(KVM_REQ_SLEEP, vcpu); |
628 | } | 628 | } |
629 | |||
630 | /* | ||
631 | * Make sure we will observe a potential reset request if we've | ||
632 | * observed a change to the power state. Pairs with the smp_wmb() in | ||
633 | * kvm_psci_vcpu_on(). | ||
634 | */ | ||
635 | smp_rmb(); | ||
629 | } | 636 | } |
630 | 637 | ||
631 | static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) | 638 | static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) |
@@ -639,6 +646,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu) | |||
639 | if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) | 646 | if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) |
640 | vcpu_req_sleep(vcpu); | 647 | vcpu_req_sleep(vcpu); |
641 | 648 | ||
649 | if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) | ||
650 | kvm_reset_vcpu(vcpu); | ||
651 | |||
642 | /* | 652 | /* |
643 | * Clear IRQ_PENDING requests that were made to guarantee | 653 | * Clear IRQ_PENDING requests that were made to guarantee |
644 | * that a VCPU sees new virtual interrupts. | 654 | * that a VCPU sees new virtual interrupts. |
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c index fbdf3ac2f001..30251e288629 100644 --- a/virt/kvm/arm/mmu.c +++ b/virt/kvm/arm/mmu.c | |||
@@ -1695,11 +1695,14 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
1695 | 1695 | ||
1696 | vma_pagesize = vma_kernel_pagesize(vma); | 1696 | vma_pagesize = vma_kernel_pagesize(vma); |
1697 | /* | 1697 | /* |
1698 | * PUD level may not exist for a VM but PMD is guaranteed to | 1698 | * The stage2 has a minimum of 2 level table (For arm64 see |
1699 | * exist. | 1699 | * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can |
1700 | * use PMD_SIZE huge mappings (even when the PMD is folded into PGD). | ||
1701 | * As for PUD huge maps, we must make sure that we have at least | ||
1702 | * 3 levels, i.e, PMD is not folded. | ||
1700 | */ | 1703 | */ |
1701 | if ((vma_pagesize == PMD_SIZE || | 1704 | if ((vma_pagesize == PMD_SIZE || |
1702 | (vma_pagesize == PUD_SIZE && kvm_stage2_has_pud(kvm))) && | 1705 | (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm))) && |
1703 | !force_pte) { | 1706 | !force_pte) { |
1704 | gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; | 1707 | gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT; |
1705 | } | 1708 | } |
diff --git a/virt/kvm/arm/psci.c b/virt/kvm/arm/psci.c index 9b73d3ad918a..34d08ee63747 100644 --- a/virt/kvm/arm/psci.c +++ b/virt/kvm/arm/psci.c | |||
@@ -104,12 +104,10 @@ static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) | |||
104 | 104 | ||
105 | static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | 105 | static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) |
106 | { | 106 | { |
107 | struct vcpu_reset_state *reset_state; | ||
107 | struct kvm *kvm = source_vcpu->kvm; | 108 | struct kvm *kvm = source_vcpu->kvm; |
108 | struct kvm_vcpu *vcpu = NULL; | 109 | struct kvm_vcpu *vcpu = NULL; |
109 | struct swait_queue_head *wq; | ||
110 | unsigned long cpu_id; | 110 | unsigned long cpu_id; |
111 | unsigned long context_id; | ||
112 | phys_addr_t target_pc; | ||
113 | 111 | ||
114 | cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; | 112 | cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK; |
115 | if (vcpu_mode_is_32bit(source_vcpu)) | 113 | if (vcpu_mode_is_32bit(source_vcpu)) |
@@ -130,32 +128,30 @@ static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) | |||
130 | return PSCI_RET_INVALID_PARAMS; | 128 | return PSCI_RET_INVALID_PARAMS; |
131 | } | 129 | } |
132 | 130 | ||
133 | target_pc = smccc_get_arg2(source_vcpu); | 131 | reset_state = &vcpu->arch.reset_state; |
134 | context_id = smccc_get_arg3(source_vcpu); | ||
135 | 132 | ||
136 | kvm_reset_vcpu(vcpu); | 133 | reset_state->pc = smccc_get_arg2(source_vcpu); |
137 | |||
138 | /* Gracefully handle Thumb2 entry point */ | ||
139 | if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { | ||
140 | target_pc &= ~((phys_addr_t) 1); | ||
141 | vcpu_set_thumb(vcpu); | ||
142 | } | ||
143 | 134 | ||
144 | /* Propagate caller endianness */ | 135 | /* Propagate caller endianness */ |
145 | if (kvm_vcpu_is_be(source_vcpu)) | 136 | reset_state->be = kvm_vcpu_is_be(source_vcpu); |
146 | kvm_vcpu_set_be(vcpu); | ||
147 | 137 | ||
148 | *vcpu_pc(vcpu) = target_pc; | ||
149 | /* | 138 | /* |
150 | * NOTE: We always update r0 (or x0) because for PSCI v0.1 | 139 | * NOTE: We always update r0 (or x0) because for PSCI v0.1 |
151 | * the general puspose registers are undefined upon CPU_ON. | 140 | * the general puspose registers are undefined upon CPU_ON. |
152 | */ | 141 | */ |
153 | smccc_set_retval(vcpu, context_id, 0, 0, 0); | 142 | reset_state->r0 = smccc_get_arg3(source_vcpu); |
154 | vcpu->arch.power_off = false; | 143 | |
155 | smp_mb(); /* Make sure the above is visible */ | 144 | WRITE_ONCE(reset_state->reset, true); |
145 | kvm_make_request(KVM_REQ_VCPU_RESET, vcpu); | ||
156 | 146 | ||
157 | wq = kvm_arch_vcpu_wq(vcpu); | 147 | /* |
158 | swake_up_one(wq); | 148 | * Make sure the reset request is observed if the change to |
149 | * power_state is observed. | ||
150 | */ | ||
151 | smp_wmb(); | ||
152 | |||
153 | vcpu->arch.power_off = false; | ||
154 | kvm_vcpu_wake_up(vcpu); | ||
159 | 155 | ||
160 | return PSCI_RET_SUCCESS; | 156 | return PSCI_RET_SUCCESS; |
161 | } | 157 | } |
diff --git a/virt/kvm/arm/vgic/vgic-debug.c b/virt/kvm/arm/vgic/vgic-debug.c index 07aa900bac56..1f62f2b8065d 100644 --- a/virt/kvm/arm/vgic/vgic-debug.c +++ b/virt/kvm/arm/vgic/vgic-debug.c | |||
@@ -251,9 +251,9 @@ static int vgic_debug_show(struct seq_file *s, void *v) | |||
251 | return 0; | 251 | return 0; |
252 | } | 252 | } |
253 | 253 | ||
254 | spin_lock_irqsave(&irq->irq_lock, flags); | 254 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
255 | print_irq_state(s, irq, vcpu); | 255 | print_irq_state(s, irq, vcpu); |
256 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 256 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
257 | 257 | ||
258 | vgic_put_irq(kvm, irq); | 258 | vgic_put_irq(kvm, irq); |
259 | return 0; | 259 | return 0; |
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index c0c0b88af1d5..3bdb31eaed64 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c | |||
@@ -64,7 +64,7 @@ void kvm_vgic_early_init(struct kvm *kvm) | |||
64 | struct vgic_dist *dist = &kvm->arch.vgic; | 64 | struct vgic_dist *dist = &kvm->arch.vgic; |
65 | 65 | ||
66 | INIT_LIST_HEAD(&dist->lpi_list_head); | 66 | INIT_LIST_HEAD(&dist->lpi_list_head); |
67 | spin_lock_init(&dist->lpi_list_lock); | 67 | raw_spin_lock_init(&dist->lpi_list_lock); |
68 | } | 68 | } |
69 | 69 | ||
70 | /* CREATION */ | 70 | /* CREATION */ |
@@ -171,7 +171,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis) | |||
171 | 171 | ||
172 | irq->intid = i + VGIC_NR_PRIVATE_IRQS; | 172 | irq->intid = i + VGIC_NR_PRIVATE_IRQS; |
173 | INIT_LIST_HEAD(&irq->ap_list); | 173 | INIT_LIST_HEAD(&irq->ap_list); |
174 | spin_lock_init(&irq->irq_lock); | 174 | raw_spin_lock_init(&irq->irq_lock); |
175 | irq->vcpu = NULL; | 175 | irq->vcpu = NULL; |
176 | irq->target_vcpu = vcpu0; | 176 | irq->target_vcpu = vcpu0; |
177 | kref_init(&irq->refcount); | 177 | kref_init(&irq->refcount); |
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
206 | vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; | 206 | vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF; |
207 | 207 | ||
208 | INIT_LIST_HEAD(&vgic_cpu->ap_list_head); | 208 | INIT_LIST_HEAD(&vgic_cpu->ap_list_head); |
209 | spin_lock_init(&vgic_cpu->ap_list_lock); | 209 | raw_spin_lock_init(&vgic_cpu->ap_list_lock); |
210 | 210 | ||
211 | /* | 211 | /* |
212 | * Enable and configure all SGIs to be edge-triggered and | 212 | * Enable and configure all SGIs to be edge-triggered and |
@@ -216,7 +216,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
216 | struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; | 216 | struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; |
217 | 217 | ||
218 | INIT_LIST_HEAD(&irq->ap_list); | 218 | INIT_LIST_HEAD(&irq->ap_list); |
219 | spin_lock_init(&irq->irq_lock); | 219 | raw_spin_lock_init(&irq->irq_lock); |
220 | irq->intid = i; | 220 | irq->intid = i; |
221 | irq->vcpu = NULL; | 221 | irq->vcpu = NULL; |
222 | irq->target_vcpu = vcpu; | 222 | irq->target_vcpu = vcpu; |
@@ -231,13 +231,6 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu) | |||
231 | irq->config = VGIC_CONFIG_LEVEL; | 231 | irq->config = VGIC_CONFIG_LEVEL; |
232 | } | 232 | } |
233 | 233 | ||
234 | /* | ||
235 | * GICv3 can only be created via the KVM_DEVICE_CREATE API and | ||
236 | * so we always know the emulation type at this point as it's | ||
237 | * either explicitly configured as GICv3, or explicitly | ||
238 | * configured as GICv2, or not configured yet which also | ||
239 | * implies GICv2. | ||
240 | */ | ||
241 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) | 234 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) |
242 | irq->group = 1; | 235 | irq->group = 1; |
243 | else | 236 | else |
@@ -281,7 +274,7 @@ int vgic_init(struct kvm *kvm) | |||
281 | { | 274 | { |
282 | struct vgic_dist *dist = &kvm->arch.vgic; | 275 | struct vgic_dist *dist = &kvm->arch.vgic; |
283 | struct kvm_vcpu *vcpu; | 276 | struct kvm_vcpu *vcpu; |
284 | int ret = 0, i; | 277 | int ret = 0, i, idx; |
285 | 278 | ||
286 | if (vgic_initialized(kvm)) | 279 | if (vgic_initialized(kvm)) |
287 | return 0; | 280 | return 0; |
@@ -298,6 +291,19 @@ int vgic_init(struct kvm *kvm) | |||
298 | if (ret) | 291 | if (ret) |
299 | goto out; | 292 | goto out; |
300 | 293 | ||
294 | /* Initialize groups on CPUs created before the VGIC type was known */ | ||
295 | kvm_for_each_vcpu(idx, vcpu, kvm) { | ||
296 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | ||
297 | |||
298 | for (i = 0; i < VGIC_NR_PRIVATE_IRQS; i++) { | ||
299 | struct vgic_irq *irq = &vgic_cpu->private_irqs[i]; | ||
300 | if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) | ||
301 | irq->group = 1; | ||
302 | else | ||
303 | irq->group = 0; | ||
304 | } | ||
305 | } | ||
306 | |||
301 | if (vgic_has_its(kvm)) { | 307 | if (vgic_has_its(kvm)) { |
302 | ret = vgic_v4_init(kvm); | 308 | ret = vgic_v4_init(kvm); |
303 | if (ret) | 309 | if (ret) |
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index eb2a390a6c86..ab3f47745d9c 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c | |||
@@ -65,7 +65,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
65 | 65 | ||
66 | INIT_LIST_HEAD(&irq->lpi_list); | 66 | INIT_LIST_HEAD(&irq->lpi_list); |
67 | INIT_LIST_HEAD(&irq->ap_list); | 67 | INIT_LIST_HEAD(&irq->ap_list); |
68 | spin_lock_init(&irq->irq_lock); | 68 | raw_spin_lock_init(&irq->irq_lock); |
69 | 69 | ||
70 | irq->config = VGIC_CONFIG_EDGE; | 70 | irq->config = VGIC_CONFIG_EDGE; |
71 | kref_init(&irq->refcount); | 71 | kref_init(&irq->refcount); |
@@ -73,7 +73,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
73 | irq->target_vcpu = vcpu; | 73 | irq->target_vcpu = vcpu; |
74 | irq->group = 1; | 74 | irq->group = 1; |
75 | 75 | ||
76 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 76 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * There could be a race with another vgic_add_lpi(), so we need to | 79 | * There could be a race with another vgic_add_lpi(), so we need to |
@@ -101,7 +101,7 @@ static struct vgic_irq *vgic_add_lpi(struct kvm *kvm, u32 intid, | |||
101 | dist->lpi_list_count++; | 101 | dist->lpi_list_count++; |
102 | 102 | ||
103 | out_unlock: | 103 | out_unlock: |
104 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 104 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
105 | 105 | ||
106 | /* | 106 | /* |
107 | * We "cache" the configuration table entries in our struct vgic_irq's. | 107 | * We "cache" the configuration table entries in our struct vgic_irq's. |
@@ -287,7 +287,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, | |||
287 | if (ret) | 287 | if (ret) |
288 | return ret; | 288 | return ret; |
289 | 289 | ||
290 | spin_lock_irqsave(&irq->irq_lock, flags); | 290 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
291 | 291 | ||
292 | if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { | 292 | if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { |
293 | irq->priority = LPI_PROP_PRIORITY(prop); | 293 | irq->priority = LPI_PROP_PRIORITY(prop); |
@@ -299,7 +299,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, | |||
299 | } | 299 | } |
300 | } | 300 | } |
301 | 301 | ||
302 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 302 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
303 | 303 | ||
304 | if (irq->hw) | 304 | if (irq->hw) |
305 | return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); | 305 | return its_prop_update_vlpi(irq->host_irq, prop, needs_inv); |
@@ -332,7 +332,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
332 | if (!intids) | 332 | if (!intids) |
333 | return -ENOMEM; | 333 | return -ENOMEM; |
334 | 334 | ||
335 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 335 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
336 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { | 336 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { |
337 | if (i == irq_count) | 337 | if (i == irq_count) |
338 | break; | 338 | break; |
@@ -341,7 +341,7 @@ int vgic_copy_lpi_list(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 **intid_ptr) | |||
341 | continue; | 341 | continue; |
342 | intids[i++] = irq->intid; | 342 | intids[i++] = irq->intid; |
343 | } | 343 | } |
344 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 344 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
345 | 345 | ||
346 | *intid_ptr = intids; | 346 | *intid_ptr = intids; |
347 | return i; | 347 | return i; |
@@ -352,9 +352,9 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) | |||
352 | int ret = 0; | 352 | int ret = 0; |
353 | unsigned long flags; | 353 | unsigned long flags; |
354 | 354 | ||
355 | spin_lock_irqsave(&irq->irq_lock, flags); | 355 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
356 | irq->target_vcpu = vcpu; | 356 | irq->target_vcpu = vcpu; |
357 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 357 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
358 | 358 | ||
359 | if (irq->hw) { | 359 | if (irq->hw) { |
360 | struct its_vlpi_map map; | 360 | struct its_vlpi_map map; |
@@ -455,7 +455,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) | |||
455 | } | 455 | } |
456 | 456 | ||
457 | irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); | 457 | irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); |
458 | spin_lock_irqsave(&irq->irq_lock, flags); | 458 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
459 | irq->pending_latch = pendmask & (1U << bit_nr); | 459 | irq->pending_latch = pendmask & (1U << bit_nr); |
460 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 460 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
461 | vgic_put_irq(vcpu->kvm, irq); | 461 | vgic_put_irq(vcpu->kvm, irq); |
@@ -612,7 +612,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, | |||
612 | return irq_set_irqchip_state(irq->host_irq, | 612 | return irq_set_irqchip_state(irq->host_irq, |
613 | IRQCHIP_STATE_PENDING, true); | 613 | IRQCHIP_STATE_PENDING, true); |
614 | 614 | ||
615 | spin_lock_irqsave(&irq->irq_lock, flags); | 615 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
616 | irq->pending_latch = true; | 616 | irq->pending_latch = true; |
617 | vgic_queue_irq_unlock(kvm, irq, flags); | 617 | vgic_queue_irq_unlock(kvm, irq, flags); |
618 | 618 | ||
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index 738b65d2d0e7..b535fffc7400 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c | |||
@@ -147,7 +147,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, | |||
147 | 147 | ||
148 | irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); | 148 | irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); |
149 | 149 | ||
150 | spin_lock_irqsave(&irq->irq_lock, flags); | 150 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
151 | irq->pending_latch = true; | 151 | irq->pending_latch = true; |
152 | irq->source |= 1U << source_vcpu->vcpu_id; | 152 | irq->source |= 1U << source_vcpu->vcpu_id; |
153 | 153 | ||
@@ -191,13 +191,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu, | |||
191 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); | 191 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); |
192 | int target; | 192 | int target; |
193 | 193 | ||
194 | spin_lock_irqsave(&irq->irq_lock, flags); | 194 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
195 | 195 | ||
196 | irq->targets = (val >> (i * 8)) & cpu_mask; | 196 | irq->targets = (val >> (i * 8)) & cpu_mask; |
197 | target = irq->targets ? __ffs(irq->targets) : 0; | 197 | target = irq->targets ? __ffs(irq->targets) : 0; |
198 | irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); | 198 | irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); |
199 | 199 | ||
200 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 200 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
201 | vgic_put_irq(vcpu->kvm, irq); | 201 | vgic_put_irq(vcpu->kvm, irq); |
202 | } | 202 | } |
203 | } | 203 | } |
@@ -230,13 +230,13 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu, | |||
230 | for (i = 0; i < len; i++) { | 230 | for (i = 0; i < len; i++) { |
231 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 231 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
232 | 232 | ||
233 | spin_lock_irqsave(&irq->irq_lock, flags); | 233 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
234 | 234 | ||
235 | irq->source &= ~((val >> (i * 8)) & 0xff); | 235 | irq->source &= ~((val >> (i * 8)) & 0xff); |
236 | if (!irq->source) | 236 | if (!irq->source) |
237 | irq->pending_latch = false; | 237 | irq->pending_latch = false; |
238 | 238 | ||
239 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 239 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
240 | vgic_put_irq(vcpu->kvm, irq); | 240 | vgic_put_irq(vcpu->kvm, irq); |
241 | } | 241 | } |
242 | } | 242 | } |
@@ -252,7 +252,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, | |||
252 | for (i = 0; i < len; i++) { | 252 | for (i = 0; i < len; i++) { |
253 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 253 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
254 | 254 | ||
255 | spin_lock_irqsave(&irq->irq_lock, flags); | 255 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
256 | 256 | ||
257 | irq->source |= (val >> (i * 8)) & 0xff; | 257 | irq->source |= (val >> (i * 8)) & 0xff; |
258 | 258 | ||
@@ -260,7 +260,7 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, | |||
260 | irq->pending_latch = true; | 260 | irq->pending_latch = true; |
261 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 261 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
262 | } else { | 262 | } else { |
263 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 263 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
264 | } | 264 | } |
265 | vgic_put_irq(vcpu->kvm, irq); | 265 | vgic_put_irq(vcpu->kvm, irq); |
266 | } | 266 | } |
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index b3d1f0985117..4a12322bf7df 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c | |||
@@ -169,13 +169,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu, | |||
169 | if (!irq) | 169 | if (!irq) |
170 | return; | 170 | return; |
171 | 171 | ||
172 | spin_lock_irqsave(&irq->irq_lock, flags); | 172 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
173 | 173 | ||
174 | /* We only care about and preserve Aff0, Aff1 and Aff2. */ | 174 | /* We only care about and preserve Aff0, Aff1 and Aff2. */ |
175 | irq->mpidr = val & GENMASK(23, 0); | 175 | irq->mpidr = val & GENMASK(23, 0); |
176 | irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); | 176 | irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); |
177 | 177 | ||
178 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 178 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
179 | vgic_put_irq(vcpu->kvm, irq); | 179 | vgic_put_irq(vcpu->kvm, irq); |
180 | } | 180 | } |
181 | 181 | ||
@@ -281,7 +281,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, | |||
281 | for (i = 0; i < len * 8; i++) { | 281 | for (i = 0; i < len * 8; i++) { |
282 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 282 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
283 | 283 | ||
284 | spin_lock_irqsave(&irq->irq_lock, flags); | 284 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
285 | if (test_bit(i, &val)) { | 285 | if (test_bit(i, &val)) { |
286 | /* | 286 | /* |
287 | * pending_latch is set irrespective of irq type | 287 | * pending_latch is set irrespective of irq type |
@@ -292,7 +292,7 @@ static int vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, | |||
292 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 292 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
293 | } else { | 293 | } else { |
294 | irq->pending_latch = false; | 294 | irq->pending_latch = false; |
295 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 295 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
296 | } | 296 | } |
297 | 297 | ||
298 | vgic_put_irq(vcpu->kvm, irq); | 298 | vgic_put_irq(vcpu->kvm, irq); |
@@ -957,7 +957,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) | |||
957 | 957 | ||
958 | irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); | 958 | irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); |
959 | 959 | ||
960 | spin_lock_irqsave(&irq->irq_lock, flags); | 960 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
961 | 961 | ||
962 | /* | 962 | /* |
963 | * An access targetting Group0 SGIs can only generate | 963 | * An access targetting Group0 SGIs can only generate |
@@ -968,7 +968,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) | |||
968 | irq->pending_latch = true; | 968 | irq->pending_latch = true; |
969 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 969 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
970 | } else { | 970 | } else { |
971 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 971 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
972 | } | 972 | } |
973 | 973 | ||
974 | vgic_put_irq(vcpu->kvm, irq); | 974 | vgic_put_irq(vcpu->kvm, irq); |
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index ceeda7e04a4d..7de42fba05b5 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c | |||
@@ -77,7 +77,7 @@ void vgic_mmio_write_group(struct kvm_vcpu *vcpu, gpa_t addr, | |||
77 | for (i = 0; i < len * 8; i++) { | 77 | for (i = 0; i < len * 8; i++) { |
78 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 78 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
79 | 79 | ||
80 | spin_lock_irqsave(&irq->irq_lock, flags); | 80 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
81 | irq->group = !!(val & BIT(i)); | 81 | irq->group = !!(val & BIT(i)); |
82 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 82 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
83 | 83 | ||
@@ -120,7 +120,7 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, | |||
120 | for_each_set_bit(i, &val, len * 8) { | 120 | for_each_set_bit(i, &val, len * 8) { |
121 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 121 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
122 | 122 | ||
123 | spin_lock_irqsave(&irq->irq_lock, flags); | 123 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
124 | irq->enabled = true; | 124 | irq->enabled = true; |
125 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 125 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
126 | 126 | ||
@@ -139,11 +139,11 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, | |||
139 | for_each_set_bit(i, &val, len * 8) { | 139 | for_each_set_bit(i, &val, len * 8) { |
140 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 140 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
141 | 141 | ||
142 | spin_lock_irqsave(&irq->irq_lock, flags); | 142 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
143 | 143 | ||
144 | irq->enabled = false; | 144 | irq->enabled = false; |
145 | 145 | ||
146 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 146 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
147 | vgic_put_irq(vcpu->kvm, irq); | 147 | vgic_put_irq(vcpu->kvm, irq); |
148 | } | 148 | } |
149 | } | 149 | } |
@@ -160,10 +160,10 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu, | |||
160 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 160 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
161 | unsigned long flags; | 161 | unsigned long flags; |
162 | 162 | ||
163 | spin_lock_irqsave(&irq->irq_lock, flags); | 163 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
164 | if (irq_is_pending(irq)) | 164 | if (irq_is_pending(irq)) |
165 | value |= (1U << i); | 165 | value |= (1U << i); |
166 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 166 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
167 | 167 | ||
168 | vgic_put_irq(vcpu->kvm, irq); | 168 | vgic_put_irq(vcpu->kvm, irq); |
169 | } | 169 | } |
@@ -215,7 +215,7 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, | |||
215 | for_each_set_bit(i, &val, len * 8) { | 215 | for_each_set_bit(i, &val, len * 8) { |
216 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 216 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
217 | 217 | ||
218 | spin_lock_irqsave(&irq->irq_lock, flags); | 218 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
219 | if (irq->hw) | 219 | if (irq->hw) |
220 | vgic_hw_irq_spending(vcpu, irq, is_uaccess); | 220 | vgic_hw_irq_spending(vcpu, irq, is_uaccess); |
221 | else | 221 | else |
@@ -262,14 +262,14 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, | |||
262 | for_each_set_bit(i, &val, len * 8) { | 262 | for_each_set_bit(i, &val, len * 8) { |
263 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 263 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
264 | 264 | ||
265 | spin_lock_irqsave(&irq->irq_lock, flags); | 265 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
266 | 266 | ||
267 | if (irq->hw) | 267 | if (irq->hw) |
268 | vgic_hw_irq_cpending(vcpu, irq, is_uaccess); | 268 | vgic_hw_irq_cpending(vcpu, irq, is_uaccess); |
269 | else | 269 | else |
270 | irq->pending_latch = false; | 270 | irq->pending_latch = false; |
271 | 271 | ||
272 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 272 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
273 | vgic_put_irq(vcpu->kvm, irq); | 273 | vgic_put_irq(vcpu->kvm, irq); |
274 | } | 274 | } |
275 | } | 275 | } |
@@ -311,7 +311,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | |||
311 | unsigned long flags; | 311 | unsigned long flags; |
312 | struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); | 312 | struct kvm_vcpu *requester_vcpu = vgic_get_mmio_requester_vcpu(); |
313 | 313 | ||
314 | spin_lock_irqsave(&irq->irq_lock, flags); | 314 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
315 | 315 | ||
316 | if (irq->hw) { | 316 | if (irq->hw) { |
317 | vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); | 317 | vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); |
@@ -342,7 +342,7 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, | |||
342 | if (irq->active) | 342 | if (irq->active) |
343 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 343 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
344 | else | 344 | else |
345 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 345 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
346 | } | 346 | } |
347 | 347 | ||
348 | /* | 348 | /* |
@@ -485,10 +485,10 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, | |||
485 | for (i = 0; i < len; i++) { | 485 | for (i = 0; i < len; i++) { |
486 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 486 | struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
487 | 487 | ||
488 | spin_lock_irqsave(&irq->irq_lock, flags); | 488 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
489 | /* Narrow the priority range to what we actually support */ | 489 | /* Narrow the priority range to what we actually support */ |
490 | irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); | 490 | irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); |
491 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 491 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
492 | 492 | ||
493 | vgic_put_irq(vcpu->kvm, irq); | 493 | vgic_put_irq(vcpu->kvm, irq); |
494 | } | 494 | } |
@@ -534,14 +534,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu, | |||
534 | continue; | 534 | continue; |
535 | 535 | ||
536 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); | 536 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); |
537 | spin_lock_irqsave(&irq->irq_lock, flags); | 537 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
538 | 538 | ||
539 | if (test_bit(i * 2 + 1, &val)) | 539 | if (test_bit(i * 2 + 1, &val)) |
540 | irq->config = VGIC_CONFIG_EDGE; | 540 | irq->config = VGIC_CONFIG_EDGE; |
541 | else | 541 | else |
542 | irq->config = VGIC_CONFIG_LEVEL; | 542 | irq->config = VGIC_CONFIG_LEVEL; |
543 | 543 | ||
544 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 544 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
545 | vgic_put_irq(vcpu->kvm, irq); | 545 | vgic_put_irq(vcpu->kvm, irq); |
546 | } | 546 | } |
547 | } | 547 | } |
@@ -590,12 +590,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid, | |||
590 | * restore irq config before line level. | 590 | * restore irq config before line level. |
591 | */ | 591 | */ |
592 | new_level = !!(val & (1U << i)); | 592 | new_level = !!(val & (1U << i)); |
593 | spin_lock_irqsave(&irq->irq_lock, flags); | 593 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
594 | irq->line_level = new_level; | 594 | irq->line_level = new_level; |
595 | if (new_level) | 595 | if (new_level) |
596 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); | 596 | vgic_queue_irq_unlock(vcpu->kvm, irq, flags); |
597 | else | 597 | else |
598 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 598 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
599 | 599 | ||
600 | vgic_put_irq(vcpu->kvm, irq); | 600 | vgic_put_irq(vcpu->kvm, irq); |
601 | } | 601 | } |
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index 69b892abd7dc..d91a8938aa7c 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c | |||
@@ -84,7 +84,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
84 | 84 | ||
85 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); | 85 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); |
86 | 86 | ||
87 | spin_lock(&irq->irq_lock); | 87 | raw_spin_lock(&irq->irq_lock); |
88 | 88 | ||
89 | /* Always preserve the active bit */ | 89 | /* Always preserve the active bit */ |
90 | irq->active = !!(val & GICH_LR_ACTIVE_BIT); | 90 | irq->active = !!(val & GICH_LR_ACTIVE_BIT); |
@@ -127,7 +127,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) | |||
127 | vgic_irq_set_phys_active(irq, false); | 127 | vgic_irq_set_phys_active(irq, false); |
128 | } | 128 | } |
129 | 129 | ||
130 | spin_unlock(&irq->irq_lock); | 130 | raw_spin_unlock(&irq->irq_lock); |
131 | vgic_put_irq(vcpu->kvm, irq); | 131 | vgic_put_irq(vcpu->kvm, irq); |
132 | } | 132 | } |
133 | 133 | ||
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 9c0dd234ebe8..4ee0aeb9a905 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c | |||
@@ -76,7 +76,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
76 | if (!irq) /* An LPI could have been unmapped. */ | 76 | if (!irq) /* An LPI could have been unmapped. */ |
77 | continue; | 77 | continue; |
78 | 78 | ||
79 | spin_lock(&irq->irq_lock); | 79 | raw_spin_lock(&irq->irq_lock); |
80 | 80 | ||
81 | /* Always preserve the active bit */ | 81 | /* Always preserve the active bit */ |
82 | irq->active = !!(val & ICH_LR_ACTIVE_BIT); | 82 | irq->active = !!(val & ICH_LR_ACTIVE_BIT); |
@@ -119,7 +119,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) | |||
119 | vgic_irq_set_phys_active(irq, false); | 119 | vgic_irq_set_phys_active(irq, false); |
120 | } | 120 | } |
121 | 121 | ||
122 | spin_unlock(&irq->irq_lock); | 122 | raw_spin_unlock(&irq->irq_lock); |
123 | vgic_put_irq(vcpu->kvm, irq); | 123 | vgic_put_irq(vcpu->kvm, irq); |
124 | } | 124 | } |
125 | 125 | ||
@@ -347,9 +347,9 @@ retry: | |||
347 | 347 | ||
348 | status = val & (1 << bit_nr); | 348 | status = val & (1 << bit_nr); |
349 | 349 | ||
350 | spin_lock_irqsave(&irq->irq_lock, flags); | 350 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
351 | if (irq->target_vcpu != vcpu) { | 351 | if (irq->target_vcpu != vcpu) { |
352 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 352 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
353 | goto retry; | 353 | goto retry; |
354 | } | 354 | } |
355 | irq->pending_latch = status; | 355 | irq->pending_latch = status; |
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 870b1185173b..abd9c7352677 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { | |||
54 | * When taking more than one ap_list_lock at the same time, always take the | 54 | * When taking more than one ap_list_lock at the same time, always take the |
55 | * lowest numbered VCPU's ap_list_lock first, so: | 55 | * lowest numbered VCPU's ap_list_lock first, so: |
56 | * vcpuX->vcpu_id < vcpuY->vcpu_id: | 56 | * vcpuX->vcpu_id < vcpuY->vcpu_id: |
57 | * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); | 57 | * raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); |
58 | * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); | 58 | * raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); |
59 | * | 59 | * |
60 | * Since the VGIC must support injecting virtual interrupts from ISRs, we have | 60 | * Since the VGIC must support injecting virtual interrupts from ISRs, we have |
61 | * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer | 61 | * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer |
62 | * spinlocks for any lock that may be taken while injecting an interrupt. | 62 | * spinlocks for any lock that may be taken while injecting an interrupt. |
63 | */ | 63 | */ |
64 | 64 | ||
@@ -72,7 +72,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) | |||
72 | struct vgic_irq *irq = NULL; | 72 | struct vgic_irq *irq = NULL; |
73 | unsigned long flags; | 73 | unsigned long flags; |
74 | 74 | ||
75 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 75 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
76 | 76 | ||
77 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { | 77 | list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { |
78 | if (irq->intid != intid) | 78 | if (irq->intid != intid) |
@@ -88,7 +88,7 @@ static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid) | |||
88 | irq = NULL; | 88 | irq = NULL; |
89 | 89 | ||
90 | out_unlock: | 90 | out_unlock: |
91 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 91 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
92 | 92 | ||
93 | return irq; | 93 | return irq; |
94 | } | 94 | } |
@@ -138,15 +138,15 @@ void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) | |||
138 | if (irq->intid < VGIC_MIN_LPI) | 138 | if (irq->intid < VGIC_MIN_LPI) |
139 | return; | 139 | return; |
140 | 140 | ||
141 | spin_lock_irqsave(&dist->lpi_list_lock, flags); | 141 | raw_spin_lock_irqsave(&dist->lpi_list_lock, flags); |
142 | if (!kref_put(&irq->refcount, vgic_irq_release)) { | 142 | if (!kref_put(&irq->refcount, vgic_irq_release)) { |
143 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 143 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
144 | return; | 144 | return; |
145 | }; | 145 | }; |
146 | 146 | ||
147 | list_del(&irq->lpi_list); | 147 | list_del(&irq->lpi_list); |
148 | dist->lpi_list_count--; | 148 | dist->lpi_list_count--; |
149 | spin_unlock_irqrestore(&dist->lpi_list_lock, flags); | 149 | raw_spin_unlock_irqrestore(&dist->lpi_list_lock, flags); |
150 | 150 | ||
151 | kfree(irq); | 151 | kfree(irq); |
152 | } | 152 | } |
@@ -244,8 +244,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) | |||
244 | bool penda, pendb; | 244 | bool penda, pendb; |
245 | int ret; | 245 | int ret; |
246 | 246 | ||
247 | spin_lock(&irqa->irq_lock); | 247 | raw_spin_lock(&irqa->irq_lock); |
248 | spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); | 248 | raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING); |
249 | 249 | ||
250 | if (irqa->active || irqb->active) { | 250 | if (irqa->active || irqb->active) { |
251 | ret = (int)irqb->active - (int)irqa->active; | 251 | ret = (int)irqb->active - (int)irqa->active; |
@@ -263,8 +263,8 @@ static int vgic_irq_cmp(void *priv, struct list_head *a, struct list_head *b) | |||
263 | /* Both pending and enabled, sort by priority */ | 263 | /* Both pending and enabled, sort by priority */ |
264 | ret = irqa->priority - irqb->priority; | 264 | ret = irqa->priority - irqb->priority; |
265 | out: | 265 | out: |
266 | spin_unlock(&irqb->irq_lock); | 266 | raw_spin_unlock(&irqb->irq_lock); |
267 | spin_unlock(&irqa->irq_lock); | 267 | raw_spin_unlock(&irqa->irq_lock); |
268 | return ret; | 268 | return ret; |
269 | } | 269 | } |
270 | 270 | ||
@@ -325,7 +325,7 @@ retry: | |||
325 | * not need to be inserted into an ap_list and there is also | 325 | * not need to be inserted into an ap_list and there is also |
326 | * no more work for us to do. | 326 | * no more work for us to do. |
327 | */ | 327 | */ |
328 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 328 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
329 | 329 | ||
330 | /* | 330 | /* |
331 | * We have to kick the VCPU here, because we could be | 331 | * We have to kick the VCPU here, because we could be |
@@ -347,12 +347,12 @@ retry: | |||
347 | * We must unlock the irq lock to take the ap_list_lock where | 347 | * We must unlock the irq lock to take the ap_list_lock where |
348 | * we are going to insert this new pending interrupt. | 348 | * we are going to insert this new pending interrupt. |
349 | */ | 349 | */ |
350 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 350 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
351 | 351 | ||
352 | /* someone can do stuff here, which we re-check below */ | 352 | /* someone can do stuff here, which we re-check below */ |
353 | 353 | ||
354 | spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); | 354 | raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); |
355 | spin_lock(&irq->irq_lock); | 355 | raw_spin_lock(&irq->irq_lock); |
356 | 356 | ||
357 | /* | 357 | /* |
358 | * Did something change behind our backs? | 358 | * Did something change behind our backs? |
@@ -367,10 +367,11 @@ retry: | |||
367 | */ | 367 | */ |
368 | 368 | ||
369 | if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { | 369 | if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { |
370 | spin_unlock(&irq->irq_lock); | 370 | raw_spin_unlock(&irq->irq_lock); |
371 | spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); | 371 | raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, |
372 | flags); | ||
372 | 373 | ||
373 | spin_lock_irqsave(&irq->irq_lock, flags); | 374 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
374 | goto retry; | 375 | goto retry; |
375 | } | 376 | } |
376 | 377 | ||
@@ -382,8 +383,8 @@ retry: | |||
382 | list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); | 383 | list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); |
383 | irq->vcpu = vcpu; | 384 | irq->vcpu = vcpu; |
384 | 385 | ||
385 | spin_unlock(&irq->irq_lock); | 386 | raw_spin_unlock(&irq->irq_lock); |
386 | spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); | 387 | raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); |
387 | 388 | ||
388 | kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); | 389 | kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); |
389 | kvm_vcpu_kick(vcpu); | 390 | kvm_vcpu_kick(vcpu); |
@@ -430,11 +431,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, | |||
430 | if (!irq) | 431 | if (!irq) |
431 | return -EINVAL; | 432 | return -EINVAL; |
432 | 433 | ||
433 | spin_lock_irqsave(&irq->irq_lock, flags); | 434 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
434 | 435 | ||
435 | if (!vgic_validate_injection(irq, level, owner)) { | 436 | if (!vgic_validate_injection(irq, level, owner)) { |
436 | /* Nothing to see here, move along... */ | 437 | /* Nothing to see here, move along... */ |
437 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 438 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
438 | vgic_put_irq(kvm, irq); | 439 | vgic_put_irq(kvm, irq); |
439 | return 0; | 440 | return 0; |
440 | } | 441 | } |
@@ -494,9 +495,9 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, | |||
494 | 495 | ||
495 | BUG_ON(!irq); | 496 | BUG_ON(!irq); |
496 | 497 | ||
497 | spin_lock_irqsave(&irq->irq_lock, flags); | 498 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
498 | ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); | 499 | ret = kvm_vgic_map_irq(vcpu, irq, host_irq, get_input_level); |
499 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 500 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
500 | vgic_put_irq(vcpu->kvm, irq); | 501 | vgic_put_irq(vcpu->kvm, irq); |
501 | 502 | ||
502 | return ret; | 503 | return ret; |
@@ -519,11 +520,11 @@ void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid) | |||
519 | if (!irq->hw) | 520 | if (!irq->hw) |
520 | goto out; | 521 | goto out; |
521 | 522 | ||
522 | spin_lock_irqsave(&irq->irq_lock, flags); | 523 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
523 | irq->active = false; | 524 | irq->active = false; |
524 | irq->pending_latch = false; | 525 | irq->pending_latch = false; |
525 | irq->line_level = false; | 526 | irq->line_level = false; |
526 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 527 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
527 | out: | 528 | out: |
528 | vgic_put_irq(vcpu->kvm, irq); | 529 | vgic_put_irq(vcpu->kvm, irq); |
529 | } | 530 | } |
@@ -539,9 +540,9 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid) | |||
539 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); | 540 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); |
540 | BUG_ON(!irq); | 541 | BUG_ON(!irq); |
541 | 542 | ||
542 | spin_lock_irqsave(&irq->irq_lock, flags); | 543 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
543 | kvm_vgic_unmap_irq(irq); | 544 | kvm_vgic_unmap_irq(irq); |
544 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 545 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
545 | vgic_put_irq(vcpu->kvm, irq); | 546 | vgic_put_irq(vcpu->kvm, irq); |
546 | 547 | ||
547 | return 0; | 548 | return 0; |
@@ -571,12 +572,12 @@ int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner) | |||
571 | return -EINVAL; | 572 | return -EINVAL; |
572 | 573 | ||
573 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); | 574 | irq = vgic_get_irq(vcpu->kvm, vcpu, intid); |
574 | spin_lock_irqsave(&irq->irq_lock, flags); | 575 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
575 | if (irq->owner && irq->owner != owner) | 576 | if (irq->owner && irq->owner != owner) |
576 | ret = -EEXIST; | 577 | ret = -EEXIST; |
577 | else | 578 | else |
578 | irq->owner = owner; | 579 | irq->owner = owner; |
579 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 580 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
580 | 581 | ||
581 | return ret; | 582 | return ret; |
582 | } | 583 | } |
@@ -597,13 +598,13 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) | |||
597 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); | 598 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); |
598 | 599 | ||
599 | retry: | 600 | retry: |
600 | spin_lock(&vgic_cpu->ap_list_lock); | 601 | raw_spin_lock(&vgic_cpu->ap_list_lock); |
601 | 602 | ||
602 | list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { | 603 | list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { |
603 | struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; | 604 | struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; |
604 | bool target_vcpu_needs_kick = false; | 605 | bool target_vcpu_needs_kick = false; |
605 | 606 | ||
606 | spin_lock(&irq->irq_lock); | 607 | raw_spin_lock(&irq->irq_lock); |
607 | 608 | ||
608 | BUG_ON(vcpu != irq->vcpu); | 609 | BUG_ON(vcpu != irq->vcpu); |
609 | 610 | ||
@@ -616,7 +617,7 @@ retry: | |||
616 | */ | 617 | */ |
617 | list_del(&irq->ap_list); | 618 | list_del(&irq->ap_list); |
618 | irq->vcpu = NULL; | 619 | irq->vcpu = NULL; |
619 | spin_unlock(&irq->irq_lock); | 620 | raw_spin_unlock(&irq->irq_lock); |
620 | 621 | ||
621 | /* | 622 | /* |
622 | * This vgic_put_irq call matches the | 623 | * This vgic_put_irq call matches the |
@@ -631,14 +632,14 @@ retry: | |||
631 | 632 | ||
632 | if (target_vcpu == vcpu) { | 633 | if (target_vcpu == vcpu) { |
633 | /* We're on the right CPU */ | 634 | /* We're on the right CPU */ |
634 | spin_unlock(&irq->irq_lock); | 635 | raw_spin_unlock(&irq->irq_lock); |
635 | continue; | 636 | continue; |
636 | } | 637 | } |
637 | 638 | ||
638 | /* This interrupt looks like it has to be migrated. */ | 639 | /* This interrupt looks like it has to be migrated. */ |
639 | 640 | ||
640 | spin_unlock(&irq->irq_lock); | 641 | raw_spin_unlock(&irq->irq_lock); |
641 | spin_unlock(&vgic_cpu->ap_list_lock); | 642 | raw_spin_unlock(&vgic_cpu->ap_list_lock); |
642 | 643 | ||
643 | /* | 644 | /* |
644 | * Ensure locking order by always locking the smallest | 645 | * Ensure locking order by always locking the smallest |
@@ -652,10 +653,10 @@ retry: | |||
652 | vcpuB = vcpu; | 653 | vcpuB = vcpu; |
653 | } | 654 | } |
654 | 655 | ||
655 | spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); | 656 | raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); |
656 | spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, | 657 | raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, |
657 | SINGLE_DEPTH_NESTING); | 658 | SINGLE_DEPTH_NESTING); |
658 | spin_lock(&irq->irq_lock); | 659 | raw_spin_lock(&irq->irq_lock); |
659 | 660 | ||
660 | /* | 661 | /* |
661 | * If the affinity has been preserved, move the | 662 | * If the affinity has been preserved, move the |
@@ -675,9 +676,9 @@ retry: | |||
675 | target_vcpu_needs_kick = true; | 676 | target_vcpu_needs_kick = true; |
676 | } | 677 | } |
677 | 678 | ||
678 | spin_unlock(&irq->irq_lock); | 679 | raw_spin_unlock(&irq->irq_lock); |
679 | spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); | 680 | raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); |
680 | spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); | 681 | raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); |
681 | 682 | ||
682 | if (target_vcpu_needs_kick) { | 683 | if (target_vcpu_needs_kick) { |
683 | kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); | 684 | kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu); |
@@ -687,7 +688,7 @@ retry: | |||
687 | goto retry; | 688 | goto retry; |
688 | } | 689 | } |
689 | 690 | ||
690 | spin_unlock(&vgic_cpu->ap_list_lock); | 691 | raw_spin_unlock(&vgic_cpu->ap_list_lock); |
691 | } | 692 | } |
692 | 693 | ||
693 | static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) | 694 | static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) |
@@ -741,10 +742,10 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu, | |||
741 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 742 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
742 | int w; | 743 | int w; |
743 | 744 | ||
744 | spin_lock(&irq->irq_lock); | 745 | raw_spin_lock(&irq->irq_lock); |
745 | /* GICv2 SGIs can count for more than one... */ | 746 | /* GICv2 SGIs can count for more than one... */ |
746 | w = vgic_irq_get_lr_count(irq); | 747 | w = vgic_irq_get_lr_count(irq); |
747 | spin_unlock(&irq->irq_lock); | 748 | raw_spin_unlock(&irq->irq_lock); |
748 | 749 | ||
749 | count += w; | 750 | count += w; |
750 | *multi_sgi |= (w > 1); | 751 | *multi_sgi |= (w > 1); |
@@ -770,7 +771,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
770 | count = 0; | 771 | count = 0; |
771 | 772 | ||
772 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 773 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
773 | spin_lock(&irq->irq_lock); | 774 | raw_spin_lock(&irq->irq_lock); |
774 | 775 | ||
775 | /* | 776 | /* |
776 | * If we have multi-SGIs in the pipeline, we need to | 777 | * If we have multi-SGIs in the pipeline, we need to |
@@ -780,7 +781,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
780 | * the AP list has been sorted already. | 781 | * the AP list has been sorted already. |
781 | */ | 782 | */ |
782 | if (multi_sgi && irq->priority > prio) { | 783 | if (multi_sgi && irq->priority > prio) { |
783 | spin_unlock(&irq->irq_lock); | 784 | raw_spin_unlock(&irq->irq_lock); |
784 | break; | 785 | break; |
785 | } | 786 | } |
786 | 787 | ||
@@ -791,7 +792,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
791 | prio = irq->priority; | 792 | prio = irq->priority; |
792 | } | 793 | } |
793 | 794 | ||
794 | spin_unlock(&irq->irq_lock); | 795 | raw_spin_unlock(&irq->irq_lock); |
795 | 796 | ||
796 | if (count == kvm_vgic_global_state.nr_lr) { | 797 | if (count == kvm_vgic_global_state.nr_lr) { |
797 | if (!list_is_last(&irq->ap_list, | 798 | if (!list_is_last(&irq->ap_list, |
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) | |||
872 | 873 | ||
873 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); | 874 | DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); |
874 | 875 | ||
875 | spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); | 876 | raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); |
876 | vgic_flush_lr_state(vcpu); | 877 | vgic_flush_lr_state(vcpu); |
877 | spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); | 878 | raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); |
878 | 879 | ||
879 | if (can_access_vgic_from_kernel()) | 880 | if (can_access_vgic_from_kernel()) |
880 | vgic_restore_state(vcpu); | 881 | vgic_restore_state(vcpu); |
@@ -918,20 +919,20 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) | |||
918 | 919 | ||
919 | vgic_get_vmcr(vcpu, &vmcr); | 920 | vgic_get_vmcr(vcpu, &vmcr); |
920 | 921 | ||
921 | spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); | 922 | raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); |
922 | 923 | ||
923 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 924 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
924 | spin_lock(&irq->irq_lock); | 925 | raw_spin_lock(&irq->irq_lock); |
925 | pending = irq_is_pending(irq) && irq->enabled && | 926 | pending = irq_is_pending(irq) && irq->enabled && |
926 | !irq->active && | 927 | !irq->active && |
927 | irq->priority < vmcr.pmr; | 928 | irq->priority < vmcr.pmr; |
928 | spin_unlock(&irq->irq_lock); | 929 | raw_spin_unlock(&irq->irq_lock); |
929 | 930 | ||
930 | if (pending) | 931 | if (pending) |
931 | break; | 932 | break; |
932 | } | 933 | } |
933 | 934 | ||
934 | spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); | 935 | raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); |
935 | 936 | ||
936 | return pending; | 937 | return pending; |
937 | } | 938 | } |
@@ -963,11 +964,10 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid) | |||
963 | return false; | 964 | return false; |
964 | 965 | ||
965 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); | 966 | irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); |
966 | spin_lock_irqsave(&irq->irq_lock, flags); | 967 | raw_spin_lock_irqsave(&irq->irq_lock, flags); |
967 | map_is_active = irq->hw && irq->active; | 968 | map_is_active = irq->hw && irq->active; |
968 | spin_unlock_irqrestore(&irq->irq_lock, flags); | 969 | raw_spin_unlock_irqrestore(&irq->irq_lock, flags); |
969 | vgic_put_irq(vcpu->kvm, irq); | 970 | vgic_put_irq(vcpu->kvm, irq); |
970 | 971 | ||
971 | return map_is_active; | 972 | return map_is_active; |
972 | } | 973 | } |
973 | |||
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5ecea812cb6a..585845203db8 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -3000,8 +3000,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm, | |||
3000 | if (ops->init) | 3000 | if (ops->init) |
3001 | ops->init(dev); | 3001 | ops->init(dev); |
3002 | 3002 | ||
3003 | kvm_get_kvm(kvm); | ||
3003 | ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); | 3004 | ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); |
3004 | if (ret < 0) { | 3005 | if (ret < 0) { |
3006 | kvm_put_kvm(kvm); | ||
3005 | mutex_lock(&kvm->lock); | 3007 | mutex_lock(&kvm->lock); |
3006 | list_del(&dev->vm_node); | 3008 | list_del(&dev->vm_node); |
3007 | mutex_unlock(&kvm->lock); | 3009 | mutex_unlock(&kvm->lock); |
@@ -3009,7 +3011,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm, | |||
3009 | return ret; | 3011 | return ret; |
3010 | } | 3012 | } |
3011 | 3013 | ||
3012 | kvm_get_kvm(kvm); | ||
3013 | cd->fd = ret; | 3014 | cd->fd = ret; |
3014 | return 0; | 3015 | return 0; |
3015 | } | 3016 | } |