diff options
681 files changed, 6463 insertions, 2859 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-mlxreg-io b/Documentation/ABI/stable/sysfs-driver-mlxreg-io index 9b642669cb16..169fe08a649b 100644 --- a/Documentation/ABI/stable/sysfs-driver-mlxreg-io +++ b/Documentation/ABI/stable/sysfs-driver-mlxreg-io | |||
@@ -24,7 +24,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ | |||
24 | cpld3_version | 24 | cpld3_version |
25 | 25 | ||
26 | Date: November 2018 | 26 | Date: November 2018 |
27 | KernelVersion: 4.21 | 27 | KernelVersion: 5.0 |
28 | Contact: Vadim Pasternak <vadimpmellanox.com> | 28 | Contact: Vadim Pasternak <vadimpmellanox.com> |
29 | Description: These files show with which CPLD versions have been burned | 29 | Description: These files show with which CPLD versions have been burned |
30 | on LED board. | 30 | on LED board. |
@@ -35,7 +35,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ | |||
35 | jtag_enable | 35 | jtag_enable |
36 | 36 | ||
37 | Date: November 2018 | 37 | Date: November 2018 |
38 | KernelVersion: 4.21 | 38 | KernelVersion: 5.0 |
39 | Contact: Vadim Pasternak <vadimpmellanox.com> | 39 | Contact: Vadim Pasternak <vadimpmellanox.com> |
40 | Description: These files enable and disable the access to the JTAG domain. | 40 | Description: These files enable and disable the access to the JTAG domain. |
41 | By default access to the JTAG domain is disabled. | 41 | By default access to the JTAG domain is disabled. |
@@ -105,7 +105,7 @@ What: /sys/devices/platform/mlxplat/mlxreg-io/hwmon/hwmon*/ | |||
105 | reset_voltmon_upgrade_fail | 105 | reset_voltmon_upgrade_fail |
106 | 106 | ||
107 | Date: November 2018 | 107 | Date: November 2018 |
108 | KernelVersion: 4.21 | 108 | KernelVersion: 5.0 |
109 | Contact: Vadim Pasternak <vadimpmellanox.com> | 109 | Contact: Vadim Pasternak <vadimpmellanox.com> |
110 | Description: These files show the system reset cause, as following: ComEx | 110 | Description: These files show the system reset cause, as following: ComEx |
111 | power fail, reset from ComEx, system platform reset, reset | 111 | power fail, reset from ComEx, system platform reset, reset |
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index b799bcf67d7b..858b6c0b9a15 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt | |||
@@ -1696,12 +1696,11 @@ | |||
1696 | By default, super page will be supported if Intel IOMMU | 1696 | By default, super page will be supported if Intel IOMMU |
1697 | has the capability. With this option, super page will | 1697 | has the capability. With this option, super page will |
1698 | not be supported. | 1698 | not be supported. |
1699 | sm_off [Default Off] | 1699 | sm_on [Default Off] |
1700 | By default, scalable mode will be supported if the | 1700 | By default, scalable mode will be disabled even if the |
1701 | hardware advertises that it has support for the scalable | 1701 | hardware advertises that it has support for the scalable |
1702 | mode translation. With this option set, scalable mode | 1702 | mode translation. With this option set, scalable mode |
1703 | will not be used even on hardware which claims to support | 1703 | will be used on hardware which claims to support it. |
1704 | it. | ||
1705 | tboot_noforce [Default Off] | 1704 | tboot_noforce [Default Off] |
1706 | Do not force the Intel IOMMU enabled under tboot. | 1705 | Do not force the Intel IOMMU enabled under tboot. |
1707 | By default, tboot will force Intel IOMMU on, which | 1706 | By default, tboot will force Intel IOMMU on, which |
diff --git a/Documentation/devicetree/bindings/Makefile b/Documentation/devicetree/bindings/Makefile index 6e5cef0ed6fb..50daa0b3b032 100644 --- a/Documentation/devicetree/bindings/Makefile +++ b/Documentation/devicetree/bindings/Makefile | |||
@@ -17,7 +17,11 @@ extra-y += $(DT_TMP_SCHEMA) | |||
17 | quiet_cmd_mk_schema = SCHEMA $@ | 17 | quiet_cmd_mk_schema = SCHEMA $@ |
18 | cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^) | 18 | cmd_mk_schema = $(DT_MK_SCHEMA) $(DT_MK_SCHEMA_FLAGS) -o $@ $(filter-out FORCE, $^) |
19 | 19 | ||
20 | DT_DOCS = $(shell cd $(srctree)/$(src) && find * -name '*.yaml') | 20 | DT_DOCS = $(shell \ |
21 | cd $(srctree)/$(src) && \ | ||
22 | find * \( -name '*.yaml' ! -name $(DT_TMP_SCHEMA) \) \ | ||
23 | ) | ||
24 | |||
21 | DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS)) | 25 | DT_SCHEMA_FILES ?= $(addprefix $(src)/,$(DT_DOCS)) |
22 | 26 | ||
23 | extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES)) | 27 | extra-y += $(patsubst $(src)/%.yaml,%.example.dts, $(DT_SCHEMA_FILES)) |
diff --git a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt index 36603419d6f8..0e72183f52bc 100644 --- a/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt +++ b/Documentation/devicetree/bindings/serio/olpc,ap-sp.txt | |||
@@ -4,14 +4,10 @@ Required properties: | |||
4 | - compatible : "olpc,ap-sp" | 4 | - compatible : "olpc,ap-sp" |
5 | - reg : base address and length of SoC's WTM registers | 5 | - reg : base address and length of SoC's WTM registers |
6 | - interrupts : SP-AP interrupt | 6 | - interrupts : SP-AP interrupt |
7 | - clocks : phandle + clock-specifier for the clock that drives the WTM | ||
8 | - clock-names: should be "sp" | ||
9 | 7 | ||
10 | Example: | 8 | Example: |
11 | ap-sp@d4290000 { | 9 | ap-sp@d4290000 { |
12 | compatible = "olpc,ap-sp"; | 10 | compatible = "olpc,ap-sp"; |
13 | reg = <0xd4290000 0x1000>; | 11 | reg = <0xd4290000 0x1000>; |
14 | interrupts = <40>; | 12 | interrupts = <40>; |
15 | clocks = <&soc_clocks MMP2_CLK_SP>; | ||
16 | clock-names = "sp"; | ||
17 | } | 13 | } |
diff --git a/Documentation/networking/operstates.txt b/Documentation/networking/operstates.txt index 355c6d8ef8ad..b203d1334822 100644 --- a/Documentation/networking/operstates.txt +++ b/Documentation/networking/operstates.txt | |||
@@ -22,8 +22,9 @@ and changeable from userspace under certain rules. | |||
22 | 2. Querying from userspace | 22 | 2. Querying from userspace |
23 | 23 | ||
24 | Both admin and operational state can be queried via the netlink | 24 | Both admin and operational state can be queried via the netlink |
25 | operation RTM_GETLINK. It is also possible to subscribe to RTMGRP_LINK | 25 | operation RTM_GETLINK. It is also possible to subscribe to RTNLGRP_LINK |
26 | to be notified of updates. This is important for setting from userspace. | 26 | to be notified of updates while the interface is admin up. This is |
27 | important for setting from userspace. | ||
27 | 28 | ||
28 | These values contain interface state: | 29 | These values contain interface state: |
29 | 30 | ||
@@ -101,8 +102,9 @@ because some driver controlled protocol establishment has to | |||
101 | complete. Corresponding functions are netif_dormant_on() to set the | 102 | complete. Corresponding functions are netif_dormant_on() to set the |
102 | flag, netif_dormant_off() to clear it and netif_dormant() to query. | 103 | flag, netif_dormant_off() to clear it and netif_dormant() to query. |
103 | 104 | ||
104 | On device allocation, networking core sets the flags equivalent to | 105 | On device allocation, both flags __LINK_STATE_NOCARRIER and |
105 | netif_carrier_ok() and !netif_dormant(). | 106 | __LINK_STATE_DORMANT are cleared, so the effective state is equivalent |
107 | to netif_carrier_ok() and !netif_dormant(). | ||
106 | 108 | ||
107 | 109 | ||
108 | Whenever the driver CHANGES one of these flags, a workqueue event is | 110 | Whenever the driver CHANGES one of these flags, a workqueue event is |
@@ -133,11 +135,11 @@ netif_carrier_ok() && !netif_dormant() is set by the | |||
133 | driver. Afterwards, the userspace application can set IFLA_OPERSTATE | 135 | driver. Afterwards, the userspace application can set IFLA_OPERSTATE |
134 | to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set | 136 | to IF_OPER_DORMANT or IF_OPER_UP as long as the driver does not set |
135 | netif_carrier_off() or netif_dormant_on(). Changes made by userspace | 137 | netif_carrier_off() or netif_dormant_on(). Changes made by userspace |
136 | are multicasted on the netlink group RTMGRP_LINK. | 138 | are multicasted on the netlink group RTNLGRP_LINK. |
137 | 139 | ||
138 | So basically a 802.1X supplicant interacts with the kernel like this: | 140 | So basically a 802.1X supplicant interacts with the kernel like this: |
139 | 141 | ||
140 | -subscribe to RTMGRP_LINK | 142 | -subscribe to RTNLGRP_LINK |
141 | -set IFLA_LINKMODE to 1 via RTM_SETLINK | 143 | -set IFLA_LINKMODE to 1 via RTM_SETLINK |
142 | -query RTM_GETLINK once to get initial state | 144 | -query RTM_GETLINK once to get initial state |
143 | -if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until | 145 | -if initial flags are not (IFF_LOWER_UP && !IFF_DORMANT), wait until |
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt index 819caf8ca05f..ebc679bcb2dc 100644 --- a/Documentation/sysctl/fs.txt +++ b/Documentation/sysctl/fs.txt | |||
@@ -56,26 +56,34 @@ of any kernel data structures. | |||
56 | 56 | ||
57 | dentry-state: | 57 | dentry-state: |
58 | 58 | ||
59 | From linux/fs/dentry.c: | 59 | From linux/include/linux/dcache.h: |
60 | -------------------------------------------------------------- | 60 | -------------------------------------------------------------- |
61 | struct { | 61 | struct dentry_stat_t dentry_stat { |
62 | int nr_dentry; | 62 | int nr_dentry; |
63 | int nr_unused; | 63 | int nr_unused; |
64 | int age_limit; /* age in seconds */ | 64 | int age_limit; /* age in seconds */ |
65 | int want_pages; /* pages requested by system */ | 65 | int want_pages; /* pages requested by system */ |
66 | int dummy[2]; | 66 | int nr_negative; /* # of unused negative dentries */ |
67 | } dentry_stat = {0, 0, 45, 0,}; | 67 | int dummy; /* Reserved for future use */ |
68 | -------------------------------------------------------------- | 68 | }; |
69 | 69 | -------------------------------------------------------------- | |
70 | Dentries are dynamically allocated and deallocated, and | 70 | |
71 | nr_dentry seems to be 0 all the time. Hence it's safe to | 71 | Dentries are dynamically allocated and deallocated. |
72 | assume that only nr_unused, age_limit and want_pages are | 72 | |
73 | used. Nr_unused seems to be exactly what its name says. | 73 | nr_dentry shows the total number of dentries allocated (active |
74 | + unused). nr_unused shows the number of dentries that are not | ||
75 | actively used, but are saved in the LRU list for future reuse. | ||
76 | |||
74 | Age_limit is the age in seconds after which dcache entries | 77 | Age_limit is the age in seconds after which dcache entries |
75 | can be reclaimed when memory is short and want_pages is | 78 | can be reclaimed when memory is short and want_pages is |
76 | nonzero when shrink_dcache_pages() has been called and the | 79 | nonzero when shrink_dcache_pages() has been called and the |
77 | dcache isn't pruned yet. | 80 | dcache isn't pruned yet. |
78 | 81 | ||
82 | nr_negative shows the number of unused dentries that are also | ||
83 | negative dentries which do not map to any files. Instead, | ||
84 | they help speeding up rejection of non-existing files provided | ||
85 | by the users. | ||
86 | |||
79 | ============================================================== | 87 | ============================================================== |
80 | 88 | ||
81 | dquot-max & dquot-nr: | 89 | dquot-max & dquot-nr: |
diff --git a/Documentation/x86/resctrl_ui.txt b/Documentation/x86/resctrl_ui.txt index e8e8d14d3c4e..c1f95b59e14d 100644 --- a/Documentation/x86/resctrl_ui.txt +++ b/Documentation/x86/resctrl_ui.txt | |||
@@ -9,7 +9,7 @@ Fenghua Yu <fenghua.yu@intel.com> | |||
9 | Tony Luck <tony.luck@intel.com> | 9 | Tony Luck <tony.luck@intel.com> |
10 | Vikas Shivappa <vikas.shivappa@intel.com> | 10 | Vikas Shivappa <vikas.shivappa@intel.com> |
11 | 11 | ||
12 | This feature is enabled by the CONFIG_X86_RESCTRL and the x86 /proc/cpuinfo | 12 | This feature is enabled by the CONFIG_X86_CPU_RESCTRL and the x86 /proc/cpuinfo |
13 | flag bits: | 13 | flag bits: |
14 | RDT (Resource Director Technology) Allocation - "rdt_a" | 14 | RDT (Resource Director Technology) Allocation - "rdt_a" |
15 | CAT (Cache Allocation Technology) - "cat_l3", "cat_l2" | 15 | CAT (Cache Allocation Technology) - "cat_l3", "cat_l2" |
diff --git a/MAINTAINERS b/MAINTAINERS index 9f64f8d3740e..41ce5f4ad838 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2848,6 +2848,9 @@ F: include/uapi/linux/if_bonding.h | |||
2848 | BPF (Safe dynamic programs and tools) | 2848 | BPF (Safe dynamic programs and tools) |
2849 | M: Alexei Starovoitov <ast@kernel.org> | 2849 | M: Alexei Starovoitov <ast@kernel.org> |
2850 | M: Daniel Borkmann <daniel@iogearbox.net> | 2850 | M: Daniel Borkmann <daniel@iogearbox.net> |
2851 | R: Martin KaFai Lau <kafai@fb.com> | ||
2852 | R: Song Liu <songliubraving@fb.com> | ||
2853 | R: Yonghong Song <yhs@fb.com> | ||
2851 | L: netdev@vger.kernel.org | 2854 | L: netdev@vger.kernel.org |
2852 | L: linux-kernel@vger.kernel.org | 2855 | L: linux-kernel@vger.kernel.org |
2853 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git | 2856 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git |
@@ -2873,6 +2876,8 @@ F: samples/bpf/ | |||
2873 | F: tools/bpf/ | 2876 | F: tools/bpf/ |
2874 | F: tools/lib/bpf/ | 2877 | F: tools/lib/bpf/ |
2875 | F: tools/testing/selftests/bpf/ | 2878 | F: tools/testing/selftests/bpf/ |
2879 | K: bpf | ||
2880 | N: bpf | ||
2876 | 2881 | ||
2877 | BPF JIT for ARM | 2882 | BPF JIT for ARM |
2878 | M: Shubham Bansal <illusionist.neo@gmail.com> | 2883 | M: Shubham Bansal <illusionist.neo@gmail.com> |
@@ -5181,7 +5186,7 @@ DRM DRIVERS FOR XEN | |||
5181 | M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> | 5186 | M: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> |
5182 | T: git git://anongit.freedesktop.org/drm/drm-misc | 5187 | T: git git://anongit.freedesktop.org/drm/drm-misc |
5183 | L: dri-devel@lists.freedesktop.org | 5188 | L: dri-devel@lists.freedesktop.org |
5184 | L: xen-devel@lists.xen.org | 5189 | L: xen-devel@lists.xenproject.org (moderated for non-subscribers) |
5185 | S: Supported | 5190 | S: Supported |
5186 | F: drivers/gpu/drm/xen/ | 5191 | F: drivers/gpu/drm/xen/ |
5187 | F: Documentation/gpu/xen-front.rst | 5192 | F: Documentation/gpu/xen-front.rst |
@@ -6146,7 +6151,7 @@ FREESCALE SOC SOUND DRIVERS | |||
6146 | M: Timur Tabi <timur@kernel.org> | 6151 | M: Timur Tabi <timur@kernel.org> |
6147 | M: Nicolin Chen <nicoleotsuka@gmail.com> | 6152 | M: Nicolin Chen <nicoleotsuka@gmail.com> |
6148 | M: Xiubo Li <Xiubo.Lee@gmail.com> | 6153 | M: Xiubo Li <Xiubo.Lee@gmail.com> |
6149 | R: Fabio Estevam <fabio.estevam@nxp.com> | 6154 | R: Fabio Estevam <festevam@gmail.com> |
6150 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 6155 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
6151 | L: linuxppc-dev@lists.ozlabs.org | 6156 | L: linuxppc-dev@lists.ozlabs.org |
6152 | S: Maintained | 6157 | S: Maintained |
@@ -10893,7 +10898,7 @@ F: include/linux/nvmem-consumer.h | |||
10893 | F: include/linux/nvmem-provider.h | 10898 | F: include/linux/nvmem-provider.h |
10894 | 10899 | ||
10895 | NXP SGTL5000 DRIVER | 10900 | NXP SGTL5000 DRIVER |
10896 | M: Fabio Estevam <fabio.estevam@nxp.com> | 10901 | M: Fabio Estevam <festevam@gmail.com> |
10897 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) | 10902 | L: alsa-devel@alsa-project.org (moderated for non-subscribers) |
10898 | S: Maintained | 10903 | S: Maintained |
10899 | F: Documentation/devicetree/bindings/sound/sgtl5000.txt | 10904 | F: Documentation/devicetree/bindings/sound/sgtl5000.txt |
@@ -11307,10 +11312,12 @@ F: include/dt-bindings/ | |||
11307 | 11312 | ||
11308 | OPENCORES I2C BUS DRIVER | 11313 | OPENCORES I2C BUS DRIVER |
11309 | M: Peter Korsgaard <peter@korsgaard.com> | 11314 | M: Peter Korsgaard <peter@korsgaard.com> |
11315 | M: Andrew Lunn <andrew@lunn.ch> | ||
11310 | L: linux-i2c@vger.kernel.org | 11316 | L: linux-i2c@vger.kernel.org |
11311 | S: Maintained | 11317 | S: Maintained |
11312 | F: Documentation/i2c/busses/i2c-ocores | 11318 | F: Documentation/i2c/busses/i2c-ocores |
11313 | F: drivers/i2c/busses/i2c-ocores.c | 11319 | F: drivers/i2c/busses/i2c-ocores.c |
11320 | F: include/linux/platform_data/i2c-ocores.h | ||
11314 | 11321 | ||
11315 | OPENRISC ARCHITECTURE | 11322 | OPENRISC ARCHITECTURE |
11316 | M: Jonas Bonn <jonas@southpole.se> | 11323 | M: Jonas Bonn <jonas@southpole.se> |
@@ -12868,6 +12875,13 @@ F: Documentation/devicetree/bindings/net/dsa/realtek-smi.txt | |||
12868 | F: drivers/net/dsa/realtek-smi* | 12875 | F: drivers/net/dsa/realtek-smi* |
12869 | F: drivers/net/dsa/rtl83* | 12876 | F: drivers/net/dsa/rtl83* |
12870 | 12877 | ||
12878 | REDPINE WIRELESS DRIVER | ||
12879 | M: Amitkumar Karwar <amitkarwar@gmail.com> | ||
12880 | M: Siva Rebbagondla <siva8118@gmail.com> | ||
12881 | L: linux-wireless@vger.kernel.org | ||
12882 | S: Maintained | ||
12883 | F: drivers/net/wireless/rsi/ | ||
12884 | |||
12871 | REGISTER MAP ABSTRACTION | 12885 | REGISTER MAP ABSTRACTION |
12872 | M: Mark Brown <broonie@kernel.org> | 12886 | M: Mark Brown <broonie@kernel.org> |
12873 | L: linux-kernel@vger.kernel.org | 12887 | L: linux-kernel@vger.kernel.org |
@@ -13696,6 +13710,15 @@ L: netdev@vger.kernel.org | |||
13696 | S: Supported | 13710 | S: Supported |
13697 | F: drivers/net/ethernet/sfc/ | 13711 | F: drivers/net/ethernet/sfc/ |
13698 | 13712 | ||
13713 | SFF/SFP/SFP+ MODULE SUPPORT | ||
13714 | M: Russell King <linux@armlinux.org.uk> | ||
13715 | L: netdev@vger.kernel.org | ||
13716 | S: Maintained | ||
13717 | F: drivers/net/phy/phylink.c | ||
13718 | F: drivers/net/phy/sfp* | ||
13719 | F: include/linux/phylink.h | ||
13720 | F: include/linux/sfp.h | ||
13721 | |||
13699 | SGI GRU DRIVER | 13722 | SGI GRU DRIVER |
13700 | M: Dimitri Sivanich <sivanich@sgi.com> | 13723 | M: Dimitri Sivanich <sivanich@sgi.com> |
13701 | S: Maintained | 13724 | S: Maintained |
@@ -16641,6 +16664,15 @@ S: Maintained | |||
16641 | F: drivers/platform/x86/ | 16664 | F: drivers/platform/x86/ |
16642 | F: drivers/platform/olpc/ | 16665 | F: drivers/platform/olpc/ |
16643 | 16666 | ||
16667 | X86 PLATFORM DRIVERS - ARCH | ||
16668 | R: Darren Hart <dvhart@infradead.org> | ||
16669 | R: Andy Shevchenko <andy@infradead.org> | ||
16670 | L: platform-driver-x86@vger.kernel.org | ||
16671 | L: x86@kernel.org | ||
16672 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core | ||
16673 | S: Maintained | ||
16674 | F: arch/x86/platform | ||
16675 | |||
16644 | X86 VDSO | 16676 | X86 VDSO |
16645 | M: Andy Lutomirski <luto@kernel.org> | 16677 | M: Andy Lutomirski <luto@kernel.org> |
16646 | L: linux-kernel@vger.kernel.org | 16678 | L: linux-kernel@vger.kernel.org |
@@ -16673,6 +16705,24 @@ T: git git://linuxtv.org/media_tree.git | |||
16673 | S: Maintained | 16705 | S: Maintained |
16674 | F: drivers/media/tuners/tuner-xc2028.* | 16706 | F: drivers/media/tuners/tuner-xc2028.* |
16675 | 16707 | ||
16708 | XDP (eXpress Data Path) | ||
16709 | M: Alexei Starovoitov <ast@kernel.org> | ||
16710 | M: Daniel Borkmann <daniel@iogearbox.net> | ||
16711 | M: David S. Miller <davem@davemloft.net> | ||
16712 | M: Jakub Kicinski <jakub.kicinski@netronome.com> | ||
16713 | M: Jesper Dangaard Brouer <hawk@kernel.org> | ||
16714 | M: John Fastabend <john.fastabend@gmail.com> | ||
16715 | L: netdev@vger.kernel.org | ||
16716 | L: xdp-newbies@vger.kernel.org | ||
16717 | S: Supported | ||
16718 | F: net/core/xdp.c | ||
16719 | F: include/net/xdp.h | ||
16720 | F: kernel/bpf/devmap.c | ||
16721 | F: kernel/bpf/cpumap.c | ||
16722 | F: include/trace/events/xdp.h | ||
16723 | K: xdp | ||
16724 | N: xdp | ||
16725 | |||
16676 | XDP SOCKETS (AF_XDP) | 16726 | XDP SOCKETS (AF_XDP) |
16677 | M: Björn Töpel <bjorn.topel@intel.com> | 16727 | M: Björn Töpel <bjorn.topel@intel.com> |
16678 | M: Magnus Karlsson <magnus.karlsson@intel.com> | 16728 | M: Magnus Karlsson <magnus.karlsson@intel.com> |
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 5 | 2 | VERSION = 5 |
3 | PATCHLEVEL = 0 | 3 | PATCHLEVEL = 0 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc4 | 5 | EXTRAVERSION = -rc6 |
6 | NAME = Shy Crocodile | 6 | NAME = Shy Crocodile |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
diff --git a/arch/alpha/include/asm/irq.h b/arch/alpha/include/asm/irq.h index 4d17cacd1462..432402c8e47f 100644 --- a/arch/alpha/include/asm/irq.h +++ b/arch/alpha/include/asm/irq.h | |||
@@ -56,15 +56,15 @@ | |||
56 | 56 | ||
57 | #elif defined(CONFIG_ALPHA_DP264) || \ | 57 | #elif defined(CONFIG_ALPHA_DP264) || \ |
58 | defined(CONFIG_ALPHA_LYNX) || \ | 58 | defined(CONFIG_ALPHA_LYNX) || \ |
59 | defined(CONFIG_ALPHA_SHARK) || \ | 59 | defined(CONFIG_ALPHA_SHARK) |
60 | defined(CONFIG_ALPHA_EIGER) | ||
61 | # define NR_IRQS 64 | 60 | # define NR_IRQS 64 |
62 | 61 | ||
63 | #elif defined(CONFIG_ALPHA_TITAN) | 62 | #elif defined(CONFIG_ALPHA_TITAN) |
64 | #define NR_IRQS 80 | 63 | #define NR_IRQS 80 |
65 | 64 | ||
66 | #elif defined(CONFIG_ALPHA_RAWHIDE) || \ | 65 | #elif defined(CONFIG_ALPHA_RAWHIDE) || \ |
67 | defined(CONFIG_ALPHA_TAKARA) | 66 | defined(CONFIG_ALPHA_TAKARA) || \ |
67 | defined(CONFIG_ALPHA_EIGER) | ||
68 | # define NR_IRQS 128 | 68 | # define NR_IRQS 128 |
69 | 69 | ||
70 | #elif defined(CONFIG_ALPHA_WILDFIRE) | 70 | #elif defined(CONFIG_ALPHA_WILDFIRE) |
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index d73dc473fbb9..188fc9256baf 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c | |||
@@ -78,7 +78,7 @@ __load_new_mm_context(struct mm_struct *next_mm) | |||
78 | /* Macro for exception fixup code to access integer registers. */ | 78 | /* Macro for exception fixup code to access integer registers. */ |
79 | #define dpf_reg(r) \ | 79 | #define dpf_reg(r) \ |
80 | (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ | 80 | (((unsigned long *)regs)[(r) <= 8 ? (r) : (r) <= 15 ? (r)-16 : \ |
81 | (r) <= 18 ? (r)+8 : (r)-10]) | 81 | (r) <= 18 ? (r)+10 : (r)-10]) |
82 | 82 | ||
83 | asmlinkage void | 83 | asmlinkage void |
84 | do_page_fault(unsigned long address, unsigned long mmcsr, | 84 | do_page_fault(unsigned long address, unsigned long mmcsr, |
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts index d0fd68873689..5b250060f6dd 100644 --- a/arch/arm/boot/dts/am335x-shc.dts +++ b/arch/arm/boot/dts/am335x-shc.dts | |||
@@ -215,7 +215,7 @@ | |||
215 | pinctrl-names = "default"; | 215 | pinctrl-names = "default"; |
216 | pinctrl-0 = <&mmc1_pins>; | 216 | pinctrl-0 = <&mmc1_pins>; |
217 | bus-width = <0x4>; | 217 | bus-width = <0x4>; |
218 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>; | 218 | cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>; |
219 | cd-inverted; | 219 | cd-inverted; |
220 | max-frequency = <26000000>; | 220 | max-frequency = <26000000>; |
221 | vmmc-supply = <&vmmcsd_fixed>; | 221 | vmmc-supply = <&vmmcsd_fixed>; |
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi index 47aa53ba6b92..559659b399d0 100644 --- a/arch/arm/boot/dts/da850.dtsi +++ b/arch/arm/boot/dts/da850.dtsi | |||
@@ -476,7 +476,7 @@ | |||
476 | clocksource: timer@20000 { | 476 | clocksource: timer@20000 { |
477 | compatible = "ti,da830-timer"; | 477 | compatible = "ti,da830-timer"; |
478 | reg = <0x20000 0x1000>; | 478 | reg = <0x20000 0x1000>; |
479 | interrupts = <12>, <13>; | 479 | interrupts = <21>, <22>; |
480 | interrupt-names = "tint12", "tint34"; | 480 | interrupt-names = "tint12", "tint34"; |
481 | clocks = <&pll0_auxclk>; | 481 | clocks = <&pll0_auxclk>; |
482 | }; | 482 | }; |
diff --git a/arch/arm/boot/dts/imx6q-pistachio.dts b/arch/arm/boot/dts/imx6q-pistachio.dts index 5edf858c8b86..a31b17eaf51c 100644 --- a/arch/arm/boot/dts/imx6q-pistachio.dts +++ b/arch/arm/boot/dts/imx6q-pistachio.dts | |||
@@ -103,7 +103,7 @@ | |||
103 | power { | 103 | power { |
104 | label = "Power Button"; | 104 | label = "Power Button"; |
105 | gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; | 105 | gpios = <&gpio2 12 GPIO_ACTIVE_LOW>; |
106 | gpio-key,wakeup; | 106 | wakeup-source; |
107 | linux,code = <KEY_POWER>; | 107 | linux,code = <KEY_POWER>; |
108 | }; | 108 | }; |
109 | }; | 109 | }; |
diff --git a/arch/arm/boot/dts/imx6sll-evk.dts b/arch/arm/boot/dts/imx6sll-evk.dts index d8163705363e..4a31a415f88e 100644 --- a/arch/arm/boot/dts/imx6sll-evk.dts +++ b/arch/arm/boot/dts/imx6sll-evk.dts | |||
@@ -309,7 +309,7 @@ | |||
309 | pinctrl-2 = <&pinctrl_usdhc3_200mhz>; | 309 | pinctrl-2 = <&pinctrl_usdhc3_200mhz>; |
310 | cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; | 310 | cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>; |
311 | keep-power-in-suspend; | 311 | keep-power-in-suspend; |
312 | enable-sdio-wakeup; | 312 | wakeup-source; |
313 | vmmc-supply = <®_sd3_vmmc>; | 313 | vmmc-supply = <®_sd3_vmmc>; |
314 | status = "okay"; | 314 | status = "okay"; |
315 | }; | 315 | }; |
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 272ff6133ec1..d1375d3650fd 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi | |||
@@ -467,7 +467,7 @@ | |||
467 | }; | 467 | }; |
468 | 468 | ||
469 | gpt: gpt@2098000 { | 469 | gpt: gpt@2098000 { |
470 | compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt"; | 470 | compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt"; |
471 | reg = <0x02098000 0x4000>; | 471 | reg = <0x02098000 0x4000>; |
472 | interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; | 472 | interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>; |
473 | clocks = <&clks IMX6SX_CLK_GPT_BUS>, | 473 | clocks = <&clks IMX6SX_CLK_GPT_BUS>, |
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index e4645f612712..2ab74860d962 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi | |||
@@ -274,7 +274,7 @@ | |||
274 | compatible = "amlogic,meson6-dwmac", "snps,dwmac"; | 274 | compatible = "amlogic,meson6-dwmac", "snps,dwmac"; |
275 | reg = <0xc9410000 0x10000 | 275 | reg = <0xc9410000 0x10000 |
276 | 0xc1108108 0x4>; | 276 | 0xc1108108 0x4>; |
277 | interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>; | 277 | interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>; |
278 | interrupt-names = "macirq"; | 278 | interrupt-names = "macirq"; |
279 | status = "disabled"; | 279 | status = "disabled"; |
280 | }; | 280 | }; |
diff --git a/arch/arm/boot/dts/meson8b-ec100.dts b/arch/arm/boot/dts/meson8b-ec100.dts index 0872f6e3abf5..d50fc2f60fa3 100644 --- a/arch/arm/boot/dts/meson8b-ec100.dts +++ b/arch/arm/boot/dts/meson8b-ec100.dts | |||
@@ -205,8 +205,7 @@ | |||
205 | cap-sd-highspeed; | 205 | cap-sd-highspeed; |
206 | disable-wp; | 206 | disable-wp; |
207 | 207 | ||
208 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 208 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
209 | cd-inverted; | ||
210 | 209 | ||
211 | vmmc-supply = <&vcc_3v3>; | 210 | vmmc-supply = <&vcc_3v3>; |
212 | }; | 211 | }; |
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts index 58669abda259..0f0a46ddf3ff 100644 --- a/arch/arm/boot/dts/meson8b-odroidc1.dts +++ b/arch/arm/boot/dts/meson8b-odroidc1.dts | |||
@@ -221,7 +221,6 @@ | |||
221 | /* Realtek RTL8211F (0x001cc916) */ | 221 | /* Realtek RTL8211F (0x001cc916) */ |
222 | eth_phy: ethernet-phy@0 { | 222 | eth_phy: ethernet-phy@0 { |
223 | reg = <0>; | 223 | reg = <0>; |
224 | eee-broken-1000t; | ||
225 | interrupt-parent = <&gpio_intc>; | 224 | interrupt-parent = <&gpio_intc>; |
226 | /* GPIOH_3 */ | 225 | /* GPIOH_3 */ |
227 | interrupts = <17 IRQ_TYPE_LEVEL_LOW>; | 226 | interrupts = <17 IRQ_TYPE_LEVEL_LOW>; |
@@ -273,8 +272,7 @@ | |||
273 | cap-sd-highspeed; | 272 | cap-sd-highspeed; |
274 | disable-wp; | 273 | disable-wp; |
275 | 274 | ||
276 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 275 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
277 | cd-inverted; | ||
278 | 276 | ||
279 | vmmc-supply = <&tflash_vdd>; | 277 | vmmc-supply = <&tflash_vdd>; |
280 | vqmmc-supply = <&tf_io>; | 278 | vqmmc-supply = <&tf_io>; |
diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts index f5853610b20b..6ac02beb5fa7 100644 --- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts +++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts | |||
@@ -206,8 +206,7 @@ | |||
206 | cap-sd-highspeed; | 206 | cap-sd-highspeed; |
207 | disable-wp; | 207 | disable-wp; |
208 | 208 | ||
209 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 209 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
210 | cd-inverted; | ||
211 | 210 | ||
212 | vmmc-supply = <&vcc_3v3>; | 211 | vmmc-supply = <&vcc_3v3>; |
213 | }; | 212 | }; |
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi index ddc7a7bb33c0..f57acf8f66b9 100644 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi | |||
@@ -105,7 +105,7 @@ | |||
105 | interrupts-extended = < | 105 | interrupts-extended = < |
106 | &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 | 106 | &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0 |
107 | &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 | 107 | &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0 |
108 | &cpcap 48 1 | 108 | &cpcap 48 0 |
109 | >; | 109 | >; |
110 | interrupt-names = | 110 | interrupt-names = |
111 | "id_ground", "id_float", "se0conn", "vbusvld", | 111 | "id_ground", "id_float", "se0conn", "vbusvld", |
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi index e53d32691308..93b420934e8e 100644 --- a/arch/arm/boot/dts/omap3-gta04.dtsi +++ b/arch/arm/boot/dts/omap3-gta04.dtsi | |||
@@ -714,11 +714,7 @@ | |||
714 | 714 | ||
715 | vdda-supply = <&vdac>; | 715 | vdda-supply = <&vdac>; |
716 | 716 | ||
717 | #address-cells = <1>; | ||
718 | #size-cells = <0>; | ||
719 | |||
720 | port { | 717 | port { |
721 | reg = <0>; | ||
722 | venc_out: endpoint { | 718 | venc_out: endpoint { |
723 | remote-endpoint = <&opa_in>; | 719 | remote-endpoint = <&opa_in>; |
724 | ti,channels = <1>; | 720 | ti,channels = <1>; |
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 182a53991c90..826920e6b878 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts | |||
@@ -814,7 +814,7 @@ | |||
814 | /* For debugging, it is often good idea to remove this GPIO. | 814 | /* For debugging, it is often good idea to remove this GPIO. |
815 | It means you can remove back cover (to reboot by removing | 815 | It means you can remove back cover (to reboot by removing |
816 | battery) and still use the MMC card. */ | 816 | battery) and still use the MMC card. */ |
817 | cd-gpios = <&gpio6 0 GPIO_ACTIVE_HIGH>; /* 160 */ | 817 | cd-gpios = <&gpio6 0 GPIO_ACTIVE_LOW>; /* 160 */ |
818 | }; | 818 | }; |
819 | 819 | ||
820 | /* most boards use vaux3, only some old versions use vmmc2 instead */ | 820 | /* most boards use vaux3, only some old versions use vmmc2 instead */ |
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi index 0d9b85317529..e142e6c70a59 100644 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi | |||
@@ -370,6 +370,19 @@ | |||
370 | compatible = "ti,omap2-onenand"; | 370 | compatible = "ti,omap2-onenand"; |
371 | reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ | 371 | reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */ |
372 | 372 | ||
373 | /* | ||
374 | * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported | ||
375 | * bootloader set values when booted with v4.19 using both N950 | ||
376 | * and N9 devices (OneNAND Manufacturer: Samsung): | ||
377 | * | ||
378 | * gpmc cs0 before gpmc_cs_program_settings: | ||
379 | * cs0 GPMC_CS_CONFIG1: 0xfd001202 | ||
380 | * cs0 GPMC_CS_CONFIG2: 0x00181800 | ||
381 | * cs0 GPMC_CS_CONFIG3: 0x00030300 | ||
382 | * cs0 GPMC_CS_CONFIG4: 0x18001804 | ||
383 | * cs0 GPMC_CS_CONFIG5: 0x03171d1d | ||
384 | * cs0 GPMC_CS_CONFIG6: 0x97080000 | ||
385 | */ | ||
373 | gpmc,sync-read; | 386 | gpmc,sync-read; |
374 | gpmc,sync-write; | 387 | gpmc,sync-write; |
375 | gpmc,burst-length = <16>; | 388 | gpmc,burst-length = <16>; |
@@ -379,26 +392,27 @@ | |||
379 | gpmc,device-width = <2>; | 392 | gpmc,device-width = <2>; |
380 | gpmc,mux-add-data = <2>; | 393 | gpmc,mux-add-data = <2>; |
381 | gpmc,cs-on-ns = <0>; | 394 | gpmc,cs-on-ns = <0>; |
382 | gpmc,cs-rd-off-ns = <87>; | 395 | gpmc,cs-rd-off-ns = <122>; |
383 | gpmc,cs-wr-off-ns = <87>; | 396 | gpmc,cs-wr-off-ns = <122>; |
384 | gpmc,adv-on-ns = <0>; | 397 | gpmc,adv-on-ns = <0>; |
385 | gpmc,adv-rd-off-ns = <10>; | 398 | gpmc,adv-rd-off-ns = <15>; |
386 | gpmc,adv-wr-off-ns = <10>; | 399 | gpmc,adv-wr-off-ns = <15>; |
387 | gpmc,oe-on-ns = <15>; | 400 | gpmc,oe-on-ns = <20>; |
388 | gpmc,oe-off-ns = <87>; | 401 | gpmc,oe-off-ns = <122>; |
389 | gpmc,we-on-ns = <0>; | 402 | gpmc,we-on-ns = <0>; |
390 | gpmc,we-off-ns = <87>; | 403 | gpmc,we-off-ns = <122>; |
391 | gpmc,rd-cycle-ns = <112>; | 404 | gpmc,rd-cycle-ns = <148>; |
392 | gpmc,wr-cycle-ns = <112>; | 405 | gpmc,wr-cycle-ns = <148>; |
393 | gpmc,access-ns = <81>; | 406 | gpmc,access-ns = <117>; |
394 | gpmc,page-burst-access-ns = <15>; | 407 | gpmc,page-burst-access-ns = <15>; |
395 | gpmc,bus-turnaround-ns = <0>; | 408 | gpmc,bus-turnaround-ns = <0>; |
396 | gpmc,cycle2cycle-delay-ns = <0>; | 409 | gpmc,cycle2cycle-delay-ns = <0>; |
397 | gpmc,wait-monitoring-ns = <0>; | 410 | gpmc,wait-monitoring-ns = <0>; |
398 | gpmc,clk-activation-ns = <5>; | 411 | gpmc,clk-activation-ns = <10>; |
399 | gpmc,wr-data-mux-bus-ns = <30>; | 412 | gpmc,wr-data-mux-bus-ns = <40>; |
400 | gpmc,wr-access-ns = <81>; | 413 | gpmc,wr-access-ns = <117>; |
401 | gpmc,sync-clk-ps = <15000>; | 414 | |
415 | gpmc,sync-clk-ps = <15000>; /* TBC; Where this value came? */ | ||
402 | 416 | ||
403 | /* | 417 | /* |
404 | * MTD partition table corresponding to Nokia's MeeGo 1.2 | 418 | * MTD partition table corresponding to Nokia's MeeGo 1.2 |
diff --git a/arch/arm/boot/dts/omap5-l4.dtsi b/arch/arm/boot/dts/omap5-l4.dtsi index 9c7e309d9c2c..0960348002ad 100644 --- a/arch/arm/boot/dts/omap5-l4.dtsi +++ b/arch/arm/boot/dts/omap5-l4.dtsi | |||
@@ -1046,8 +1046,6 @@ | |||
1046 | <SYSC_IDLE_SMART>, | 1046 | <SYSC_IDLE_SMART>, |
1047 | <SYSC_IDLE_SMART_WKUP>; | 1047 | <SYSC_IDLE_SMART_WKUP>; |
1048 | ti,syss-mask = <1>; | 1048 | ti,syss-mask = <1>; |
1049 | ti,no-reset-on-init; | ||
1050 | ti,no-idle-on-init; | ||
1051 | /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */ | 1049 | /* Domains (V, P, C): core, core_pwrdm, l4per_clkdm */ |
1052 | clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>; | 1050 | clocks = <&l4per_clkctrl OMAP5_UART3_CLKCTRL 0>; |
1053 | clock-names = "fck"; | 1051 | clock-names = "fck"; |
diff --git a/arch/arm/boot/dts/r8a7743.dtsi b/arch/arm/boot/dts/r8a7743.dtsi index 3cc33f7ff7fe..3adc158a40bb 100644 --- a/arch/arm/boot/dts/r8a7743.dtsi +++ b/arch/arm/boot/dts/r8a7743.dtsi | |||
@@ -1681,15 +1681,12 @@ | |||
1681 | 1681 | ||
1682 | du: display@feb00000 { | 1682 | du: display@feb00000 { |
1683 | compatible = "renesas,du-r8a7743"; | 1683 | compatible = "renesas,du-r8a7743"; |
1684 | reg = <0 0xfeb00000 0 0x40000>, | 1684 | reg = <0 0xfeb00000 0 0x40000>; |
1685 | <0 0xfeb90000 0 0x1c>; | ||
1686 | reg-names = "du", "lvds.0"; | ||
1687 | interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, | 1685 | interrupts = <GIC_SPI 256 IRQ_TYPE_LEVEL_HIGH>, |
1688 | <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; | 1686 | <GIC_SPI 268 IRQ_TYPE_LEVEL_HIGH>; |
1689 | clocks = <&cpg CPG_MOD 724>, | 1687 | clocks = <&cpg CPG_MOD 724>, |
1690 | <&cpg CPG_MOD 723>, | 1688 | <&cpg CPG_MOD 723>; |
1691 | <&cpg CPG_MOD 726>; | 1689 | clock-names = "du.0", "du.1"; |
1692 | clock-names = "du.0", "du.1", "lvds.0"; | ||
1693 | status = "disabled"; | 1690 | status = "disabled"; |
1694 | 1691 | ||
1695 | ports { | 1692 | ports { |
@@ -1704,6 +1701,33 @@ | |||
1704 | port@1 { | 1701 | port@1 { |
1705 | reg = <1>; | 1702 | reg = <1>; |
1706 | du_out_lvds0: endpoint { | 1703 | du_out_lvds0: endpoint { |
1704 | remote-endpoint = <&lvds0_in>; | ||
1705 | }; | ||
1706 | }; | ||
1707 | }; | ||
1708 | }; | ||
1709 | |||
1710 | lvds0: lvds@feb90000 { | ||
1711 | compatible = "renesas,r8a7743-lvds"; | ||
1712 | reg = <0 0xfeb90000 0 0x1c>; | ||
1713 | clocks = <&cpg CPG_MOD 726>; | ||
1714 | power-domains = <&sysc R8A7743_PD_ALWAYS_ON>; | ||
1715 | resets = <&cpg 726>; | ||
1716 | status = "disabled"; | ||
1717 | |||
1718 | ports { | ||
1719 | #address-cells = <1>; | ||
1720 | #size-cells = <0>; | ||
1721 | |||
1722 | port@0 { | ||
1723 | reg = <0>; | ||
1724 | lvds0_in: endpoint { | ||
1725 | remote-endpoint = <&du_out_lvds0>; | ||
1726 | }; | ||
1727 | }; | ||
1728 | port@1 { | ||
1729 | reg = <1>; | ||
1730 | lvds0_out: endpoint { | ||
1707 | }; | 1731 | }; |
1708 | }; | 1732 | }; |
1709 | }; | 1733 | }; |
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 353d90f99b40..13304b8c5139 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi | |||
@@ -216,6 +216,7 @@ | |||
216 | #clock-cells = <0>; | 216 | #clock-cells = <0>; |
217 | compatible = "fixed-clock"; | 217 | compatible = "fixed-clock"; |
218 | clock-frequency = <24000000>; | 218 | clock-frequency = <24000000>; |
219 | clock-output-names = "osc24M"; | ||
219 | }; | 220 | }; |
220 | 221 | ||
221 | osc32k: clk-32k { | 222 | osc32k: clk-32k { |
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts index 5d23667dc2d2..25540b7694d5 100644 --- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts +++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts | |||
@@ -53,7 +53,7 @@ | |||
53 | 53 | ||
54 | aliases { | 54 | aliases { |
55 | serial0 = &uart0; | 55 | serial0 = &uart0; |
56 | /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */ | 56 | ethernet0 = &emac; |
57 | ethernet1 = &sdiowifi; | 57 | ethernet1 = &sdiowifi; |
58 | }; | 58 | }; |
59 | 59 | ||
diff --git a/arch/arm/boot/dts/vf610-bk4.dts b/arch/arm/boot/dts/vf610-bk4.dts index 689c8930dce3..b08d561d6748 100644 --- a/arch/arm/boot/dts/vf610-bk4.dts +++ b/arch/arm/boot/dts/vf610-bk4.dts | |||
@@ -110,11 +110,11 @@ | |||
110 | bus-num = <3>; | 110 | bus-num = <3>; |
111 | status = "okay"; | 111 | status = "okay"; |
112 | spi-slave; | 112 | spi-slave; |
113 | #address-cells = <0>; | ||
113 | 114 | ||
114 | slave@0 { | 115 | slave { |
115 | compatible = "lwn,bk4"; | 116 | compatible = "lwn,bk4"; |
116 | spi-max-frequency = <30000000>; | 117 | spi-max-frequency = <30000000>; |
117 | reg = <0>; | ||
118 | }; | 118 | }; |
119 | }; | 119 | }; |
120 | 120 | ||
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c index 318394ed5c7a..95a11d5b3587 100644 --- a/arch/arm/mach-cns3xxx/pcie.c +++ b/arch/arm/mach-cns3xxx/pcie.c | |||
@@ -83,7 +83,7 @@ static void __iomem *cns3xxx_pci_map_bus(struct pci_bus *bus, | |||
83 | } else /* remote PCI bus */ | 83 | } else /* remote PCI bus */ |
84 | base = cnspci->cfg1_regs + ((busno & 0xf) << 20); | 84 | base = cnspci->cfg1_regs + ((busno & 0xf) << 20); |
85 | 85 | ||
86 | return base + (where & 0xffc) + (devfn << 12); | 86 | return base + where + (devfn << 12); |
87 | } | 87 | } |
88 | 88 | ||
89 | static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, | 89 | static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, |
@@ -93,7 +93,7 @@ static int cns3xxx_pci_read_config(struct pci_bus *bus, unsigned int devfn, | |||
93 | u32 mask = (0x1ull << (size * 8)) - 1; | 93 | u32 mask = (0x1ull << (size * 8)) - 1; |
94 | int shift = (where % 4) * 8; | 94 | int shift = (where % 4) * 8; |
95 | 95 | ||
96 | ret = pci_generic_config_read32(bus, devfn, where, size, val); | 96 | ret = pci_generic_config_read(bus, devfn, where, size, val); |
97 | 97 | ||
98 | if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn && | 98 | if (ret == PCIBIOS_SUCCESSFUL && !bus->number && !devfn && |
99 | (where & 0xffc) == PCI_CLASS_REVISION) | 99 | (where & 0xffc) == PCI_CLASS_REVISION) |
diff --git a/arch/arm/mach-iop32x/n2100.c b/arch/arm/mach-iop32x/n2100.c index 3b73813c6b04..23e8c93515d4 100644 --- a/arch/arm/mach-iop32x/n2100.c +++ b/arch/arm/mach-iop32x/n2100.c | |||
@@ -75,8 +75,7 @@ void __init n2100_map_io(void) | |||
75 | /* | 75 | /* |
76 | * N2100 PCI. | 76 | * N2100 PCI. |
77 | */ | 77 | */ |
78 | static int __init | 78 | static int n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
79 | n2100_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | ||
80 | { | 79 | { |
81 | int irq; | 80 | int irq; |
82 | 81 | ||
diff --git a/arch/arm/mach-tango/pm.c b/arch/arm/mach-tango/pm.c index 028e50c6383f..a32c3b631484 100644 --- a/arch/arm/mach-tango/pm.c +++ b/arch/arm/mach-tango/pm.c | |||
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/suspend.h> | 3 | #include <linux/suspend.h> |
4 | #include <asm/suspend.h> | 4 | #include <asm/suspend.h> |
5 | #include "smc.h" | 5 | #include "smc.h" |
6 | #include "pm.h" | ||
6 | 7 | ||
7 | static int tango_pm_powerdown(unsigned long arg) | 8 | static int tango_pm_powerdown(unsigned long arg) |
8 | { | 9 | { |
@@ -24,10 +25,7 @@ static const struct platform_suspend_ops tango_pm_ops = { | |||
24 | .valid = suspend_valid_only_mem, | 25 | .valid = suspend_valid_only_mem, |
25 | }; | 26 | }; |
26 | 27 | ||
27 | static int __init tango_pm_init(void) | 28 | void __init tango_pm_init(void) |
28 | { | 29 | { |
29 | suspend_set_ops(&tango_pm_ops); | 30 | suspend_set_ops(&tango_pm_ops); |
30 | return 0; | ||
31 | } | 31 | } |
32 | |||
33 | late_initcall(tango_pm_init); | ||
diff --git a/arch/arm/mach-tango/pm.h b/arch/arm/mach-tango/pm.h new file mode 100644 index 000000000000..35ea705a0ee2 --- /dev/null +++ b/arch/arm/mach-tango/pm.h | |||
@@ -0,0 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #ifdef CONFIG_SUSPEND | ||
4 | void __init tango_pm_init(void); | ||
5 | #else | ||
6 | #define tango_pm_init NULL | ||
7 | #endif | ||
diff --git a/arch/arm/mach-tango/setup.c b/arch/arm/mach-tango/setup.c index 677dd7b5efd9..824f90737b04 100644 --- a/arch/arm/mach-tango/setup.c +++ b/arch/arm/mach-tango/setup.c | |||
@@ -2,6 +2,7 @@ | |||
2 | #include <asm/mach/arch.h> | 2 | #include <asm/mach/arch.h> |
3 | #include <asm/hardware/cache-l2x0.h> | 3 | #include <asm/hardware/cache-l2x0.h> |
4 | #include "smc.h" | 4 | #include "smc.h" |
5 | #include "pm.h" | ||
5 | 6 | ||
6 | static void tango_l2c_write(unsigned long val, unsigned int reg) | 7 | static void tango_l2c_write(unsigned long val, unsigned int reg) |
7 | { | 8 | { |
@@ -15,4 +16,5 @@ DT_MACHINE_START(TANGO_DT, "Sigma Tango DT") | |||
15 | .dt_compat = tango_dt_compat, | 16 | .dt_compat = tango_dt_compat, |
16 | .l2c_aux_mask = ~0, | 17 | .l2c_aux_mask = ~0, |
17 | .l2c_write_sec = tango_l2c_write, | 18 | .l2c_write_sec = tango_l2c_write, |
19 | .init_late = tango_pm_init, | ||
18 | MACHINE_END | 20 | MACHINE_END |
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c index ed36dcab80f1..f51919974183 100644 --- a/arch/arm/plat-pxa/ssp.c +++ b/arch/arm/plat-pxa/ssp.c | |||
@@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) | |||
190 | if (ssp == NULL) | 190 | if (ssp == NULL) |
191 | return -ENODEV; | 191 | return -ENODEV; |
192 | 192 | ||
193 | iounmap(ssp->mmio_base); | ||
194 | |||
195 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 193 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
196 | release_mem_region(res->start, resource_size(res)); | 194 | release_mem_region(res->start, resource_size(res)); |
197 | 195 | ||
@@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev) | |||
201 | list_del(&ssp->node); | 199 | list_del(&ssp->node); |
202 | mutex_unlock(&ssp_lock); | 200 | mutex_unlock(&ssp_lock); |
203 | 201 | ||
204 | kfree(ssp); | ||
205 | return 0; | 202 | return 0; |
206 | } | 203 | } |
207 | 204 | ||
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index cb44aa290e73..e1d44b903dfc 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <linux/of_address.h> | 7 | #include <linux/of_address.h> |
8 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
9 | #include <linux/types.h> | 9 | #include <linux/types.h> |
10 | #include <linux/dma-mapping.h> | ||
11 | #include <linux/vmalloc.h> | 10 | #include <linux/vmalloc.h> |
12 | #include <linux/swiotlb.h> | 11 | #include <linux/swiotlb.h> |
13 | 12 | ||
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts index b0c64f75792c..8974b5a1d3b1 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-orangepi-win.dts | |||
@@ -188,6 +188,7 @@ | |||
188 | reg = <0x3a3>; | 188 | reg = <0x3a3>; |
189 | interrupt-parent = <&r_intc>; | 189 | interrupt-parent = <&r_intc>; |
190 | interrupts = <0 IRQ_TYPE_LEVEL_LOW>; | 190 | interrupts = <0 IRQ_TYPE_LEVEL_LOW>; |
191 | x-powers,drive-vbus-en; /* set N_VBUSEN as output pin */ | ||
191 | }; | 192 | }; |
192 | }; | 193 | }; |
193 | 194 | ||
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi index 837a03dee875..2abb335145a6 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64.dtsi | |||
@@ -390,7 +390,7 @@ | |||
390 | }; | 390 | }; |
391 | 391 | ||
392 | video-codec@1c0e000 { | 392 | video-codec@1c0e000 { |
393 | compatible = "allwinner,sun50i-h5-video-engine"; | 393 | compatible = "allwinner,sun50i-a64-video-engine"; |
394 | reg = <0x01c0e000 0x1000>; | 394 | reg = <0x01c0e000 0x1000>; |
395 | clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, | 395 | clocks = <&ccu CLK_BUS_VE>, <&ccu CLK_VE>, |
396 | <&ccu CLK_DRAM_VE>; | 396 | <&ccu CLK_DRAM_VE>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi index e14e0ce7e89f..016641a41694 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gx-p23x-q20x.dtsi | |||
@@ -187,8 +187,7 @@ | |||
187 | max-frequency = <100000000>; | 187 | max-frequency = <100000000>; |
188 | disable-wp; | 188 | disable-wp; |
189 | 189 | ||
190 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 190 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
191 | cd-inverted; | ||
192 | 191 | ||
193 | vmmc-supply = <&vddao_3v3>; | 192 | vmmc-supply = <&vddao_3v3>; |
194 | vqmmc-supply = <&vddio_boot>; | 193 | vqmmc-supply = <&vddio_boot>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts index 8cd50b75171d..ade2ee09ae96 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nanopi-k2.dts | |||
@@ -305,8 +305,7 @@ | |||
305 | max-frequency = <200000000>; | 305 | max-frequency = <200000000>; |
306 | disable-wp; | 306 | disable-wp; |
307 | 307 | ||
308 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 308 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
309 | cd-inverted; | ||
310 | 309 | ||
311 | vmmc-supply = <&vddio_ao3v3>; | 310 | vmmc-supply = <&vddio_ao3v3>; |
312 | vqmmc-supply = <&vddio_tf>; | 311 | vqmmc-supply = <&vddio_tf>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts index 4cf7f6e80c6a..25105ac96d55 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-nexbox-a95x.dts | |||
@@ -238,8 +238,7 @@ | |||
238 | max-frequency = <100000000>; | 238 | max-frequency = <100000000>; |
239 | disable-wp; | 239 | disable-wp; |
240 | 240 | ||
241 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 241 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
242 | cd-inverted; | ||
243 | 242 | ||
244 | vmmc-supply = <&vddao_3v3>; | 243 | vmmc-supply = <&vddao_3v3>; |
245 | vqmmc-supply = <&vddio_card>; | 244 | vqmmc-supply = <&vddio_card>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts index 2e1cd5e3a246..1cc9dc68ef00 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts | |||
@@ -258,8 +258,7 @@ | |||
258 | max-frequency = <100000000>; | 258 | max-frequency = <100000000>; |
259 | disable-wp; | 259 | disable-wp; |
260 | 260 | ||
261 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 261 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
262 | cd-inverted; | ||
263 | 262 | ||
264 | vmmc-supply = <&tflash_vdd>; | 263 | vmmc-supply = <&tflash_vdd>; |
265 | vqmmc-supply = <&tf_io>; | 264 | vqmmc-supply = <&tf_io>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi index ce862266b9aa..0be0f2a5d2fe 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-p20x.dtsi | |||
@@ -196,8 +196,7 @@ | |||
196 | max-frequency = <100000000>; | 196 | max-frequency = <100000000>; |
197 | disable-wp; | 197 | disable-wp; |
198 | 198 | ||
199 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 199 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
200 | cd-inverted; | ||
201 | 200 | ||
202 | vmmc-supply = <&vddao_3v3>; | 201 | vmmc-supply = <&vddao_3v3>; |
203 | vqmmc-supply = <&vddio_card>; | 202 | vqmmc-supply = <&vddio_card>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi index 93a4acf2c46c..ad4d50bd9d77 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-vega-s95.dtsi | |||
@@ -154,8 +154,7 @@ | |||
154 | max-frequency = <100000000>; | 154 | max-frequency = <100000000>; |
155 | disable-wp; | 155 | disable-wp; |
156 | 156 | ||
157 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 157 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
158 | cd-inverted; | ||
159 | 158 | ||
160 | vmmc-supply = <&vcc_3v3>; | 159 | vmmc-supply = <&vcc_3v3>; |
161 | }; | 160 | }; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi index ec09bb5792b7..2d2db783c44c 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-wetek.dtsi | |||
@@ -211,8 +211,7 @@ | |||
211 | max-frequency = <100000000>; | 211 | max-frequency = <100000000>; |
212 | disable-wp; | 212 | disable-wp; |
213 | 213 | ||
214 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 214 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
215 | cd-inverted; | ||
216 | 215 | ||
217 | vmmc-supply = <&vddao_3v3>; | 216 | vmmc-supply = <&vddao_3v3>; |
218 | vqmmc-supply = <&vcc_3v3>; | 217 | vqmmc-supply = <&vcc_3v3>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts index f1c410e2da2b..796baea7a0bf 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-hwacom-amazetv.dts | |||
@@ -131,8 +131,7 @@ | |||
131 | max-frequency = <100000000>; | 131 | max-frequency = <100000000>; |
132 | disable-wp; | 132 | disable-wp; |
133 | 133 | ||
134 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 134 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
135 | cd-inverted; | ||
136 | 135 | ||
137 | vmmc-supply = <&vddao_3v3>; | 136 | vmmc-supply = <&vddao_3v3>; |
138 | vqmmc-supply = <&vddio_card>; | 137 | vqmmc-supply = <&vddio_card>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts index db293440e4ca..255cede7b447 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts | |||
@@ -238,8 +238,7 @@ | |||
238 | max-frequency = <100000000>; | 238 | max-frequency = <100000000>; |
239 | disable-wp; | 239 | disable-wp; |
240 | 240 | ||
241 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 241 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
242 | cd-inverted; | ||
243 | 242 | ||
244 | vmmc-supply = <&vcc_3v3>; | 243 | vmmc-supply = <&vcc_3v3>; |
245 | vqmmc-supply = <&vcc_card>; | 244 | vqmmc-supply = <&vcc_card>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts index 6739697be1de..9cbdb85fb591 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-nexbox-a95x.dts | |||
@@ -183,8 +183,7 @@ | |||
183 | max-frequency = <100000000>; | 183 | max-frequency = <100000000>; |
184 | disable-wp; | 184 | disable-wp; |
185 | 185 | ||
186 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 186 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
187 | cd-inverted; | ||
188 | 187 | ||
189 | vmmc-supply = <&vddao_3v3>; | 188 | vmmc-supply = <&vddao_3v3>; |
190 | vqmmc-supply = <&vddio_card>; | 189 | vqmmc-supply = <&vddio_card>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi index a1b31013ab6e..bc811a2faf42 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi +++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi | |||
@@ -137,8 +137,7 @@ | |||
137 | max-frequency = <100000000>; | 137 | max-frequency = <100000000>; |
138 | disable-wp; | 138 | disable-wp; |
139 | 139 | ||
140 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 140 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
141 | cd-inverted; | ||
142 | 141 | ||
143 | vmmc-supply = <&vddao_3v3>; | 142 | vmmc-supply = <&vddao_3v3>; |
144 | vqmmc-supply = <&vddio_boot>; | 143 | vqmmc-supply = <&vddio_boot>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts index 3c3a667a8df8..3f086ed7de05 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts | |||
@@ -356,8 +356,7 @@ | |||
356 | max-frequency = <100000000>; | 356 | max-frequency = <100000000>; |
357 | disable-wp; | 357 | disable-wp; |
358 | 358 | ||
359 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 359 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
360 | cd-inverted; | ||
361 | 360 | ||
362 | vmmc-supply = <&vddao_3v3>; | 361 | vmmc-supply = <&vddao_3v3>; |
363 | vqmmc-supply = <&vddio_boot>; | 362 | vqmmc-supply = <&vddio_boot>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts index f7a1cffab4a8..8acfd40090d2 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-nexbox-a1.dts | |||
@@ -147,8 +147,7 @@ | |||
147 | max-frequency = <100000000>; | 147 | max-frequency = <100000000>; |
148 | disable-wp; | 148 | disable-wp; |
149 | 149 | ||
150 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 150 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
151 | cd-inverted; | ||
152 | 151 | ||
153 | vmmc-supply = <&vddao_3v3>; | 152 | vmmc-supply = <&vddao_3v3>; |
154 | vqmmc-supply = <&vddio_boot>; | 153 | vqmmc-supply = <&vddio_boot>; |
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts index 7212dc4531e4..7fa20a8ede17 100644 --- a/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts +++ b/arch/arm64/boot/dts/amlogic/meson-gxm-rbox-pro.dts | |||
@@ -170,8 +170,7 @@ | |||
170 | max-frequency = <100000000>; | 170 | max-frequency = <100000000>; |
171 | disable-wp; | 171 | disable-wp; |
172 | 172 | ||
173 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>; | 173 | cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>; |
174 | cd-inverted; | ||
175 | 174 | ||
176 | vmmc-supply = <&vddao_3v3>; | 175 | vmmc-supply = <&vddao_3v3>; |
177 | vqmmc-supply = <&vddio_boot>; | 176 | vqmmc-supply = <&vddio_boot>; |
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi index 99b7495455a6..838e32cc14c9 100644 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi | |||
@@ -404,7 +404,7 @@ | |||
404 | }; | 404 | }; |
405 | 405 | ||
406 | intc: interrupt-controller@9bc0000 { | 406 | intc: interrupt-controller@9bc0000 { |
407 | compatible = "arm,gic-v3"; | 407 | compatible = "qcom,msm8996-gic-v3", "arm,gic-v3"; |
408 | #interrupt-cells = <3>; | 408 | #interrupt-cells = <3>; |
409 | interrupt-controller; | 409 | interrupt-controller; |
410 | #redistributor-regions = <1>; | 410 | #redistributor-regions = <1>; |
diff --git a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi index 20745a8528c5..719ed9d9067d 100644 --- a/arch/arm64/boot/dts/renesas/r8a774a1.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a774a1.dtsi | |||
@@ -1011,6 +1011,9 @@ | |||
1011 | <&cpg CPG_CORE R8A774A1_CLK_S3D1>, | 1011 | <&cpg CPG_CORE R8A774A1_CLK_S3D1>, |
1012 | <&scif_clk>; | 1012 | <&scif_clk>; |
1013 | clock-names = "fck", "brg_int", "scif_clk"; | 1013 | clock-names = "fck", "brg_int", "scif_clk"; |
1014 | dmas = <&dmac1 0x13>, <&dmac1 0x12>, | ||
1015 | <&dmac2 0x13>, <&dmac2 0x12>; | ||
1016 | dma-names = "tx", "rx", "tx", "rx"; | ||
1014 | power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; | 1017 | power-domains = <&sysc R8A774A1_PD_ALWAYS_ON>; |
1015 | resets = <&cpg 310>; | 1018 | resets = <&cpg 310>; |
1016 | status = "disabled"; | 1019 | status = "disabled"; |
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi index afedbf5728ec..0648d12778ed 100644 --- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi | |||
@@ -1262,6 +1262,9 @@ | |||
1262 | <&cpg CPG_CORE R8A7796_CLK_S3D1>, | 1262 | <&cpg CPG_CORE R8A7796_CLK_S3D1>, |
1263 | <&scif_clk>; | 1263 | <&scif_clk>; |
1264 | clock-names = "fck", "brg_int", "scif_clk"; | 1264 | clock-names = "fck", "brg_int", "scif_clk"; |
1265 | dmas = <&dmac1 0x13>, <&dmac1 0x12>, | ||
1266 | <&dmac2 0x13>, <&dmac2 0x12>; | ||
1267 | dma-names = "tx", "rx", "tx", "rx"; | ||
1265 | power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; | 1268 | power-domains = <&sysc R8A7796_PD_ALWAYS_ON>; |
1266 | resets = <&cpg 310>; | 1269 | resets = <&cpg 310>; |
1267 | status = "disabled"; | 1270 | status = "disabled"; |
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi index 6dc9b1fef830..4b3730f640ef 100644 --- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi | |||
@@ -1068,6 +1068,9 @@ | |||
1068 | <&cpg CPG_CORE R8A77965_CLK_S3D1>, | 1068 | <&cpg CPG_CORE R8A77965_CLK_S3D1>, |
1069 | <&scif_clk>; | 1069 | <&scif_clk>; |
1070 | clock-names = "fck", "brg_int", "scif_clk"; | 1070 | clock-names = "fck", "brg_int", "scif_clk"; |
1071 | dmas = <&dmac1 0x13>, <&dmac1 0x12>, | ||
1072 | <&dmac2 0x13>, <&dmac2 0x12>; | ||
1073 | dma-names = "tx", "rx", "tx", "rx"; | ||
1071 | power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; | 1074 | power-domains = <&sysc R8A77965_PD_ALWAYS_ON>; |
1072 | resets = <&cpg 310>; | 1075 | resets = <&cpg 310>; |
1073 | status = "disabled"; | 1076 | status = "disabled"; |
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 29cdc99688f3..9859e1178e6b 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c | |||
@@ -299,8 +299,10 @@ int swsusp_arch_suspend(void) | |||
299 | dcache_clean_range(__idmap_text_start, __idmap_text_end); | 299 | dcache_clean_range(__idmap_text_start, __idmap_text_end); |
300 | 300 | ||
301 | /* Clean kvm setup code to PoC? */ | 301 | /* Clean kvm setup code to PoC? */ |
302 | if (el2_reset_needed()) | 302 | if (el2_reset_needed()) { |
303 | dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); | 303 | dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); |
304 | dcache_clean_range(__hyp_text_start, __hyp_text_end); | ||
305 | } | ||
304 | 306 | ||
305 | /* make the crash dump kernel image protected again */ | 307 | /* make the crash dump kernel image protected again */ |
306 | crash_post_resume(); | 308 | crash_post_resume(); |
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index e1261fbaa374..17f325ba831e 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <asm/virt.h> | 28 | #include <asm/virt.h> |
29 | 29 | ||
30 | .text | 30 | .text |
31 | .pushsection .hyp.text, "ax" | ||
32 | |||
31 | .align 11 | 33 | .align 11 |
32 | 34 | ||
33 | ENTRY(__hyp_stub_vectors) | 35 | ENTRY(__hyp_stub_vectors) |
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index ba6b41790fcd..b09b6f75f759 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c | |||
@@ -88,6 +88,7 @@ u64 __init kaslr_early_init(u64 dt_phys) | |||
88 | * we end up running with module randomization disabled. | 88 | * we end up running with module randomization disabled. |
89 | */ | 89 | */ |
90 | module_alloc_base = (u64)_etext - MODULES_VSIZE; | 90 | module_alloc_base = (u64)_etext - MODULES_VSIZE; |
91 | __flush_dcache_area(&module_alloc_base, sizeof(module_alloc_base)); | ||
91 | 92 | ||
92 | /* | 93 | /* |
93 | * Try to map the FDT early. If this fails, we simply bail, | 94 | * Try to map the FDT early. If this fails, we simply bail, |
diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index f2c211a6229b..58871333737a 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c | |||
@@ -120,10 +120,12 @@ static int create_dtb(struct kimage *image, | |||
120 | { | 120 | { |
121 | void *buf; | 121 | void *buf; |
122 | size_t buf_size; | 122 | size_t buf_size; |
123 | size_t cmdline_len; | ||
123 | int ret; | 124 | int ret; |
124 | 125 | ||
126 | cmdline_len = cmdline ? strlen(cmdline) : 0; | ||
125 | buf_size = fdt_totalsize(initial_boot_params) | 127 | buf_size = fdt_totalsize(initial_boot_params) |
126 | + strlen(cmdline) + DTB_EXTRA_SPACE; | 128 | + cmdline_len + DTB_EXTRA_SPACE; |
127 | 129 | ||
128 | for (;;) { | 130 | for (;;) { |
129 | buf = vmalloc(buf_size); | 131 | buf = vmalloc(buf_size); |
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 2a5b338b2542..f17afb99890c 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c | |||
@@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr) | |||
478 | addr < (unsigned long)__entry_text_end) || | 478 | addr < (unsigned long)__entry_text_end) || |
479 | (addr >= (unsigned long)__idmap_text_start && | 479 | (addr >= (unsigned long)__idmap_text_start && |
480 | addr < (unsigned long)__idmap_text_end) || | 480 | addr < (unsigned long)__idmap_text_end) || |
481 | (addr >= (unsigned long)__hyp_text_start && | ||
482 | addr < (unsigned long)__hyp_text_end) || | ||
481 | !!search_exception_tables(addr)) | 483 | !!search_exception_tables(addr)) |
482 | return true; | 484 | return true; |
483 | 485 | ||
484 | if (!is_kernel_in_hyp_mode()) { | 486 | if (!is_kernel_in_hyp_mode()) { |
485 | if ((addr >= (unsigned long)__hyp_text_start && | 487 | if ((addr >= (unsigned long)__hyp_idmap_text_start && |
486 | addr < (unsigned long)__hyp_text_end) || | ||
487 | (addr >= (unsigned long)__hyp_idmap_text_start && | ||
488 | addr < (unsigned long)__hyp_idmap_text_end)) | 488 | addr < (unsigned long)__hyp_idmap_text_end)) |
489 | return true; | 489 | return true; |
490 | } | 490 | } |
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index fcb1f2a6d7c6..99bb8facb5cb 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c | |||
@@ -286,74 +286,73 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, | |||
286 | 286 | ||
287 | } | 287 | } |
288 | 288 | ||
289 | static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start) | 289 | static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start, |
290 | unsigned long end) | ||
290 | { | 291 | { |
291 | pte_t *ptep = pte_offset_kernel(pmdp, 0UL); | 292 | unsigned long addr = start; |
292 | unsigned long addr; | 293 | pte_t *ptep = pte_offset_kernel(pmdp, start); |
293 | unsigned i; | ||
294 | 294 | ||
295 | for (i = 0; i < PTRS_PER_PTE; i++, ptep++) { | 295 | do { |
296 | addr = start + i * PAGE_SIZE; | ||
297 | note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); | 296 | note_page(st, addr, 4, READ_ONCE(pte_val(*ptep))); |
298 | } | 297 | } while (ptep++, addr += PAGE_SIZE, addr != end); |
299 | } | 298 | } |
300 | 299 | ||
301 | static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start) | 300 | static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start, |
301 | unsigned long end) | ||
302 | { | 302 | { |
303 | pmd_t *pmdp = pmd_offset(pudp, 0UL); | 303 | unsigned long next, addr = start; |
304 | unsigned long addr; | 304 | pmd_t *pmdp = pmd_offset(pudp, start); |
305 | unsigned i; | ||
306 | 305 | ||
307 | for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) { | 306 | do { |
308 | pmd_t pmd = READ_ONCE(*pmdp); | 307 | pmd_t pmd = READ_ONCE(*pmdp); |
308 | next = pmd_addr_end(addr, end); | ||
309 | 309 | ||
310 | addr = start + i * PMD_SIZE; | ||
311 | if (pmd_none(pmd) || pmd_sect(pmd)) { | 310 | if (pmd_none(pmd) || pmd_sect(pmd)) { |
312 | note_page(st, addr, 3, pmd_val(pmd)); | 311 | note_page(st, addr, 3, pmd_val(pmd)); |
313 | } else { | 312 | } else { |
314 | BUG_ON(pmd_bad(pmd)); | 313 | BUG_ON(pmd_bad(pmd)); |
315 | walk_pte(st, pmdp, addr); | 314 | walk_pte(st, pmdp, addr, next); |
316 | } | 315 | } |
317 | } | 316 | } while (pmdp++, addr = next, addr != end); |
318 | } | 317 | } |
319 | 318 | ||
320 | static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start) | 319 | static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start, |
320 | unsigned long end) | ||
321 | { | 321 | { |
322 | pud_t *pudp = pud_offset(pgdp, 0UL); | 322 | unsigned long next, addr = start; |
323 | unsigned long addr; | 323 | pud_t *pudp = pud_offset(pgdp, start); |
324 | unsigned i; | ||
325 | 324 | ||
326 | for (i = 0; i < PTRS_PER_PUD; i++, pudp++) { | 325 | do { |
327 | pud_t pud = READ_ONCE(*pudp); | 326 | pud_t pud = READ_ONCE(*pudp); |
327 | next = pud_addr_end(addr, end); | ||
328 | 328 | ||
329 | addr = start + i * PUD_SIZE; | ||
330 | if (pud_none(pud) || pud_sect(pud)) { | 329 | if (pud_none(pud) || pud_sect(pud)) { |
331 | note_page(st, addr, 2, pud_val(pud)); | 330 | note_page(st, addr, 2, pud_val(pud)); |
332 | } else { | 331 | } else { |
333 | BUG_ON(pud_bad(pud)); | 332 | BUG_ON(pud_bad(pud)); |
334 | walk_pmd(st, pudp, addr); | 333 | walk_pmd(st, pudp, addr, next); |
335 | } | 334 | } |
336 | } | 335 | } while (pudp++, addr = next, addr != end); |
337 | } | 336 | } |
338 | 337 | ||
339 | static void walk_pgd(struct pg_state *st, struct mm_struct *mm, | 338 | static void walk_pgd(struct pg_state *st, struct mm_struct *mm, |
340 | unsigned long start) | 339 | unsigned long start) |
341 | { | 340 | { |
342 | pgd_t *pgdp = pgd_offset(mm, 0UL); | 341 | unsigned long end = (start < TASK_SIZE_64) ? TASK_SIZE_64 : 0; |
343 | unsigned i; | 342 | unsigned long next, addr = start; |
344 | unsigned long addr; | 343 | pgd_t *pgdp = pgd_offset(mm, start); |
345 | 344 | ||
346 | for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) { | 345 | do { |
347 | pgd_t pgd = READ_ONCE(*pgdp); | 346 | pgd_t pgd = READ_ONCE(*pgdp); |
347 | next = pgd_addr_end(addr, end); | ||
348 | 348 | ||
349 | addr = start + i * PGDIR_SIZE; | ||
350 | if (pgd_none(pgd)) { | 349 | if (pgd_none(pgd)) { |
351 | note_page(st, addr, 1, pgd_val(pgd)); | 350 | note_page(st, addr, 1, pgd_val(pgd)); |
352 | } else { | 351 | } else { |
353 | BUG_ON(pgd_bad(pgd)); | 352 | BUG_ON(pgd_bad(pgd)); |
354 | walk_pud(st, pgdp, addr); | 353 | walk_pud(st, pgdp, addr, next); |
355 | } | 354 | } |
356 | } | 355 | } while (pgdp++, addr = next, addr != end); |
357 | } | 356 | } |
358 | 357 | ||
359 | void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) | 358 | void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info) |
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c index 30695a868107..5c9073bace83 100644 --- a/arch/arm64/mm/flush.c +++ b/arch/arm64/mm/flush.c | |||
@@ -33,7 +33,11 @@ void sync_icache_aliases(void *kaddr, unsigned long len) | |||
33 | __clean_dcache_area_pou(kaddr, len); | 33 | __clean_dcache_area_pou(kaddr, len); |
34 | __flush_icache_all(); | 34 | __flush_icache_all(); |
35 | } else { | 35 | } else { |
36 | flush_icache_range(addr, addr + len); | 36 | /* |
37 | * Don't issue kick_all_cpus_sync() after I-cache invalidation | ||
38 | * for user mappings. | ||
39 | */ | ||
40 | __flush_icache_range(addr, addr + len); | ||
37 | } | 41 | } |
38 | } | 42 | } |
39 | 43 | ||
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index 33a2c94fed0d..63b4a1705182 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild | |||
@@ -30,6 +30,7 @@ generic-y += pgalloc.h | |||
30 | generic-y += preempt.h | 30 | generic-y += preempt.h |
31 | generic-y += segment.h | 31 | generic-y += segment.h |
32 | generic-y += serial.h | 32 | generic-y += serial.h |
33 | generic-y += shmparam.h | ||
33 | generic-y += tlbflush.h | 34 | generic-y += tlbflush.h |
34 | generic-y += topology.h | 35 | generic-y += topology.h |
35 | generic-y += trace_clock.h | 36 | generic-y += trace_clock.h |
diff --git a/arch/c6x/include/uapi/asm/Kbuild b/arch/c6x/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/c6x/include/uapi/asm/Kbuild +++ b/arch/c6x/include/uapi/asm/Kbuild | |||
@@ -1,5 +1,4 @@ | |||
1 | include include/uapi/asm-generic/Kbuild.asm | 1 | include include/uapi/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | generic-y += kvm_para.h | 3 | generic-y += kvm_para.h |
4 | generic-y += shmparam.h | ||
5 | generic-y += ucontext.h | 4 | generic-y += ucontext.h |
diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index cd400d353d18..961c1dc064e1 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild | |||
@@ -40,6 +40,7 @@ generic-y += preempt.h | |||
40 | generic-y += scatterlist.h | 40 | generic-y += scatterlist.h |
41 | generic-y += sections.h | 41 | generic-y += sections.h |
42 | generic-y += serial.h | 42 | generic-y += serial.h |
43 | generic-y += shmparam.h | ||
43 | generic-y += sizes.h | 44 | generic-y += sizes.h |
44 | generic-y += spinlock.h | 45 | generic-y += spinlock.h |
45 | generic-y += timex.h | 46 | generic-y += timex.h |
diff --git a/arch/h8300/include/uapi/asm/Kbuild b/arch/h8300/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/h8300/include/uapi/asm/Kbuild +++ b/arch/h8300/include/uapi/asm/Kbuild | |||
@@ -1,5 +1,4 @@ | |||
1 | include include/uapi/asm-generic/Kbuild.asm | 1 | include include/uapi/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | generic-y += kvm_para.h | 3 | generic-y += kvm_para.h |
4 | generic-y += shmparam.h | ||
5 | generic-y += ucontext.h | 4 | generic-y += ucontext.h |
diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 47c4da3d64a4..b25fd42aa0f4 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild | |||
@@ -30,6 +30,7 @@ generic-y += rwsem.h | |||
30 | generic-y += sections.h | 30 | generic-y += sections.h |
31 | generic-y += segment.h | 31 | generic-y += segment.h |
32 | generic-y += serial.h | 32 | generic-y += serial.h |
33 | generic-y += shmparam.h | ||
33 | generic-y += sizes.h | 34 | generic-y += sizes.h |
34 | generic-y += topology.h | 35 | generic-y += topology.h |
35 | generic-y += trace_clock.h | 36 | generic-y += trace_clock.h |
diff --git a/arch/hexagon/include/uapi/asm/Kbuild b/arch/hexagon/include/uapi/asm/Kbuild index 61d955c1747a..c1b06dcf6cf8 100644 --- a/arch/hexagon/include/uapi/asm/Kbuild +++ b/arch/hexagon/include/uapi/asm/Kbuild | |||
@@ -1,4 +1,3 @@ | |||
1 | include include/uapi/asm-generic/Kbuild.asm | 1 | include include/uapi/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | generic-y += shmparam.h | ||
4 | generic-y += ucontext.h | 3 | generic-y += ucontext.h |
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 9f1dd26903e3..95f8f631c4df 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild | |||
@@ -20,6 +20,7 @@ generic-y += mm-arch-hooks.h | |||
20 | generic-y += percpu.h | 20 | generic-y += percpu.h |
21 | generic-y += preempt.h | 21 | generic-y += preempt.h |
22 | generic-y += sections.h | 22 | generic-y += sections.h |
23 | generic-y += shmparam.h | ||
23 | generic-y += spinlock.h | 24 | generic-y += spinlock.h |
24 | generic-y += topology.h | 25 | generic-y += topology.h |
25 | generic-y += trace_clock.h | 26 | generic-y += trace_clock.h |
diff --git a/arch/m68k/include/uapi/asm/Kbuild b/arch/m68k/include/uapi/asm/Kbuild index b8b3525271fa..960bf1e4be53 100644 --- a/arch/m68k/include/uapi/asm/Kbuild +++ b/arch/m68k/include/uapi/asm/Kbuild | |||
@@ -2,4 +2,3 @@ include include/uapi/asm-generic/Kbuild.asm | |||
2 | 2 | ||
3 | generated-y += unistd_32.h | 3 | generated-y += unistd_32.h |
4 | generic-y += kvm_para.h | 4 | generic-y += kvm_para.h |
5 | generic-y += shmparam.h | ||
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 9c7d1d25bf3d..791cc8d54d0a 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild | |||
@@ -26,6 +26,7 @@ generic-y += parport.h | |||
26 | generic-y += percpu.h | 26 | generic-y += percpu.h |
27 | generic-y += preempt.h | 27 | generic-y += preempt.h |
28 | generic-y += serial.h | 28 | generic-y += serial.h |
29 | generic-y += shmparam.h | ||
29 | generic-y += syscalls.h | 30 | generic-y += syscalls.h |
30 | generic-y += topology.h | 31 | generic-y += topology.h |
31 | generic-y += trace_clock.h | 32 | generic-y += trace_clock.h |
diff --git a/arch/microblaze/include/uapi/asm/Kbuild b/arch/microblaze/include/uapi/asm/Kbuild index 28823e3db825..97823ec46e97 100644 --- a/arch/microblaze/include/uapi/asm/Kbuild +++ b/arch/microblaze/include/uapi/asm/Kbuild | |||
@@ -2,5 +2,4 @@ include include/uapi/asm-generic/Kbuild.asm | |||
2 | 2 | ||
3 | generated-y += unistd_32.h | 3 | generated-y += unistd_32.h |
4 | generic-y += kvm_para.h | 4 | generic-y += kvm_para.h |
5 | generic-y += shmparam.h | ||
6 | generic-y += ucontext.h | 5 | generic-y += ucontext.h |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 0d14f51d0002..a84c24d894aa 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -1403,6 +1403,21 @@ config LOONGSON3_ENHANCEMENT | |||
1403 | please say 'N' here. If you want a high-performance kernel to run on | 1403 | please say 'N' here. If you want a high-performance kernel to run on |
1404 | new Loongson 3 machines only, please say 'Y' here. | 1404 | new Loongson 3 machines only, please say 'Y' here. |
1405 | 1405 | ||
1406 | config CPU_LOONGSON3_WORKAROUNDS | ||
1407 | bool "Old Loongson 3 LLSC Workarounds" | ||
1408 | default y if SMP | ||
1409 | depends on CPU_LOONGSON3 | ||
1410 | help | ||
1411 | Loongson 3 processors have the llsc issues which require workarounds. | ||
1412 | Without workarounds the system may hang unexpectedly. | ||
1413 | |||
1414 | Newer Loongson 3 will fix these issues and no workarounds are needed. | ||
1415 | The workarounds have no significant side effect on them but may | ||
1416 | decrease the performance of the system so this option should be | ||
1417 | disabled unless the kernel is intended to be run on old systems. | ||
1418 | |||
1419 | If unsure, please say Y. | ||
1420 | |||
1406 | config CPU_LOONGSON2E | 1421 | config CPU_LOONGSON2E |
1407 | bool "Loongson 2E" | 1422 | bool "Loongson 2E" |
1408 | depends on SYS_HAS_CPU_LOONGSON2E | 1423 | depends on SYS_HAS_CPU_LOONGSON2E |
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts index 50cff3cbcc6d..4f7b1fa31cf5 100644 --- a/arch/mips/boot/dts/ingenic/ci20.dts +++ b/arch/mips/boot/dts/ingenic/ci20.dts | |||
@@ -76,7 +76,7 @@ | |||
76 | status = "okay"; | 76 | status = "okay"; |
77 | 77 | ||
78 | pinctrl-names = "default"; | 78 | pinctrl-names = "default"; |
79 | pinctrl-0 = <&pins_uart2>; | 79 | pinctrl-0 = <&pins_uart3>; |
80 | }; | 80 | }; |
81 | 81 | ||
82 | &uart4 { | 82 | &uart4 { |
@@ -196,9 +196,9 @@ | |||
196 | bias-disable; | 196 | bias-disable; |
197 | }; | 197 | }; |
198 | 198 | ||
199 | pins_uart2: uart2 { | 199 | pins_uart3: uart3 { |
200 | function = "uart2"; | 200 | function = "uart3"; |
201 | groups = "uart2-data", "uart2-hwflow"; | 201 | groups = "uart3-data", "uart3-hwflow"; |
202 | bias-disable; | 202 | bias-disable; |
203 | }; | 203 | }; |
204 | 204 | ||
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi index 6fb16fd24035..2beb78a62b7d 100644 --- a/arch/mips/boot/dts/ingenic/jz4740.dtsi +++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi | |||
@@ -161,7 +161,7 @@ | |||
161 | #dma-cells = <2>; | 161 | #dma-cells = <2>; |
162 | 162 | ||
163 | interrupt-parent = <&intc>; | 163 | interrupt-parent = <&intc>; |
164 | interrupts = <29>; | 164 | interrupts = <20>; |
165 | 165 | ||
166 | clocks = <&cgu JZ4740_CLK_DMA>; | 166 | clocks = <&cgu JZ4740_CLK_DMA>; |
167 | 167 | ||
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts index 2152b7ba65fb..cc8dbea0911f 100644 --- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts +++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts | |||
@@ -90,11 +90,11 @@ | |||
90 | interrupts = <0>; | 90 | interrupts = <0>; |
91 | }; | 91 | }; |
92 | 92 | ||
93 | axi_i2c: i2c@10A00000 { | 93 | axi_i2c: i2c@10a00000 { |
94 | compatible = "xlnx,xps-iic-2.00.a"; | 94 | compatible = "xlnx,xps-iic-2.00.a"; |
95 | interrupt-parent = <&axi_intc>; | 95 | interrupt-parent = <&axi_intc>; |
96 | interrupts = <4>; | 96 | interrupts = <4>; |
97 | reg = < 0x10A00000 0x10000 >; | 97 | reg = < 0x10a00000 0x10000 >; |
98 | clocks = <&ext>; | 98 | clocks = <&ext>; |
99 | xlnx,clk-freq = <0x5f5e100>; | 99 | xlnx,clk-freq = <0x5f5e100>; |
100 | xlnx,family = "Artix7"; | 100 | xlnx,family = "Artix7"; |
@@ -106,9 +106,9 @@ | |||
106 | #address-cells = <1>; | 106 | #address-cells = <1>; |
107 | #size-cells = <0>; | 107 | #size-cells = <0>; |
108 | 108 | ||
109 | ad7420@4B { | 109 | ad7420@4b { |
110 | compatible = "adi,adt7420"; | 110 | compatible = "adi,adt7420"; |
111 | reg = <0x4B>; | 111 | reg = <0x4b>; |
112 | }; | 112 | }; |
113 | } ; | 113 | } ; |
114 | }; | 114 | }; |
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 43fcd35e2957..94096299fc56 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h | |||
@@ -58,6 +58,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ | |||
58 | if (kernel_uses_llsc) { \ | 58 | if (kernel_uses_llsc) { \ |
59 | int temp; \ | 59 | int temp; \ |
60 | \ | 60 | \ |
61 | loongson_llsc_mb(); \ | ||
61 | __asm__ __volatile__( \ | 62 | __asm__ __volatile__( \ |
62 | " .set push \n" \ | 63 | " .set push \n" \ |
63 | " .set "MIPS_ISA_LEVEL" \n" \ | 64 | " .set "MIPS_ISA_LEVEL" \n" \ |
@@ -85,6 +86,7 @@ static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \ | |||
85 | if (kernel_uses_llsc) { \ | 86 | if (kernel_uses_llsc) { \ |
86 | int temp; \ | 87 | int temp; \ |
87 | \ | 88 | \ |
89 | loongson_llsc_mb(); \ | ||
88 | __asm__ __volatile__( \ | 90 | __asm__ __volatile__( \ |
89 | " .set push \n" \ | 91 | " .set push \n" \ |
90 | " .set "MIPS_ISA_LEVEL" \n" \ | 92 | " .set "MIPS_ISA_LEVEL" \n" \ |
@@ -118,6 +120,7 @@ static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \ | |||
118 | if (kernel_uses_llsc) { \ | 120 | if (kernel_uses_llsc) { \ |
119 | int temp; \ | 121 | int temp; \ |
120 | \ | 122 | \ |
123 | loongson_llsc_mb(); \ | ||
121 | __asm__ __volatile__( \ | 124 | __asm__ __volatile__( \ |
122 | " .set push \n" \ | 125 | " .set push \n" \ |
123 | " .set "MIPS_ISA_LEVEL" \n" \ | 126 | " .set "MIPS_ISA_LEVEL" \n" \ |
@@ -256,6 +259,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ | |||
256 | if (kernel_uses_llsc) { \ | 259 | if (kernel_uses_llsc) { \ |
257 | long temp; \ | 260 | long temp; \ |
258 | \ | 261 | \ |
262 | loongson_llsc_mb(); \ | ||
259 | __asm__ __volatile__( \ | 263 | __asm__ __volatile__( \ |
260 | " .set push \n" \ | 264 | " .set push \n" \ |
261 | " .set "MIPS_ISA_LEVEL" \n" \ | 265 | " .set "MIPS_ISA_LEVEL" \n" \ |
@@ -283,6 +287,7 @@ static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \ | |||
283 | if (kernel_uses_llsc) { \ | 287 | if (kernel_uses_llsc) { \ |
284 | long temp; \ | 288 | long temp; \ |
285 | \ | 289 | \ |
290 | loongson_llsc_mb(); \ | ||
286 | __asm__ __volatile__( \ | 291 | __asm__ __volatile__( \ |
287 | " .set push \n" \ | 292 | " .set push \n" \ |
288 | " .set "MIPS_ISA_LEVEL" \n" \ | 293 | " .set "MIPS_ISA_LEVEL" \n" \ |
@@ -316,6 +321,7 @@ static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \ | |||
316 | if (kernel_uses_llsc) { \ | 321 | if (kernel_uses_llsc) { \ |
317 | long temp; \ | 322 | long temp; \ |
318 | \ | 323 | \ |
324 | loongson_llsc_mb(); \ | ||
319 | __asm__ __volatile__( \ | 325 | __asm__ __volatile__( \ |
320 | " .set push \n" \ | 326 | " .set push \n" \ |
321 | " .set "MIPS_ISA_LEVEL" \n" \ | 327 | " .set "MIPS_ISA_LEVEL" \n" \ |
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h index a5eb1bb199a7..b7f6ac5e513c 100644 --- a/arch/mips/include/asm/barrier.h +++ b/arch/mips/include/asm/barrier.h | |||
@@ -222,6 +222,42 @@ | |||
222 | #define __smp_mb__before_atomic() __smp_mb__before_llsc() | 222 | #define __smp_mb__before_atomic() __smp_mb__before_llsc() |
223 | #define __smp_mb__after_atomic() smp_llsc_mb() | 223 | #define __smp_mb__after_atomic() smp_llsc_mb() |
224 | 224 | ||
225 | /* | ||
226 | * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load, | ||
227 | * store or pref) in between an ll & sc can cause the sc instruction to | ||
228 | * erroneously succeed, breaking atomicity. Whilst it's unusual to write code | ||
229 | * containing such sequences, this bug bites harder than we might otherwise | ||
230 | * expect due to reordering & speculation: | ||
231 | * | ||
232 | * 1) A memory access appearing prior to the ll in program order may actually | ||
233 | * be executed after the ll - this is the reordering case. | ||
234 | * | ||
235 | * In order to avoid this we need to place a memory barrier (ie. a sync | ||
236 | * instruction) prior to every ll instruction, in between it & any earlier | ||
237 | * memory access instructions. Many of these cases are already covered by | ||
238 | * smp_mb__before_llsc() but for the remaining cases, typically ones in | ||
239 | * which multiple CPUs may operate on a memory location but ordering is not | ||
240 | * usually guaranteed, we use loongson_llsc_mb() below. | ||
241 | * | ||
242 | * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later. | ||
243 | * | ||
244 | * 2) If a conditional branch exists between an ll & sc with a target outside | ||
245 | * of the ll-sc loop, for example an exit upon value mismatch in cmpxchg() | ||
246 | * or similar, then misprediction of the branch may allow speculative | ||
247 | * execution of memory accesses from outside of the ll-sc loop. | ||
248 | * | ||
249 | * In order to avoid this we need a memory barrier (ie. a sync instruction) | ||
250 | * at each affected branch target, for which we also use loongson_llsc_mb() | ||
251 | * defined below. | ||
252 | * | ||
253 | * This case affects all current Loongson 3 CPUs. | ||
254 | */ | ||
255 | #ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */ | ||
256 | #define loongson_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") | ||
257 | #else | ||
258 | #define loongson_llsc_mb() do { } while (0) | ||
259 | #endif | ||
260 | |||
225 | #include <asm-generic/barrier.h> | 261 | #include <asm-generic/barrier.h> |
226 | 262 | ||
227 | #endif /* __ASM_BARRIER_H */ | 263 | #endif /* __ASM_BARRIER_H */ |
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index c4675957b21b..830c93a010c3 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h | |||
@@ -69,6 +69,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
69 | : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); | 69 | : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); |
70 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) | 70 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
71 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { | 71 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { |
72 | loongson_llsc_mb(); | ||
72 | do { | 73 | do { |
73 | __asm__ __volatile__( | 74 | __asm__ __volatile__( |
74 | " " __LL "%0, %1 # set_bit \n" | 75 | " " __LL "%0, %1 # set_bit \n" |
@@ -79,6 +80,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
79 | } while (unlikely(!temp)); | 80 | } while (unlikely(!temp)); |
80 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ | 81 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ |
81 | } else if (kernel_uses_llsc) { | 82 | } else if (kernel_uses_llsc) { |
83 | loongson_llsc_mb(); | ||
82 | do { | 84 | do { |
83 | __asm__ __volatile__( | 85 | __asm__ __volatile__( |
84 | " .set push \n" | 86 | " .set push \n" |
@@ -123,6 +125,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
123 | : "ir" (~(1UL << bit))); | 125 | : "ir" (~(1UL << bit))); |
124 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) | 126 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) |
125 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { | 127 | } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { |
128 | loongson_llsc_mb(); | ||
126 | do { | 129 | do { |
127 | __asm__ __volatile__( | 130 | __asm__ __volatile__( |
128 | " " __LL "%0, %1 # clear_bit \n" | 131 | " " __LL "%0, %1 # clear_bit \n" |
@@ -133,6 +136,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
133 | } while (unlikely(!temp)); | 136 | } while (unlikely(!temp)); |
134 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ | 137 | #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ |
135 | } else if (kernel_uses_llsc) { | 138 | } else if (kernel_uses_llsc) { |
139 | loongson_llsc_mb(); | ||
136 | do { | 140 | do { |
137 | __asm__ __volatile__( | 141 | __asm__ __volatile__( |
138 | " .set push \n" | 142 | " .set push \n" |
@@ -193,6 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
193 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | 197 | unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); |
194 | unsigned long temp; | 198 | unsigned long temp; |
195 | 199 | ||
200 | loongson_llsc_mb(); | ||
196 | do { | 201 | do { |
197 | __asm__ __volatile__( | 202 | __asm__ __volatile__( |
198 | " .set push \n" | 203 | " .set push \n" |
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index c14d798f3888..b83b0397462d 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h | |||
@@ -50,6 +50,7 @@ | |||
50 | "i" (-EFAULT) \ | 50 | "i" (-EFAULT) \ |
51 | : "memory"); \ | 51 | : "memory"); \ |
52 | } else if (cpu_has_llsc) { \ | 52 | } else if (cpu_has_llsc) { \ |
53 | loongson_llsc_mb(); \ | ||
53 | __asm__ __volatile__( \ | 54 | __asm__ __volatile__( \ |
54 | " .set push \n" \ | 55 | " .set push \n" \ |
55 | " .set noat \n" \ | 56 | " .set noat \n" \ |
@@ -163,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
163 | "i" (-EFAULT) | 164 | "i" (-EFAULT) |
164 | : "memory"); | 165 | : "memory"); |
165 | } else if (cpu_has_llsc) { | 166 | } else if (cpu_has_llsc) { |
167 | loongson_llsc_mb(); | ||
166 | __asm__ __volatile__( | 168 | __asm__ __volatile__( |
167 | "# futex_atomic_cmpxchg_inatomic \n" | 169 | "# futex_atomic_cmpxchg_inatomic \n" |
168 | " .set push \n" | 170 | " .set push \n" |
@@ -192,6 +194,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
192 | : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), | 194 | : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), |
193 | "i" (-EFAULT) | 195 | "i" (-EFAULT) |
194 | : "memory"); | 196 | : "memory"); |
197 | loongson_llsc_mb(); | ||
195 | } else | 198 | } else |
196 | return -ENOSYS; | 199 | return -ENOSYS; |
197 | 200 | ||
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 57933fc8fd98..910851c62db3 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h | |||
@@ -228,6 +228,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) | |||
228 | : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) | 228 | : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) |
229 | : [global] "r" (page_global)); | 229 | : [global] "r" (page_global)); |
230 | } else if (kernel_uses_llsc) { | 230 | } else if (kernel_uses_llsc) { |
231 | loongson_llsc_mb(); | ||
231 | __asm__ __volatile__ ( | 232 | __asm__ __volatile__ ( |
232 | " .set push \n" | 233 | " .set push \n" |
233 | " .set "MIPS_ISA_ARCH_LEVEL" \n" | 234 | " .set "MIPS_ISA_ARCH_LEVEL" \n" |
@@ -242,6 +243,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) | |||
242 | " .set pop \n" | 243 | " .set pop \n" |
243 | : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) | 244 | : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp) |
244 | : [global] "r" (page_global)); | 245 | : [global] "r" (page_global)); |
246 | loongson_llsc_mb(); | ||
245 | } | 247 | } |
246 | #else /* !CONFIG_SMP */ | 248 | #else /* !CONFIG_SMP */ |
247 | if (pte_none(*buddy)) | 249 | if (pte_none(*buddy)) |
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c index 8f5bd04f320a..7f3f136572de 100644 --- a/arch/mips/kernel/mips-cm.c +++ b/arch/mips/kernel/mips-cm.c | |||
@@ -457,5 +457,5 @@ void mips_cm_error_report(void) | |||
457 | } | 457 | } |
458 | 458 | ||
459 | /* reprime cause register */ | 459 | /* reprime cause register */ |
460 | write_gcr_error_cause(0); | 460 | write_gcr_error_cause(cm_error); |
461 | } | 461 | } |
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 6829a064aac8..339870ed92f7 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c | |||
@@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size) | |||
371 | static int get_frame_info(struct mips_frame_info *info) | 371 | static int get_frame_info(struct mips_frame_info *info) |
372 | { | 372 | { |
373 | bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); | 373 | bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); |
374 | union mips_instruction insn, *ip, *ip_end; | 374 | union mips_instruction insn, *ip; |
375 | const unsigned int max_insns = 128; | 375 | const unsigned int max_insns = 128; |
376 | unsigned int last_insn_size = 0; | 376 | unsigned int last_insn_size = 0; |
377 | unsigned int i; | 377 | unsigned int i; |
@@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info) | |||
384 | if (!ip) | 384 | if (!ip) |
385 | goto err; | 385 | goto err; |
386 | 386 | ||
387 | ip_end = (void *)ip + info->func_size; | 387 | for (i = 0; i < max_insns; i++) { |
388 | |||
389 | for (i = 0; i < max_insns && ip < ip_end; i++) { | ||
390 | ip = (void *)ip + last_insn_size; | 388 | ip = (void *)ip + last_insn_size; |
389 | |||
391 | if (is_mmips && mm_insn_16bit(ip->halfword[0])) { | 390 | if (is_mmips && mm_insn_16bit(ip->halfword[0])) { |
392 | insn.word = ip->halfword[0] << 16; | 391 | insn.word = ip->halfword[0] << 16; |
393 | last_insn_size = 2; | 392 | last_insn_size = 2; |
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform index 0fce4608aa88..c1a4d4dc4665 100644 --- a/arch/mips/loongson64/Platform +++ b/arch/mips/loongson64/Platform | |||
@@ -23,6 +23,29 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS | |||
23 | endif | 23 | endif |
24 | 24 | ||
25 | cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap | 25 | cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap |
26 | |||
27 | # | ||
28 | # Some versions of binutils, not currently mainline as of 2019/02/04, support | ||
29 | # an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction | ||
30 | # to work around a CPU bug (see loongson_llsc_mb() in asm/barrier.h for a | ||
31 | # description). | ||
32 | # | ||
33 | # We disable this in order to prevent the assembler meddling with the | ||
34 | # instruction that labels refer to, ie. if we label an ll instruction: | ||
35 | # | ||
36 | # 1: ll v0, 0(a0) | ||
37 | # | ||
38 | # ...then with the assembler fix applied the label may actually point at a sync | ||
39 | # instruction inserted by the assembler, and if we were using the label in an | ||
40 | # exception table the table would no longer contain the address of the ll | ||
41 | # instruction. | ||
42 | # | ||
43 | # Avoid this by explicitly disabling that assembler behaviour. If upstream | ||
44 | # binutils does not merge support for the flag then we can revisit & remove | ||
45 | # this later - for now it ensures vendor toolchains don't cause problems. | ||
46 | # | ||
47 | cflags-$(CONFIG_CPU_LOONGSON3) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,) | ||
48 | |||
26 | # | 49 | # |
27 | # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a | 50 | # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a |
28 | # as MIPS64 R2; older versions as just R1. This leaves the possibility open | 51 | # as MIPS64 R2; older versions as just R1. This leaves the possibility open |
diff --git a/arch/mips/loongson64/common/reset.c b/arch/mips/loongson64/common/reset.c index a60715e11306..b26892ce871c 100644 --- a/arch/mips/loongson64/common/reset.c +++ b/arch/mips/loongson64/common/reset.c | |||
@@ -59,7 +59,12 @@ static void loongson_poweroff(void) | |||
59 | { | 59 | { |
60 | #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE | 60 | #ifndef CONFIG_LEFI_FIRMWARE_INTERFACE |
61 | mach_prepare_shutdown(); | 61 | mach_prepare_shutdown(); |
62 | unreachable(); | 62 | |
63 | /* | ||
64 | * It needs a wait loop here, but mips/kernel/reset.c already calls | ||
65 | * a generic delay loop, machine_hang(), so simply return. | ||
66 | */ | ||
67 | return; | ||
63 | #else | 68 | #else |
64 | void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; | 69 | void (*fw_poweroff)(void) = (void *)loongson_sysconf.poweroff_addr; |
65 | 70 | ||
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 37b1cb246332..65b6e85447b1 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -932,6 +932,8 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r, | |||
932 | * to mimic that here by taking a load/istream page | 932 | * to mimic that here by taking a load/istream page |
933 | * fault. | 933 | * fault. |
934 | */ | 934 | */ |
935 | if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) | ||
936 | uasm_i_sync(p, 0); | ||
935 | UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); | 937 | UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0); |
936 | uasm_i_jr(p, ptr); | 938 | uasm_i_jr(p, ptr); |
937 | 939 | ||
@@ -1646,6 +1648,8 @@ static void | |||
1646 | iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) | 1648 | iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) |
1647 | { | 1649 | { |
1648 | #ifdef CONFIG_SMP | 1650 | #ifdef CONFIG_SMP |
1651 | if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) | ||
1652 | uasm_i_sync(p, 0); | ||
1649 | # ifdef CONFIG_PHYS_ADDR_T_64BIT | 1653 | # ifdef CONFIG_PHYS_ADDR_T_64BIT |
1650 | if (cpu_has_64bits) | 1654 | if (cpu_has_64bits) |
1651 | uasm_i_lld(p, pte, 0, ptr); | 1655 | uasm_i_lld(p, pte, 0, ptr); |
@@ -2259,6 +2263,8 @@ static void build_r4000_tlb_load_handler(void) | |||
2259 | #endif | 2263 | #endif |
2260 | 2264 | ||
2261 | uasm_l_nopage_tlbl(&l, p); | 2265 | uasm_l_nopage_tlbl(&l, p); |
2266 | if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) | ||
2267 | uasm_i_sync(&p, 0); | ||
2262 | build_restore_work_registers(&p); | 2268 | build_restore_work_registers(&p); |
2263 | #ifdef CONFIG_CPU_MICROMIPS | 2269 | #ifdef CONFIG_CPU_MICROMIPS |
2264 | if ((unsigned long)tlb_do_page_fault_0 & 1) { | 2270 | if ((unsigned long)tlb_do_page_fault_0 & 1) { |
@@ -2313,6 +2319,8 @@ static void build_r4000_tlb_store_handler(void) | |||
2313 | #endif | 2319 | #endif |
2314 | 2320 | ||
2315 | uasm_l_nopage_tlbs(&l, p); | 2321 | uasm_l_nopage_tlbs(&l, p); |
2322 | if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) | ||
2323 | uasm_i_sync(&p, 0); | ||
2316 | build_restore_work_registers(&p); | 2324 | build_restore_work_registers(&p); |
2317 | #ifdef CONFIG_CPU_MICROMIPS | 2325 | #ifdef CONFIG_CPU_MICROMIPS |
2318 | if ((unsigned long)tlb_do_page_fault_1 & 1) { | 2326 | if ((unsigned long)tlb_do_page_fault_1 & 1) { |
@@ -2368,6 +2376,8 @@ static void build_r4000_tlb_modify_handler(void) | |||
2368 | #endif | 2376 | #endif |
2369 | 2377 | ||
2370 | uasm_l_nopage_tlbm(&l, p); | 2378 | uasm_l_nopage_tlbm(&l, p); |
2379 | if (IS_ENABLED(CONFIG_CPU_LOONGSON3_WORKAROUNDS)) | ||
2380 | uasm_i_sync(&p, 0); | ||
2371 | build_restore_work_registers(&p); | 2381 | build_restore_work_registers(&p); |
2372 | #ifdef CONFIG_CPU_MICROMIPS | 2382 | #ifdef CONFIG_CPU_MICROMIPS |
2373 | if ((unsigned long)tlb_do_page_fault_1 & 1) { | 2383 | if ((unsigned long)tlb_do_page_fault_1 & 1) { |
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index 5017d5843c5a..fc29b85cfa92 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c | |||
@@ -568,6 +568,11 @@ static int __init octeon_pci_setup(void) | |||
568 | if (octeon_has_feature(OCTEON_FEATURE_PCIE)) | 568 | if (octeon_has_feature(OCTEON_FEATURE_PCIE)) |
569 | return 0; | 569 | return 0; |
570 | 570 | ||
571 | if (!octeon_is_pci_host()) { | ||
572 | pr_notice("Not in host mode, PCI Controller not initialized\n"); | ||
573 | return 0; | ||
574 | } | ||
575 | |||
571 | /* Point pcibios_map_irq() to the PCI version of it */ | 576 | /* Point pcibios_map_irq() to the PCI version of it */ |
572 | octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; | 577 | octeon_pcibios_map_irq = octeon_pci_pcibios_map_irq; |
573 | 578 | ||
@@ -579,11 +584,6 @@ static int __init octeon_pci_setup(void) | |||
579 | else | 584 | else |
580 | octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; | 585 | octeon_dma_bar_type = OCTEON_DMA_BAR_TYPE_BIG; |
581 | 586 | ||
582 | if (!octeon_is_pci_host()) { | ||
583 | pr_notice("Not in host mode, PCI Controller not initialized\n"); | ||
584 | return 0; | ||
585 | } | ||
586 | |||
587 | /* PCI I/O and PCI MEM values */ | 587 | /* PCI I/O and PCI MEM values */ |
588 | set_io_port_base(OCTEON_PCI_IOSPACE_BASE); | 588 | set_io_port_base(OCTEON_PCI_IOSPACE_BASE); |
589 | ioport_resource.start = 0; | 589 | ioport_resource.start = 0; |
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile index f6fd340e39c2..0ede4deb8181 100644 --- a/arch/mips/vdso/Makefile +++ b/arch/mips/vdso/Makefile | |||
@@ -8,6 +8,7 @@ ccflags-vdso := \ | |||
8 | $(filter -E%,$(KBUILD_CFLAGS)) \ | 8 | $(filter -E%,$(KBUILD_CFLAGS)) \ |
9 | $(filter -mmicromips,$(KBUILD_CFLAGS)) \ | 9 | $(filter -mmicromips,$(KBUILD_CFLAGS)) \ |
10 | $(filter -march=%,$(KBUILD_CFLAGS)) \ | 10 | $(filter -march=%,$(KBUILD_CFLAGS)) \ |
11 | $(filter -m%-float,$(KBUILD_CFLAGS)) \ | ||
11 | -D__VDSO__ | 12 | -D__VDSO__ |
12 | 13 | ||
13 | ifdef CONFIG_CC_IS_CLANG | 14 | ifdef CONFIG_CC_IS_CLANG |
@@ -129,7 +130,7 @@ $(obj)/%-o32.o: $(src)/%.c FORCE | |||
129 | $(call cmd,force_checksrc) | 130 | $(call cmd,force_checksrc) |
130 | $(call if_changed_rule,cc_o_c) | 131 | $(call if_changed_rule,cc_o_c) |
131 | 132 | ||
132 | $(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32 | 133 | $(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=32 |
133 | $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE | 134 | $(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE |
134 | $(call if_changed_dep,cpp_lds_S) | 135 | $(call if_changed_dep,cpp_lds_S) |
135 | 136 | ||
@@ -169,7 +170,7 @@ $(obj)/%-n32.o: $(src)/%.c FORCE | |||
169 | $(call cmd,force_checksrc) | 170 | $(call cmd,force_checksrc) |
170 | $(call if_changed_rule,cc_o_c) | 171 | $(call if_changed_rule,cc_o_c) |
171 | 172 | ||
172 | $(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32 | 173 | $(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := $(ccflags-vdso) -mabi=n32 |
173 | $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE | 174 | $(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE |
174 | $(call if_changed_dep,cpp_lds_S) | 175 | $(call if_changed_dep,cpp_lds_S) |
175 | 176 | ||
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index eb87cd8327c8..1f04844b6b82 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild | |||
@@ -34,6 +34,7 @@ generic-y += qrwlock_types.h | |||
34 | generic-y += qrwlock.h | 34 | generic-y += qrwlock.h |
35 | generic-y += sections.h | 35 | generic-y += sections.h |
36 | generic-y += segment.h | 36 | generic-y += segment.h |
37 | generic-y += shmparam.h | ||
37 | generic-y += string.h | 38 | generic-y += string.h |
38 | generic-y += switch_to.h | 39 | generic-y += switch_to.h |
39 | generic-y += topology.h | 40 | generic-y += topology.h |
diff --git a/arch/openrisc/include/uapi/asm/Kbuild b/arch/openrisc/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/openrisc/include/uapi/asm/Kbuild +++ b/arch/openrisc/include/uapi/asm/Kbuild | |||
@@ -1,5 +1,4 @@ | |||
1 | include include/uapi/asm-generic/Kbuild.asm | 1 | include include/uapi/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | generic-y += kvm_para.h | 3 | generic-y += kvm_para.h |
4 | generic-y += shmparam.h | ||
5 | generic-y += ucontext.h | 4 | generic-y += ucontext.h |
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 2e6ada28da64..c9bfe526ca9d 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h | |||
@@ -1258,21 +1258,13 @@ extern pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, | |||
1258 | 1258 | ||
1259 | #define pmd_move_must_withdraw pmd_move_must_withdraw | 1259 | #define pmd_move_must_withdraw pmd_move_must_withdraw |
1260 | struct spinlock; | 1260 | struct spinlock; |
1261 | static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, | 1261 | extern int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, |
1262 | struct spinlock *old_pmd_ptl, | 1262 | struct spinlock *old_pmd_ptl, |
1263 | struct vm_area_struct *vma) | 1263 | struct vm_area_struct *vma); |
1264 | { | 1264 | /* |
1265 | if (radix_enabled()) | 1265 | * Hash translation mode use the deposited table to store hash pte |
1266 | return false; | 1266 | * slot information. |
1267 | /* | 1267 | */ |
1268 | * Archs like ppc64 use pgtable to store per pmd | ||
1269 | * specific information. So when we switch the pmd, | ||
1270 | * we should also withdraw and deposit the pgtable | ||
1271 | */ | ||
1272 | return true; | ||
1273 | } | ||
1274 | |||
1275 | |||
1276 | #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit | 1268 | #define arch_needs_pgtable_deposit arch_needs_pgtable_deposit |
1277 | static inline bool arch_needs_pgtable_deposit(void) | 1269 | static inline bool arch_needs_pgtable_deposit(void) |
1278 | { | 1270 | { |
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c index f3c31f5e1026..ecd31569a120 100644 --- a/arch/powerpc/mm/pgtable-book3s64.c +++ b/arch/powerpc/mm/pgtable-book3s64.c | |||
@@ -400,3 +400,25 @@ void arch_report_meminfo(struct seq_file *m) | |||
400 | atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); | 400 | atomic_long_read(&direct_pages_count[MMU_PAGE_1G]) << 20); |
401 | } | 401 | } |
402 | #endif /* CONFIG_PROC_FS */ | 402 | #endif /* CONFIG_PROC_FS */ |
403 | |||
404 | /* | ||
405 | * For hash translation mode, we use the deposited table to store hash slot | ||
406 | * information and they are stored at PTRS_PER_PMD offset from related pmd | ||
407 | * location. Hence a pmd move requires deposit and withdraw. | ||
408 | * | ||
409 | * For radix translation with split pmd ptl, we store the deposited table in the | ||
410 | * pmd page. Hence if we have different pmd page we need to withdraw during pmd | ||
411 | * move. | ||
412 | * | ||
413 | * With hash we use deposited table always irrespective of anon or not. | ||
414 | * With radix we use deposited table only for anonymous mapping. | ||
415 | */ | ||
416 | int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl, | ||
417 | struct spinlock *old_pmd_ptl, | ||
418 | struct vm_area_struct *vma) | ||
419 | { | ||
420 | if (radix_enabled()) | ||
421 | return (new_pmd_ptl != old_pmd_ptl) && vma_is_anonymous(vma); | ||
422 | |||
423 | return true; | ||
424 | } | ||
diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index 7d6457ab5d34..bba281b1fe1b 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c | |||
@@ -43,6 +43,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) | |||
43 | { | 43 | { |
44 | unsigned long ret[PLPAR_HCALL_BUFSIZE]; | 44 | unsigned long ret[PLPAR_HCALL_BUFSIZE]; |
45 | uint64_t rc, token; | 45 | uint64_t rc, token; |
46 | uint64_t saved = 0; | ||
46 | 47 | ||
47 | /* | 48 | /* |
48 | * When the hypervisor cannot map all the requested memory in a single | 49 | * When the hypervisor cannot map all the requested memory in a single |
@@ -56,6 +57,8 @@ static int drc_pmem_bind(struct papr_scm_priv *p) | |||
56 | rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, | 57 | rc = plpar_hcall(H_SCM_BIND_MEM, ret, p->drc_index, 0, |
57 | p->blocks, BIND_ANY_ADDR, token); | 58 | p->blocks, BIND_ANY_ADDR, token); |
58 | token = ret[0]; | 59 | token = ret[0]; |
60 | if (!saved) | ||
61 | saved = ret[1]; | ||
59 | cond_resched(); | 62 | cond_resched(); |
60 | } while (rc == H_BUSY); | 63 | } while (rc == H_BUSY); |
61 | 64 | ||
@@ -64,7 +67,7 @@ static int drc_pmem_bind(struct papr_scm_priv *p) | |||
64 | return -ENXIO; | 67 | return -ENXIO; |
65 | } | 68 | } |
66 | 69 | ||
67 | p->bound_addr = ret[1]; | 70 | p->bound_addr = saved; |
68 | 71 | ||
69 | dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); | 72 | dev_dbg(&p->pdev->dev, "bound drc %x to %pR\n", p->drc_index, &p->res); |
70 | 73 | ||
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index feeeaa60697c..515fc3cc9687 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig | |||
@@ -103,7 +103,7 @@ choice | |||
103 | prompt "Base ISA" | 103 | prompt "Base ISA" |
104 | default ARCH_RV64I | 104 | default ARCH_RV64I |
105 | help | 105 | help |
106 | This selects the base ISA that this kernel will traget and must match | 106 | This selects the base ISA that this kernel will target and must match |
107 | the target platform. | 107 | the target platform. |
108 | 108 | ||
109 | config ARCH_RV32I | 109 | config ARCH_RV32I |
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index f399659d3b8d..2fd3461e50ab 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig | |||
@@ -13,8 +13,6 @@ CONFIG_BLK_DEV_INITRD=y | |||
13 | CONFIG_EXPERT=y | 13 | CONFIG_EXPERT=y |
14 | CONFIG_BPF_SYSCALL=y | 14 | CONFIG_BPF_SYSCALL=y |
15 | CONFIG_SMP=y | 15 | CONFIG_SMP=y |
16 | CONFIG_PCI=y | ||
17 | CONFIG_PCIE_XILINX=y | ||
18 | CONFIG_MODULES=y | 16 | CONFIG_MODULES=y |
19 | CONFIG_MODULE_UNLOAD=y | 17 | CONFIG_MODULE_UNLOAD=y |
20 | CONFIG_NET=y | 18 | CONFIG_NET=y |
@@ -28,6 +26,10 @@ CONFIG_IP_PNP_DHCP=y | |||
28 | CONFIG_IP_PNP_BOOTP=y | 26 | CONFIG_IP_PNP_BOOTP=y |
29 | CONFIG_IP_PNP_RARP=y | 27 | CONFIG_IP_PNP_RARP=y |
30 | CONFIG_NETLINK_DIAG=y | 28 | CONFIG_NETLINK_DIAG=y |
29 | CONFIG_PCI=y | ||
30 | CONFIG_PCIEPORTBUS=y | ||
31 | CONFIG_PCI_HOST_GENERIC=y | ||
32 | CONFIG_PCIE_XILINX=y | ||
31 | CONFIG_DEVTMPFS=y | 33 | CONFIG_DEVTMPFS=y |
32 | CONFIG_BLK_DEV_LOOP=y | 34 | CONFIG_BLK_DEV_LOOP=y |
33 | CONFIG_VIRTIO_BLK=y | 35 | CONFIG_VIRTIO_BLK=y |
@@ -63,7 +65,6 @@ CONFIG_USB_STORAGE=y | |||
63 | CONFIG_USB_UAS=y | 65 | CONFIG_USB_UAS=y |
64 | CONFIG_VIRTIO_MMIO=y | 66 | CONFIG_VIRTIO_MMIO=y |
65 | CONFIG_SIFIVE_PLIC=y | 67 | CONFIG_SIFIVE_PLIC=y |
66 | CONFIG_RAS=y | ||
67 | CONFIG_EXT4_FS=y | 68 | CONFIG_EXT4_FS=y |
68 | CONFIG_EXT4_FS_POSIX_ACL=y | 69 | CONFIG_EXT4_FS_POSIX_ACL=y |
69 | CONFIG_AUTOFS4_FS=y | 70 | CONFIG_AUTOFS4_FS=y |
@@ -77,5 +78,6 @@ CONFIG_NFS_V4_1=y | |||
77 | CONFIG_NFS_V4_2=y | 78 | CONFIG_NFS_V4_2=y |
78 | CONFIG_ROOT_NFS=y | 79 | CONFIG_ROOT_NFS=y |
79 | CONFIG_CRYPTO_USER_API_HASH=y | 80 | CONFIG_CRYPTO_USER_API_HASH=y |
81 | CONFIG_CRYPTO_DEV_VIRTIO=y | ||
80 | CONFIG_PRINTK_TIME=y | 82 | CONFIG_PRINTK_TIME=y |
81 | # CONFIG_RCU_TRACE is not set | 83 | # CONFIG_RCU_TRACE is not set |
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 06cfbb3aacbb..2a546a52f02a 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h | |||
@@ -80,7 +80,7 @@ typedef struct page *pgtable_t; | |||
80 | #define __pgd(x) ((pgd_t) { (x) }) | 80 | #define __pgd(x) ((pgd_t) { (x) }) |
81 | #define __pgprot(x) ((pgprot_t) { (x) }) | 81 | #define __pgprot(x) ((pgprot_t) { (x) }) |
82 | 82 | ||
83 | #ifdef CONFIG_64BITS | 83 | #ifdef CONFIG_64BIT |
84 | #define PTE_FMT "%016lx" | 84 | #define PTE_FMT "%016lx" |
85 | #else | 85 | #else |
86 | #define PTE_FMT "%08lx" | 86 | #define PTE_FMT "%08lx" |
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index 2fa2942be221..470755cb7558 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h | |||
@@ -35,6 +35,12 @@ | |||
35 | #define _PAGE_SPECIAL _PAGE_SOFT | 35 | #define _PAGE_SPECIAL _PAGE_SOFT |
36 | #define _PAGE_TABLE _PAGE_PRESENT | 36 | #define _PAGE_TABLE _PAGE_PRESENT |
37 | 37 | ||
38 | /* | ||
39 | * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to | ||
40 | * distinguish them from swapped out pages | ||
41 | */ | ||
42 | #define _PAGE_PROT_NONE _PAGE_READ | ||
43 | |||
38 | #define _PAGE_PFN_SHIFT 10 | 44 | #define _PAGE_PFN_SHIFT 10 |
39 | 45 | ||
40 | /* Set of bits to preserve across pte_modify() */ | 46 | /* Set of bits to preserve across pte_modify() */ |
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index 16301966d65b..a8179a8c1491 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h | |||
@@ -44,7 +44,7 @@ | |||
44 | /* Page protection bits */ | 44 | /* Page protection bits */ |
45 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) | 45 | #define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) |
46 | 46 | ||
47 | #define PAGE_NONE __pgprot(0) | 47 | #define PAGE_NONE __pgprot(_PAGE_PROT_NONE) |
48 | #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) | 48 | #define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) |
49 | #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) | 49 | #define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) |
50 | #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) | 50 | #define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) |
@@ -98,7 +98,7 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; | |||
98 | 98 | ||
99 | static inline int pmd_present(pmd_t pmd) | 99 | static inline int pmd_present(pmd_t pmd) |
100 | { | 100 | { |
101 | return (pmd_val(pmd) & _PAGE_PRESENT); | 101 | return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); |
102 | } | 102 | } |
103 | 103 | ||
104 | static inline int pmd_none(pmd_t pmd) | 104 | static inline int pmd_none(pmd_t pmd) |
@@ -178,7 +178,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long addr) | |||
178 | 178 | ||
179 | static inline int pte_present(pte_t pte) | 179 | static inline int pte_present(pte_t pte) |
180 | { | 180 | { |
181 | return (pte_val(pte) & _PAGE_PRESENT); | 181 | return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); |
182 | } | 182 | } |
183 | 183 | ||
184 | static inline int pte_none(pte_t pte) | 184 | static inline int pte_none(pte_t pte) |
@@ -380,7 +380,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma, | |||
380 | * | 380 | * |
381 | * Format of swap PTE: | 381 | * Format of swap PTE: |
382 | * bit 0: _PAGE_PRESENT (zero) | 382 | * bit 0: _PAGE_PRESENT (zero) |
383 | * bit 1: reserved for future use (zero) | 383 | * bit 1: _PAGE_PROT_NONE (zero) |
384 | * bits 2 to 6: swap type | 384 | * bits 2 to 6: swap type |
385 | * bits 7 to XLEN-1: swap offset | 385 | * bits 7 to XLEN-1: swap offset |
386 | */ | 386 | */ |
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 0531f49af5c3..ce70bceb8872 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h | |||
@@ -22,7 +22,7 @@ | |||
22 | * This decides where the kernel will search for a free chunk of vm | 22 | * This decides where the kernel will search for a free chunk of vm |
23 | * space during mmap's. | 23 | * space during mmap's. |
24 | */ | 24 | */ |
25 | #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1) | 25 | #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) |
26 | 26 | ||
27 | #define STACK_TOP TASK_SIZE | 27 | #define STACK_TOP TASK_SIZE |
28 | #define STACK_TOP_MAX STACK_TOP | 28 | #define STACK_TOP_MAX STACK_TOP |
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c index 6a92a2fe198e..dac98348c6a3 100644 --- a/arch/riscv/kernel/asm-offsets.c +++ b/arch/riscv/kernel/asm-offsets.c | |||
@@ -39,6 +39,7 @@ void asm_offsets(void) | |||
39 | OFFSET(TASK_STACK, task_struct, stack); | 39 | OFFSET(TASK_STACK, task_struct, stack); |
40 | OFFSET(TASK_TI, task_struct, thread_info); | 40 | OFFSET(TASK_TI, task_struct, thread_info); |
41 | OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); | 41 | OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags); |
42 | OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count); | ||
42 | OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); | 43 | OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp); |
43 | OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); | 44 | OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp); |
44 | OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); | 45 | OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu); |
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 355166f57205..fd9b57c8b4ce 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S | |||
@@ -144,6 +144,10 @@ _save_context: | |||
144 | REG_L x2, PT_SP(sp) | 144 | REG_L x2, PT_SP(sp) |
145 | .endm | 145 | .endm |
146 | 146 | ||
147 | #if !IS_ENABLED(CONFIG_PREEMPT) | ||
148 | .set resume_kernel, restore_all | ||
149 | #endif | ||
150 | |||
147 | ENTRY(handle_exception) | 151 | ENTRY(handle_exception) |
148 | SAVE_ALL | 152 | SAVE_ALL |
149 | 153 | ||
@@ -228,7 +232,7 @@ ret_from_exception: | |||
228 | REG_L s0, PT_SSTATUS(sp) | 232 | REG_L s0, PT_SSTATUS(sp) |
229 | csrc sstatus, SR_SIE | 233 | csrc sstatus, SR_SIE |
230 | andi s0, s0, SR_SPP | 234 | andi s0, s0, SR_SPP |
231 | bnez s0, restore_all | 235 | bnez s0, resume_kernel |
232 | 236 | ||
233 | resume_userspace: | 237 | resume_userspace: |
234 | /* Interrupts must be disabled here so flags are checked atomically */ | 238 | /* Interrupts must be disabled here so flags are checked atomically */ |
@@ -250,6 +254,18 @@ restore_all: | |||
250 | RESTORE_ALL | 254 | RESTORE_ALL |
251 | sret | 255 | sret |
252 | 256 | ||
257 | #if IS_ENABLED(CONFIG_PREEMPT) | ||
258 | resume_kernel: | ||
259 | REG_L s0, TASK_TI_PREEMPT_COUNT(tp) | ||
260 | bnez s0, restore_all | ||
261 | need_resched: | ||
262 | REG_L s0, TASK_TI_FLAGS(tp) | ||
263 | andi s0, s0, _TIF_NEED_RESCHED | ||
264 | beqz s0, restore_all | ||
265 | call preempt_schedule_irq | ||
266 | j need_resched | ||
267 | #endif | ||
268 | |||
253 | work_pending: | 269 | work_pending: |
254 | /* Enter slow path for supplementary processing */ | 270 | /* Enter slow path for supplementary processing */ |
255 | la ra, ret_from_exception | 271 | la ra, ret_from_exception |
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 6e079e94b638..77564310235f 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c | |||
@@ -181,7 +181,7 @@ static void __init setup_bootmem(void) | |||
181 | BUG_ON(mem_size == 0); | 181 | BUG_ON(mem_size == 0); |
182 | 182 | ||
183 | set_max_mapnr(PFN_DOWN(mem_size)); | 183 | set_max_mapnr(PFN_DOWN(mem_size)); |
184 | max_low_pfn = memblock_end_of_DRAM(); | 184 | max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); |
185 | 185 | ||
186 | #ifdef CONFIG_BLK_DEV_INITRD | 186 | #ifdef CONFIG_BLK_DEV_INITRD |
187 | setup_initrd(); | 187 | setup_initrd(); |
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index fc185ecabb0a..18cda0e8cf94 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c | |||
@@ -57,15 +57,12 @@ void __init setup_smp(void) | |||
57 | 57 | ||
58 | while ((dn = of_find_node_by_type(dn, "cpu"))) { | 58 | while ((dn = of_find_node_by_type(dn, "cpu"))) { |
59 | hart = riscv_of_processor_hartid(dn); | 59 | hart = riscv_of_processor_hartid(dn); |
60 | if (hart < 0) { | 60 | if (hart < 0) |
61 | of_node_put(dn); | ||
62 | continue; | 61 | continue; |
63 | } | ||
64 | 62 | ||
65 | if (hart == cpuid_to_hartid_map(0)) { | 63 | if (hart == cpuid_to_hartid_map(0)) { |
66 | BUG_ON(found_boot_cpu); | 64 | BUG_ON(found_boot_cpu); |
67 | found_boot_cpu = 1; | 65 | found_boot_cpu = 1; |
68 | of_node_put(dn); | ||
69 | continue; | 66 | continue; |
70 | } | 67 | } |
71 | 68 | ||
@@ -73,7 +70,6 @@ void __init setup_smp(void) | |||
73 | set_cpu_possible(cpuid, true); | 70 | set_cpu_possible(cpuid, true); |
74 | set_cpu_present(cpuid, true); | 71 | set_cpu_present(cpuid, true); |
75 | cpuid++; | 72 | cpuid++; |
76 | of_node_put(dn); | ||
77 | } | 73 | } |
78 | 74 | ||
79 | BUG_ON(!found_boot_cpu); | 75 | BUG_ON(!found_boot_cpu); |
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index 1e1395d63dab..65df1dfdc303 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S | |||
@@ -18,8 +18,6 @@ | |||
18 | #include <asm/cache.h> | 18 | #include <asm/cache.h> |
19 | #include <asm/thread_info.h> | 19 | #include <asm/thread_info.h> |
20 | 20 | ||
21 | #define MAX_BYTES_PER_LONG 0x10 | ||
22 | |||
23 | OUTPUT_ARCH(riscv) | 21 | OUTPUT_ARCH(riscv) |
24 | ENTRY(_start) | 22 | ENTRY(_start) |
25 | 23 | ||
@@ -76,6 +74,8 @@ SECTIONS | |||
76 | *(.sbss*) | 74 | *(.sbss*) |
77 | } | 75 | } |
78 | 76 | ||
77 | BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) | ||
78 | |||
79 | EXCEPTION_TABLE(0x10) | 79 | EXCEPTION_TABLE(0x10) |
80 | NOTES | 80 | NOTES |
81 | 81 | ||
@@ -83,10 +83,6 @@ SECTIONS | |||
83 | *(.rel.dyn*) | 83 | *(.rel.dyn*) |
84 | } | 84 | } |
85 | 85 | ||
86 | BSS_SECTION(MAX_BYTES_PER_LONG, | ||
87 | MAX_BYTES_PER_LONG, | ||
88 | MAX_BYTES_PER_LONG) | ||
89 | |||
90 | _end = .; | 86 | _end = .; |
91 | 87 | ||
92 | STABS_DEBUG | 88 | STABS_DEBUG |
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 1d9bfaff60bc..658ebf645f42 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c | |||
@@ -28,7 +28,8 @@ static void __init zone_sizes_init(void) | |||
28 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; | 28 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; |
29 | 29 | ||
30 | #ifdef CONFIG_ZONE_DMA32 | 30 | #ifdef CONFIG_ZONE_DMA32 |
31 | max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn)); | 31 | max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, |
32 | (unsigned long) PFN_PHYS(max_low_pfn))); | ||
32 | #endif | 33 | #endif |
33 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | 34 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
34 | 35 | ||
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S index 537f97fde37f..b6796e616812 100644 --- a/arch/s390/kernel/swsusp.S +++ b/arch/s390/kernel/swsusp.S | |||
@@ -30,10 +30,10 @@ | |||
30 | .section .text | 30 | .section .text |
31 | ENTRY(swsusp_arch_suspend) | 31 | ENTRY(swsusp_arch_suspend) |
32 | lg %r1,__LC_NODAT_STACK | 32 | lg %r1,__LC_NODAT_STACK |
33 | aghi %r1,-STACK_FRAME_OVERHEAD | ||
34 | stmg %r6,%r15,__SF_GPRS(%r1) | 33 | stmg %r6,%r15,__SF_GPRS(%r1) |
34 | aghi %r1,-STACK_FRAME_OVERHEAD | ||
35 | stg %r15,__SF_BACKCHAIN(%r1) | 35 | stg %r15,__SF_BACKCHAIN(%r1) |
36 | lgr %r1,%r15 | 36 | lgr %r15,%r1 |
37 | 37 | ||
38 | /* Store FPU registers */ | 38 | /* Store FPU registers */ |
39 | brasl %r14,save_fpu_regs | 39 | brasl %r14,save_fpu_regs |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index a966d7bfac57..4266a4de3160 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -382,7 +382,9 @@ static void zpci_irq_handler(struct airq_struct *airq) | |||
382 | if (ai == -1UL) | 382 | if (ai == -1UL) |
383 | break; | 383 | break; |
384 | inc_irq_stat(IRQIO_MSI); | 384 | inc_irq_stat(IRQIO_MSI); |
385 | airq_iv_lock(aibv, ai); | ||
385 | generic_handle_irq(airq_iv_get_data(aibv, ai)); | 386 | generic_handle_irq(airq_iv_get_data(aibv, ai)); |
387 | airq_iv_unlock(aibv, ai); | ||
386 | } | 388 | } |
387 | } | 389 | } |
388 | } | 390 | } |
@@ -408,7 +410,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
408 | zdev->aisb = aisb; | 410 | zdev->aisb = aisb; |
409 | 411 | ||
410 | /* Create adapter interrupt vector */ | 412 | /* Create adapter interrupt vector */ |
411 | zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA); | 413 | zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK); |
412 | if (!zdev->aibv) | 414 | if (!zdev->aibv) |
413 | return -ENOMEM; | 415 | return -ENOMEM; |
414 | 416 | ||
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 1372553dc0a9..1d1544b6ca74 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild | |||
@@ -28,6 +28,7 @@ generic-y += preempt.h | |||
28 | generic-y += sections.h | 28 | generic-y += sections.h |
29 | generic-y += segment.h | 29 | generic-y += segment.h |
30 | generic-y += serial.h | 30 | generic-y += serial.h |
31 | generic-y += shmparam.h | ||
31 | generic-y += sizes.h | 32 | generic-y += sizes.h |
32 | generic-y += syscalls.h | 33 | generic-y += syscalls.h |
33 | generic-y += topology.h | 34 | generic-y += topology.h |
diff --git a/arch/unicore32/include/uapi/asm/Kbuild b/arch/unicore32/include/uapi/asm/Kbuild index 6c6f6301012e..0febf1a07c30 100644 --- a/arch/unicore32/include/uapi/asm/Kbuild +++ b/arch/unicore32/include/uapi/asm/Kbuild | |||
@@ -1,5 +1,4 @@ | |||
1 | include include/uapi/asm-generic/Kbuild.asm | 1 | include include/uapi/asm-generic/Kbuild.asm |
2 | 2 | ||
3 | generic-y += kvm_para.h | 3 | generic-y += kvm_para.h |
4 | generic-y += shmparam.h | ||
5 | generic-y += ucontext.h | 4 | generic-y += ucontext.h |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 26387c7bf305..68261430fe6e 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -446,12 +446,12 @@ config RETPOLINE | |||
446 | branches. Requires a compiler with -mindirect-branch=thunk-extern | 446 | branches. Requires a compiler with -mindirect-branch=thunk-extern |
447 | support for full protection. The kernel may run slower. | 447 | support for full protection. The kernel may run slower. |
448 | 448 | ||
449 | config X86_RESCTRL | 449 | config X86_CPU_RESCTRL |
450 | bool "Resource Control support" | 450 | bool "x86 CPU resource control support" |
451 | depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) | 451 | depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) |
452 | select KERNFS | 452 | select KERNFS |
453 | help | 453 | help |
454 | Enable Resource Control support. | 454 | Enable x86 CPU resource control support. |
455 | 455 | ||
456 | Provide support for the allocation and monitoring of system resources | 456 | Provide support for the allocation and monitoring of system resources |
457 | usage by the CPU. | 457 | usage by the CPU. |
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 64037895b085..f62e347862cc 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
@@ -600,6 +600,16 @@ ENTRY(trampoline_32bit_src) | |||
600 | leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax | 600 | leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax |
601 | movl %eax, %cr3 | 601 | movl %eax, %cr3 |
602 | 3: | 602 | 3: |
603 | /* Set EFER.LME=1 as a precaution in case hypervsior pulls the rug */ | ||
604 | pushl %ecx | ||
605 | pushl %edx | ||
606 | movl $MSR_EFER, %ecx | ||
607 | rdmsr | ||
608 | btsl $_EFER_LME, %eax | ||
609 | wrmsr | ||
610 | popl %edx | ||
611 | popl %ecx | ||
612 | |||
603 | /* Enable PAE and LA57 (if required) paging modes */ | 613 | /* Enable PAE and LA57 (if required) paging modes */ |
604 | movl $X86_CR4_PAE, %eax | 614 | movl $X86_CR4_PAE, %eax |
605 | cmpl $0, %edx | 615 | cmpl $0, %edx |
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h index 91f75638f6e6..6ff7e81b5628 100644 --- a/arch/x86/boot/compressed/pgtable.h +++ b/arch/x86/boot/compressed/pgtable.h | |||
@@ -6,7 +6,7 @@ | |||
6 | #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0 | 6 | #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0 |
7 | 7 | ||
8 | #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE | 8 | #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE |
9 | #define TRAMPOLINE_32BIT_CODE_SIZE 0x60 | 9 | #define TRAMPOLINE_32BIT_CODE_SIZE 0x70 |
10 | 10 | ||
11 | #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE | 11 | #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE |
12 | 12 | ||
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 40e12cfc87f6..daafb893449b 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
@@ -3559,6 +3559,14 @@ static void free_excl_cntrs(int cpu) | |||
3559 | 3559 | ||
3560 | static void intel_pmu_cpu_dying(int cpu) | 3560 | static void intel_pmu_cpu_dying(int cpu) |
3561 | { | 3561 | { |
3562 | fini_debug_store_on_cpu(cpu); | ||
3563 | |||
3564 | if (x86_pmu.counter_freezing) | ||
3565 | disable_counter_freeze(); | ||
3566 | } | ||
3567 | |||
3568 | static void intel_pmu_cpu_dead(int cpu) | ||
3569 | { | ||
3562 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); | 3570 | struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); |
3563 | struct intel_shared_regs *pc; | 3571 | struct intel_shared_regs *pc; |
3564 | 3572 | ||
@@ -3570,11 +3578,6 @@ static void intel_pmu_cpu_dying(int cpu) | |||
3570 | } | 3578 | } |
3571 | 3579 | ||
3572 | free_excl_cntrs(cpu); | 3580 | free_excl_cntrs(cpu); |
3573 | |||
3574 | fini_debug_store_on_cpu(cpu); | ||
3575 | |||
3576 | if (x86_pmu.counter_freezing) | ||
3577 | disable_counter_freeze(); | ||
3578 | } | 3581 | } |
3579 | 3582 | ||
3580 | static void intel_pmu_sched_task(struct perf_event_context *ctx, | 3583 | static void intel_pmu_sched_task(struct perf_event_context *ctx, |
@@ -3663,6 +3666,7 @@ static __initconst const struct x86_pmu core_pmu = { | |||
3663 | .cpu_prepare = intel_pmu_cpu_prepare, | 3666 | .cpu_prepare = intel_pmu_cpu_prepare, |
3664 | .cpu_starting = intel_pmu_cpu_starting, | 3667 | .cpu_starting = intel_pmu_cpu_starting, |
3665 | .cpu_dying = intel_pmu_cpu_dying, | 3668 | .cpu_dying = intel_pmu_cpu_dying, |
3669 | .cpu_dead = intel_pmu_cpu_dead, | ||
3666 | }; | 3670 | }; |
3667 | 3671 | ||
3668 | static struct attribute *intel_pmu_attrs[]; | 3672 | static struct attribute *intel_pmu_attrs[]; |
@@ -3703,6 +3707,8 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
3703 | .cpu_prepare = intel_pmu_cpu_prepare, | 3707 | .cpu_prepare = intel_pmu_cpu_prepare, |
3704 | .cpu_starting = intel_pmu_cpu_starting, | 3708 | .cpu_starting = intel_pmu_cpu_starting, |
3705 | .cpu_dying = intel_pmu_cpu_dying, | 3709 | .cpu_dying = intel_pmu_cpu_dying, |
3710 | .cpu_dead = intel_pmu_cpu_dead, | ||
3711 | |||
3706 | .guest_get_msrs = intel_guest_get_msrs, | 3712 | .guest_get_msrs = intel_guest_get_msrs, |
3707 | .sched_task = intel_pmu_sched_task, | 3713 | .sched_task = intel_pmu_sched_task, |
3708 | }; | 3714 | }; |
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c07bee31abe8..b10e04387f38 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c | |||
@@ -1222,6 +1222,8 @@ static struct pci_driver snbep_uncore_pci_driver = { | |||
1222 | .id_table = snbep_uncore_pci_ids, | 1222 | .id_table = snbep_uncore_pci_ids, |
1223 | }; | 1223 | }; |
1224 | 1224 | ||
1225 | #define NODE_ID_MASK 0x7 | ||
1226 | |||
1225 | /* | 1227 | /* |
1226 | * build pci bus to socket mapping | 1228 | * build pci bus to socket mapping |
1227 | */ | 1229 | */ |
@@ -1243,7 +1245,7 @@ static int snbep_pci2phy_map_init(int devid, int nodeid_loc, int idmap_loc, bool | |||
1243 | err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); | 1245 | err = pci_read_config_dword(ubox_dev, nodeid_loc, &config); |
1244 | if (err) | 1246 | if (err) |
1245 | break; | 1247 | break; |
1246 | nodeid = config; | 1248 | nodeid = config & NODE_ID_MASK; |
1247 | /* get the Node ID mapping */ | 1249 | /* get the Node ID mapping */ |
1248 | err = pci_read_config_dword(ubox_dev, idmap_loc, &config); | 1250 | err = pci_read_config_dword(ubox_dev, idmap_loc, &config); |
1249 | if (err) | 1251 | if (err) |
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h index 0dd6b0f4000e..d9a9993af882 100644 --- a/arch/x86/include/asm/intel-family.h +++ b/arch/x86/include/asm/intel-family.h | |||
@@ -6,7 +6,7 @@ | |||
6 | * "Big Core" Processors (Branded as Core, Xeon, etc...) | 6 | * "Big Core" Processors (Branded as Core, Xeon, etc...) |
7 | * | 7 | * |
8 | * The "_X" parts are generally the EP and EX Xeons, or the | 8 | * The "_X" parts are generally the EP and EX Xeons, or the |
9 | * "Extreme" ones, like Broadwell-E. | 9 | * "Extreme" ones, like Broadwell-E, or Atom microserver. |
10 | * | 10 | * |
11 | * While adding a new CPUID for a new microarchitecture, add a new | 11 | * While adding a new CPUID for a new microarchitecture, add a new |
12 | * group to keep logically sorted out in chronological order. Within | 12 | * group to keep logically sorted out in chronological order. Within |
@@ -71,6 +71,7 @@ | |||
71 | #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ | 71 | #define INTEL_FAM6_ATOM_GOLDMONT 0x5C /* Apollo Lake */ |
72 | #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ | 72 | #define INTEL_FAM6_ATOM_GOLDMONT_X 0x5F /* Denverton */ |
73 | #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ | 73 | #define INTEL_FAM6_ATOM_GOLDMONT_PLUS 0x7A /* Gemini Lake */ |
74 | #define INTEL_FAM6_ATOM_TREMONT_X 0x86 /* Jacobsville */ | ||
74 | 75 | ||
75 | /* Xeon Phi */ | 76 | /* Xeon Phi */ |
76 | 77 | ||
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index 8f657286d599..0ce558a8150d 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h | |||
@@ -7,7 +7,11 @@ | |||
7 | #endif | 7 | #endif |
8 | 8 | ||
9 | #ifdef CONFIG_KASAN | 9 | #ifdef CONFIG_KASAN |
10 | #ifdef CONFIG_KASAN_EXTRA | ||
11 | #define KASAN_STACK_ORDER 2 | ||
12 | #else | ||
10 | #define KASAN_STACK_ORDER 1 | 13 | #define KASAN_STACK_ORDER 1 |
14 | #endif | ||
11 | #else | 15 | #else |
12 | #define KASAN_STACK_ORDER 0 | 16 | #define KASAN_STACK_ORDER 0 |
13 | #endif | 17 | #endif |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 40616e805292..2779ace16d23 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -1065,7 +1065,7 @@ static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
1065 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, | 1065 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, |
1066 | pmd_t *pmdp, pmd_t pmd) | 1066 | pmd_t *pmdp, pmd_t pmd) |
1067 | { | 1067 | { |
1068 | native_set_pmd(pmdp, pmd); | 1068 | set_pmd(pmdp, pmd); |
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, | 1071 | static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, |
diff --git a/arch/x86/include/asm/resctrl_sched.h b/arch/x86/include/asm/resctrl_sched.h index 40ebddde6ac2..f6b7fe2833cc 100644 --- a/arch/x86/include/asm/resctrl_sched.h +++ b/arch/x86/include/asm/resctrl_sched.h | |||
@@ -2,7 +2,7 @@ | |||
2 | #ifndef _ASM_X86_RESCTRL_SCHED_H | 2 | #ifndef _ASM_X86_RESCTRL_SCHED_H |
3 | #define _ASM_X86_RESCTRL_SCHED_H | 3 | #define _ASM_X86_RESCTRL_SCHED_H |
4 | 4 | ||
5 | #ifdef CONFIG_X86_RESCTRL | 5 | #ifdef CONFIG_X86_CPU_RESCTRL |
6 | 6 | ||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/jump_label.h> | 8 | #include <linux/jump_label.h> |
@@ -88,6 +88,6 @@ static inline void resctrl_sched_in(void) | |||
88 | 88 | ||
89 | static inline void resctrl_sched_in(void) {} | 89 | static inline void resctrl_sched_in(void) {} |
90 | 90 | ||
91 | #endif /* CONFIG_X86_RESCTRL */ | 91 | #endif /* CONFIG_X86_CPU_RESCTRL */ |
92 | 92 | ||
93 | #endif /* _ASM_X86_RESCTRL_SCHED_H */ | 93 | #endif /* _ASM_X86_RESCTRL_SCHED_H */ |
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index b6fa0869f7aa..cfd24f9f7614 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile | |||
@@ -39,7 +39,7 @@ obj-$(CONFIG_CPU_SUP_UMC_32) += umc.o | |||
39 | obj-$(CONFIG_X86_MCE) += mce/ | 39 | obj-$(CONFIG_X86_MCE) += mce/ |
40 | obj-$(CONFIG_MTRR) += mtrr/ | 40 | obj-$(CONFIG_MTRR) += mtrr/ |
41 | obj-$(CONFIG_MICROCODE) += microcode/ | 41 | obj-$(CONFIG_MICROCODE) += microcode/ |
42 | obj-$(CONFIG_X86_RESCTRL) += resctrl/ | 42 | obj-$(CONFIG_X86_CPU_RESCTRL) += resctrl/ |
43 | 43 | ||
44 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o | 44 | obj-$(CONFIG_X86_LOCAL_APIC) += perfctr-watchdog.o |
45 | 45 | ||
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 1de0f4170178..01874d54f4fd 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -71,7 +71,7 @@ void __init check_bugs(void) | |||
71 | * identify_boot_cpu() initialized SMT support information, let the | 71 | * identify_boot_cpu() initialized SMT support information, let the |
72 | * core code know. | 72 | * core code know. |
73 | */ | 73 | */ |
74 | cpu_smt_check_topology_early(); | 74 | cpu_smt_check_topology(); |
75 | 75 | ||
76 | if (!IS_ENABLED(CONFIG_SMP)) { | 76 | if (!IS_ENABLED(CONFIG_SMP)) { |
77 | pr_info("CPU: "); | 77 | pr_info("CPU: "); |
diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 672c7225cb1b..6ce290c506d9 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c | |||
@@ -784,6 +784,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, | |||
784 | quirk_no_way_out(i, m, regs); | 784 | quirk_no_way_out(i, m, regs); |
785 | 785 | ||
786 | if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { | 786 | if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) { |
787 | m->bank = i; | ||
787 | mce_read_aux(m, i); | 788 | mce_read_aux(m, i); |
788 | *msg = tmp; | 789 | *msg = tmp; |
789 | return 1; | 790 | return 1; |
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index 51adde0a0f1a..e1f3ba19ba54 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c | |||
@@ -855,7 +855,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size) | |||
855 | if (!p) { | 855 | if (!p) { |
856 | return ret; | 856 | return ret; |
857 | } else { | 857 | } else { |
858 | if (boot_cpu_data.microcode == p->patch_id) | 858 | if (boot_cpu_data.microcode >= p->patch_id) |
859 | return ret; | 859 | return ret; |
860 | 860 | ||
861 | ret = UCODE_NEW; | 861 | ret = UCODE_NEW; |
diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile index 1cabe6fd8e11..4a06c37b9cf1 100644 --- a/arch/x86/kernel/cpu/resctrl/Makefile +++ b/arch/x86/kernel/cpu/resctrl/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | obj-$(CONFIG_X86_RESCTRL) += core.o rdtgroup.o monitor.o | 2 | obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o |
3 | obj-$(CONFIG_X86_RESCTRL) += ctrlmondata.o pseudo_lock.o | 3 | obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o |
4 | CFLAGS_pseudo_lock.o = -I$(src) | 4 | CFLAGS_pseudo_lock.o = -I$(src) |
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 0d5efa34f359..53917a3ebf94 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c | |||
@@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr, | |||
167 | struct efi_info *current_ei = &boot_params.efi_info; | 167 | struct efi_info *current_ei = &boot_params.efi_info; |
168 | struct efi_info *ei = ¶ms->efi_info; | 168 | struct efi_info *ei = ¶ms->efi_info; |
169 | 169 | ||
170 | if (!efi_enabled(EFI_RUNTIME_SERVICES)) | ||
171 | return 0; | ||
172 | |||
170 | if (!current_ei->efi_memmap_size) | 173 | if (!current_ei->efi_memmap_size) |
171 | return 0; | 174 | return 0; |
172 | 175 | ||
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 8ff20523661b..d8ea4ebd79e7 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c | |||
@@ -211,6 +211,7 @@ static void free_nested(struct kvm_vcpu *vcpu) | |||
211 | if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) | 211 | if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) |
212 | return; | 212 | return; |
213 | 213 | ||
214 | hrtimer_cancel(&vmx->nested.preemption_timer); | ||
214 | vmx->nested.vmxon = false; | 215 | vmx->nested.vmxon = false; |
215 | vmx->nested.smm.vmxon = false; | 216 | vmx->nested.smm.vmxon = false; |
216 | free_vpid(vmx->nested.vpid02); | 217 | free_vpid(vmx->nested.vpid02); |
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 4341175339f3..95d618045001 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/mod_devicetable.h> | 26 | #include <linux/mod_devicetable.h> |
27 | #include <linux/mm.h> | 27 | #include <linux/mm.h> |
28 | #include <linux/sched.h> | 28 | #include <linux/sched.h> |
29 | #include <linux/sched/smt.h> | ||
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
30 | #include <linux/tboot.h> | 31 | #include <linux/tboot.h> |
31 | #include <linux/trace_events.h> | 32 | #include <linux/trace_events.h> |
@@ -6823,7 +6824,7 @@ static int vmx_vm_init(struct kvm *kvm) | |||
6823 | * Warn upon starting the first VM in a potentially | 6824 | * Warn upon starting the first VM in a potentially |
6824 | * insecure environment. | 6825 | * insecure environment. |
6825 | */ | 6826 | */ |
6826 | if (cpu_smt_control == CPU_SMT_ENABLED) | 6827 | if (sched_smt_active()) |
6827 | pr_warn_once(L1TF_MSG_SMT); | 6828 | pr_warn_once(L1TF_MSG_SMT); |
6828 | if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) | 6829 | if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER) |
6829 | pr_warn_once(L1TF_MSG_L1D); | 6830 | pr_warn_once(L1TF_MSG_L1D); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3d27206f6c01..e67ecf25e690 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -5116,6 +5116,13 @@ int kvm_read_guest_virt(struct kvm_vcpu *vcpu, | |||
5116 | { | 5116 | { |
5117 | u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; | 5117 | u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; |
5118 | 5118 | ||
5119 | /* | ||
5120 | * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED | ||
5121 | * is returned, but our callers are not ready for that and they blindly | ||
5122 | * call kvm_inject_page_fault. Ensure that they at least do not leak | ||
5123 | * uninitialized kernel stack memory into cr2 and error code. | ||
5124 | */ | ||
5125 | memset(exception, 0, sizeof(*exception)); | ||
5119 | return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, | 5126 | return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, |
5120 | exception); | 5127 | exception); |
5121 | } | 5128 | } |
diff --git a/arch/x86/lib/iomem.c b/arch/x86/lib/iomem.c index 66894675f3c8..df50451d94ef 100644 --- a/arch/x86/lib/iomem.c +++ b/arch/x86/lib/iomem.c | |||
@@ -2,8 +2,11 @@ | |||
2 | #include <linux/module.h> | 2 | #include <linux/module.h> |
3 | #include <linux/io.h> | 3 | #include <linux/io.h> |
4 | 4 | ||
5 | #define movs(type,to,from) \ | ||
6 | asm volatile("movs" type:"=&D" (to), "=&S" (from):"0" (to), "1" (from):"memory") | ||
7 | |||
5 | /* Originally from i386/string.h */ | 8 | /* Originally from i386/string.h */ |
6 | static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) | 9 | static __always_inline void rep_movs(void *to, const void *from, size_t n) |
7 | { | 10 | { |
8 | unsigned long d0, d1, d2; | 11 | unsigned long d0, d1, d2; |
9 | asm volatile("rep ; movsl\n\t" | 12 | asm volatile("rep ; movsl\n\t" |
@@ -21,13 +24,37 @@ static __always_inline void __iomem_memcpy(void *to, const void *from, size_t n) | |||
21 | 24 | ||
22 | void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) | 25 | void memcpy_fromio(void *to, const volatile void __iomem *from, size_t n) |
23 | { | 26 | { |
24 | __iomem_memcpy(to, (const void *)from, n); | 27 | if (unlikely(!n)) |
28 | return; | ||
29 | |||
30 | /* Align any unaligned source IO */ | ||
31 | if (unlikely(1 & (unsigned long)from)) { | ||
32 | movs("b", to, from); | ||
33 | n--; | ||
34 | } | ||
35 | if (n > 1 && unlikely(2 & (unsigned long)from)) { | ||
36 | movs("w", to, from); | ||
37 | n-=2; | ||
38 | } | ||
39 | rep_movs(to, (const void *)from, n); | ||
25 | } | 40 | } |
26 | EXPORT_SYMBOL(memcpy_fromio); | 41 | EXPORT_SYMBOL(memcpy_fromio); |
27 | 42 | ||
28 | void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) | 43 | void memcpy_toio(volatile void __iomem *to, const void *from, size_t n) |
29 | { | 44 | { |
30 | __iomem_memcpy((void *)to, (const void *) from, n); | 45 | if (unlikely(!n)) |
46 | return; | ||
47 | |||
48 | /* Align any unaligned destination IO */ | ||
49 | if (unlikely(1 & (unsigned long)to)) { | ||
50 | movs("b", to, from); | ||
51 | n--; | ||
52 | } | ||
53 | if (n > 1 && unlikely(2 & (unsigned long)to)) { | ||
54 | movs("w", to, from); | ||
55 | n-=2; | ||
56 | } | ||
57 | rep_movs((void *)to, (const void *) from, n); | ||
31 | } | 58 | } |
32 | EXPORT_SYMBOL(memcpy_toio); | 59 | EXPORT_SYMBOL(memcpy_toio); |
33 | 60 | ||
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 2ff25ad33233..9d5c75f02295 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -595,7 +595,7 @@ static void show_ldttss(const struct desc_ptr *gdt, const char *name, u16 index) | |||
595 | return; | 595 | return; |
596 | } | 596 | } |
597 | 597 | ||
598 | addr = desc.base0 | (desc.base1 << 16) | (desc.base2 << 24); | 598 | addr = desc.base0 | (desc.base1 << 16) | ((unsigned long)desc.base2 << 24); |
599 | #ifdef CONFIG_X86_64 | 599 | #ifdef CONFIG_X86_64 |
600 | addr |= ((u64)desc.base3 << 32); | 600 | addr |= ((u64)desc.base3 << 32); |
601 | #endif | 601 | #endif |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 4f8972311a77..14e6119838a6 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -230,6 +230,29 @@ static bool __cpa_pfn_in_highmap(unsigned long pfn) | |||
230 | 230 | ||
231 | #endif | 231 | #endif |
232 | 232 | ||
233 | /* | ||
234 | * See set_mce_nospec(). | ||
235 | * | ||
236 | * Machine check recovery code needs to change cache mode of poisoned pages to | ||
237 | * UC to avoid speculative access logging another error. But passing the | ||
238 | * address of the 1:1 mapping to set_memory_uc() is a fine way to encourage a | ||
239 | * speculative access. So we cheat and flip the top bit of the address. This | ||
240 | * works fine for the code that updates the page tables. But at the end of the | ||
241 | * process we need to flush the TLB and cache and the non-canonical address | ||
242 | * causes a #GP fault when used by the INVLPG and CLFLUSH instructions. | ||
243 | * | ||
244 | * But in the common case we already have a canonical address. This code | ||
245 | * will fix the top bit if needed and is a no-op otherwise. | ||
246 | */ | ||
247 | static inline unsigned long fix_addr(unsigned long addr) | ||
248 | { | ||
249 | #ifdef CONFIG_X86_64 | ||
250 | return (long)(addr << 1) >> 1; | ||
251 | #else | ||
252 | return addr; | ||
253 | #endif | ||
254 | } | ||
255 | |||
233 | static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) | 256 | static unsigned long __cpa_addr(struct cpa_data *cpa, unsigned long idx) |
234 | { | 257 | { |
235 | if (cpa->flags & CPA_PAGES_ARRAY) { | 258 | if (cpa->flags & CPA_PAGES_ARRAY) { |
@@ -313,7 +336,7 @@ void __cpa_flush_tlb(void *data) | |||
313 | unsigned int i; | 336 | unsigned int i; |
314 | 337 | ||
315 | for (i = 0; i < cpa->numpages; i++) | 338 | for (i = 0; i < cpa->numpages; i++) |
316 | __flush_tlb_one_kernel(__cpa_addr(cpa, i)); | 339 | __flush_tlb_one_kernel(fix_addr(__cpa_addr(cpa, i))); |
317 | } | 340 | } |
318 | 341 | ||
319 | static void cpa_flush(struct cpa_data *data, int cache) | 342 | static void cpa_flush(struct cpa_data *data, int cache) |
@@ -347,7 +370,7 @@ static void cpa_flush(struct cpa_data *data, int cache) | |||
347 | * Only flush present addresses: | 370 | * Only flush present addresses: |
348 | */ | 371 | */ |
349 | if (pte && (pte_val(*pte) & _PAGE_PRESENT)) | 372 | if (pte && (pte_val(*pte) & _PAGE_PRESENT)) |
350 | clflush_cache_range_opt((void *)addr, PAGE_SIZE); | 373 | clflush_cache_range_opt((void *)fix_addr(addr), PAGE_SIZE); |
351 | } | 374 | } |
352 | mb(); | 375 | mb(); |
353 | } | 376 | } |
@@ -1627,29 +1650,6 @@ out: | |||
1627 | return ret; | 1650 | return ret; |
1628 | } | 1651 | } |
1629 | 1652 | ||
1630 | /* | ||
1631 | * Machine check recovery code needs to change cache mode of poisoned | ||
1632 | * pages to UC to avoid speculative access logging another error. But | ||
1633 | * passing the address of the 1:1 mapping to set_memory_uc() is a fine | ||
1634 | * way to encourage a speculative access. So we cheat and flip the top | ||
1635 | * bit of the address. This works fine for the code that updates the | ||
1636 | * page tables. But at the end of the process we need to flush the cache | ||
1637 | * and the non-canonical address causes a #GP fault when used by the | ||
1638 | * CLFLUSH instruction. | ||
1639 | * | ||
1640 | * But in the common case we already have a canonical address. This code | ||
1641 | * will fix the top bit if needed and is a no-op otherwise. | ||
1642 | */ | ||
1643 | static inline unsigned long make_addr_canonical_again(unsigned long addr) | ||
1644 | { | ||
1645 | #ifdef CONFIG_X86_64 | ||
1646 | return (long)(addr << 1) >> 1; | ||
1647 | #else | ||
1648 | return addr; | ||
1649 | #endif | ||
1650 | } | ||
1651 | |||
1652 | |||
1653 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, | 1653 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, |
1654 | pgprot_t mask_set, pgprot_t mask_clr, | 1654 | pgprot_t mask_set, pgprot_t mask_clr, |
1655 | int force_split, int in_flag, | 1655 | int force_split, int in_flag, |
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 20a0756f27ef..ce91682770cb 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig | |||
@@ -164,7 +164,7 @@ config XTENSA_FAKE_NMI | |||
164 | If unsure, say N. | 164 | If unsure, say N. |
165 | 165 | ||
166 | config XTENSA_UNALIGNED_USER | 166 | config XTENSA_UNALIGNED_USER |
167 | bool "Unaligned memory access in use space" | 167 | bool "Unaligned memory access in user space" |
168 | help | 168 | help |
169 | The Xtensa architecture currently does not handle unaligned | 169 | The Xtensa architecture currently does not handle unaligned |
170 | memory accesses in hardware but through an exception handler. | 170 | memory accesses in hardware but through an exception handler. |
@@ -451,7 +451,7 @@ config USE_OF | |||
451 | help | 451 | help |
452 | Include support for flattened device tree machine descriptions. | 452 | Include support for flattened device tree machine descriptions. |
453 | 453 | ||
454 | config BUILTIN_DTB | 454 | config BUILTIN_DTB_SOURCE |
455 | string "DTB to build into the kernel image" | 455 | string "DTB to build into the kernel image" |
456 | depends on OF | 456 | depends on OF |
457 | 457 | ||
diff --git a/arch/xtensa/boot/dts/Makefile b/arch/xtensa/boot/dts/Makefile index f8052ba5aea8..0b8d00cdae7c 100644 --- a/arch/xtensa/boot/dts/Makefile +++ b/arch/xtensa/boot/dts/Makefile | |||
@@ -7,9 +7,9 @@ | |||
7 | # | 7 | # |
8 | # | 8 | # |
9 | 9 | ||
10 | BUILTIN_DTB := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB)).dtb.o | 10 | BUILTIN_DTB_SOURCE := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_SOURCE)).dtb.o |
11 | ifneq ($(CONFIG_BUILTIN_DTB),"") | 11 | ifneq ($(CONFIG_BUILTIN_DTB_SOURCE),"") |
12 | obj-$(CONFIG_OF) += $(BUILTIN_DTB) | 12 | obj-$(CONFIG_OF) += $(BUILTIN_DTB_SOURCE) |
13 | endif | 13 | endif |
14 | 14 | ||
15 | # for CONFIG_OF_ALL_DTBS test | 15 | # for CONFIG_OF_ALL_DTBS test |
diff --git a/arch/xtensa/configs/audio_kc705_defconfig b/arch/xtensa/configs/audio_kc705_defconfig index 2bf964df37ba..f378e56f9ce6 100644 --- a/arch/xtensa/configs/audio_kc705_defconfig +++ b/arch/xtensa/configs/audio_kc705_defconfig | |||
@@ -34,7 +34,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y | |||
34 | CONFIG_CMDLINE_BOOL=y | 34 | CONFIG_CMDLINE_BOOL=y |
35 | CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" | 35 | CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" |
36 | CONFIG_USE_OF=y | 36 | CONFIG_USE_OF=y |
37 | CONFIG_BUILTIN_DTB="kc705" | 37 | CONFIG_BUILTIN_DTB_SOURCE="kc705" |
38 | # CONFIG_COMPACTION is not set | 38 | # CONFIG_COMPACTION is not set |
39 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 39 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
40 | CONFIG_PM=y | 40 | CONFIG_PM=y |
diff --git a/arch/xtensa/configs/cadence_csp_defconfig b/arch/xtensa/configs/cadence_csp_defconfig index 3221b7053fa3..62f32a902568 100644 --- a/arch/xtensa/configs/cadence_csp_defconfig +++ b/arch/xtensa/configs/cadence_csp_defconfig | |||
@@ -38,7 +38,7 @@ CONFIG_HIGHMEM=y | |||
38 | # CONFIG_PCI is not set | 38 | # CONFIG_PCI is not set |
39 | CONFIG_XTENSA_PLATFORM_XTFPGA=y | 39 | CONFIG_XTENSA_PLATFORM_XTFPGA=y |
40 | CONFIG_USE_OF=y | 40 | CONFIG_USE_OF=y |
41 | CONFIG_BUILTIN_DTB="csp" | 41 | CONFIG_BUILTIN_DTB_SOURCE="csp" |
42 | # CONFIG_COMPACTION is not set | 42 | # CONFIG_COMPACTION is not set |
43 | CONFIG_XTFPGA_LCD=y | 43 | CONFIG_XTFPGA_LCD=y |
44 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 44 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig index 985fa8546e4e..8bebe07f1060 100644 --- a/arch/xtensa/configs/generic_kc705_defconfig +++ b/arch/xtensa/configs/generic_kc705_defconfig | |||
@@ -33,7 +33,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y | |||
33 | CONFIG_CMDLINE_BOOL=y | 33 | CONFIG_CMDLINE_BOOL=y |
34 | CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" | 34 | CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000@0" |
35 | CONFIG_USE_OF=y | 35 | CONFIG_USE_OF=y |
36 | CONFIG_BUILTIN_DTB="kc705" | 36 | CONFIG_BUILTIN_DTB_SOURCE="kc705" |
37 | # CONFIG_COMPACTION is not set | 37 | # CONFIG_COMPACTION is not set |
38 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 38 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
39 | CONFIG_NET=y | 39 | CONFIG_NET=y |
diff --git a/arch/xtensa/configs/nommu_kc705_defconfig b/arch/xtensa/configs/nommu_kc705_defconfig index f3fc4f970ca8..933ab2adf434 100644 --- a/arch/xtensa/configs/nommu_kc705_defconfig +++ b/arch/xtensa/configs/nommu_kc705_defconfig | |||
@@ -39,7 +39,7 @@ CONFIG_XTENSA_PLATFORM_XTFPGA=y | |||
39 | CONFIG_CMDLINE_BOOL=y | 39 | CONFIG_CMDLINE_BOOL=y |
40 | CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000" | 40 | CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0x9d050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=256M@0x60000000" |
41 | CONFIG_USE_OF=y | 41 | CONFIG_USE_OF=y |
42 | CONFIG_BUILTIN_DTB="kc705_nommu" | 42 | CONFIG_BUILTIN_DTB_SOURCE="kc705_nommu" |
43 | CONFIG_BINFMT_FLAT=y | 43 | CONFIG_BINFMT_FLAT=y |
44 | CONFIG_NET=y | 44 | CONFIG_NET=y |
45 | CONFIG_PACKET=y | 45 | CONFIG_PACKET=y |
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig index 11fed6c06a7c..e29c5b179a5b 100644 --- a/arch/xtensa/configs/smp_lx200_defconfig +++ b/arch/xtensa/configs/smp_lx200_defconfig | |||
@@ -33,11 +33,12 @@ CONFIG_SMP=y | |||
33 | CONFIG_HOTPLUG_CPU=y | 33 | CONFIG_HOTPLUG_CPU=y |
34 | # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set | 34 | # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set |
35 | # CONFIG_PCI is not set | 35 | # CONFIG_PCI is not set |
36 | CONFIG_VECTORS_OFFSET=0x00002000 | ||
36 | CONFIG_XTENSA_PLATFORM_XTFPGA=y | 37 | CONFIG_XTENSA_PLATFORM_XTFPGA=y |
37 | CONFIG_CMDLINE_BOOL=y | 38 | CONFIG_CMDLINE_BOOL=y |
38 | CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0" | 39 | CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0" |
39 | CONFIG_USE_OF=y | 40 | CONFIG_USE_OF=y |
40 | CONFIG_BUILTIN_DTB="lx200mx" | 41 | CONFIG_BUILTIN_DTB_SOURCE="lx200mx" |
41 | # CONFIG_COMPACTION is not set | 42 | # CONFIG_COMPACTION is not set |
42 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set | 43 | # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set |
43 | CONFIG_NET=y | 44 | CONFIG_NET=y |
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index da08e75100ab..7f009719304e 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S | |||
@@ -276,12 +276,13 @@ should_never_return: | |||
276 | 276 | ||
277 | movi a2, cpu_start_ccount | 277 | movi a2, cpu_start_ccount |
278 | 1: | 278 | 1: |
279 | memw | ||
279 | l32i a3, a2, 0 | 280 | l32i a3, a2, 0 |
280 | beqi a3, 0, 1b | 281 | beqi a3, 0, 1b |
281 | movi a3, 0 | 282 | movi a3, 0 |
282 | s32i a3, a2, 0 | 283 | s32i a3, a2, 0 |
283 | memw | ||
284 | 1: | 284 | 1: |
285 | memw | ||
285 | l32i a3, a2, 0 | 286 | l32i a3, a2, 0 |
286 | beqi a3, 0, 1b | 287 | beqi a3, 0, 1b |
287 | wsr a3, ccount | 288 | wsr a3, ccount |
@@ -317,11 +318,13 @@ ENTRY(cpu_restart) | |||
317 | rsr a0, prid | 318 | rsr a0, prid |
318 | neg a2, a0 | 319 | neg a2, a0 |
319 | movi a3, cpu_start_id | 320 | movi a3, cpu_start_id |
321 | memw | ||
320 | s32i a2, a3, 0 | 322 | s32i a2, a3, 0 |
321 | #if XCHAL_DCACHE_IS_WRITEBACK | 323 | #if XCHAL_DCACHE_IS_WRITEBACK |
322 | dhwbi a3, 0 | 324 | dhwbi a3, 0 |
323 | #endif | 325 | #endif |
324 | 1: | 326 | 1: |
327 | memw | ||
325 | l32i a2, a3, 0 | 328 | l32i a2, a3, 0 |
326 | dhi a3, 0 | 329 | dhi a3, 0 |
327 | bne a2, a0, 1b | 330 | bne a2, a0, 1b |
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c index 932d64689bac..be1f280c322c 100644 --- a/arch/xtensa/kernel/smp.c +++ b/arch/xtensa/kernel/smp.c | |||
@@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
83 | { | 83 | { |
84 | unsigned i; | 84 | unsigned i; |
85 | 85 | ||
86 | for (i = 0; i < max_cpus; ++i) | 86 | for_each_possible_cpu(i) |
87 | set_cpu_present(i, true); | 87 | set_cpu_present(i, true); |
88 | } | 88 | } |
89 | 89 | ||
@@ -96,6 +96,11 @@ void __init smp_init_cpus(void) | |||
96 | pr_info("%s: Core Count = %d\n", __func__, ncpus); | 96 | pr_info("%s: Core Count = %d\n", __func__, ncpus); |
97 | pr_info("%s: Core Id = %d\n", __func__, core_id); | 97 | pr_info("%s: Core Id = %d\n", __func__, core_id); |
98 | 98 | ||
99 | if (ncpus > NR_CPUS) { | ||
100 | ncpus = NR_CPUS; | ||
101 | pr_info("%s: limiting core count by %d\n", __func__, ncpus); | ||
102 | } | ||
103 | |||
99 | for (i = 0; i < ncpus; ++i) | 104 | for (i = 0; i < ncpus; ++i) |
100 | set_cpu_possible(i, true); | 105 | set_cpu_possible(i, true); |
101 | } | 106 | } |
@@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) | |||
195 | int i; | 200 | int i; |
196 | 201 | ||
197 | #ifdef CONFIG_HOTPLUG_CPU | 202 | #ifdef CONFIG_HOTPLUG_CPU |
198 | cpu_start_id = cpu; | 203 | WRITE_ONCE(cpu_start_id, cpu); |
199 | system_flush_invalidate_dcache_range( | 204 | /* Pairs with the third memw in the cpu_restart */ |
200 | (unsigned long)&cpu_start_id, sizeof(cpu_start_id)); | 205 | mb(); |
206 | system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id, | ||
207 | sizeof(cpu_start_id)); | ||
201 | #endif | 208 | #endif |
202 | smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); | 209 | smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1); |
203 | 210 | ||
@@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts) | |||
206 | ccount = get_ccount(); | 213 | ccount = get_ccount(); |
207 | while (!ccount); | 214 | while (!ccount); |
208 | 215 | ||
209 | cpu_start_ccount = ccount; | 216 | WRITE_ONCE(cpu_start_ccount, ccount); |
210 | 217 | ||
211 | while (time_before(jiffies, timeout)) { | 218 | do { |
219 | /* | ||
220 | * Pairs with the first two memws in the | ||
221 | * .Lboot_secondary. | ||
222 | */ | ||
212 | mb(); | 223 | mb(); |
213 | if (!cpu_start_ccount) | 224 | ccount = READ_ONCE(cpu_start_ccount); |
214 | break; | 225 | } while (ccount && time_before(jiffies, timeout)); |
215 | } | ||
216 | 226 | ||
217 | if (cpu_start_ccount) { | 227 | if (ccount) { |
218 | smp_call_function_single(0, mx_cpu_stop, | 228 | smp_call_function_single(0, mx_cpu_stop, |
219 | (void *)cpu, 1); | 229 | (void *)cpu, 1); |
220 | cpu_start_ccount = 0; | 230 | WRITE_ONCE(cpu_start_ccount, 0); |
221 | return -EIO; | 231 | return -EIO; |
222 | } | 232 | } |
223 | } | 233 | } |
@@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
237 | pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", | 247 | pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n", |
238 | __func__, cpu, idle, start_info.stack); | 248 | __func__, cpu, idle, start_info.stack); |
239 | 249 | ||
250 | init_completion(&cpu_running); | ||
240 | ret = boot_secondary(cpu, idle); | 251 | ret = boot_secondary(cpu, idle); |
241 | if (ret == 0) { | 252 | if (ret == 0) { |
242 | wait_for_completion_timeout(&cpu_running, | 253 | wait_for_completion_timeout(&cpu_running, |
@@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu) | |||
298 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); | 309 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); |
299 | while (time_before(jiffies, timeout)) { | 310 | while (time_before(jiffies, timeout)) { |
300 | system_invalidate_dcache_range((unsigned long)&cpu_start_id, | 311 | system_invalidate_dcache_range((unsigned long)&cpu_start_id, |
301 | sizeof(cpu_start_id)); | 312 | sizeof(cpu_start_id)); |
302 | if (cpu_start_id == -cpu) { | 313 | /* Pairs with the second memw in the cpu_restart */ |
314 | mb(); | ||
315 | if (READ_ONCE(cpu_start_id) == -cpu) { | ||
303 | platform_cpu_kill(cpu); | 316 | platform_cpu_kill(cpu); |
304 | return; | 317 | return; |
305 | } | 318 | } |
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index fd524a54d2ab..378186b5eb40 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c | |||
@@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt) | |||
89 | container_of(evt, struct ccount_timer, evt); | 89 | container_of(evt, struct ccount_timer, evt); |
90 | 90 | ||
91 | if (timer->irq_enabled) { | 91 | if (timer->irq_enabled) { |
92 | disable_irq(evt->irq); | 92 | disable_irq_nosync(evt->irq); |
93 | timer->irq_enabled = 0; | 93 | timer->irq_enabled = 0; |
94 | } | 94 | } |
95 | return 0; | 95 | return 0; |
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index f8120832ca7b..7921573aebbc 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c | |||
@@ -839,6 +839,9 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = { | |||
839 | static bool debugfs_create_files(struct dentry *parent, void *data, | 839 | static bool debugfs_create_files(struct dentry *parent, void *data, |
840 | const struct blk_mq_debugfs_attr *attr) | 840 | const struct blk_mq_debugfs_attr *attr) |
841 | { | 841 | { |
842 | if (IS_ERR_OR_NULL(parent)) | ||
843 | return false; | ||
844 | |||
842 | d_inode(parent)->i_private = data; | 845 | d_inode(parent)->i_private = data; |
843 | 846 | ||
844 | for (; attr->name; attr++) { | 847 | for (; attr->name; attr++) { |
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 5c093ce01bcd..147f6c7ea59c 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c | |||
@@ -1029,6 +1029,9 @@ void __init acpi_early_init(void) | |||
1029 | 1029 | ||
1030 | acpi_permanent_mmap = true; | 1030 | acpi_permanent_mmap = true; |
1031 | 1031 | ||
1032 | /* Initialize debug output. Linux does not use ACPICA defaults */ | ||
1033 | acpi_dbg_level = ACPI_LV_INFO | ACPI_LV_REPAIR; | ||
1034 | |||
1032 | #ifdef CONFIG_X86 | 1035 | #ifdef CONFIG_X86 |
1033 | /* | 1036 | /* |
1034 | * If the machine falls into the DMI check table, | 1037 | * If the machine falls into the DMI check table, |
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index cdfc87629efb..4d2b2ad1ee0e 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -5854,9 +5854,10 @@ static int __init init_binder_device(const char *name) | |||
5854 | static int __init binder_init(void) | 5854 | static int __init binder_init(void) |
5855 | { | 5855 | { |
5856 | int ret; | 5856 | int ret; |
5857 | char *device_name, *device_names, *device_tmp; | 5857 | char *device_name, *device_tmp; |
5858 | struct binder_device *device; | 5858 | struct binder_device *device; |
5859 | struct hlist_node *tmp; | 5859 | struct hlist_node *tmp; |
5860 | char *device_names = NULL; | ||
5860 | 5861 | ||
5861 | ret = binder_alloc_shrinker_init(); | 5862 | ret = binder_alloc_shrinker_init(); |
5862 | if (ret) | 5863 | if (ret) |
@@ -5898,23 +5899,29 @@ static int __init binder_init(void) | |||
5898 | &transaction_log_fops); | 5899 | &transaction_log_fops); |
5899 | } | 5900 | } |
5900 | 5901 | ||
5901 | /* | 5902 | if (strcmp(binder_devices_param, "") != 0) { |
5902 | * Copy the module_parameter string, because we don't want to | 5903 | /* |
5903 | * tokenize it in-place. | 5904 | * Copy the module_parameter string, because we don't want to |
5904 | */ | 5905 | * tokenize it in-place. |
5905 | device_names = kstrdup(binder_devices_param, GFP_KERNEL); | 5906 | */ |
5906 | if (!device_names) { | 5907 | device_names = kstrdup(binder_devices_param, GFP_KERNEL); |
5907 | ret = -ENOMEM; | 5908 | if (!device_names) { |
5908 | goto err_alloc_device_names_failed; | 5909 | ret = -ENOMEM; |
5909 | } | 5910 | goto err_alloc_device_names_failed; |
5911 | } | ||
5910 | 5912 | ||
5911 | device_tmp = device_names; | 5913 | device_tmp = device_names; |
5912 | while ((device_name = strsep(&device_tmp, ","))) { | 5914 | while ((device_name = strsep(&device_tmp, ","))) { |
5913 | ret = init_binder_device(device_name); | 5915 | ret = init_binder_device(device_name); |
5914 | if (ret) | 5916 | if (ret) |
5915 | goto err_init_binder_device_failed; | 5917 | goto err_init_binder_device_failed; |
5918 | } | ||
5916 | } | 5919 | } |
5917 | 5920 | ||
5921 | ret = init_binderfs(); | ||
5922 | if (ret) | ||
5923 | goto err_init_binder_device_failed; | ||
5924 | |||
5918 | return ret; | 5925 | return ret; |
5919 | 5926 | ||
5920 | err_init_binder_device_failed: | 5927 | err_init_binder_device_failed: |
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h index 7fb97f503ef2..045b3e42d98b 100644 --- a/drivers/android/binder_internal.h +++ b/drivers/android/binder_internal.h | |||
@@ -46,4 +46,13 @@ static inline bool is_binderfs_device(const struct inode *inode) | |||
46 | } | 46 | } |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #ifdef CONFIG_ANDROID_BINDERFS | ||
50 | extern int __init init_binderfs(void); | ||
51 | #else | ||
52 | static inline int __init init_binderfs(void) | ||
53 | { | ||
54 | return 0; | ||
55 | } | ||
56 | #endif | ||
57 | |||
49 | #endif /* _LINUX_BINDER_INTERNAL_H */ | 58 | #endif /* _LINUX_BINDER_INTERNAL_H */ |
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 6a2185eb66c5..e773f45d19d9 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c | |||
@@ -395,6 +395,11 @@ static int binderfs_binder_ctl_create(struct super_block *sb) | |||
395 | struct inode *inode = NULL; | 395 | struct inode *inode = NULL; |
396 | struct dentry *root = sb->s_root; | 396 | struct dentry *root = sb->s_root; |
397 | struct binderfs_info *info = sb->s_fs_info; | 397 | struct binderfs_info *info = sb->s_fs_info; |
398 | #if defined(CONFIG_IPC_NS) | ||
399 | bool use_reserve = (info->ipc_ns == &init_ipc_ns); | ||
400 | #else | ||
401 | bool use_reserve = true; | ||
402 | #endif | ||
398 | 403 | ||
399 | device = kzalloc(sizeof(*device), GFP_KERNEL); | 404 | device = kzalloc(sizeof(*device), GFP_KERNEL); |
400 | if (!device) | 405 | if (!device) |
@@ -413,7 +418,10 @@ static int binderfs_binder_ctl_create(struct super_block *sb) | |||
413 | 418 | ||
414 | /* Reserve a new minor number for the new device. */ | 419 | /* Reserve a new minor number for the new device. */ |
415 | mutex_lock(&binderfs_minors_mutex); | 420 | mutex_lock(&binderfs_minors_mutex); |
416 | minor = ida_alloc_max(&binderfs_minors, BINDERFS_MAX_MINOR, GFP_KERNEL); | 421 | minor = ida_alloc_max(&binderfs_minors, |
422 | use_reserve ? BINDERFS_MAX_MINOR : | ||
423 | BINDERFS_MAX_MINOR_CAPPED, | ||
424 | GFP_KERNEL); | ||
417 | mutex_unlock(&binderfs_minors_mutex); | 425 | mutex_unlock(&binderfs_minors_mutex); |
418 | if (minor < 0) { | 426 | if (minor < 0) { |
419 | ret = minor; | 427 | ret = minor; |
@@ -542,7 +550,7 @@ static struct file_system_type binder_fs_type = { | |||
542 | .fs_flags = FS_USERNS_MOUNT, | 550 | .fs_flags = FS_USERNS_MOUNT, |
543 | }; | 551 | }; |
544 | 552 | ||
545 | static int __init init_binderfs(void) | 553 | int __init init_binderfs(void) |
546 | { | 554 | { |
547 | int ret; | 555 | int ret; |
548 | 556 | ||
@@ -560,5 +568,3 @@ static int __init init_binderfs(void) | |||
560 | 568 | ||
561 | return ret; | 569 | return ret; |
562 | } | 570 | } |
563 | |||
564 | device_initcall(init_binderfs); | ||
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index cf78fa6d470d..a7359535caf5 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c | |||
@@ -79,8 +79,7 @@ static void cache_size(struct cacheinfo *this_leaf, struct device_node *np) | |||
79 | ct_idx = get_cacheinfo_idx(this_leaf->type); | 79 | ct_idx = get_cacheinfo_idx(this_leaf->type); |
80 | propname = cache_type_info[ct_idx].size_prop; | 80 | propname = cache_type_info[ct_idx].size_prop; |
81 | 81 | ||
82 | if (of_property_read_u32(np, propname, &this_leaf->size)) | 82 | of_property_read_u32(np, propname, &this_leaf->size); |
83 | this_leaf->size = 0; | ||
84 | } | 83 | } |
85 | 84 | ||
86 | /* not cache_line_size() because that's a macro in include/linux/cache.h */ | 85 | /* not cache_line_size() because that's a macro in include/linux/cache.h */ |
@@ -114,8 +113,7 @@ static void cache_nr_sets(struct cacheinfo *this_leaf, struct device_node *np) | |||
114 | ct_idx = get_cacheinfo_idx(this_leaf->type); | 113 | ct_idx = get_cacheinfo_idx(this_leaf->type); |
115 | propname = cache_type_info[ct_idx].nr_sets_prop; | 114 | propname = cache_type_info[ct_idx].nr_sets_prop; |
116 | 115 | ||
117 | if (of_property_read_u32(np, propname, &this_leaf->number_of_sets)) | 116 | of_property_read_u32(np, propname, &this_leaf->number_of_sets); |
118 | this_leaf->number_of_sets = 0; | ||
119 | } | 117 | } |
120 | 118 | ||
121 | static void cache_associativity(struct cacheinfo *this_leaf) | 119 | static void cache_associativity(struct cacheinfo *this_leaf) |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 457be03b744d..0ea2139c50d8 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -130,7 +130,7 @@ u64 pm_runtime_autosuspend_expiration(struct device *dev) | |||
130 | { | 130 | { |
131 | int autosuspend_delay; | 131 | int autosuspend_delay; |
132 | u64 last_busy, expires = 0; | 132 | u64 last_busy, expires = 0; |
133 | u64 now = ktime_to_ns(ktime_get()); | 133 | u64 now = ktime_get_mono_fast_ns(); |
134 | 134 | ||
135 | if (!dev->power.use_autosuspend) | 135 | if (!dev->power.use_autosuspend) |
136 | goto out; | 136 | goto out; |
@@ -909,7 +909,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) | |||
909 | * If 'expires' is after the current time, we've been called | 909 | * If 'expires' is after the current time, we've been called |
910 | * too early. | 910 | * too early. |
911 | */ | 911 | */ |
912 | if (expires > 0 && expires < ktime_to_ns(ktime_get())) { | 912 | if (expires > 0 && expires < ktime_get_mono_fast_ns()) { |
913 | dev->power.timer_expires = 0; | 913 | dev->power.timer_expires = 0; |
914 | rpm_suspend(dev, dev->power.timer_autosuspends ? | 914 | rpm_suspend(dev, dev->power.timer_autosuspends ? |
915 | (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); | 915 | (RPM_ASYNC | RPM_AUTO) : RPM_ASYNC); |
@@ -928,7 +928,7 @@ static enum hrtimer_restart pm_suspend_timer_fn(struct hrtimer *timer) | |||
928 | int pm_schedule_suspend(struct device *dev, unsigned int delay) | 928 | int pm_schedule_suspend(struct device *dev, unsigned int delay) |
929 | { | 929 | { |
930 | unsigned long flags; | 930 | unsigned long flags; |
931 | ktime_t expires; | 931 | u64 expires; |
932 | int retval; | 932 | int retval; |
933 | 933 | ||
934 | spin_lock_irqsave(&dev->power.lock, flags); | 934 | spin_lock_irqsave(&dev->power.lock, flags); |
@@ -945,8 +945,8 @@ int pm_schedule_suspend(struct device *dev, unsigned int delay) | |||
945 | /* Other scheduled or pending requests need to be canceled. */ | 945 | /* Other scheduled or pending requests need to be canceled. */ |
946 | pm_runtime_cancel_pending(dev); | 946 | pm_runtime_cancel_pending(dev); |
947 | 947 | ||
948 | expires = ktime_add(ktime_get(), ms_to_ktime(delay)); | 948 | expires = ktime_get_mono_fast_ns() + (u64)delay * NSEC_PER_MSEC; |
949 | dev->power.timer_expires = ktime_to_ns(expires); | 949 | dev->power.timer_expires = expires; |
950 | dev->power.timer_autosuspends = 0; | 950 | dev->power.timer_autosuspends = 0; |
951 | hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); | 951 | hrtimer_start(&dev->power.suspend_timer, expires, HRTIMER_MODE_ABS); |
952 | 952 | ||
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 6ccdbedb02f3..d2477a5058ac 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c | |||
@@ -1513,9 +1513,19 @@ static int clk_fetch_parent_index(struct clk_core *core, | |||
1513 | if (!parent) | 1513 | if (!parent) |
1514 | return -EINVAL; | 1514 | return -EINVAL; |
1515 | 1515 | ||
1516 | for (i = 0; i < core->num_parents; i++) | 1516 | for (i = 0; i < core->num_parents; i++) { |
1517 | if (clk_core_get_parent_by_index(core, i) == parent) | 1517 | if (core->parents[i] == parent) |
1518 | return i; | ||
1519 | |||
1520 | if (core->parents[i]) | ||
1521 | continue; | ||
1522 | |||
1523 | /* Fallback to comparing globally unique names */ | ||
1524 | if (!strcmp(parent->name, core->parent_names[i])) { | ||
1525 | core->parents[i] = parent; | ||
1518 | return i; | 1526 | return i; |
1527 | } | ||
1528 | } | ||
1519 | 1529 | ||
1520 | return -EINVAL; | 1530 | return -EINVAL; |
1521 | } | 1531 | } |
diff --git a/drivers/clk/imx/clk-frac-pll.c b/drivers/clk/imx/clk-frac-pll.c index 0026c3969b1e..76b9eb15604e 100644 --- a/drivers/clk/imx/clk-frac-pll.c +++ b/drivers/clk/imx/clk-frac-pll.c | |||
@@ -155,13 +155,14 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
155 | { | 155 | { |
156 | struct clk_frac_pll *pll = to_clk_frac_pll(hw); | 156 | struct clk_frac_pll *pll = to_clk_frac_pll(hw); |
157 | u32 val, divfi, divff; | 157 | u32 val, divfi, divff; |
158 | u64 temp64 = parent_rate; | 158 | u64 temp64; |
159 | int ret; | 159 | int ret; |
160 | 160 | ||
161 | parent_rate *= 8; | 161 | parent_rate *= 8; |
162 | rate *= 2; | 162 | rate *= 2; |
163 | divfi = rate / parent_rate; | 163 | divfi = rate / parent_rate; |
164 | temp64 *= rate - divfi; | 164 | temp64 = parent_rate * divfi; |
165 | temp64 = rate - temp64; | ||
165 | temp64 *= PLL_FRAC_DENOM; | 166 | temp64 *= PLL_FRAC_DENOM; |
166 | do_div(temp64, parent_rate); | 167 | do_div(temp64, parent_rate); |
167 | divff = temp64; | 168 | divff = temp64; |
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c index 61fefc046ec5..d083b860f083 100644 --- a/drivers/clk/mmp/clk-of-mmp2.c +++ b/drivers/clk/mmp/clk-of-mmp2.c | |||
@@ -53,7 +53,6 @@ | |||
53 | #define APMU_DISP1 0x110 | 53 | #define APMU_DISP1 0x110 |
54 | #define APMU_CCIC0 0x50 | 54 | #define APMU_CCIC0 0x50 |
55 | #define APMU_CCIC1 0xf4 | 55 | #define APMU_CCIC1 0xf4 |
56 | #define APMU_SP 0x68 | ||
57 | #define MPMU_UART_PLL 0x14 | 56 | #define MPMU_UART_PLL 0x14 |
58 | 57 | ||
59 | struct mmp2_clk_unit { | 58 | struct mmp2_clk_unit { |
@@ -210,8 +209,6 @@ static struct mmp_clk_mix_config ccic1_mix_config = { | |||
210 | .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32), | 209 | .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32), |
211 | }; | 210 | }; |
212 | 211 | ||
213 | static DEFINE_SPINLOCK(sp_lock); | ||
214 | |||
215 | static struct mmp_param_mux_clk apmu_mux_clks[] = { | 212 | static struct mmp_param_mux_clk apmu_mux_clks[] = { |
216 | {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock}, | 213 | {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock}, |
217 | {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock}, | 214 | {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock}, |
@@ -242,7 +239,6 @@ static struct mmp_param_gate_clk apmu_gate_clks[] = { | |||
242 | {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock}, | 239 | {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock}, |
243 | {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, | 240 | {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock}, |
244 | {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, | 241 | {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock}, |
245 | {MMP2_CLK_SP, "sp_clk", NULL, CLK_SET_RATE_PARENT, APMU_SP, 0x1b, 0x1b, 0x0, 0, &sp_lock}, | ||
246 | }; | 242 | }; |
247 | 243 | ||
248 | static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit) | 244 | static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit) |
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c index c782e62dd98b..58fa5c247af1 100644 --- a/drivers/clk/qcom/gcc-sdm845.c +++ b/drivers/clk/qcom/gcc-sdm845.c | |||
@@ -115,8 +115,8 @@ static const char * const gcc_parent_names_6[] = { | |||
115 | "core_bi_pll_test_se", | 115 | "core_bi_pll_test_se", |
116 | }; | 116 | }; |
117 | 117 | ||
118 | static const char * const gcc_parent_names_7[] = { | 118 | static const char * const gcc_parent_names_7_ao[] = { |
119 | "bi_tcxo", | 119 | "bi_tcxo_ao", |
120 | "gpll0", | 120 | "gpll0", |
121 | "gpll0_out_even", | 121 | "gpll0_out_even", |
122 | "core_bi_pll_test_se", | 122 | "core_bi_pll_test_se", |
@@ -128,6 +128,12 @@ static const char * const gcc_parent_names_8[] = { | |||
128 | "core_bi_pll_test_se", | 128 | "core_bi_pll_test_se", |
129 | }; | 129 | }; |
130 | 130 | ||
131 | static const char * const gcc_parent_names_8_ao[] = { | ||
132 | "bi_tcxo_ao", | ||
133 | "gpll0", | ||
134 | "core_bi_pll_test_se", | ||
135 | }; | ||
136 | |||
131 | static const struct parent_map gcc_parent_map_10[] = { | 137 | static const struct parent_map gcc_parent_map_10[] = { |
132 | { P_BI_TCXO, 0 }, | 138 | { P_BI_TCXO, 0 }, |
133 | { P_GPLL0_OUT_MAIN, 1 }, | 139 | { P_GPLL0_OUT_MAIN, 1 }, |
@@ -210,7 +216,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = { | |||
210 | .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, | 216 | .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src, |
211 | .clkr.hw.init = &(struct clk_init_data){ | 217 | .clkr.hw.init = &(struct clk_init_data){ |
212 | .name = "gcc_cpuss_ahb_clk_src", | 218 | .name = "gcc_cpuss_ahb_clk_src", |
213 | .parent_names = gcc_parent_names_7, | 219 | .parent_names = gcc_parent_names_7_ao, |
214 | .num_parents = 4, | 220 | .num_parents = 4, |
215 | .ops = &clk_rcg2_ops, | 221 | .ops = &clk_rcg2_ops, |
216 | }, | 222 | }, |
@@ -229,7 +235,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = { | |||
229 | .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, | 235 | .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src, |
230 | .clkr.hw.init = &(struct clk_init_data){ | 236 | .clkr.hw.init = &(struct clk_init_data){ |
231 | .name = "gcc_cpuss_rbcpr_clk_src", | 237 | .name = "gcc_cpuss_rbcpr_clk_src", |
232 | .parent_names = gcc_parent_names_8, | 238 | .parent_names = gcc_parent_names_8_ao, |
233 | .num_parents = 3, | 239 | .num_parents = 3, |
234 | .ops = &clk_rcg2_ops, | 240 | .ops = &clk_rcg2_ops, |
235 | }, | 241 | }, |
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index 8d77090ad94a..0241450f3eb3 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c | |||
@@ -403,8 +403,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div, | |||
403 | num_dividers = i; | 403 | num_dividers = i; |
404 | 404 | ||
405 | tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); | 405 | tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL); |
406 | if (!tmp) | 406 | if (!tmp) { |
407 | *table = ERR_PTR(-ENOMEM); | ||
407 | return -ENOMEM; | 408 | return -ENOMEM; |
409 | } | ||
408 | 410 | ||
409 | valid_div = 0; | 411 | valid_div = 0; |
410 | *width = 0; | 412 | *width = 0; |
@@ -439,6 +441,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) | |||
439 | { | 441 | { |
440 | struct clk_omap_divider *div; | 442 | struct clk_omap_divider *div; |
441 | struct clk_omap_reg *reg; | 443 | struct clk_omap_reg *reg; |
444 | int ret; | ||
442 | 445 | ||
443 | if (!setup) | 446 | if (!setup) |
444 | return NULL; | 447 | return NULL; |
@@ -458,6 +461,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) | |||
458 | div->flags |= CLK_DIVIDER_POWER_OF_TWO; | 461 | div->flags |= CLK_DIVIDER_POWER_OF_TWO; |
459 | 462 | ||
460 | div->table = _get_div_table_from_setup(setup, &div->width); | 463 | div->table = _get_div_table_from_setup(setup, &div->width); |
464 | if (IS_ERR(div->table)) { | ||
465 | ret = PTR_ERR(div->table); | ||
466 | kfree(div); | ||
467 | return ERR_PTR(ret); | ||
468 | } | ||
469 | |||
461 | 470 | ||
462 | div->shift = setup->bit_shift; | 471 | div->shift = setup->bit_shift; |
463 | div->latch = -EINVAL; | 472 | div->latch = -EINVAL; |
diff --git a/drivers/cpuidle/poll_state.c b/drivers/cpuidle/poll_state.c index b17d153e724f..23a1b27579a5 100644 --- a/drivers/cpuidle/poll_state.c +++ b/drivers/cpuidle/poll_state.c | |||
@@ -21,7 +21,7 @@ static int __cpuidle poll_idle(struct cpuidle_device *dev, | |||
21 | local_irq_enable(); | 21 | local_irq_enable(); |
22 | if (!current_set_polling_and_test()) { | 22 | if (!current_set_polling_and_test()) { |
23 | unsigned int loop_count = 0; | 23 | unsigned int loop_count = 0; |
24 | u64 limit = TICK_USEC; | 24 | u64 limit = TICK_NSEC; |
25 | int i; | 25 | int i; |
26 | 26 | ||
27 | for (i = 1; i < drv->state_count; i++) { | 27 | for (i = 1; i < drv->state_count; i++) { |
diff --git a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c index fe070d75c842..4c97478d44bd 100644 --- a/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c +++ b/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c | |||
@@ -537,6 +537,8 @@ static void process_response_list(struct nitrox_cmdq *cmdq) | |||
537 | struct nitrox_device *ndev = cmdq->ndev; | 537 | struct nitrox_device *ndev = cmdq->ndev; |
538 | struct nitrox_softreq *sr; | 538 | struct nitrox_softreq *sr; |
539 | int req_completed = 0, err = 0, budget; | 539 | int req_completed = 0, err = 0, budget; |
540 | completion_t callback; | ||
541 | void *cb_arg; | ||
540 | 542 | ||
541 | /* check all pending requests */ | 543 | /* check all pending requests */ |
542 | budget = atomic_read(&cmdq->pending_count); | 544 | budget = atomic_read(&cmdq->pending_count); |
@@ -564,13 +566,13 @@ static void process_response_list(struct nitrox_cmdq *cmdq) | |||
564 | smp_mb__after_atomic(); | 566 | smp_mb__after_atomic(); |
565 | /* remove from response list */ | 567 | /* remove from response list */ |
566 | response_list_del(sr, cmdq); | 568 | response_list_del(sr, cmdq); |
567 | |||
568 | /* ORH error code */ | 569 | /* ORH error code */ |
569 | err = READ_ONCE(*sr->resp.orh) & 0xff; | 570 | err = READ_ONCE(*sr->resp.orh) & 0xff; |
570 | 571 | callback = sr->callback; | |
571 | if (sr->callback) | 572 | cb_arg = sr->cb_arg; |
572 | sr->callback(sr->cb_arg, err); | ||
573 | softreq_destroy(sr); | 573 | softreq_destroy(sr); |
574 | if (callback) | ||
575 | callback(cb_arg, err); | ||
574 | 576 | ||
575 | req_completed++; | 577 | req_completed++; |
576 | } | 578 | } |
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c index 8ada308d72ee..b0125ad65825 100644 --- a/drivers/crypto/ccree/cc_driver.c +++ b/drivers/crypto/ccree/cc_driver.c | |||
@@ -380,7 +380,7 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
380 | rc = cc_ivgen_init(new_drvdata); | 380 | rc = cc_ivgen_init(new_drvdata); |
381 | if (rc) { | 381 | if (rc) { |
382 | dev_err(dev, "cc_ivgen_init failed\n"); | 382 | dev_err(dev, "cc_ivgen_init failed\n"); |
383 | goto post_power_mgr_err; | 383 | goto post_buf_mgr_err; |
384 | } | 384 | } |
385 | 385 | ||
386 | /* Allocate crypto algs */ | 386 | /* Allocate crypto algs */ |
@@ -403,6 +403,9 @@ static int init_cc_resources(struct platform_device *plat_dev) | |||
403 | goto post_hash_err; | 403 | goto post_hash_err; |
404 | } | 404 | } |
405 | 405 | ||
406 | /* All set, we can allow autosuspend */ | ||
407 | cc_pm_go(new_drvdata); | ||
408 | |||
406 | /* If we got here and FIPS mode is enabled | 409 | /* If we got here and FIPS mode is enabled |
407 | * it means all FIPS test passed, so let TEE | 410 | * it means all FIPS test passed, so let TEE |
408 | * know we're good. | 411 | * know we're good. |
@@ -417,8 +420,6 @@ post_cipher_err: | |||
417 | cc_cipher_free(new_drvdata); | 420 | cc_cipher_free(new_drvdata); |
418 | post_ivgen_err: | 421 | post_ivgen_err: |
419 | cc_ivgen_fini(new_drvdata); | 422 | cc_ivgen_fini(new_drvdata); |
420 | post_power_mgr_err: | ||
421 | cc_pm_fini(new_drvdata); | ||
422 | post_buf_mgr_err: | 423 | post_buf_mgr_err: |
423 | cc_buffer_mgr_fini(new_drvdata); | 424 | cc_buffer_mgr_fini(new_drvdata); |
424 | post_req_mgr_err: | 425 | post_req_mgr_err: |
diff --git a/drivers/crypto/ccree/cc_pm.c b/drivers/crypto/ccree/cc_pm.c index d990f472e89f..6ff7e75ad90e 100644 --- a/drivers/crypto/ccree/cc_pm.c +++ b/drivers/crypto/ccree/cc_pm.c | |||
@@ -100,20 +100,19 @@ int cc_pm_put_suspend(struct device *dev) | |||
100 | 100 | ||
101 | int cc_pm_init(struct cc_drvdata *drvdata) | 101 | int cc_pm_init(struct cc_drvdata *drvdata) |
102 | { | 102 | { |
103 | int rc = 0; | ||
104 | struct device *dev = drvdata_to_dev(drvdata); | 103 | struct device *dev = drvdata_to_dev(drvdata); |
105 | 104 | ||
106 | /* must be before the enabling to avoid resdundent suspending */ | 105 | /* must be before the enabling to avoid resdundent suspending */ |
107 | pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); | 106 | pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT); |
108 | pm_runtime_use_autosuspend(dev); | 107 | pm_runtime_use_autosuspend(dev); |
109 | /* activate the PM module */ | 108 | /* activate the PM module */ |
110 | rc = pm_runtime_set_active(dev); | 109 | return pm_runtime_set_active(dev); |
111 | if (rc) | 110 | } |
112 | return rc; | ||
113 | /* enable the PM module*/ | ||
114 | pm_runtime_enable(dev); | ||
115 | 111 | ||
116 | return rc; | 112 | /* enable the PM module*/ |
113 | void cc_pm_go(struct cc_drvdata *drvdata) | ||
114 | { | ||
115 | pm_runtime_enable(drvdata_to_dev(drvdata)); | ||
117 | } | 116 | } |
118 | 117 | ||
119 | void cc_pm_fini(struct cc_drvdata *drvdata) | 118 | void cc_pm_fini(struct cc_drvdata *drvdata) |
diff --git a/drivers/crypto/ccree/cc_pm.h b/drivers/crypto/ccree/cc_pm.h index 020a5403c58b..f62624357020 100644 --- a/drivers/crypto/ccree/cc_pm.h +++ b/drivers/crypto/ccree/cc_pm.h | |||
@@ -16,6 +16,7 @@ | |||
16 | extern const struct dev_pm_ops ccree_pm; | 16 | extern const struct dev_pm_ops ccree_pm; |
17 | 17 | ||
18 | int cc_pm_init(struct cc_drvdata *drvdata); | 18 | int cc_pm_init(struct cc_drvdata *drvdata); |
19 | void cc_pm_go(struct cc_drvdata *drvdata); | ||
19 | void cc_pm_fini(struct cc_drvdata *drvdata); | 20 | void cc_pm_fini(struct cc_drvdata *drvdata); |
20 | int cc_pm_suspend(struct device *dev); | 21 | int cc_pm_suspend(struct device *dev); |
21 | int cc_pm_resume(struct device *dev); | 22 | int cc_pm_resume(struct device *dev); |
@@ -29,6 +30,8 @@ static inline int cc_pm_init(struct cc_drvdata *drvdata) | |||
29 | return 0; | 30 | return 0; |
30 | } | 31 | } |
31 | 32 | ||
33 | static void cc_pm_go(struct cc_drvdata *drvdata) {} | ||
34 | |||
32 | static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} | 35 | static inline void cc_pm_fini(struct cc_drvdata *drvdata) {} |
33 | 36 | ||
34 | static inline int cc_pm_suspend(struct device *dev) | 37 | static inline int cc_pm_suspend(struct device *dev) |
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 4e557684f792..fe69dccfa0c0 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c | |||
@@ -203,6 +203,7 @@ struct at_xdmac_chan { | |||
203 | u32 save_cim; | 203 | u32 save_cim; |
204 | u32 save_cnda; | 204 | u32 save_cnda; |
205 | u32 save_cndc; | 205 | u32 save_cndc; |
206 | u32 irq_status; | ||
206 | unsigned long status; | 207 | unsigned long status; |
207 | struct tasklet_struct tasklet; | 208 | struct tasklet_struct tasklet; |
208 | struct dma_slave_config sconfig; | 209 | struct dma_slave_config sconfig; |
@@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data) | |||
1580 | struct at_xdmac_desc *desc; | 1581 | struct at_xdmac_desc *desc; |
1581 | u32 error_mask; | 1582 | u32 error_mask; |
1582 | 1583 | ||
1583 | dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n", | 1584 | dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n", |
1584 | __func__, atchan->status); | 1585 | __func__, atchan->irq_status); |
1585 | 1586 | ||
1586 | error_mask = AT_XDMAC_CIS_RBEIS | 1587 | error_mask = AT_XDMAC_CIS_RBEIS |
1587 | | AT_XDMAC_CIS_WBEIS | 1588 | | AT_XDMAC_CIS_WBEIS |
@@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data) | |||
1589 | 1590 | ||
1590 | if (at_xdmac_chan_is_cyclic(atchan)) { | 1591 | if (at_xdmac_chan_is_cyclic(atchan)) { |
1591 | at_xdmac_handle_cyclic(atchan); | 1592 | at_xdmac_handle_cyclic(atchan); |
1592 | } else if ((atchan->status & AT_XDMAC_CIS_LIS) | 1593 | } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS) |
1593 | || (atchan->status & error_mask)) { | 1594 | || (atchan->irq_status & error_mask)) { |
1594 | struct dma_async_tx_descriptor *txd; | 1595 | struct dma_async_tx_descriptor *txd; |
1595 | 1596 | ||
1596 | if (atchan->status & AT_XDMAC_CIS_RBEIS) | 1597 | if (atchan->irq_status & AT_XDMAC_CIS_RBEIS) |
1597 | dev_err(chan2dev(&atchan->chan), "read bus error!!!"); | 1598 | dev_err(chan2dev(&atchan->chan), "read bus error!!!"); |
1598 | if (atchan->status & AT_XDMAC_CIS_WBEIS) | 1599 | if (atchan->irq_status & AT_XDMAC_CIS_WBEIS) |
1599 | dev_err(chan2dev(&atchan->chan), "write bus error!!!"); | 1600 | dev_err(chan2dev(&atchan->chan), "write bus error!!!"); |
1600 | if (atchan->status & AT_XDMAC_CIS_ROIS) | 1601 | if (atchan->irq_status & AT_XDMAC_CIS_ROIS) |
1601 | dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); | 1602 | dev_err(chan2dev(&atchan->chan), "request overflow error!!!"); |
1602 | 1603 | ||
1603 | spin_lock(&atchan->lock); | 1604 | spin_lock(&atchan->lock); |
@@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) | |||
1652 | atchan = &atxdmac->chan[i]; | 1653 | atchan = &atxdmac->chan[i]; |
1653 | chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); | 1654 | chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); |
1654 | chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); | 1655 | chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS); |
1655 | atchan->status = chan_status & chan_imr; | 1656 | atchan->irq_status = chan_status & chan_imr; |
1656 | dev_vdbg(atxdmac->dma.dev, | 1657 | dev_vdbg(atxdmac->dma.dev, |
1657 | "%s: chan%d: imr=0x%x, status=0x%x\n", | 1658 | "%s: chan%d: imr=0x%x, status=0x%x\n", |
1658 | __func__, i, chan_imr, chan_status); | 1659 | __func__, i, chan_imr, chan_status); |
@@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id) | |||
1666 | at_xdmac_chan_read(atchan, AT_XDMAC_CDA), | 1667 | at_xdmac_chan_read(atchan, AT_XDMAC_CDA), |
1667 | at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); | 1668 | at_xdmac_chan_read(atchan, AT_XDMAC_CUBC)); |
1668 | 1669 | ||
1669 | if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) | 1670 | if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS)) |
1670 | at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); | 1671 | at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); |
1671 | 1672 | ||
1672 | tasklet_schedule(&atchan->tasklet); | 1673 | tasklet_schedule(&atchan->tasklet); |
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 1a44c8086d77..ae10f5614f95 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c | |||
@@ -406,38 +406,32 @@ static void bcm2835_dma_fill_cb_chain_with_sg( | |||
406 | } | 406 | } |
407 | } | 407 | } |
408 | 408 | ||
409 | static int bcm2835_dma_abort(void __iomem *chan_base) | 409 | static int bcm2835_dma_abort(struct bcm2835_chan *c) |
410 | { | 410 | { |
411 | unsigned long cs; | 411 | void __iomem *chan_base = c->chan_base; |
412 | long int timeout = 10000; | 412 | long int timeout = 10000; |
413 | 413 | ||
414 | cs = readl(chan_base + BCM2835_DMA_CS); | 414 | /* |
415 | if (!(cs & BCM2835_DMA_ACTIVE)) | 415 | * A zero control block address means the channel is idle. |
416 | * (The ACTIVE flag in the CS register is not a reliable indicator.) | ||
417 | */ | ||
418 | if (!readl(chan_base + BCM2835_DMA_ADDR)) | ||
416 | return 0; | 419 | return 0; |
417 | 420 | ||
418 | /* Write 0 to the active bit - Pause the DMA */ | 421 | /* Write 0 to the active bit - Pause the DMA */ |
419 | writel(0, chan_base + BCM2835_DMA_CS); | 422 | writel(0, chan_base + BCM2835_DMA_CS); |
420 | 423 | ||
421 | /* Wait for any current AXI transfer to complete */ | 424 | /* Wait for any current AXI transfer to complete */ |
422 | while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { | 425 | while ((readl(chan_base + BCM2835_DMA_CS) & |
426 | BCM2835_DMA_WAITING_FOR_WRITES) && --timeout) | ||
423 | cpu_relax(); | 427 | cpu_relax(); |
424 | cs = readl(chan_base + BCM2835_DMA_CS); | ||
425 | } | ||
426 | 428 | ||
427 | /* We'll un-pause when we set of our next DMA */ | 429 | /* Peripheral might be stuck and fail to signal AXI write responses */ |
428 | if (!timeout) | 430 | if (!timeout) |
429 | return -ETIMEDOUT; | 431 | dev_err(c->vc.chan.device->dev, |
430 | 432 | "failed to complete outstanding writes\n"); | |
431 | if (!(cs & BCM2835_DMA_ACTIVE)) | ||
432 | return 0; | ||
433 | |||
434 | /* Terminate the control block chain */ | ||
435 | writel(0, chan_base + BCM2835_DMA_NEXTCB); | ||
436 | |||
437 | /* Abort the whole DMA */ | ||
438 | writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, | ||
439 | chan_base + BCM2835_DMA_CS); | ||
440 | 433 | ||
434 | writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS); | ||
441 | return 0; | 435 | return 0; |
442 | } | 436 | } |
443 | 437 | ||
@@ -476,8 +470,15 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) | |||
476 | 470 | ||
477 | spin_lock_irqsave(&c->vc.lock, flags); | 471 | spin_lock_irqsave(&c->vc.lock, flags); |
478 | 472 | ||
479 | /* Acknowledge interrupt */ | 473 | /* |
480 | writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); | 474 | * Clear the INT flag to receive further interrupts. Keep the channel |
475 | * active in case the descriptor is cyclic or in case the client has | ||
476 | * already terminated the descriptor and issued a new one. (May happen | ||
477 | * if this IRQ handler is threaded.) If the channel is finished, it | ||
478 | * will remain idle despite the ACTIVE flag being set. | ||
479 | */ | ||
480 | writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE, | ||
481 | c->chan_base + BCM2835_DMA_CS); | ||
481 | 482 | ||
482 | d = c->desc; | 483 | d = c->desc; |
483 | 484 | ||
@@ -485,11 +486,7 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) | |||
485 | if (d->cyclic) { | 486 | if (d->cyclic) { |
486 | /* call the cyclic callback */ | 487 | /* call the cyclic callback */ |
487 | vchan_cyclic_callback(&d->vd); | 488 | vchan_cyclic_callback(&d->vd); |
488 | 489 | } else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) { | |
489 | /* Keep the DMA engine running */ | ||
490 | writel(BCM2835_DMA_ACTIVE, | ||
491 | c->chan_base + BCM2835_DMA_CS); | ||
492 | } else { | ||
493 | vchan_cookie_complete(&c->desc->vd); | 490 | vchan_cookie_complete(&c->desc->vd); |
494 | bcm2835_dma_start_desc(c); | 491 | bcm2835_dma_start_desc(c); |
495 | } | 492 | } |
@@ -779,7 +776,6 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) | |||
779 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); | 776 | struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); |
780 | struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); | 777 | struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); |
781 | unsigned long flags; | 778 | unsigned long flags; |
782 | int timeout = 10000; | ||
783 | LIST_HEAD(head); | 779 | LIST_HEAD(head); |
784 | 780 | ||
785 | spin_lock_irqsave(&c->vc.lock, flags); | 781 | spin_lock_irqsave(&c->vc.lock, flags); |
@@ -789,27 +785,11 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) | |||
789 | list_del_init(&c->node); | 785 | list_del_init(&c->node); |
790 | spin_unlock(&d->lock); | 786 | spin_unlock(&d->lock); |
791 | 787 | ||
792 | /* | 788 | /* stop DMA activity */ |
793 | * Stop DMA activity: we assume the callback will not be called | ||
794 | * after bcm_dma_abort() returns (even if it does, it will see | ||
795 | * c->desc is NULL and exit.) | ||
796 | */ | ||
797 | if (c->desc) { | 789 | if (c->desc) { |
798 | vchan_terminate_vdesc(&c->desc->vd); | 790 | vchan_terminate_vdesc(&c->desc->vd); |
799 | c->desc = NULL; | 791 | c->desc = NULL; |
800 | bcm2835_dma_abort(c->chan_base); | 792 | bcm2835_dma_abort(c); |
801 | |||
802 | /* Wait for stopping */ | ||
803 | while (--timeout) { | ||
804 | if (!(readl(c->chan_base + BCM2835_DMA_CS) & | ||
805 | BCM2835_DMA_ACTIVE)) | ||
806 | break; | ||
807 | |||
808 | cpu_relax(); | ||
809 | } | ||
810 | |||
811 | if (!timeout) | ||
812 | dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); | ||
813 | } | 793 | } |
814 | 794 | ||
815 | vchan_get_all_descriptors(&c->vc, &head); | 795 | vchan_get_all_descriptors(&c->vc, &head); |
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 2eea4ef72915..6511928b4cdf 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
@@ -711,11 +711,9 @@ static int dmatest_func(void *data) | |||
711 | srcs[i] = um->addr[i] + src_off; | 711 | srcs[i] = um->addr[i] + src_off; |
712 | ret = dma_mapping_error(dev->dev, um->addr[i]); | 712 | ret = dma_mapping_error(dev->dev, um->addr[i]); |
713 | if (ret) { | 713 | if (ret) { |
714 | dmaengine_unmap_put(um); | ||
715 | result("src mapping error", total_tests, | 714 | result("src mapping error", total_tests, |
716 | src_off, dst_off, len, ret); | 715 | src_off, dst_off, len, ret); |
717 | failed_tests++; | 716 | goto error_unmap_continue; |
718 | continue; | ||
719 | } | 717 | } |
720 | um->to_cnt++; | 718 | um->to_cnt++; |
721 | } | 719 | } |
@@ -730,11 +728,9 @@ static int dmatest_func(void *data) | |||
730 | DMA_BIDIRECTIONAL); | 728 | DMA_BIDIRECTIONAL); |
731 | ret = dma_mapping_error(dev->dev, dsts[i]); | 729 | ret = dma_mapping_error(dev->dev, dsts[i]); |
732 | if (ret) { | 730 | if (ret) { |
733 | dmaengine_unmap_put(um); | ||
734 | result("dst mapping error", total_tests, | 731 | result("dst mapping error", total_tests, |
735 | src_off, dst_off, len, ret); | 732 | src_off, dst_off, len, ret); |
736 | failed_tests++; | 733 | goto error_unmap_continue; |
737 | continue; | ||
738 | } | 734 | } |
739 | um->bidi_cnt++; | 735 | um->bidi_cnt++; |
740 | } | 736 | } |
@@ -762,12 +758,10 @@ static int dmatest_func(void *data) | |||
762 | } | 758 | } |
763 | 759 | ||
764 | if (!tx) { | 760 | if (!tx) { |
765 | dmaengine_unmap_put(um); | ||
766 | result("prep error", total_tests, src_off, | 761 | result("prep error", total_tests, src_off, |
767 | dst_off, len, ret); | 762 | dst_off, len, ret); |
768 | msleep(100); | 763 | msleep(100); |
769 | failed_tests++; | 764 | goto error_unmap_continue; |
770 | continue; | ||
771 | } | 765 | } |
772 | 766 | ||
773 | done->done = false; | 767 | done->done = false; |
@@ -776,12 +770,10 @@ static int dmatest_func(void *data) | |||
776 | cookie = tx->tx_submit(tx); | 770 | cookie = tx->tx_submit(tx); |
777 | 771 | ||
778 | if (dma_submit_error(cookie)) { | 772 | if (dma_submit_error(cookie)) { |
779 | dmaengine_unmap_put(um); | ||
780 | result("submit error", total_tests, src_off, | 773 | result("submit error", total_tests, src_off, |
781 | dst_off, len, ret); | 774 | dst_off, len, ret); |
782 | msleep(100); | 775 | msleep(100); |
783 | failed_tests++; | 776 | goto error_unmap_continue; |
784 | continue; | ||
785 | } | 777 | } |
786 | dma_async_issue_pending(chan); | 778 | dma_async_issue_pending(chan); |
787 | 779 | ||
@@ -790,22 +782,20 @@ static int dmatest_func(void *data) | |||
790 | 782 | ||
791 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 783 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
792 | 784 | ||
793 | dmaengine_unmap_put(um); | ||
794 | |||
795 | if (!done->done) { | 785 | if (!done->done) { |
796 | result("test timed out", total_tests, src_off, dst_off, | 786 | result("test timed out", total_tests, src_off, dst_off, |
797 | len, 0); | 787 | len, 0); |
798 | failed_tests++; | 788 | goto error_unmap_continue; |
799 | continue; | ||
800 | } else if (status != DMA_COMPLETE) { | 789 | } else if (status != DMA_COMPLETE) { |
801 | result(status == DMA_ERROR ? | 790 | result(status == DMA_ERROR ? |
802 | "completion error status" : | 791 | "completion error status" : |
803 | "completion busy status", total_tests, src_off, | 792 | "completion busy status", total_tests, src_off, |
804 | dst_off, len, ret); | 793 | dst_off, len, ret); |
805 | failed_tests++; | 794 | goto error_unmap_continue; |
806 | continue; | ||
807 | } | 795 | } |
808 | 796 | ||
797 | dmaengine_unmap_put(um); | ||
798 | |||
809 | if (params->noverify) { | 799 | if (params->noverify) { |
810 | verbose_result("test passed", total_tests, src_off, | 800 | verbose_result("test passed", total_tests, src_off, |
811 | dst_off, len, 0); | 801 | dst_off, len, 0); |
@@ -846,6 +836,12 @@ static int dmatest_func(void *data) | |||
846 | verbose_result("test passed", total_tests, src_off, | 836 | verbose_result("test passed", total_tests, src_off, |
847 | dst_off, len, 0); | 837 | dst_off, len, 0); |
848 | } | 838 | } |
839 | |||
840 | continue; | ||
841 | |||
842 | error_unmap_continue: | ||
843 | dmaengine_unmap_put(um); | ||
844 | failed_tests++; | ||
849 | } | 845 | } |
850 | ktime = ktime_sub(ktime_get(), ktime); | 846 | ktime = ktime_sub(ktime_get(), ktime); |
851 | ktime = ktime_sub(ktime, comparetime); | 847 | ktime = ktime_sub(ktime, comparetime); |
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index c2fff3f6c9ca..4a09af3cd546 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c | |||
@@ -618,7 +618,7 @@ static void imxdma_tasklet(unsigned long data) | |||
618 | { | 618 | { |
619 | struct imxdma_channel *imxdmac = (void *)data; | 619 | struct imxdma_channel *imxdmac = (void *)data; |
620 | struct imxdma_engine *imxdma = imxdmac->imxdma; | 620 | struct imxdma_engine *imxdma = imxdmac->imxdma; |
621 | struct imxdma_desc *desc; | 621 | struct imxdma_desc *desc, *next_desc; |
622 | unsigned long flags; | 622 | unsigned long flags; |
623 | 623 | ||
624 | spin_lock_irqsave(&imxdma->lock, flags); | 624 | spin_lock_irqsave(&imxdma->lock, flags); |
@@ -648,10 +648,10 @@ static void imxdma_tasklet(unsigned long data) | |||
648 | list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); | 648 | list_move_tail(imxdmac->ld_active.next, &imxdmac->ld_free); |
649 | 649 | ||
650 | if (!list_empty(&imxdmac->ld_queue)) { | 650 | if (!list_empty(&imxdmac->ld_queue)) { |
651 | desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, | 651 | next_desc = list_first_entry(&imxdmac->ld_queue, |
652 | node); | 652 | struct imxdma_desc, node); |
653 | list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); | 653 | list_move_tail(imxdmac->ld_queue.next, &imxdmac->ld_active); |
654 | if (imxdma_xfer_desc(desc) < 0) | 654 | if (imxdma_xfer_desc(next_desc) < 0) |
655 | dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", | 655 | dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", |
656 | __func__, imxdmac->channel); | 656 | __func__, imxdmac->channel); |
657 | } | 657 | } |
diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 472c88ae1c0f..92f843eaf1e0 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c | |||
@@ -119,6 +119,11 @@ void scmi_driver_unregister(struct scmi_driver *driver) | |||
119 | } | 119 | } |
120 | EXPORT_SYMBOL_GPL(scmi_driver_unregister); | 120 | EXPORT_SYMBOL_GPL(scmi_driver_unregister); |
121 | 121 | ||
122 | static void scmi_device_release(struct device *dev) | ||
123 | { | ||
124 | kfree(to_scmi_dev(dev)); | ||
125 | } | ||
126 | |||
122 | struct scmi_device * | 127 | struct scmi_device * |
123 | scmi_device_create(struct device_node *np, struct device *parent, int protocol) | 128 | scmi_device_create(struct device_node *np, struct device *parent, int protocol) |
124 | { | 129 | { |
@@ -138,6 +143,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol) | |||
138 | scmi_dev->dev.parent = parent; | 143 | scmi_dev->dev.parent = parent; |
139 | scmi_dev->dev.of_node = np; | 144 | scmi_dev->dev.of_node = np; |
140 | scmi_dev->dev.bus = &scmi_bus_type; | 145 | scmi_dev->dev.bus = &scmi_bus_type; |
146 | scmi_dev->dev.release = scmi_device_release; | ||
141 | dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); | 147 | dev_set_name(&scmi_dev->dev, "scmi_dev.%d", id); |
142 | 148 | ||
143 | retval = device_register(&scmi_dev->dev); | 149 | retval = device_register(&scmi_dev->dev); |
@@ -156,9 +162,8 @@ free_mem: | |||
156 | void scmi_device_destroy(struct scmi_device *scmi_dev) | 162 | void scmi_device_destroy(struct scmi_device *scmi_dev) |
157 | { | 163 | { |
158 | scmi_handle_put(scmi_dev->handle); | 164 | scmi_handle_put(scmi_dev->handle); |
159 | device_unregister(&scmi_dev->dev); | ||
160 | ida_simple_remove(&scmi_bus_id, scmi_dev->id); | 165 | ida_simple_remove(&scmi_bus_id, scmi_dev->id); |
161 | kfree(scmi_dev); | 166 | device_unregister(&scmi_dev->dev); |
162 | } | 167 | } |
163 | 168 | ||
164 | void scmi_set_handle(struct scmi_device *scmi_dev) | 169 | void scmi_set_handle(struct scmi_device *scmi_dev) |
diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 23ea1ed409d1..352bd2473162 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c | |||
@@ -37,8 +37,9 @@ extern u64 efi_system_table; | |||
37 | static struct ptdump_info efi_ptdump_info = { | 37 | static struct ptdump_info efi_ptdump_info = { |
38 | .mm = &efi_mm, | 38 | .mm = &efi_mm, |
39 | .markers = (struct addr_marker[]){ | 39 | .markers = (struct addr_marker[]){ |
40 | { 0, "UEFI runtime start" }, | 40 | { 0, "UEFI runtime start" }, |
41 | { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" } | 41 | { DEFAULT_MAP_WINDOW_64, "UEFI runtime end" }, |
42 | { -1, NULL } | ||
42 | }, | 43 | }, |
43 | .base_addr = 0, | 44 | .base_addr = 0, |
44 | }; | 45 | }; |
diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index a1a09e04fab8..13851b3d1c56 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c | |||
@@ -508,14 +508,11 @@ static int __init s10_init(void) | |||
508 | return -ENODEV; | 508 | return -ENODEV; |
509 | 509 | ||
510 | np = of_find_matching_node(fw_np, s10_of_match); | 510 | np = of_find_matching_node(fw_np, s10_of_match); |
511 | if (!np) { | 511 | if (!np) |
512 | of_node_put(fw_np); | ||
513 | return -ENODEV; | 512 | return -ENODEV; |
514 | } | ||
515 | 513 | ||
516 | of_node_put(np); | 514 | of_node_put(np); |
517 | ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); | 515 | ret = of_platform_populate(fw_np, s10_of_match, NULL, NULL); |
518 | of_node_put(fw_np); | ||
519 | if (ret) | 516 | if (ret) |
520 | return ret; | 517 | return ret; |
521 | 518 | ||
diff --git a/drivers/gpio/gpio-altera-a10sr.c b/drivers/gpio/gpio-altera-a10sr.c index 6b11f1314248..7f9e0304b510 100644 --- a/drivers/gpio/gpio-altera-a10sr.c +++ b/drivers/gpio/gpio-altera-a10sr.c | |||
@@ -66,8 +66,10 @@ static int altr_a10sr_gpio_direction_input(struct gpio_chip *gc, | |||
66 | static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, | 66 | static int altr_a10sr_gpio_direction_output(struct gpio_chip *gc, |
67 | unsigned int nr, int value) | 67 | unsigned int nr, int value) |
68 | { | 68 | { |
69 | if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) | 69 | if (nr <= (ALTR_A10SR_OUT_VALID_RANGE_HI - ALTR_A10SR_LED_VALID_SHIFT)) { |
70 | altr_a10sr_gpio_set(gc, nr, value); | ||
70 | return 0; | 71 | return 0; |
72 | } | ||
71 | return -EINVAL; | 73 | return -EINVAL; |
72 | } | 74 | } |
73 | 75 | ||
diff --git a/drivers/gpio/gpio-eic-sprd.c b/drivers/gpio/gpio-eic-sprd.c index e0d6a0a7bc69..e41223c05f6e 100644 --- a/drivers/gpio/gpio-eic-sprd.c +++ b/drivers/gpio/gpio-eic-sprd.c | |||
@@ -180,7 +180,18 @@ static void sprd_eic_free(struct gpio_chip *chip, unsigned int offset) | |||
180 | 180 | ||
181 | static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) | 181 | static int sprd_eic_get(struct gpio_chip *chip, unsigned int offset) |
182 | { | 182 | { |
183 | return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); | 183 | struct sprd_eic *sprd_eic = gpiochip_get_data(chip); |
184 | |||
185 | switch (sprd_eic->type) { | ||
186 | case SPRD_EIC_DEBOUNCE: | ||
187 | return sprd_eic_read(chip, offset, SPRD_EIC_DBNC_DATA); | ||
188 | case SPRD_EIC_ASYNC: | ||
189 | return sprd_eic_read(chip, offset, SPRD_EIC_ASYNC_DATA); | ||
190 | case SPRD_EIC_SYNC: | ||
191 | return sprd_eic_read(chip, offset, SPRD_EIC_SYNC_DATA); | ||
192 | default: | ||
193 | return -ENOTSUPP; | ||
194 | } | ||
184 | } | 195 | } |
185 | 196 | ||
186 | static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) | 197 | static int sprd_eic_direction_input(struct gpio_chip *chip, unsigned int offset) |
@@ -368,6 +379,7 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type) | |||
368 | irq_set_handler_locked(data, handle_edge_irq); | 379 | irq_set_handler_locked(data, handle_edge_irq); |
369 | break; | 380 | break; |
370 | case IRQ_TYPE_EDGE_BOTH: | 381 | case IRQ_TYPE_EDGE_BOTH: |
382 | sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0); | ||
371 | sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); | 383 | sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1); |
372 | irq_set_handler_locked(data, handle_edge_irq); | 384 | irq_set_handler_locked(data, handle_edge_irq); |
373 | break; | 385 | break; |
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c index adf72dda25a2..68a35b65925a 100644 --- a/drivers/gpio/gpio-pcf857x.c +++ b/drivers/gpio/gpio-pcf857x.c | |||
@@ -84,6 +84,7 @@ MODULE_DEVICE_TABLE(of, pcf857x_of_table); | |||
84 | */ | 84 | */ |
85 | struct pcf857x { | 85 | struct pcf857x { |
86 | struct gpio_chip chip; | 86 | struct gpio_chip chip; |
87 | struct irq_chip irqchip; | ||
87 | struct i2c_client *client; | 88 | struct i2c_client *client; |
88 | struct mutex lock; /* protect 'out' */ | 89 | struct mutex lock; /* protect 'out' */ |
89 | unsigned out; /* software latch */ | 90 | unsigned out; /* software latch */ |
@@ -252,18 +253,6 @@ static void pcf857x_irq_bus_sync_unlock(struct irq_data *data) | |||
252 | mutex_unlock(&gpio->lock); | 253 | mutex_unlock(&gpio->lock); |
253 | } | 254 | } |
254 | 255 | ||
255 | static struct irq_chip pcf857x_irq_chip = { | ||
256 | .name = "pcf857x", | ||
257 | .irq_enable = pcf857x_irq_enable, | ||
258 | .irq_disable = pcf857x_irq_disable, | ||
259 | .irq_ack = noop, | ||
260 | .irq_mask = noop, | ||
261 | .irq_unmask = noop, | ||
262 | .irq_set_wake = pcf857x_irq_set_wake, | ||
263 | .irq_bus_lock = pcf857x_irq_bus_lock, | ||
264 | .irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, | ||
265 | }; | ||
266 | |||
267 | /*-------------------------------------------------------------------------*/ | 256 | /*-------------------------------------------------------------------------*/ |
268 | 257 | ||
269 | static int pcf857x_probe(struct i2c_client *client, | 258 | static int pcf857x_probe(struct i2c_client *client, |
@@ -376,8 +365,17 @@ static int pcf857x_probe(struct i2c_client *client, | |||
376 | 365 | ||
377 | /* Enable irqchip if we have an interrupt */ | 366 | /* Enable irqchip if we have an interrupt */ |
378 | if (client->irq) { | 367 | if (client->irq) { |
368 | gpio->irqchip.name = "pcf857x", | ||
369 | gpio->irqchip.irq_enable = pcf857x_irq_enable, | ||
370 | gpio->irqchip.irq_disable = pcf857x_irq_disable, | ||
371 | gpio->irqchip.irq_ack = noop, | ||
372 | gpio->irqchip.irq_mask = noop, | ||
373 | gpio->irqchip.irq_unmask = noop, | ||
374 | gpio->irqchip.irq_set_wake = pcf857x_irq_set_wake, | ||
375 | gpio->irqchip.irq_bus_lock = pcf857x_irq_bus_lock, | ||
376 | gpio->irqchip.irq_bus_sync_unlock = pcf857x_irq_bus_sync_unlock, | ||
379 | status = gpiochip_irqchip_add_nested(&gpio->chip, | 377 | status = gpiochip_irqchip_add_nested(&gpio->chip, |
380 | &pcf857x_irq_chip, | 378 | &gpio->irqchip, |
381 | 0, handle_level_irq, | 379 | 0, handle_level_irq, |
382 | IRQ_TYPE_NONE); | 380 | IRQ_TYPE_NONE); |
383 | if (status) { | 381 | if (status) { |
@@ -392,7 +390,7 @@ static int pcf857x_probe(struct i2c_client *client, | |||
392 | if (status) | 390 | if (status) |
393 | goto fail; | 391 | goto fail; |
394 | 392 | ||
395 | gpiochip_set_nested_irqchip(&gpio->chip, &pcf857x_irq_chip, | 393 | gpiochip_set_nested_irqchip(&gpio->chip, &gpio->irqchip, |
396 | client->irq); | 394 | client->irq); |
397 | gpio->irq_parent = client->irq; | 395 | gpio->irq_parent = client->irq; |
398 | } | 396 | } |
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c index 1b79ebcfce3e..541fa6ac399d 100644 --- a/drivers/gpio/gpio-vf610.c +++ b/drivers/gpio/gpio-vf610.c | |||
@@ -253,6 +253,7 @@ static int vf610_gpio_probe(struct platform_device *pdev) | |||
253 | struct vf610_gpio_port *port; | 253 | struct vf610_gpio_port *port; |
254 | struct resource *iores; | 254 | struct resource *iores; |
255 | struct gpio_chip *gc; | 255 | struct gpio_chip *gc; |
256 | int i; | ||
256 | int ret; | 257 | int ret; |
257 | 258 | ||
258 | port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); | 259 | port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL); |
@@ -319,6 +320,10 @@ static int vf610_gpio_probe(struct platform_device *pdev) | |||
319 | if (ret < 0) | 320 | if (ret < 0) |
320 | return ret; | 321 | return ret; |
321 | 322 | ||
323 | /* Mask all GPIO interrupts */ | ||
324 | for (i = 0; i < gc->ngpio; i++) | ||
325 | vf610_gpio_writel(0, port->base + PORT_PCR(i)); | ||
326 | |||
322 | /* Clear the interrupt status register for all GPIO's */ | 327 | /* Clear the interrupt status register for all GPIO's */ |
323 | vf610_gpio_writel(~0, port->base + PORT_ISFR); | 328 | vf610_gpio_writel(~0, port->base + PORT_ISFR); |
324 | 329 | ||
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 1651d7f0a303..d1adfdf50fb3 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -828,7 +828,14 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p) | |||
828 | /* Do not leak kernel stack to userspace */ | 828 | /* Do not leak kernel stack to userspace */ |
829 | memset(&ge, 0, sizeof(ge)); | 829 | memset(&ge, 0, sizeof(ge)); |
830 | 830 | ||
831 | ge.timestamp = le->timestamp; | 831 | /* |
832 | * We may be running from a nested threaded interrupt in which case | ||
833 | * we didn't get the timestamp from lineevent_irq_handler(). | ||
834 | */ | ||
835 | if (!le->timestamp) | ||
836 | ge.timestamp = ktime_get_real_ns(); | ||
837 | else | ||
838 | ge.timestamp = le->timestamp; | ||
832 | 839 | ||
833 | if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE | 840 | if (le->eflags & GPIOEVENT_REQUEST_RISING_EDGE |
834 | && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { | 841 | && le->eflags & GPIOEVENT_REQUEST_FALLING_EDGE) { |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 6896dec97fc7..0ed41a9d2d77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | |||
@@ -1686,7 +1686,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, | |||
1686 | effective_mode &= ~S_IWUSR; | 1686 | effective_mode &= ~S_IWUSR; |
1687 | 1687 | ||
1688 | if ((adev->flags & AMD_IS_APU) && | 1688 | if ((adev->flags & AMD_IS_APU) && |
1689 | (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || | 1689 | (attr == &sensor_dev_attr_power1_average.dev_attr.attr || |
1690 | attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || | ||
1690 | attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| | 1691 | attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| |
1691 | attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) | 1692 | attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) |
1692 | return 0; | 1693 | return 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 71913a18d142..a38e0fb4a6fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include "amdgpu_gem.h" | 38 | #include "amdgpu_gem.h" |
39 | #include <drm/amdgpu_drm.h> | 39 | #include <drm/amdgpu_drm.h> |
40 | #include <linux/dma-buf.h> | 40 | #include <linux/dma-buf.h> |
41 | #include <linux/dma-fence-array.h> | ||
41 | 42 | ||
42 | /** | 43 | /** |
43 | * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table | 44 | * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table |
@@ -187,6 +188,48 @@ error: | |||
187 | return ERR_PTR(ret); | 188 | return ERR_PTR(ret); |
188 | } | 189 | } |
189 | 190 | ||
191 | static int | ||
192 | __reservation_object_make_exclusive(struct reservation_object *obj) | ||
193 | { | ||
194 | struct dma_fence **fences; | ||
195 | unsigned int count; | ||
196 | int r; | ||
197 | |||
198 | if (!reservation_object_get_list(obj)) /* no shared fences to convert */ | ||
199 | return 0; | ||
200 | |||
201 | r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences); | ||
202 | if (r) | ||
203 | return r; | ||
204 | |||
205 | if (count == 0) { | ||
206 | /* Now that was unexpected. */ | ||
207 | } else if (count == 1) { | ||
208 | reservation_object_add_excl_fence(obj, fences[0]); | ||
209 | dma_fence_put(fences[0]); | ||
210 | kfree(fences); | ||
211 | } else { | ||
212 | struct dma_fence_array *array; | ||
213 | |||
214 | array = dma_fence_array_create(count, fences, | ||
215 | dma_fence_context_alloc(1), 0, | ||
216 | false); | ||
217 | if (!array) | ||
218 | goto err_fences_put; | ||
219 | |||
220 | reservation_object_add_excl_fence(obj, &array->base); | ||
221 | dma_fence_put(&array->base); | ||
222 | } | ||
223 | |||
224 | return 0; | ||
225 | |||
226 | err_fences_put: | ||
227 | while (count--) | ||
228 | dma_fence_put(fences[count]); | ||
229 | kfree(fences); | ||
230 | return -ENOMEM; | ||
231 | } | ||
232 | |||
190 | /** | 233 | /** |
191 | * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation | 234 | * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation |
192 | * @dma_buf: Shared DMA buffer | 235 | * @dma_buf: Shared DMA buffer |
@@ -218,16 +261,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf, | |||
218 | 261 | ||
219 | if (attach->dev->driver != adev->dev->driver) { | 262 | if (attach->dev->driver != adev->dev->driver) { |
220 | /* | 263 | /* |
221 | * Wait for all shared fences to complete before we switch to future | 264 | * We only create shared fences for internal use, but importers |
222 | * use of exclusive fence on this prime shared bo. | 265 | * of the dmabuf rely on exclusive fences for implicitly |
266 | * tracking write hazards. As any of the current fences may | ||
267 | * correspond to a write, we need to convert all existing | ||
268 | * fences on the reservation object into a single exclusive | ||
269 | * fence. | ||
223 | */ | 270 | */ |
224 | r = reservation_object_wait_timeout_rcu(bo->tbo.resv, | 271 | r = __reservation_object_make_exclusive(bo->tbo.resv); |
225 | true, false, | 272 | if (r) |
226 | MAX_SCHEDULE_TIMEOUT); | ||
227 | if (unlikely(r < 0)) { | ||
228 | DRM_DEBUG_PRIME("Fence wait failed: %li\n", r); | ||
229 | goto error_unreserve; | 273 | goto error_unreserve; |
230 | } | ||
231 | } | 274 | } |
232 | 275 | ||
233 | /* pin buffer into GTT */ | 276 | /* pin buffer into GTT */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 8fab0d637ee5..3a9b48b227ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | |||
@@ -90,8 +90,10 @@ static int psp_sw_fini(void *handle) | |||
90 | adev->psp.sos_fw = NULL; | 90 | adev->psp.sos_fw = NULL; |
91 | release_firmware(adev->psp.asd_fw); | 91 | release_firmware(adev->psp.asd_fw); |
92 | adev->psp.asd_fw = NULL; | 92 | adev->psp.asd_fw = NULL; |
93 | release_firmware(adev->psp.ta_fw); | 93 | if (adev->psp.ta_fw) { |
94 | adev->psp.ta_fw = NULL; | 94 | release_firmware(adev->psp.ta_fw); |
95 | adev->psp.ta_fw = NULL; | ||
96 | } | ||
95 | return 0; | 97 | return 0; |
96 | } | 98 | } |
97 | 99 | ||
@@ -435,6 +437,9 @@ static int psp_xgmi_initialize(struct psp_context *psp) | |||
435 | struct ta_xgmi_shared_memory *xgmi_cmd; | 437 | struct ta_xgmi_shared_memory *xgmi_cmd; |
436 | int ret; | 438 | int ret; |
437 | 439 | ||
440 | if (!psp->adev->psp.ta_fw) | ||
441 | return -ENOENT; | ||
442 | |||
438 | if (!psp->xgmi_context.initialized) { | 443 | if (!psp->xgmi_context.initialized) { |
439 | ret = psp_xgmi_init_shared_buf(psp); | 444 | ret = psp_xgmi_init_shared_buf(psp); |
440 | if (ret) | 445 | if (ret) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index d2ea5ce2cefb..7c108e687683 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | |||
@@ -3363,14 +3363,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, | |||
3363 | struct amdgpu_task_info *task_info) | 3363 | struct amdgpu_task_info *task_info) |
3364 | { | 3364 | { |
3365 | struct amdgpu_vm *vm; | 3365 | struct amdgpu_vm *vm; |
3366 | unsigned long flags; | ||
3366 | 3367 | ||
3367 | spin_lock(&adev->vm_manager.pasid_lock); | 3368 | spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); |
3368 | 3369 | ||
3369 | vm = idr_find(&adev->vm_manager.pasid_idr, pasid); | 3370 | vm = idr_find(&adev->vm_manager.pasid_idr, pasid); |
3370 | if (vm) | 3371 | if (vm) |
3371 | *task_info = vm->task_info; | 3372 | *task_info = vm->task_info; |
3372 | 3373 | ||
3373 | spin_unlock(&adev->vm_manager.pasid_lock); | 3374 | spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); |
3374 | } | 3375 | } |
3375 | 3376 | ||
3376 | /** | 3377 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 4cd31a276dcd..186db182f924 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | |||
@@ -93,7 +93,20 @@ static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev, | |||
93 | static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, | 93 | static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, |
94 | bool enable) | 94 | bool enable) |
95 | { | 95 | { |
96 | u32 tmp = 0; | ||
96 | 97 | ||
98 | if (enable) { | ||
99 | tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) | | ||
100 | REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) | | ||
101 | REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0); | ||
102 | |||
103 | WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW, | ||
104 | lower_32_bits(adev->doorbell.base)); | ||
105 | WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH, | ||
106 | upper_32_bits(adev->doorbell.base)); | ||
107 | } | ||
108 | |||
109 | WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp); | ||
97 | } | 110 | } |
98 | 111 | ||
99 | static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, | 112 | static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev, |
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 0c6e7f9b143f..189fcb004579 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | |||
@@ -152,18 +152,22 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) | |||
152 | 152 | ||
153 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); | 153 | snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); |
154 | err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); | 154 | err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); |
155 | if (err) | 155 | if (err) { |
156 | goto out2; | 156 | release_firmware(adev->psp.ta_fw); |
157 | 157 | adev->psp.ta_fw = NULL; | |
158 | err = amdgpu_ucode_validate(adev->psp.ta_fw); | 158 | dev_info(adev->dev, |
159 | if (err) | 159 | "psp v11.0: Failed to load firmware \"%s\"\n", fw_name); |
160 | goto out2; | 160 | } else { |
161 | 161 | err = amdgpu_ucode_validate(adev->psp.ta_fw); | |
162 | ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; | 162 | if (err) |
163 | adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); | 163 | goto out2; |
164 | adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); | 164 | |
165 | adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + | 165 | ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data; |
166 | le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); | 166 | adev->psp.ta_xgmi_ucode_version = le32_to_cpu(ta_hdr->ta_xgmi_ucode_version); |
167 | adev->psp.ta_xgmi_ucode_size = le32_to_cpu(ta_hdr->ta_xgmi_size_bytes); | ||
168 | adev->psp.ta_xgmi_start_addr = (uint8_t *)ta_hdr + | ||
169 | le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); | ||
170 | } | ||
167 | 171 | ||
168 | return 0; | 172 | return 0; |
169 | 173 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 8849b74078d6..9b639974c70c 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c | |||
@@ -729,11 +729,13 @@ static int soc15_common_early_init(void *handle) | |||
729 | case CHIP_RAVEN: | 729 | case CHIP_RAVEN: |
730 | adev->asic_funcs = &soc15_asic_funcs; | 730 | adev->asic_funcs = &soc15_asic_funcs; |
731 | if (adev->rev_id >= 0x8) | 731 | if (adev->rev_id >= 0x8) |
732 | adev->external_rev_id = adev->rev_id + 0x81; | 732 | adev->external_rev_id = adev->rev_id + 0x79; |
733 | else if (adev->pdev->device == 0x15d8) | 733 | else if (adev->pdev->device == 0x15d8) |
734 | adev->external_rev_id = adev->rev_id + 0x41; | 734 | adev->external_rev_id = adev->rev_id + 0x41; |
735 | else if (adev->rev_id == 1) | ||
736 | adev->external_rev_id = adev->rev_id + 0x20; | ||
735 | else | 737 | else |
736 | adev->external_rev_id = 0x1; | 738 | adev->external_rev_id = adev->rev_id + 0x01; |
737 | 739 | ||
738 | if (adev->rev_id >= 0x8) { | 740 | if (adev->rev_id >= 0x8) { |
739 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | | 741 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 5d85ff341385..2e7c44955f43 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c | |||
@@ -863,7 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size, | |||
863 | return 0; | 863 | return 0; |
864 | } | 864 | } |
865 | 865 | ||
866 | #if CONFIG_X86_64 | 866 | #ifdef CONFIG_X86_64 |
867 | static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, | 867 | static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size, |
868 | uint32_t *num_entries, | 868 | uint32_t *num_entries, |
869 | struct crat_subtype_iolink *sub_type_hdr) | 869 | struct crat_subtype_iolink *sub_type_hdr) |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f4fa40c387d3..0b392bfca284 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | |||
@@ -4082,7 +4082,8 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, | |||
4082 | } | 4082 | } |
4083 | 4083 | ||
4084 | if (connector_type == DRM_MODE_CONNECTOR_HDMIA || | 4084 | if (connector_type == DRM_MODE_CONNECTOR_HDMIA || |
4085 | connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | 4085 | connector_type == DRM_MODE_CONNECTOR_DisplayPort || |
4086 | connector_type == DRM_MODE_CONNECTOR_eDP) { | ||
4086 | drm_connector_attach_vrr_capable_property( | 4087 | drm_connector_attach_vrr_capable_property( |
4087 | &aconnector->base); | 4088 | &aconnector->base); |
4088 | } | 4089 | } |
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 9a7ac58eb18e..ddd75a4d8ba5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | |||
@@ -671,6 +671,25 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us | |||
671 | return bytes_from_user; | 671 | return bytes_from_user; |
672 | } | 672 | } |
673 | 673 | ||
674 | /* | ||
675 | * Returns the min and max vrr vfreq through the connector's debugfs file. | ||
676 | * Example usage: cat /sys/kernel/debug/dri/0/DP-1/vrr_range | ||
677 | */ | ||
678 | static int vrr_range_show(struct seq_file *m, void *data) | ||
679 | { | ||
680 | struct drm_connector *connector = m->private; | ||
681 | struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); | ||
682 | |||
683 | if (connector->status != connector_status_connected) | ||
684 | return -ENODEV; | ||
685 | |||
686 | seq_printf(m, "Min: %u\n", (unsigned int)aconnector->min_vfreq); | ||
687 | seq_printf(m, "Max: %u\n", (unsigned int)aconnector->max_vfreq); | ||
688 | |||
689 | return 0; | ||
690 | } | ||
691 | DEFINE_SHOW_ATTRIBUTE(vrr_range); | ||
692 | |||
674 | static const struct file_operations dp_link_settings_debugfs_fops = { | 693 | static const struct file_operations dp_link_settings_debugfs_fops = { |
675 | .owner = THIS_MODULE, | 694 | .owner = THIS_MODULE, |
676 | .read = dp_link_settings_read, | 695 | .read = dp_link_settings_read, |
@@ -697,7 +716,8 @@ static const struct { | |||
697 | } dp_debugfs_entries[] = { | 716 | } dp_debugfs_entries[] = { |
698 | {"link_settings", &dp_link_settings_debugfs_fops}, | 717 | {"link_settings", &dp_link_settings_debugfs_fops}, |
699 | {"phy_settings", &dp_phy_settings_debugfs_fop}, | 718 | {"phy_settings", &dp_phy_settings_debugfs_fop}, |
700 | {"test_pattern", &dp_phy_test_pattern_fops} | 719 | {"test_pattern", &dp_phy_test_pattern_fops}, |
720 | {"vrr_range", &vrr_range_fops} | ||
701 | }; | 721 | }; |
702 | 722 | ||
703 | int connector_debugfs_init(struct amdgpu_dm_connector *connector) | 723 | int connector_debugfs_init(struct amdgpu_dm_connector *connector) |
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c index afd287f08bc9..19801bdba0d2 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clk_mgr.c | |||
@@ -591,7 +591,15 @@ static void dce11_pplib_apply_display_requirements( | |||
591 | dc, | 591 | dc, |
592 | context->bw.dce.sclk_khz); | 592 | context->bw.dce.sclk_khz); |
593 | 593 | ||
594 | pp_display_cfg->min_dcfclock_khz = pp_display_cfg->min_engine_clock_khz; | 594 | /* |
595 | * As workaround for >4x4K lightup set dcfclock to min_engine_clock value. | ||
596 | * This is not required for less than 5 displays, | ||
597 | * thus don't request decfclk in dc to avoid impact | ||
598 | * on power saving. | ||
599 | * | ||
600 | */ | ||
601 | pp_display_cfg->min_dcfclock_khz = (context->stream_count > 4)? | ||
602 | pp_display_cfg->min_engine_clock_khz : 0; | ||
595 | 603 | ||
596 | pp_display_cfg->min_engine_clock_deep_sleep_khz | 604 | pp_display_cfg->min_engine_clock_deep_sleep_khz |
597 | = context->bw.dce.sclk_deep_sleep_khz; | 605 | = context->bw.dce.sclk_deep_sleep_khz; |
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index f95c5f50eb0f..5273de3c5b98 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c | |||
@@ -1033,6 +1033,7 @@ static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, | |||
1033 | break; | 1033 | break; |
1034 | case amd_pp_dpp_clock: | 1034 | case amd_pp_dpp_clock: |
1035 | pclk_vol_table = pinfo->vdd_dep_on_dppclk; | 1035 | pclk_vol_table = pinfo->vdd_dep_on_dppclk; |
1036 | break; | ||
1036 | default: | 1037 | default: |
1037 | return -EINVAL; | 1038 | return -EINVAL; |
1038 | } | 1039 | } |
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index 99cba8ea5d82..5df1256618cc 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c | |||
@@ -528,7 +528,8 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev, | |||
528 | 528 | ||
529 | object_count = cl->object_count; | 529 | object_count = cl->object_count; |
530 | 530 | ||
531 | object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), object_count * sizeof(__u32)); | 531 | object_ids = memdup_user(u64_to_user_ptr(cl->object_ids), |
532 | array_size(object_count, sizeof(__u32))); | ||
532 | if (IS_ERR(object_ids)) | 533 | if (IS_ERR(object_ids)) |
533 | return PTR_ERR(object_ids); | 534 | return PTR_ERR(object_ids); |
534 | 535 | ||
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 24a750436559..f91e02c87fd8 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c | |||
@@ -758,7 +758,7 @@ int drm_mode_hsync(const struct drm_display_mode *mode) | |||
758 | if (mode->hsync) | 758 | if (mode->hsync) |
759 | return mode->hsync; | 759 | return mode->hsync; |
760 | 760 | ||
761 | if (mode->htotal < 0) | 761 | if (mode->htotal <= 0) |
762 | return 0; | 762 | return 0; |
763 | 763 | ||
764 | calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ | 764 | calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */ |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 216f52b744a6..c882ea94172c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1824,6 +1824,16 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | |||
1824 | return 0; | 1824 | return 0; |
1825 | } | 1825 | } |
1826 | 1826 | ||
1827 | static inline bool | ||
1828 | __vma_matches(struct vm_area_struct *vma, struct file *filp, | ||
1829 | unsigned long addr, unsigned long size) | ||
1830 | { | ||
1831 | if (vma->vm_file != filp) | ||
1832 | return false; | ||
1833 | |||
1834 | return vma->vm_start == addr && (vma->vm_end - vma->vm_start) == size; | ||
1835 | } | ||
1836 | |||
1827 | /** | 1837 | /** |
1828 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address | 1838 | * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address |
1829 | * it is mapped to. | 1839 | * it is mapped to. |
@@ -1882,7 +1892,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | |||
1882 | return -EINTR; | 1892 | return -EINTR; |
1883 | } | 1893 | } |
1884 | vma = find_vma(mm, addr); | 1894 | vma = find_vma(mm, addr); |
1885 | if (vma) | 1895 | if (vma && __vma_matches(vma, obj->base.filp, addr, args->size)) |
1886 | vma->vm_page_prot = | 1896 | vma->vm_page_prot = |
1887 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); | 1897 | pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); |
1888 | else | 1898 | else |
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d6c8f8fdfda5..017fc602a10e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c | |||
@@ -594,7 +594,8 @@ static void i915_pmu_enable(struct perf_event *event) | |||
594 | * Update the bitmask of enabled events and increment | 594 | * Update the bitmask of enabled events and increment |
595 | * the event reference counter. | 595 | * the event reference counter. |
596 | */ | 596 | */ |
597 | GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); | 597 | BUILD_BUG_ON(ARRAY_SIZE(i915->pmu.enable_count) != I915_PMU_MASK_BITS); |
598 | GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); | ||
598 | GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); | 599 | GEM_BUG_ON(i915->pmu.enable_count[bit] == ~0); |
599 | i915->pmu.enable |= BIT_ULL(bit); | 600 | i915->pmu.enable |= BIT_ULL(bit); |
600 | i915->pmu.enable_count[bit]++; | 601 | i915->pmu.enable_count[bit]++; |
@@ -615,11 +616,16 @@ static void i915_pmu_enable(struct perf_event *event) | |||
615 | engine = intel_engine_lookup_user(i915, | 616 | engine = intel_engine_lookup_user(i915, |
616 | engine_event_class(event), | 617 | engine_event_class(event), |
617 | engine_event_instance(event)); | 618 | engine_event_instance(event)); |
618 | GEM_BUG_ON(!engine); | ||
619 | engine->pmu.enable |= BIT(sample); | ||
620 | 619 | ||
621 | GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); | 620 | BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.enable_count) != |
621 | I915_ENGINE_SAMPLE_COUNT); | ||
622 | BUILD_BUG_ON(ARRAY_SIZE(engine->pmu.sample) != | ||
623 | I915_ENGINE_SAMPLE_COUNT); | ||
624 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); | ||
625 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); | ||
622 | GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); | 626 | GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0); |
627 | |||
628 | engine->pmu.enable |= BIT(sample); | ||
623 | engine->pmu.enable_count[sample]++; | 629 | engine->pmu.enable_count[sample]++; |
624 | } | 630 | } |
625 | 631 | ||
@@ -649,9 +655,11 @@ static void i915_pmu_disable(struct perf_event *event) | |||
649 | engine = intel_engine_lookup_user(i915, | 655 | engine = intel_engine_lookup_user(i915, |
650 | engine_event_class(event), | 656 | engine_event_class(event), |
651 | engine_event_instance(event)); | 657 | engine_event_instance(event)); |
652 | GEM_BUG_ON(!engine); | 658 | |
653 | GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS); | 659 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.enable_count)); |
660 | GEM_BUG_ON(sample >= ARRAY_SIZE(engine->pmu.sample)); | ||
654 | GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); | 661 | GEM_BUG_ON(engine->pmu.enable_count[sample] == 0); |
662 | |||
655 | /* | 663 | /* |
656 | * Decrement the reference count and clear the enabled | 664 | * Decrement the reference count and clear the enabled |
657 | * bitmask when the last listener on an event goes away. | 665 | * bitmask when the last listener on an event goes away. |
@@ -660,7 +668,7 @@ static void i915_pmu_disable(struct perf_event *event) | |||
660 | engine->pmu.enable &= ~BIT(sample); | 668 | engine->pmu.enable &= ~BIT(sample); |
661 | } | 669 | } |
662 | 670 | ||
663 | GEM_BUG_ON(bit >= I915_PMU_MASK_BITS); | 671 | GEM_BUG_ON(bit >= ARRAY_SIZE(i915->pmu.enable_count)); |
664 | GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); | 672 | GEM_BUG_ON(i915->pmu.enable_count[bit] == 0); |
665 | /* | 673 | /* |
666 | * Decrement the reference count and clear the enabled | 674 | * Decrement the reference count and clear the enabled |
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 7f164ca3db12..b3728c5f13e7 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h | |||
@@ -31,6 +31,8 @@ enum { | |||
31 | ((1 << I915_PMU_SAMPLE_BITS) + \ | 31 | ((1 << I915_PMU_SAMPLE_BITS) + \ |
32 | (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) | 32 | (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) |
33 | 33 | ||
34 | #define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1) | ||
35 | |||
34 | struct i915_pmu_sample { | 36 | struct i915_pmu_sample { |
35 | u64 cur; | 37 | u64 cur; |
36 | }; | 38 | }; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0a7d60509ca7..067054cf4a86 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1790,7 +1790,7 @@ enum i915_power_well_id { | |||
1790 | #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 | 1790 | #define _CNL_PORT_TX_C_LN0_OFFSET 0x162C40 |
1791 | #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 | 1791 | #define _CNL_PORT_TX_D_LN0_OFFSET 0x162E40 |
1792 | #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 | 1792 | #define _CNL_PORT_TX_F_LN0_OFFSET 0x162840 |
1793 | #define _CNL_PORT_TX_DW_GRP(port, dw) (_PICK((port), \ | 1793 | #define _CNL_PORT_TX_DW_GRP(dw, port) (_PICK((port), \ |
1794 | _CNL_PORT_TX_AE_GRP_OFFSET, \ | 1794 | _CNL_PORT_TX_AE_GRP_OFFSET, \ |
1795 | _CNL_PORT_TX_B_GRP_OFFSET, \ | 1795 | _CNL_PORT_TX_B_GRP_OFFSET, \ |
1796 | _CNL_PORT_TX_B_GRP_OFFSET, \ | 1796 | _CNL_PORT_TX_B_GRP_OFFSET, \ |
@@ -1798,7 +1798,7 @@ enum i915_power_well_id { | |||
1798 | _CNL_PORT_TX_AE_GRP_OFFSET, \ | 1798 | _CNL_PORT_TX_AE_GRP_OFFSET, \ |
1799 | _CNL_PORT_TX_F_GRP_OFFSET) + \ | 1799 | _CNL_PORT_TX_F_GRP_OFFSET) + \ |
1800 | 4 * (dw)) | 1800 | 4 * (dw)) |
1801 | #define _CNL_PORT_TX_DW_LN0(port, dw) (_PICK((port), \ | 1801 | #define _CNL_PORT_TX_DW_LN0(dw, port) (_PICK((port), \ |
1802 | _CNL_PORT_TX_AE_LN0_OFFSET, \ | 1802 | _CNL_PORT_TX_AE_LN0_OFFSET, \ |
1803 | _CNL_PORT_TX_B_LN0_OFFSET, \ | 1803 | _CNL_PORT_TX_B_LN0_OFFSET, \ |
1804 | _CNL_PORT_TX_B_LN0_OFFSET, \ | 1804 | _CNL_PORT_TX_B_LN0_OFFSET, \ |
@@ -1834,9 +1834,9 @@ enum i915_power_well_id { | |||
1834 | 1834 | ||
1835 | #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 | 1835 | #define _CNL_PORT_TX_DW4_LN0_AE 0x162450 |
1836 | #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 | 1836 | #define _CNL_PORT_TX_DW4_LN1_AE 0x1624D0 |
1837 | #define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 4)) | 1837 | #define CNL_PORT_TX_DW4_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(4, (port))) |
1838 | #define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4)) | 1838 | #define CNL_PORT_TX_DW4_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port))) |
1839 | #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0((port), 4) + \ | 1839 | #define CNL_PORT_TX_DW4_LN(port, ln) _MMIO(_CNL_PORT_TX_DW_LN0(4, (port)) + \ |
1840 | ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ | 1840 | ((ln) * (_CNL_PORT_TX_DW4_LN1_AE - \ |
1841 | _CNL_PORT_TX_DW4_LN0_AE))) | 1841 | _CNL_PORT_TX_DW4_LN0_AE))) |
1842 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) | 1842 | #define ICL_PORT_TX_DW4_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(4, port)) |
@@ -1864,8 +1864,12 @@ enum i915_power_well_id { | |||
1864 | #define RTERM_SELECT(x) ((x) << 3) | 1864 | #define RTERM_SELECT(x) ((x) << 3) |
1865 | #define RTERM_SELECT_MASK (0x7 << 3) | 1865 | #define RTERM_SELECT_MASK (0x7 << 3) |
1866 | 1866 | ||
1867 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP((port), 7)) | 1867 | #define CNL_PORT_TX_DW7_GRP(port) _MMIO(_CNL_PORT_TX_DW_GRP(7, (port))) |
1868 | #define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0((port), 7)) | 1868 | #define CNL_PORT_TX_DW7_LN0(port) _MMIO(_CNL_PORT_TX_DW_LN0(7, (port))) |
1869 | #define ICL_PORT_TX_DW7_AUX(port) _MMIO(_ICL_PORT_TX_DW_AUX(7, port)) | ||
1870 | #define ICL_PORT_TX_DW7_GRP(port) _MMIO(_ICL_PORT_TX_DW_GRP(7, port)) | ||
1871 | #define ICL_PORT_TX_DW7_LN0(port) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, port)) | ||
1872 | #define ICL_PORT_TX_DW7_LN(port, ln) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, port)) | ||
1869 | #define N_SCALAR(x) ((x) << 24) | 1873 | #define N_SCALAR(x) ((x) << 24) |
1870 | #define N_SCALAR_MASK (0x7F << 24) | 1874 | #define N_SCALAR_MASK (0x7F << 24) |
1871 | 1875 | ||
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index f3e1d6a0b7dd..7edce1b7b348 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -494,103 +494,58 @@ static const struct cnl_ddi_buf_trans cnl_ddi_translations_edp_1_05V[] = { | |||
494 | { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ | 494 | { 0x2, 0x7F, 0x3F, 0x00, 0x00 }, /* 400 400 0.0 */ |
495 | }; | 495 | }; |
496 | 496 | ||
497 | struct icl_combo_phy_ddi_buf_trans { | 497 | /* icl_combo_phy_ddi_translations */ |
498 | u32 dw2_swing_select; | 498 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2[] = { |
499 | u32 dw2_swing_scalar; | 499 | /* NT mV Trans mV db */ |
500 | u32 dw4_scaling; | 500 | { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
501 | }; | 501 | { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ |
502 | 502 | { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ | |
503 | /* Voltage Swing Programming for VccIO 0.85V for DP */ | 503 | { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ |
504 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_85V[] = { | 504 | { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ |
505 | /* Voltage mV db */ | 505 | { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ |
506 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | 506 | { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ |
507 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | 507 | { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ |
508 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | 508 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ |
509 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | 509 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ |
510 | { 0xB, 0x70, 0x0018 }, /* 600 0.0 */ | ||
511 | { 0xB, 0x70, 0x3015 }, /* 600 3.5 */ | ||
512 | { 0xB, 0x70, 0x6012 }, /* 600 6.0 */ | ||
513 | { 0x5, 0x00, 0x0018 }, /* 800 0.0 */ | ||
514 | { 0x5, 0x00, 0x3015 }, /* 800 3.5 */ | ||
515 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
516 | }; | ||
517 | |||
518 | /* FIXME - After table is updated in Bspec */ | ||
519 | /* Voltage Swing Programming for VccIO 0.85V for eDP */ | ||
520 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_85V[] = { | ||
521 | /* Voltage mV db */ | ||
522 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | ||
523 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | ||
524 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | ||
525 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | ||
526 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | ||
527 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | ||
528 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | ||
529 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | ||
530 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
531 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
532 | }; | ||
533 | |||
534 | /* Voltage Swing Programming for VccIO 0.95V for DP */ | ||
535 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_0_95V[] = { | ||
536 | /* Voltage mV db */ | ||
537 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | ||
538 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | ||
539 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | ||
540 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | ||
541 | { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ | ||
542 | { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ | ||
543 | { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ | ||
544 | { 0x5, 0x76, 0x0018 }, /* 800 0.0 */ | ||
545 | { 0x5, 0x76, 0x3015 }, /* 800 3.5 */ | ||
546 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
547 | }; | 510 | }; |
548 | 511 | ||
549 | /* FIXME - After table is updated in Bspec */ | 512 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2[] = { |
550 | /* Voltage Swing Programming for VccIO 0.95V for eDP */ | 513 | /* NT mV Trans mV db */ |
551 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_0_95V[] = { | 514 | { 0x0, 0x7F, 0x3F, 0x00, 0x00 }, /* 200 200 0.0 */ |
552 | /* Voltage mV db */ | 515 | { 0x8, 0x7F, 0x38, 0x00, 0x07 }, /* 200 250 1.9 */ |
553 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | 516 | { 0x1, 0x7F, 0x33, 0x00, 0x0C }, /* 200 300 3.5 */ |
554 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | 517 | { 0x9, 0x7F, 0x31, 0x00, 0x0E }, /* 200 350 4.9 */ |
555 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | 518 | { 0x8, 0x7F, 0x3F, 0x00, 0x00 }, /* 250 250 0.0 */ |
556 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | 519 | { 0x1, 0x7F, 0x38, 0x00, 0x07 }, /* 250 300 1.6 */ |
557 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | 520 | { 0x9, 0x7F, 0x35, 0x00, 0x0A }, /* 250 350 2.9 */ |
558 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | 521 | { 0x1, 0x7F, 0x3F, 0x00, 0x00 }, /* 300 300 0.0 */ |
559 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | 522 | { 0x9, 0x7F, 0x38, 0x00, 0x07 }, /* 300 350 1.3 */ |
560 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | 523 | { 0x9, 0x7F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
561 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
562 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
563 | }; | 524 | }; |
564 | 525 | ||
565 | /* Voltage Swing Programming for VccIO 1.05V for DP */ | 526 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr3[] = { |
566 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hdmi_1_05V[] = { | 527 | /* NT mV Trans mV db */ |
567 | /* Voltage mV db */ | 528 | { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ |
568 | { 0x2, 0x98, 0x0018 }, /* 400 0.0 */ | 529 | { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ |
569 | { 0x2, 0x98, 0x3015 }, /* 400 3.5 */ | 530 | { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ |
570 | { 0x2, 0x98, 0x6012 }, /* 400 6.0 */ | 531 | { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */ |
571 | { 0x2, 0x98, 0x900F }, /* 400 9.5 */ | 532 | { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ |
572 | { 0x4, 0x98, 0x0018 }, /* 600 0.0 */ | 533 | { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ |
573 | { 0x4, 0x98, 0x3015 }, /* 600 3.5 */ | 534 | { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ |
574 | { 0x4, 0x98, 0x6012 }, /* 600 6.0 */ | 535 | { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */ |
575 | { 0x5, 0x71, 0x0018 }, /* 800 0.0 */ | 536 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ |
576 | { 0x5, 0x71, 0x3015 }, /* 800 3.5 */ | 537 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ |
577 | { 0x6, 0x98, 0x0018 }, /* 1200 0.0 */ | ||
578 | }; | 538 | }; |
579 | 539 | ||
580 | /* FIXME - After table is updated in Bspec */ | 540 | static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = { |
581 | /* Voltage Swing Programming for VccIO 1.05V for eDP */ | 541 | /* NT mV Trans mV db */ |
582 | static const struct icl_combo_phy_ddi_buf_trans icl_combo_phy_ddi_translations_edp_1_05V[] = { | 542 | { 0xA, 0x60, 0x3F, 0x00, 0x00 }, /* 450 450 0.0 */ |
583 | /* Voltage mV db */ | 543 | { 0xB, 0x73, 0x36, 0x00, 0x09 }, /* 450 650 3.2 */ |
584 | { 0x0, 0x00, 0x00 }, /* 200 0.0 */ | 544 | { 0x6, 0x7F, 0x31, 0x00, 0x0E }, /* 450 850 5.5 */ |
585 | { 0x0, 0x00, 0x00 }, /* 200 1.5 */ | 545 | { 0xB, 0x73, 0x3F, 0x00, 0x00 }, /* 650 650 0.0 ALS */ |
586 | { 0x0, 0x00, 0x00 }, /* 200 4.0 */ | 546 | { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 650 850 2.3 */ |
587 | { 0x0, 0x00, 0x00 }, /* 200 6.0 */ | 547 | { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 850 850 0.0 */ |
588 | { 0x0, 0x00, 0x00 }, /* 250 0.0 */ | 548 | { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */ |
589 | { 0x0, 0x00, 0x00 }, /* 250 1.5 */ | ||
590 | { 0x0, 0x00, 0x00 }, /* 250 4.0 */ | ||
591 | { 0x0, 0x00, 0x00 }, /* 300 0.0 */ | ||
592 | { 0x0, 0x00, 0x00 }, /* 300 1.5 */ | ||
593 | { 0x0, 0x00, 0x00 }, /* 350 0.0 */ | ||
594 | }; | 549 | }; |
595 | 550 | ||
596 | struct icl_mg_phy_ddi_buf_trans { | 551 | struct icl_mg_phy_ddi_buf_trans { |
@@ -871,43 +826,23 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) | |||
871 | } | 826 | } |
872 | } | 827 | } |
873 | 828 | ||
874 | static const struct icl_combo_phy_ddi_buf_trans * | 829 | static const struct cnl_ddi_buf_trans * |
875 | icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, | 830 | icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, enum port port, |
876 | int type, int *n_entries) | 831 | int type, int rate, int *n_entries) |
877 | { | 832 | { |
878 | u32 voltage = I915_READ(ICL_PORT_COMP_DW3(port)) & VOLTAGE_INFO_MASK; | 833 | if (type == INTEL_OUTPUT_HDMI) { |
879 | 834 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_hdmi); | |
880 | if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { | 835 | return icl_combo_phy_ddi_translations_hdmi; |
881 | switch (voltage) { | 836 | } else if (rate > 540000 && type == INTEL_OUTPUT_EDP) { |
882 | case VOLTAGE_INFO_0_85V: | 837 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr3); |
883 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_85V); | 838 | return icl_combo_phy_ddi_translations_edp_hbr3; |
884 | return icl_combo_phy_ddi_translations_edp_0_85V; | 839 | } else if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { |
885 | case VOLTAGE_INFO_0_95V: | 840 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); |
886 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_0_95V); | 841 | return icl_combo_phy_ddi_translations_edp_hbr2; |
887 | return icl_combo_phy_ddi_translations_edp_0_95V; | ||
888 | case VOLTAGE_INFO_1_05V: | ||
889 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_1_05V); | ||
890 | return icl_combo_phy_ddi_translations_edp_1_05V; | ||
891 | default: | ||
892 | MISSING_CASE(voltage); | ||
893 | return NULL; | ||
894 | } | ||
895 | } else { | ||
896 | switch (voltage) { | ||
897 | case VOLTAGE_INFO_0_85V: | ||
898 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_85V); | ||
899 | return icl_combo_phy_ddi_translations_dp_hdmi_0_85V; | ||
900 | case VOLTAGE_INFO_0_95V: | ||
901 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_0_95V); | ||
902 | return icl_combo_phy_ddi_translations_dp_hdmi_0_95V; | ||
903 | case VOLTAGE_INFO_1_05V: | ||
904 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hdmi_1_05V); | ||
905 | return icl_combo_phy_ddi_translations_dp_hdmi_1_05V; | ||
906 | default: | ||
907 | MISSING_CASE(voltage); | ||
908 | return NULL; | ||
909 | } | ||
910 | } | 842 | } |
843 | |||
844 | *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_dp_hbr2); | ||
845 | return icl_combo_phy_ddi_translations_dp_hbr2; | ||
911 | } | 846 | } |
912 | 847 | ||
913 | static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) | 848 | static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port) |
@@ -918,8 +853,8 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por | |||
918 | 853 | ||
919 | if (IS_ICELAKE(dev_priv)) { | 854 | if (IS_ICELAKE(dev_priv)) { |
920 | if (intel_port_is_combophy(dev_priv, port)) | 855 | if (intel_port_is_combophy(dev_priv, port)) |
921 | icl_get_combo_buf_trans(dev_priv, port, | 856 | icl_get_combo_buf_trans(dev_priv, port, INTEL_OUTPUT_HDMI, |
922 | INTEL_OUTPUT_HDMI, &n_entries); | 857 | 0, &n_entries); |
923 | else | 858 | else |
924 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); | 859 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); |
925 | default_entry = n_entries - 1; | 860 | default_entry = n_entries - 1; |
@@ -1086,7 +1021,7 @@ static uint32_t icl_pll_to_ddi_pll_sel(struct intel_encoder *encoder, | |||
1086 | return DDI_CLK_SEL_TBT_810; | 1021 | return DDI_CLK_SEL_TBT_810; |
1087 | default: | 1022 | default: |
1088 | MISSING_CASE(clock); | 1023 | MISSING_CASE(clock); |
1089 | break; | 1024 | return DDI_CLK_SEL_NONE; |
1090 | } | 1025 | } |
1091 | case DPLL_ID_ICL_MGPLL1: | 1026 | case DPLL_ID_ICL_MGPLL1: |
1092 | case DPLL_ID_ICL_MGPLL2: | 1027 | case DPLL_ID_ICL_MGPLL2: |
@@ -2275,13 +2210,14 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
2275 | u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) | 2210 | u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) |
2276 | { | 2211 | { |
2277 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 2212 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
2213 | struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); | ||
2278 | enum port port = encoder->port; | 2214 | enum port port = encoder->port; |
2279 | int n_entries; | 2215 | int n_entries; |
2280 | 2216 | ||
2281 | if (IS_ICELAKE(dev_priv)) { | 2217 | if (IS_ICELAKE(dev_priv)) { |
2282 | if (intel_port_is_combophy(dev_priv, port)) | 2218 | if (intel_port_is_combophy(dev_priv, port)) |
2283 | icl_get_combo_buf_trans(dev_priv, port, encoder->type, | 2219 | icl_get_combo_buf_trans(dev_priv, port, encoder->type, |
2284 | &n_entries); | 2220 | intel_dp->link_rate, &n_entries); |
2285 | else | 2221 | else |
2286 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); | 2222 | n_entries = ARRAY_SIZE(icl_mg_phy_ddi_translations); |
2287 | } else if (IS_CANNONLAKE(dev_priv)) { | 2223 | } else if (IS_CANNONLAKE(dev_priv)) { |
@@ -2462,14 +2398,15 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
2462 | } | 2398 | } |
2463 | 2399 | ||
2464 | static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | 2400 | static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, |
2465 | u32 level, enum port port, int type) | 2401 | u32 level, enum port port, int type, |
2402 | int rate) | ||
2466 | { | 2403 | { |
2467 | const struct icl_combo_phy_ddi_buf_trans *ddi_translations = NULL; | 2404 | const struct cnl_ddi_buf_trans *ddi_translations = NULL; |
2468 | u32 n_entries, val; | 2405 | u32 n_entries, val; |
2469 | int ln; | 2406 | int ln; |
2470 | 2407 | ||
2471 | ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, | 2408 | ddi_translations = icl_get_combo_buf_trans(dev_priv, port, type, |
2472 | &n_entries); | 2409 | rate, &n_entries); |
2473 | if (!ddi_translations) | 2410 | if (!ddi_translations) |
2474 | return; | 2411 | return; |
2475 | 2412 | ||
@@ -2478,34 +2415,23 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | |||
2478 | level = n_entries - 1; | 2415 | level = n_entries - 1; |
2479 | } | 2416 | } |
2480 | 2417 | ||
2481 | /* Set PORT_TX_DW5 Rterm Sel to 110b. */ | 2418 | /* Set PORT_TX_DW5 */ |
2482 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | 2419 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); |
2483 | val &= ~RTERM_SELECT_MASK; | 2420 | val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK | |
2421 | TAP2_DISABLE | TAP3_DISABLE); | ||
2422 | val |= SCALING_MODE_SEL(0x2); | ||
2484 | val |= RTERM_SELECT(0x6); | 2423 | val |= RTERM_SELECT(0x6); |
2485 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2424 | val |= TAP3_DISABLE; |
2486 | |||
2487 | /* Program PORT_TX_DW5 */ | ||
2488 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | ||
2489 | /* Set DisableTap2 and DisableTap3 if MIPI DSI | ||
2490 | * Clear DisableTap2 and DisableTap3 for all other Ports | ||
2491 | */ | ||
2492 | if (type == INTEL_OUTPUT_DSI) { | ||
2493 | val |= TAP2_DISABLE; | ||
2494 | val |= TAP3_DISABLE; | ||
2495 | } else { | ||
2496 | val &= ~TAP2_DISABLE; | ||
2497 | val &= ~TAP3_DISABLE; | ||
2498 | } | ||
2499 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2425 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); |
2500 | 2426 | ||
2501 | /* Program PORT_TX_DW2 */ | 2427 | /* Program PORT_TX_DW2 */ |
2502 | val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); | 2428 | val = I915_READ(ICL_PORT_TX_DW2_LN0(port)); |
2503 | val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | | 2429 | val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK | |
2504 | RCOMP_SCALAR_MASK); | 2430 | RCOMP_SCALAR_MASK); |
2505 | val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_select); | 2431 | val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel); |
2506 | val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_select); | 2432 | val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel); |
2507 | /* Program Rcomp scalar for every table entry */ | 2433 | /* Program Rcomp scalar for every table entry */ |
2508 | val |= RCOMP_SCALAR(ddi_translations[level].dw2_swing_scalar); | 2434 | val |= RCOMP_SCALAR(0x98); |
2509 | I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); | 2435 | I915_WRITE(ICL_PORT_TX_DW2_GRP(port), val); |
2510 | 2436 | ||
2511 | /* Program PORT_TX_DW4 */ | 2437 | /* Program PORT_TX_DW4 */ |
@@ -2514,9 +2440,17 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv, | |||
2514 | val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); | 2440 | val = I915_READ(ICL_PORT_TX_DW4_LN(port, ln)); |
2515 | val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | | 2441 | val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK | |
2516 | CURSOR_COEFF_MASK); | 2442 | CURSOR_COEFF_MASK); |
2517 | val |= ddi_translations[level].dw4_scaling; | 2443 | val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1); |
2444 | val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2); | ||
2445 | val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff); | ||
2518 | I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); | 2446 | I915_WRITE(ICL_PORT_TX_DW4_LN(port, ln), val); |
2519 | } | 2447 | } |
2448 | |||
2449 | /* Program PORT_TX_DW7 */ | ||
2450 | val = I915_READ(ICL_PORT_TX_DW7_LN0(port)); | ||
2451 | val &= ~N_SCALAR_MASK; | ||
2452 | val |= N_SCALAR(ddi_translations[level].dw7_n_scalar); | ||
2453 | I915_WRITE(ICL_PORT_TX_DW7_GRP(port), val); | ||
2520 | } | 2454 | } |
2521 | 2455 | ||
2522 | static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, | 2456 | static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, |
@@ -2581,7 +2515,7 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder, | |||
2581 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); | 2515 | I915_WRITE(ICL_PORT_TX_DW5_GRP(port), val); |
2582 | 2516 | ||
2583 | /* 5. Program swing and de-emphasis */ | 2517 | /* 5. Program swing and de-emphasis */ |
2584 | icl_ddi_combo_vswing_program(dev_priv, level, port, type); | 2518 | icl_ddi_combo_vswing_program(dev_priv, level, port, type, rate); |
2585 | 2519 | ||
2586 | /* 6. Set training enable to trigger update */ | 2520 | /* 6. Set training enable to trigger update */ |
2587 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); | 2521 | val = I915_READ(ICL_PORT_TX_DW5_LN0(port)); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3da9c0f9e948..248128126422 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -15415,16 +15415,45 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc, | |||
15415 | } | 15415 | } |
15416 | } | 15416 | } |
15417 | 15417 | ||
15418 | static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state) | ||
15419 | { | ||
15420 | struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); | ||
15421 | |||
15422 | /* | ||
15423 | * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram | ||
15424 | * the hardware when a high res displays plugged in. DPLL P | ||
15425 | * divider is zero, and the pipe timings are bonkers. We'll | ||
15426 | * try to disable everything in that case. | ||
15427 | * | ||
15428 | * FIXME would be nice to be able to sanitize this state | ||
15429 | * without several WARNs, but for now let's take the easy | ||
15430 | * road. | ||
15431 | */ | ||
15432 | return IS_GEN6(dev_priv) && | ||
15433 | crtc_state->base.active && | ||
15434 | crtc_state->shared_dpll && | ||
15435 | crtc_state->port_clock == 0; | ||
15436 | } | ||
15437 | |||
15418 | static void intel_sanitize_encoder(struct intel_encoder *encoder) | 15438 | static void intel_sanitize_encoder(struct intel_encoder *encoder) |
15419 | { | 15439 | { |
15420 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); | 15440 | struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); |
15421 | struct intel_connector *connector; | 15441 | struct intel_connector *connector; |
15442 | struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); | ||
15443 | struct intel_crtc_state *crtc_state = crtc ? | ||
15444 | to_intel_crtc_state(crtc->base.state) : NULL; | ||
15422 | 15445 | ||
15423 | /* We need to check both for a crtc link (meaning that the | 15446 | /* We need to check both for a crtc link (meaning that the |
15424 | * encoder is active and trying to read from a pipe) and the | 15447 | * encoder is active and trying to read from a pipe) and the |
15425 | * pipe itself being active. */ | 15448 | * pipe itself being active. */ |
15426 | bool has_active_crtc = encoder->base.crtc && | 15449 | bool has_active_crtc = crtc_state && |
15427 | to_intel_crtc(encoder->base.crtc)->active; | 15450 | crtc_state->base.active; |
15451 | |||
15452 | if (crtc_state && has_bogus_dpll_config(crtc_state)) { | ||
15453 | DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n", | ||
15454 | pipe_name(crtc->pipe)); | ||
15455 | has_active_crtc = false; | ||
15456 | } | ||
15428 | 15457 | ||
15429 | connector = intel_encoder_find_connector(encoder); | 15458 | connector = intel_encoder_find_connector(encoder); |
15430 | if (connector && !has_active_crtc) { | 15459 | if (connector && !has_active_crtc) { |
@@ -15435,16 +15464,25 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) | |||
15435 | /* Connector is active, but has no active pipe. This is | 15464 | /* Connector is active, but has no active pipe. This is |
15436 | * fallout from our resume register restoring. Disable | 15465 | * fallout from our resume register restoring. Disable |
15437 | * the encoder manually again. */ | 15466 | * the encoder manually again. */ |
15438 | if (encoder->base.crtc) { | 15467 | if (crtc_state) { |
15439 | struct drm_crtc_state *crtc_state = encoder->base.crtc->state; | 15468 | struct drm_encoder *best_encoder; |
15440 | 15469 | ||
15441 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", | 15470 | DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n", |
15442 | encoder->base.base.id, | 15471 | encoder->base.base.id, |
15443 | encoder->base.name); | 15472 | encoder->base.name); |
15473 | |||
15474 | /* avoid oopsing in case the hooks consult best_encoder */ | ||
15475 | best_encoder = connector->base.state->best_encoder; | ||
15476 | connector->base.state->best_encoder = &encoder->base; | ||
15477 | |||
15444 | if (encoder->disable) | 15478 | if (encoder->disable) |
15445 | encoder->disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); | 15479 | encoder->disable(encoder, crtc_state, |
15480 | connector->base.state); | ||
15446 | if (encoder->post_disable) | 15481 | if (encoder->post_disable) |
15447 | encoder->post_disable(encoder, to_intel_crtc_state(crtc_state), connector->base.state); | 15482 | encoder->post_disable(encoder, crtc_state, |
15483 | connector->base.state); | ||
15484 | |||
15485 | connector->base.state->best_encoder = best_encoder; | ||
15448 | } | 15486 | } |
15449 | encoder->base.crtc = NULL; | 15487 | encoder->base.crtc = NULL; |
15450 | 15488 | ||
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index fdd2cbc56fa3..22a74608c6e4 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -304,9 +304,11 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp) | |||
304 | static int icl_max_source_rate(struct intel_dp *intel_dp) | 304 | static int icl_max_source_rate(struct intel_dp *intel_dp) |
305 | { | 305 | { |
306 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); | 306 | struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); |
307 | struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); | ||
307 | enum port port = dig_port->base.port; | 308 | enum port port = dig_port->base.port; |
308 | 309 | ||
309 | if (port == PORT_B) | 310 | if (intel_port_is_combophy(dev_priv, port) && |
311 | !intel_dp_is_edp(intel_dp)) | ||
310 | return 540000; | 312 | return 540000; |
311 | 313 | ||
312 | return 810000; | 314 | return 810000; |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f94a04b4ad87..e9ddeaf05a14 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -209,6 +209,16 @@ struct intel_fbdev { | |||
209 | unsigned long vma_flags; | 209 | unsigned long vma_flags; |
210 | async_cookie_t cookie; | 210 | async_cookie_t cookie; |
211 | int preferred_bpp; | 211 | int preferred_bpp; |
212 | |||
213 | /* Whether or not fbdev hpd processing is temporarily suspended */ | ||
214 | bool hpd_suspended : 1; | ||
215 | /* Set when a hotplug was received while HPD processing was | ||
216 | * suspended | ||
217 | */ | ||
218 | bool hpd_waiting : 1; | ||
219 | |||
220 | /* Protects hpd_suspended */ | ||
221 | struct mutex hpd_lock; | ||
212 | }; | 222 | }; |
213 | 223 | ||
214 | struct intel_encoder { | 224 | struct intel_encoder { |
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index fb5bb5b32a60..7f365ac0b549 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c | |||
@@ -679,6 +679,7 @@ int intel_fbdev_init(struct drm_device *dev) | |||
679 | if (ifbdev == NULL) | 679 | if (ifbdev == NULL) |
680 | return -ENOMEM; | 680 | return -ENOMEM; |
681 | 681 | ||
682 | mutex_init(&ifbdev->hpd_lock); | ||
682 | drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); | 683 | drm_fb_helper_prepare(dev, &ifbdev->helper, &intel_fb_helper_funcs); |
683 | 684 | ||
684 | if (!intel_fbdev_init_bios(dev, ifbdev)) | 685 | if (!intel_fbdev_init_bios(dev, ifbdev)) |
@@ -752,6 +753,26 @@ void intel_fbdev_fini(struct drm_i915_private *dev_priv) | |||
752 | intel_fbdev_destroy(ifbdev); | 753 | intel_fbdev_destroy(ifbdev); |
753 | } | 754 | } |
754 | 755 | ||
756 | /* Suspends/resumes fbdev processing of incoming HPD events. When resuming HPD | ||
757 | * processing, fbdev will perform a full connector reprobe if a hotplug event | ||
758 | * was received while HPD was suspended. | ||
759 | */ | ||
760 | static void intel_fbdev_hpd_set_suspend(struct intel_fbdev *ifbdev, int state) | ||
761 | { | ||
762 | bool send_hpd = false; | ||
763 | |||
764 | mutex_lock(&ifbdev->hpd_lock); | ||
765 | ifbdev->hpd_suspended = state == FBINFO_STATE_SUSPENDED; | ||
766 | send_hpd = !ifbdev->hpd_suspended && ifbdev->hpd_waiting; | ||
767 | ifbdev->hpd_waiting = false; | ||
768 | mutex_unlock(&ifbdev->hpd_lock); | ||
769 | |||
770 | if (send_hpd) { | ||
771 | DRM_DEBUG_KMS("Handling delayed fbcon HPD event\n"); | ||
772 | drm_fb_helper_hotplug_event(&ifbdev->helper); | ||
773 | } | ||
774 | } | ||
775 | |||
755 | void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) | 776 | void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) |
756 | { | 777 | { |
757 | struct drm_i915_private *dev_priv = to_i915(dev); | 778 | struct drm_i915_private *dev_priv = to_i915(dev); |
@@ -773,6 +794,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous | |||
773 | */ | 794 | */ |
774 | if (state != FBINFO_STATE_RUNNING) | 795 | if (state != FBINFO_STATE_RUNNING) |
775 | flush_work(&dev_priv->fbdev_suspend_work); | 796 | flush_work(&dev_priv->fbdev_suspend_work); |
797 | |||
776 | console_lock(); | 798 | console_lock(); |
777 | } else { | 799 | } else { |
778 | /* | 800 | /* |
@@ -800,17 +822,26 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous | |||
800 | 822 | ||
801 | drm_fb_helper_set_suspend(&ifbdev->helper, state); | 823 | drm_fb_helper_set_suspend(&ifbdev->helper, state); |
802 | console_unlock(); | 824 | console_unlock(); |
825 | |||
826 | intel_fbdev_hpd_set_suspend(ifbdev, state); | ||
803 | } | 827 | } |
804 | 828 | ||
805 | void intel_fbdev_output_poll_changed(struct drm_device *dev) | 829 | void intel_fbdev_output_poll_changed(struct drm_device *dev) |
806 | { | 830 | { |
807 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; | 831 | struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; |
832 | bool send_hpd; | ||
808 | 833 | ||
809 | if (!ifbdev) | 834 | if (!ifbdev) |
810 | return; | 835 | return; |
811 | 836 | ||
812 | intel_fbdev_sync(ifbdev); | 837 | intel_fbdev_sync(ifbdev); |
813 | if (ifbdev->vma || ifbdev->helper.deferred_setup) | 838 | |
839 | mutex_lock(&ifbdev->hpd_lock); | ||
840 | send_hpd = !ifbdev->hpd_suspended; | ||
841 | ifbdev->hpd_waiting = true; | ||
842 | mutex_unlock(&ifbdev->hpd_lock); | ||
843 | |||
844 | if (send_hpd && (ifbdev->vma || ifbdev->helper.deferred_setup)) | ||
814 | drm_fb_helper_hotplug_event(&ifbdev->helper); | 845 | drm_fb_helper_hotplug_event(&ifbdev->helper); |
815 | } | 846 | } |
816 | 847 | ||
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c index b8f106d9ecf8..3ac20153705a 100644 --- a/drivers/gpu/drm/i915/intel_opregion.c +++ b/drivers/gpu/drm/i915/intel_opregion.c | |||
@@ -55,7 +55,12 @@ | |||
55 | struct opregion_header { | 55 | struct opregion_header { |
56 | u8 signature[16]; | 56 | u8 signature[16]; |
57 | u32 size; | 57 | u32 size; |
58 | u32 opregion_ver; | 58 | struct { |
59 | u8 rsvd; | ||
60 | u8 revision; | ||
61 | u8 minor; | ||
62 | u8 major; | ||
63 | } __packed over; | ||
59 | u8 bios_ver[32]; | 64 | u8 bios_ver[32]; |
60 | u8 vbios_ver[16]; | 65 | u8 vbios_ver[16]; |
61 | u8 driver_ver[16]; | 66 | u8 driver_ver[16]; |
@@ -119,7 +124,8 @@ struct opregion_asle { | |||
119 | u64 fdss; | 124 | u64 fdss; |
120 | u32 fdsp; | 125 | u32 fdsp; |
121 | u32 stat; | 126 | u32 stat; |
122 | u64 rvda; /* Physical address of raw vbt data */ | 127 | u64 rvda; /* Physical (2.0) or relative from opregion (2.1+) |
128 | * address of raw VBT data. */ | ||
123 | u32 rvds; /* Size of raw vbt data */ | 129 | u32 rvds; /* Size of raw vbt data */ |
124 | u8 rsvd[58]; | 130 | u8 rsvd[58]; |
125 | } __packed; | 131 | } __packed; |
@@ -925,6 +931,11 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
925 | opregion->header = base; | 931 | opregion->header = base; |
926 | opregion->lid_state = base + ACPI_CLID; | 932 | opregion->lid_state = base + ACPI_CLID; |
927 | 933 | ||
934 | DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n", | ||
935 | opregion->header->over.major, | ||
936 | opregion->header->over.minor, | ||
937 | opregion->header->over.revision); | ||
938 | |||
928 | mboxes = opregion->header->mboxes; | 939 | mboxes = opregion->header->mboxes; |
929 | if (mboxes & MBOX_ACPI) { | 940 | if (mboxes & MBOX_ACPI) { |
930 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); | 941 | DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); |
@@ -953,11 +964,26 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
953 | if (dmi_check_system(intel_no_opregion_vbt)) | 964 | if (dmi_check_system(intel_no_opregion_vbt)) |
954 | goto out; | 965 | goto out; |
955 | 966 | ||
956 | if (opregion->header->opregion_ver >= 2 && opregion->asle && | 967 | if (opregion->header->over.major >= 2 && opregion->asle && |
957 | opregion->asle->rvda && opregion->asle->rvds) { | 968 | opregion->asle->rvda && opregion->asle->rvds) { |
958 | opregion->rvda = memremap(opregion->asle->rvda, | 969 | resource_size_t rvda = opregion->asle->rvda; |
959 | opregion->asle->rvds, | 970 | |
971 | /* | ||
972 | * opregion 2.0: rvda is the physical VBT address. | ||
973 | * | ||
974 | * opregion 2.1+: rvda is unsigned, relative offset from | ||
975 | * opregion base, and should never point within opregion. | ||
976 | */ | ||
977 | if (opregion->header->over.major > 2 || | ||
978 | opregion->header->over.minor >= 1) { | ||
979 | WARN_ON(rvda < OPREGION_SIZE); | ||
980 | |||
981 | rvda += asls; | ||
982 | } | ||
983 | |||
984 | opregion->rvda = memremap(rvda, opregion->asle->rvds, | ||
960 | MEMREMAP_WB); | 985 | MEMREMAP_WB); |
986 | |||
961 | vbt = opregion->rvda; | 987 | vbt = opregion->rvda; |
962 | vbt_size = opregion->asle->rvds; | 988 | vbt_size = opregion->asle->rvds; |
963 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { | 989 | if (intel_bios_is_valid_vbt(vbt, vbt_size)) { |
@@ -967,6 +993,8 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) | |||
967 | goto out; | 993 | goto out; |
968 | } else { | 994 | } else { |
969 | DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); | 995 | DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n"); |
996 | memunmap(opregion->rvda); | ||
997 | opregion->rvda = NULL; | ||
970 | } | 998 | } |
971 | } | 999 | } |
972 | 1000 | ||
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 72edaa7ff411..a1a7cc29fdd1 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -415,16 +415,17 @@ struct intel_engine_cs { | |||
415 | /** | 415 | /** |
416 | * @enable_count: Reference count for the enabled samplers. | 416 | * @enable_count: Reference count for the enabled samplers. |
417 | * | 417 | * |
418 | * Index number corresponds to the bit number from @enable. | 418 | * Index number corresponds to @enum drm_i915_pmu_engine_sample. |
419 | */ | 419 | */ |
420 | unsigned int enable_count[I915_PMU_SAMPLE_BITS]; | 420 | unsigned int enable_count[I915_ENGINE_SAMPLE_COUNT]; |
421 | /** | 421 | /** |
422 | * @sample: Counter values for sampling events. | 422 | * @sample: Counter values for sampling events. |
423 | * | 423 | * |
424 | * Our internal timer stores the current counters in this field. | 424 | * Our internal timer stores the current counters in this field. |
425 | * | ||
426 | * Index number corresponds to @enum drm_i915_pmu_engine_sample. | ||
425 | */ | 427 | */ |
426 | #define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1) | 428 | struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_COUNT]; |
427 | struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX]; | ||
428 | } pmu; | 429 | } pmu; |
429 | 430 | ||
430 | /* | 431 | /* |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index d2e003d8f3db..5170a0f5fe7b 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -494,7 +494,7 @@ skl_program_plane(struct intel_plane *plane, | |||
494 | 494 | ||
495 | keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); | 495 | keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); |
496 | 496 | ||
497 | keymsk = key->channel_mask & 0x3ffffff; | 497 | keymsk = key->channel_mask & 0x7ffffff; |
498 | if (alpha < 0xff) | 498 | if (alpha < 0xff) |
499 | keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; | 499 | keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; |
500 | 500 | ||
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 2c5bbe317353..e31e263cf86b 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c | |||
@@ -643,8 +643,10 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
643 | int bus_format; | 643 | int bus_format; |
644 | 644 | ||
645 | ret = of_property_read_u32(child, "reg", &i); | 645 | ret = of_property_read_u32(child, "reg", &i); |
646 | if (ret || i < 0 || i > 1) | 646 | if (ret || i < 0 || i > 1) { |
647 | return -EINVAL; | 647 | ret = -EINVAL; |
648 | goto free_child; | ||
649 | } | ||
648 | 650 | ||
649 | if (!of_device_is_available(child)) | 651 | if (!of_device_is_available(child)) |
650 | continue; | 652 | continue; |
@@ -657,7 +659,6 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
657 | channel = &imx_ldb->channel[i]; | 659 | channel = &imx_ldb->channel[i]; |
658 | channel->ldb = imx_ldb; | 660 | channel->ldb = imx_ldb; |
659 | channel->chno = i; | 661 | channel->chno = i; |
660 | channel->child = child; | ||
661 | 662 | ||
662 | /* | 663 | /* |
663 | * The output port is port@4 with an external 4-port mux or | 664 | * The output port is port@4 with an external 4-port mux or |
@@ -667,13 +668,13 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
667 | imx_ldb->lvds_mux ? 4 : 2, 0, | 668 | imx_ldb->lvds_mux ? 4 : 2, 0, |
668 | &channel->panel, &channel->bridge); | 669 | &channel->panel, &channel->bridge); |
669 | if (ret && ret != -ENODEV) | 670 | if (ret && ret != -ENODEV) |
670 | return ret; | 671 | goto free_child; |
671 | 672 | ||
672 | /* panel ddc only if there is no bridge */ | 673 | /* panel ddc only if there is no bridge */ |
673 | if (!channel->bridge) { | 674 | if (!channel->bridge) { |
674 | ret = imx_ldb_panel_ddc(dev, channel, child); | 675 | ret = imx_ldb_panel_ddc(dev, channel, child); |
675 | if (ret) | 676 | if (ret) |
676 | return ret; | 677 | goto free_child; |
677 | } | 678 | } |
678 | 679 | ||
679 | bus_format = of_get_bus_format(dev, child); | 680 | bus_format = of_get_bus_format(dev, child); |
@@ -689,18 +690,26 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) | |||
689 | if (bus_format < 0) { | 690 | if (bus_format < 0) { |
690 | dev_err(dev, "could not determine data mapping: %d\n", | 691 | dev_err(dev, "could not determine data mapping: %d\n", |
691 | bus_format); | 692 | bus_format); |
692 | return bus_format; | 693 | ret = bus_format; |
694 | goto free_child; | ||
693 | } | 695 | } |
694 | channel->bus_format = bus_format; | 696 | channel->bus_format = bus_format; |
697 | channel->child = child; | ||
695 | 698 | ||
696 | ret = imx_ldb_register(drm, channel); | 699 | ret = imx_ldb_register(drm, channel); |
697 | if (ret) | 700 | if (ret) { |
698 | return ret; | 701 | channel->child = NULL; |
702 | goto free_child; | ||
703 | } | ||
699 | } | 704 | } |
700 | 705 | ||
701 | dev_set_drvdata(dev, imx_ldb); | 706 | dev_set_drvdata(dev, imx_ldb); |
702 | 707 | ||
703 | return 0; | 708 | return 0; |
709 | |||
710 | free_child: | ||
711 | of_node_put(child); | ||
712 | return ret; | ||
704 | } | 713 | } |
705 | 714 | ||
706 | static void imx_ldb_unbind(struct device *dev, struct device *master, | 715 | static void imx_ldb_unbind(struct device *dev, struct device *master, |
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index c390924de93d..21e964f6ab5c 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c | |||
@@ -370,9 +370,9 @@ static int ipu_plane_atomic_check(struct drm_plane *plane, | |||
370 | if (ret) | 370 | if (ret) |
371 | return ret; | 371 | return ret; |
372 | 372 | ||
373 | /* CRTC should be enabled */ | 373 | /* nothing to check when disabling or disabled */ |
374 | if (!crtc_state->enable) | 374 | if (!crtc_state->enable) |
375 | return -EINVAL; | 375 | return 0; |
376 | 376 | ||
377 | switch (plane->type) { | 377 | switch (plane->type) { |
378 | case DRM_PLANE_TYPE_PRIMARY: | 378 | case DRM_PLANE_TYPE_PRIMARY: |
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 00a9c2ab9e6c..64fb788b6647 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c | |||
@@ -1406,7 +1406,7 @@ static void dsi_pll_disable(struct dss_pll *pll) | |||
1406 | 1406 | ||
1407 | static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) | 1407 | static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) |
1408 | { | 1408 | { |
1409 | struct dsi_data *dsi = p; | 1409 | struct dsi_data *dsi = s->private; |
1410 | struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; | 1410 | struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; |
1411 | enum dss_clk_source dispc_clk_src, dsi_clk_src; | 1411 | enum dss_clk_source dispc_clk_src, dsi_clk_src; |
1412 | int dsi_module = dsi->module_id; | 1412 | int dsi_module = dsi->module_id; |
@@ -1467,7 +1467,7 @@ static int dsi_dump_dsi_clocks(struct seq_file *s, void *p) | |||
1467 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | 1467 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS |
1468 | static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) | 1468 | static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) |
1469 | { | 1469 | { |
1470 | struct dsi_data *dsi = p; | 1470 | struct dsi_data *dsi = s->private; |
1471 | unsigned long flags; | 1471 | unsigned long flags; |
1472 | struct dsi_irq_stats stats; | 1472 | struct dsi_irq_stats stats; |
1473 | 1473 | ||
@@ -1558,7 +1558,7 @@ static int dsi_dump_dsi_irqs(struct seq_file *s, void *p) | |||
1558 | 1558 | ||
1559 | static int dsi_dump_dsi_regs(struct seq_file *s, void *p) | 1559 | static int dsi_dump_dsi_regs(struct seq_file *s, void *p) |
1560 | { | 1560 | { |
1561 | struct dsi_data *dsi = p; | 1561 | struct dsi_data *dsi = s->private; |
1562 | 1562 | ||
1563 | if (dsi_runtime_get(dsi)) | 1563 | if (dsi_runtime_get(dsi)) |
1564 | return 0; | 1564 | return 0; |
@@ -4751,6 +4751,17 @@ static int dsi_set_config(struct omap_dss_device *dssdev, | |||
4751 | dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; | 4751 | dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH; |
4752 | dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; | 4752 | dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW; |
4753 | dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; | 4753 | dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH; |
4754 | /* | ||
4755 | * HACK: These flags should be handled through the omap_dss_device bus | ||
4756 | * flags, but this will only be possible when the DSI encoder will be | ||
4757 | * converted to the omapdrm-managed encoder model. | ||
4758 | */ | ||
4759 | dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE; | ||
4760 | dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE; | ||
4761 | dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW; | ||
4762 | dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH; | ||
4763 | dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE; | ||
4764 | dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE; | ||
4754 | 4765 | ||
4755 | dss_mgr_set_timings(&dsi->output, &dsi->vm); | 4766 | dss_mgr_set_timings(&dsi->output, &dsi->vm); |
4756 | 4767 | ||
@@ -5083,15 +5094,15 @@ static int dsi_bind(struct device *dev, struct device *master, void *data) | |||
5083 | 5094 | ||
5084 | snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); | 5095 | snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1); |
5085 | dsi->debugfs.regs = dss_debugfs_create_file(dss, name, | 5096 | dsi->debugfs.regs = dss_debugfs_create_file(dss, name, |
5086 | dsi_dump_dsi_regs, &dsi); | 5097 | dsi_dump_dsi_regs, dsi); |
5087 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS | 5098 | #ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS |
5088 | snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); | 5099 | snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1); |
5089 | dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, | 5100 | dsi->debugfs.irqs = dss_debugfs_create_file(dss, name, |
5090 | dsi_dump_dsi_irqs, &dsi); | 5101 | dsi_dump_dsi_irqs, dsi); |
5091 | #endif | 5102 | #endif |
5092 | snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); | 5103 | snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1); |
5093 | dsi->debugfs.clks = dss_debugfs_create_file(dss, name, | 5104 | dsi->debugfs.clks = dss_debugfs_create_file(dss, name, |
5094 | dsi_dump_dsi_clocks, &dsi); | 5105 | dsi_dump_dsi_clocks, dsi); |
5095 | 5106 | ||
5096 | return 0; | 5107 | return 0; |
5097 | } | 5108 | } |
@@ -5104,8 +5115,6 @@ static void dsi_unbind(struct device *dev, struct device *master, void *data) | |||
5104 | dss_debugfs_remove_file(dsi->debugfs.irqs); | 5115 | dss_debugfs_remove_file(dsi->debugfs.irqs); |
5105 | dss_debugfs_remove_file(dsi->debugfs.regs); | 5116 | dss_debugfs_remove_file(dsi->debugfs.regs); |
5106 | 5117 | ||
5107 | of_platform_depopulate(dev); | ||
5108 | |||
5109 | WARN_ON(dsi->scp_clk_refcount > 0); | 5118 | WARN_ON(dsi->scp_clk_refcount > 0); |
5110 | 5119 | ||
5111 | dss_pll_unregister(&dsi->pll); | 5120 | dss_pll_unregister(&dsi->pll); |
@@ -5457,6 +5466,8 @@ static int dsi_remove(struct platform_device *pdev) | |||
5457 | 5466 | ||
5458 | dsi_uninit_output(dsi); | 5467 | dsi_uninit_output(dsi); |
5459 | 5468 | ||
5469 | of_platform_depopulate(&pdev->dev); | ||
5470 | |||
5460 | pm_runtime_disable(&pdev->dev); | 5471 | pm_runtime_disable(&pdev->dev); |
5461 | 5472 | ||
5462 | if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { | 5473 | if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) { |
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c index d587779a80b4..a97294ac96d5 100644 --- a/drivers/gpu/drm/radeon/ci_dpm.c +++ b/drivers/gpu/drm/radeon/ci_dpm.c | |||
@@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
5676 | u16 data_offset, size; | 5676 | u16 data_offset, size; |
5677 | u8 frev, crev; | 5677 | u8 frev, crev; |
5678 | struct ci_power_info *pi; | 5678 | struct ci_power_info *pi; |
5679 | enum pci_bus_speed speed_cap; | 5679 | enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; |
5680 | struct pci_dev *root = rdev->pdev->bus->self; | 5680 | struct pci_dev *root = rdev->pdev->bus->self; |
5681 | int ret; | 5681 | int ret; |
5682 | 5682 | ||
@@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev) | |||
5685 | return -ENOMEM; | 5685 | return -ENOMEM; |
5686 | rdev->pm.dpm.priv = pi; | 5686 | rdev->pm.dpm.priv = pi; |
5687 | 5687 | ||
5688 | speed_cap = pcie_get_speed_cap(root); | 5688 | if (!pci_is_root_bus(rdev->pdev->bus)) |
5689 | speed_cap = pcie_get_speed_cap(root); | ||
5689 | if (speed_cap == PCI_SPEED_UNKNOWN) { | 5690 | if (speed_cap == PCI_SPEED_UNKNOWN) { |
5690 | pi->sys_pcie_mask = 0; | 5691 | pi->sys_pcie_mask = 0; |
5691 | } else { | 5692 | } else { |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 8fb60b3af015..0a785ef0ab66 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6899 | struct ni_power_info *ni_pi; | 6899 | struct ni_power_info *ni_pi; |
6900 | struct si_power_info *si_pi; | 6900 | struct si_power_info *si_pi; |
6901 | struct atom_clock_dividers dividers; | 6901 | struct atom_clock_dividers dividers; |
6902 | enum pci_bus_speed speed_cap; | 6902 | enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN; |
6903 | struct pci_dev *root = rdev->pdev->bus->self; | 6903 | struct pci_dev *root = rdev->pdev->bus->self; |
6904 | int ret; | 6904 | int ret; |
6905 | 6905 | ||
@@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6911 | eg_pi = &ni_pi->eg; | 6911 | eg_pi = &ni_pi->eg; |
6912 | pi = &eg_pi->rv7xx; | 6912 | pi = &eg_pi->rv7xx; |
6913 | 6913 | ||
6914 | speed_cap = pcie_get_speed_cap(root); | 6914 | if (!pci_is_root_bus(rdev->pdev->bus)) |
6915 | speed_cap = pcie_get_speed_cap(root); | ||
6915 | if (speed_cap == PCI_SPEED_UNKNOWN) { | 6916 | if (speed_cap == PCI_SPEED_UNKNOWN) { |
6916 | si_pi->sys_pcie_mask = 0; | 6917 | si_pi->sys_pcie_mask = 0; |
6917 | } else { | 6918 | } else { |
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 37f93022a106..c0351abf83a3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c | |||
@@ -1,17 +1,8 @@ | |||
1 | //SPDX-License-Identifier: GPL-2.0+ | 1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* | 2 | /* |
3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | 3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd |
4 | * Author: | 4 | * Author: |
5 | * Sandy Huang <hjc@rock-chips.com> | 5 | * Sandy Huang <hjc@rock-chips.com> |
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | 6 | */ |
16 | 7 | ||
17 | #include <drm/drmP.h> | 8 | #include <drm/drmP.h> |
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h index 38b52e63b2b0..27b9635124bc 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.h +++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h | |||
@@ -1,17 +1,8 @@ | |||
1 | //SPDX-License-Identifier: GPL-2.0+ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | 2 | /* |
3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd | 3 | * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd |
4 | * Author: | 4 | * Author: |
5 | * Sandy Huang <hjc@rock-chips.com> | 5 | * Sandy Huang <hjc@rock-chips.com> |
6 | * | ||
7 | * This software is licensed under the terms of the GNU General Public | ||
8 | * License version 2, as published by the Free Software Foundation, and | ||
9 | * may be copied, distributed, and modified under those terms. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | 6 | */ |
16 | 7 | ||
17 | #ifdef CONFIG_ROCKCHIP_RGB | 8 | #ifdef CONFIG_ROCKCHIP_RGB |
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 4463d3826ecb..e2942c9a11a7 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c | |||
@@ -440,13 +440,10 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) | |||
440 | 440 | ||
441 | while ((entity->dependency = | 441 | while ((entity->dependency = |
442 | sched->ops->dependency(sched_job, entity))) { | 442 | sched->ops->dependency(sched_job, entity))) { |
443 | trace_drm_sched_job_wait_dep(sched_job, entity->dependency); | ||
443 | 444 | ||
444 | if (drm_sched_entity_add_dependency_cb(entity)) { | 445 | if (drm_sched_entity_add_dependency_cb(entity)) |
445 | |||
446 | trace_drm_sched_job_wait_dep(sched_job, | ||
447 | entity->dependency); | ||
448 | return NULL; | 446 | return NULL; |
449 | } | ||
450 | } | 447 | } |
451 | 448 | ||
452 | /* skip jobs from entity that marked guilty */ | 449 | /* skip jobs from entity that marked guilty */ |
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 0420f5c978b9..cf45d0f940f9 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c | |||
@@ -761,6 +761,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, | |||
761 | return PTR_ERR(tcon->sclk0); | 761 | return PTR_ERR(tcon->sclk0); |
762 | } | 762 | } |
763 | } | 763 | } |
764 | clk_prepare_enable(tcon->sclk0); | ||
764 | 765 | ||
765 | if (tcon->quirks->has_channel_1) { | 766 | if (tcon->quirks->has_channel_1) { |
766 | tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); | 767 | tcon->sclk1 = devm_clk_get(dev, "tcon-ch1"); |
@@ -775,6 +776,7 @@ static int sun4i_tcon_init_clocks(struct device *dev, | |||
775 | 776 | ||
776 | static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) | 777 | static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon) |
777 | { | 778 | { |
779 | clk_disable_unprepare(tcon->sclk0); | ||
778 | clk_disable_unprepare(tcon->clk); | 780 | clk_disable_unprepare(tcon->clk); |
779 | } | 781 | } |
780 | 782 | ||
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c index 9d9e8146db90..d7b409a3c0f8 100644 --- a/drivers/gpu/drm/vkms/vkms_crc.c +++ b/drivers/gpu/drm/vkms/vkms_crc.c | |||
@@ -1,4 +1,5 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | |||
2 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
3 | #include <linux/crc32.h> | 4 | #include <linux/crc32.h> |
4 | #include <drm/drm_atomic.h> | 5 | #include <drm/drm_atomic.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 177bbcb38306..eb56ee893761 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
10 | #include <drm/drm_atomic_helper.h> | 4 | #include <drm/drm_atomic_helper.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 83087877565c..7dcbecb5fac2 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c | |||
@@ -1,9 +1,4 @@ | |||
1 | /* | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | */ | ||
7 | 2 | ||
8 | /** | 3 | /** |
9 | * DOC: vkms (Virtual Kernel Modesetting) | 4 | * DOC: vkms (Virtual Kernel Modesetting) |
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index e4469cd3d254..81f1cfbeb936 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h | |||
@@ -1,3 +1,5 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
2 | |||
1 | #ifndef _VKMS_DRV_H_ | 3 | #ifndef _VKMS_DRV_H_ |
2 | #define _VKMS_DRV_H_ | 4 | #define _VKMS_DRV_H_ |
3 | 5 | ||
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 80311daed47a..138b0bb325cf 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include <linux/shmem_fs.h> | 3 | #include <linux/shmem_fs.h> |
10 | 4 | ||
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 271a0eb9042c..4173e4f48334 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
10 | #include <drm/drm_crtc_helper.h> | 4 | #include <drm/drm_crtc_helper.h> |
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index 418817600ad1..0e67d2d42f0c 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c | |||
@@ -1,10 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | 1 | // SPDX-License-Identifier: GPL-2.0+ |
2 | /* | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License as published by | ||
5 | * the Free Software Foundation; either version 2 of the License, or | ||
6 | * (at your option) any later version. | ||
7 | */ | ||
8 | 2 | ||
9 | #include "vkms_drv.h" | 3 | #include "vkms_drv.h" |
10 | #include <drm/drm_plane_helper.h> | 4 | #include <drm/drm_plane_helper.h> |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 25afb1d594e3..7ef5dcb06104 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
@@ -26,6 +26,7 @@ | |||
26 | **************************************************************************/ | 26 | **************************************************************************/ |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/console.h> | 28 | #include <linux/console.h> |
29 | #include <linux/dma-mapping.h> | ||
29 | 30 | ||
30 | #include <drm/drmP.h> | 31 | #include <drm/drmP.h> |
31 | #include "vmwgfx_drv.h" | 32 | #include "vmwgfx_drv.h" |
@@ -34,7 +35,6 @@ | |||
34 | #include <drm/ttm/ttm_placement.h> | 35 | #include <drm/ttm/ttm_placement.h> |
35 | #include <drm/ttm/ttm_bo_driver.h> | 36 | #include <drm/ttm/ttm_bo_driver.h> |
36 | #include <drm/ttm/ttm_module.h> | 37 | #include <drm/ttm/ttm_module.h> |
37 | #include <linux/intel-iommu.h> | ||
38 | 38 | ||
39 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" | 39 | #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" |
40 | #define VMWGFX_CHIP_SVGAII 0 | 40 | #define VMWGFX_CHIP_SVGAII 0 |
@@ -546,6 +546,21 @@ static void vmw_get_initial_size(struct vmw_private *dev_priv) | |||
546 | } | 546 | } |
547 | 547 | ||
548 | /** | 548 | /** |
549 | * vmw_assume_iommu - Figure out whether coherent dma-remapping might be | ||
550 | * taking place. | ||
551 | * @dev: Pointer to the struct drm_device. | ||
552 | * | ||
553 | * Return: true if iommu present, false otherwise. | ||
554 | */ | ||
555 | static bool vmw_assume_iommu(struct drm_device *dev) | ||
556 | { | ||
557 | const struct dma_map_ops *ops = get_dma_ops(dev->dev); | ||
558 | |||
559 | return !dma_is_direct(ops) && ops && | ||
560 | ops->map_page != dma_direct_map_page; | ||
561 | } | ||
562 | |||
563 | /** | ||
549 | * vmw_dma_select_mode - Determine how DMA mappings should be set up for this | 564 | * vmw_dma_select_mode - Determine how DMA mappings should be set up for this |
550 | * system. | 565 | * system. |
551 | * | 566 | * |
@@ -565,55 +580,27 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) | |||
565 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", | 580 | [vmw_dma_alloc_coherent] = "Using coherent TTM pages.", |
566 | [vmw_dma_map_populate] = "Keeping DMA mappings.", | 581 | [vmw_dma_map_populate] = "Keeping DMA mappings.", |
567 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; | 582 | [vmw_dma_map_bind] = "Giving up DMA mappings early."}; |
568 | #ifdef CONFIG_X86 | ||
569 | const struct dma_map_ops *dma_ops = get_dma_ops(dev_priv->dev->dev); | ||
570 | 583 | ||
571 | #ifdef CONFIG_INTEL_IOMMU | 584 | if (vmw_force_coherent) |
572 | if (intel_iommu_enabled) { | 585 | dev_priv->map_mode = vmw_dma_alloc_coherent; |
586 | else if (vmw_assume_iommu(dev_priv->dev)) | ||
573 | dev_priv->map_mode = vmw_dma_map_populate; | 587 | dev_priv->map_mode = vmw_dma_map_populate; |
574 | goto out_fixup; | 588 | else if (!vmw_force_iommu) |
575 | } | ||
576 | #endif | ||
577 | |||
578 | if (!(vmw_force_iommu || vmw_force_coherent)) { | ||
579 | dev_priv->map_mode = vmw_dma_phys; | 589 | dev_priv->map_mode = vmw_dma_phys; |
580 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); | 590 | else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl()) |
581 | return 0; | ||
582 | } | ||
583 | |||
584 | dev_priv->map_mode = vmw_dma_map_populate; | ||
585 | |||
586 | if (dma_ops && dma_ops->sync_single_for_cpu) | ||
587 | dev_priv->map_mode = vmw_dma_alloc_coherent; | 591 | dev_priv->map_mode = vmw_dma_alloc_coherent; |
588 | #ifdef CONFIG_SWIOTLB | 592 | else |
589 | if (swiotlb_nr_tbl() == 0) | ||
590 | dev_priv->map_mode = vmw_dma_map_populate; | 593 | dev_priv->map_mode = vmw_dma_map_populate; |
591 | #endif | ||
592 | 594 | ||
593 | #ifdef CONFIG_INTEL_IOMMU | 595 | if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu) |
594 | out_fixup: | ||
595 | #endif | ||
596 | if (dev_priv->map_mode == vmw_dma_map_populate && | ||
597 | vmw_restrict_iommu) | ||
598 | dev_priv->map_mode = vmw_dma_map_bind; | 596 | dev_priv->map_mode = vmw_dma_map_bind; |
599 | 597 | ||
600 | if (vmw_force_coherent) | 598 | /* No TTM coherent page pool? FIXME: Ask TTM instead! */ |
601 | dev_priv->map_mode = vmw_dma_alloc_coherent; | 599 | if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && |
602 | 600 | (dev_priv->map_mode == vmw_dma_alloc_coherent)) | |
603 | #if !defined(CONFIG_SWIOTLB) && !defined(CONFIG_INTEL_IOMMU) | ||
604 | /* | ||
605 | * No coherent page pool | ||
606 | */ | ||
607 | if (dev_priv->map_mode == vmw_dma_alloc_coherent) | ||
608 | return -EINVAL; | 601 | return -EINVAL; |
609 | #endif | ||
610 | |||
611 | #else /* CONFIG_X86 */ | ||
612 | dev_priv->map_mode = vmw_dma_map_populate; | ||
613 | #endif /* CONFIG_X86 */ | ||
614 | 602 | ||
615 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); | 603 | DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]); |
616 | |||
617 | return 0; | 604 | return 0; |
618 | } | 605 | } |
619 | 606 | ||
@@ -625,24 +612,20 @@ out_fixup: | |||
625 | * With 32-bit we can only handle 32 bit PFNs. Optionally set that | 612 | * With 32-bit we can only handle 32 bit PFNs. Optionally set that |
626 | * restriction also for 64-bit systems. | 613 | * restriction also for 64-bit systems. |
627 | */ | 614 | */ |
628 | #ifdef CONFIG_INTEL_IOMMU | ||
629 | static int vmw_dma_masks(struct vmw_private *dev_priv) | 615 | static int vmw_dma_masks(struct vmw_private *dev_priv) |
630 | { | 616 | { |
631 | struct drm_device *dev = dev_priv->dev; | 617 | struct drm_device *dev = dev_priv->dev; |
618 | int ret = 0; | ||
632 | 619 | ||
633 | if (intel_iommu_enabled && | 620 | ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); |
621 | if (dev_priv->map_mode != vmw_dma_phys && | ||
634 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { | 622 | (sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) { |
635 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); | 623 | DRM_INFO("Restricting DMA addresses to 44 bits.\n"); |
636 | return dma_set_mask(dev->dev, DMA_BIT_MASK(44)); | 624 | return dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(44)); |
637 | } | 625 | } |
638 | return 0; | 626 | |
639 | } | 627 | return ret; |
640 | #else | ||
641 | static int vmw_dma_masks(struct vmw_private *dev_priv) | ||
642 | { | ||
643 | return 0; | ||
644 | } | 628 | } |
645 | #endif | ||
646 | 629 | ||
647 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) | 630 | static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) |
648 | { | 631 | { |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index f2d13a72c05d..88b8178d4687 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
@@ -3570,7 +3570,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, | |||
3570 | *p_fence = NULL; | 3570 | *p_fence = NULL; |
3571 | } | 3571 | } |
3572 | 3572 | ||
3573 | return 0; | 3573 | return ret; |
3574 | } | 3574 | } |
3575 | 3575 | ||
3576 | /** | 3576 | /** |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index b351fb5214d3..ed2f67822f45 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
@@ -1646,7 +1646,7 @@ static int vmw_kms_check_topology(struct drm_device *dev, | |||
1646 | struct drm_connector_state *conn_state; | 1646 | struct drm_connector_state *conn_state; |
1647 | struct vmw_connector_state *vmw_conn_state; | 1647 | struct vmw_connector_state *vmw_conn_state; |
1648 | 1648 | ||
1649 | if (!du->pref_active) { | 1649 | if (!du->pref_active && new_crtc_state->enable) { |
1650 | ret = -EINVAL; | 1650 | ret = -EINVAL; |
1651 | goto clean; | 1651 | goto clean; |
1652 | } | 1652 | } |
@@ -2554,8 +2554,8 @@ void vmw_kms_helper_validation_finish(struct vmw_private *dev_priv, | |||
2554 | user_fence_rep) | 2554 | user_fence_rep) |
2555 | { | 2555 | { |
2556 | struct vmw_fence_obj *fence = NULL; | 2556 | struct vmw_fence_obj *fence = NULL; |
2557 | uint32_t handle; | 2557 | uint32_t handle = 0; |
2558 | int ret; | 2558 | int ret = 0; |
2559 | 2559 | ||
2560 | if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || | 2560 | if (file_priv || user_fence_rep || vmw_validation_has_bos(ctx) || |
2561 | out_fence) | 2561 | out_fence) |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 474b00e19697..0a7d4395d427 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -898,8 +898,8 @@ static struct ipu_devtype ipu_type_imx51 = { | |||
898 | .cpmem_ofs = 0x1f000000, | 898 | .cpmem_ofs = 0x1f000000, |
899 | .srm_ofs = 0x1f040000, | 899 | .srm_ofs = 0x1f040000, |
900 | .tpm_ofs = 0x1f060000, | 900 | .tpm_ofs = 0x1f060000, |
901 | .csi0_ofs = 0x1f030000, | 901 | .csi0_ofs = 0x1e030000, |
902 | .csi1_ofs = 0x1f038000, | 902 | .csi1_ofs = 0x1e038000, |
903 | .ic_ofs = 0x1e020000, | 903 | .ic_ofs = 0x1e020000, |
904 | .disp0_ofs = 0x1e040000, | 904 | .disp0_ofs = 0x1e040000, |
905 | .disp1_ofs = 0x1e048000, | 905 | .disp1_ofs = 0x1e048000, |
@@ -914,8 +914,8 @@ static struct ipu_devtype ipu_type_imx53 = { | |||
914 | .cpmem_ofs = 0x07000000, | 914 | .cpmem_ofs = 0x07000000, |
915 | .srm_ofs = 0x07040000, | 915 | .srm_ofs = 0x07040000, |
916 | .tpm_ofs = 0x07060000, | 916 | .tpm_ofs = 0x07060000, |
917 | .csi0_ofs = 0x07030000, | 917 | .csi0_ofs = 0x06030000, |
918 | .csi1_ofs = 0x07038000, | 918 | .csi1_ofs = 0x06038000, |
919 | .ic_ofs = 0x06020000, | 919 | .ic_ofs = 0x06020000, |
920 | .disp0_ofs = 0x06040000, | 920 | .disp0_ofs = 0x06040000, |
921 | .disp1_ofs = 0x06048000, | 921 | .disp1_ofs = 0x06048000, |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index 2f8db9d62551..4a28f3fbb0a2 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
@@ -106,6 +106,7 @@ struct ipu_pre { | |||
106 | void *buffer_virt; | 106 | void *buffer_virt; |
107 | bool in_use; | 107 | bool in_use; |
108 | unsigned int safe_window_end; | 108 | unsigned int safe_window_end; |
109 | unsigned int last_bufaddr; | ||
109 | }; | 110 | }; |
110 | 111 | ||
111 | static DEFINE_MUTEX(ipu_pre_list_mutex); | 112 | static DEFINE_MUTEX(ipu_pre_list_mutex); |
@@ -185,6 +186,7 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
185 | 186 | ||
186 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); | 187 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); |
187 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 188 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
189 | pre->last_bufaddr = bufaddr; | ||
188 | 190 | ||
189 | val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | | 191 | val = IPU_PRE_PREF_ENG_CTRL_INPUT_PIXEL_FORMAT(0) | |
190 | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | | 192 | IPU_PRE_PREF_ENG_CTRL_INPUT_ACTIVE_BPP(active_bpp) | |
@@ -242,7 +244,11 @@ void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) | |||
242 | unsigned short current_yblock; | 244 | unsigned short current_yblock; |
243 | u32 val; | 245 | u32 val; |
244 | 246 | ||
247 | if (bufaddr == pre->last_bufaddr) | ||
248 | return; | ||
249 | |||
245 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 250 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
251 | pre->last_bufaddr = bufaddr; | ||
246 | 252 | ||
247 | do { | 253 | do { |
248 | if (time_after(jiffies, timeout)) { | 254 | if (time_after(jiffies, timeout)) { |
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c index c530476edba6..ac9fda1b5a72 100644 --- a/drivers/hid/hid-debug.c +++ b/drivers/hid/hid-debug.c | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | #include <linux/debugfs.h> | 31 | #include <linux/debugfs.h> |
32 | #include <linux/seq_file.h> | 32 | #include <linux/seq_file.h> |
33 | #include <linux/kfifo.h> | ||
33 | #include <linux/sched/signal.h> | 34 | #include <linux/sched/signal.h> |
34 | #include <linux/export.h> | 35 | #include <linux/export.h> |
35 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
@@ -661,17 +662,12 @@ EXPORT_SYMBOL_GPL(hid_dump_device); | |||
661 | /* enqueue string to 'events' ring buffer */ | 662 | /* enqueue string to 'events' ring buffer */ |
662 | void hid_debug_event(struct hid_device *hdev, char *buf) | 663 | void hid_debug_event(struct hid_device *hdev, char *buf) |
663 | { | 664 | { |
664 | unsigned i; | ||
665 | struct hid_debug_list *list; | 665 | struct hid_debug_list *list; |
666 | unsigned long flags; | 666 | unsigned long flags; |
667 | 667 | ||
668 | spin_lock_irqsave(&hdev->debug_list_lock, flags); | 668 | spin_lock_irqsave(&hdev->debug_list_lock, flags); |
669 | list_for_each_entry(list, &hdev->debug_list, node) { | 669 | list_for_each_entry(list, &hdev->debug_list, node) |
670 | for (i = 0; buf[i]; i++) | 670 | kfifo_in(&list->hid_debug_fifo, buf, strlen(buf)); |
671 | list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = | ||
672 | buf[i]; | ||
673 | list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; | ||
674 | } | ||
675 | spin_unlock_irqrestore(&hdev->debug_list_lock, flags); | 671 | spin_unlock_irqrestore(&hdev->debug_list_lock, flags); |
676 | 672 | ||
677 | wake_up_interruptible(&hdev->debug_wait); | 673 | wake_up_interruptible(&hdev->debug_wait); |
@@ -722,8 +718,7 @@ void hid_dump_input(struct hid_device *hdev, struct hid_usage *usage, __s32 valu | |||
722 | hid_debug_event(hdev, buf); | 718 | hid_debug_event(hdev, buf); |
723 | 719 | ||
724 | kfree(buf); | 720 | kfree(buf); |
725 | wake_up_interruptible(&hdev->debug_wait); | 721 | wake_up_interruptible(&hdev->debug_wait); |
726 | |||
727 | } | 722 | } |
728 | EXPORT_SYMBOL_GPL(hid_dump_input); | 723 | EXPORT_SYMBOL_GPL(hid_dump_input); |
729 | 724 | ||
@@ -1083,8 +1078,8 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) | |||
1083 | goto out; | 1078 | goto out; |
1084 | } | 1079 | } |
1085 | 1080 | ||
1086 | if (!(list->hid_debug_buf = kzalloc(HID_DEBUG_BUFSIZE, GFP_KERNEL))) { | 1081 | err = kfifo_alloc(&list->hid_debug_fifo, HID_DEBUG_FIFOSIZE, GFP_KERNEL); |
1087 | err = -ENOMEM; | 1082 | if (err) { |
1088 | kfree(list); | 1083 | kfree(list); |
1089 | goto out; | 1084 | goto out; |
1090 | } | 1085 | } |
@@ -1104,77 +1099,57 @@ static ssize_t hid_debug_events_read(struct file *file, char __user *buffer, | |||
1104 | size_t count, loff_t *ppos) | 1099 | size_t count, loff_t *ppos) |
1105 | { | 1100 | { |
1106 | struct hid_debug_list *list = file->private_data; | 1101 | struct hid_debug_list *list = file->private_data; |
1107 | int ret = 0, len; | 1102 | int ret = 0, copied; |
1108 | DECLARE_WAITQUEUE(wait, current); | 1103 | DECLARE_WAITQUEUE(wait, current); |
1109 | 1104 | ||
1110 | mutex_lock(&list->read_mutex); | 1105 | mutex_lock(&list->read_mutex); |
1111 | while (ret == 0) { | 1106 | if (kfifo_is_empty(&list->hid_debug_fifo)) { |
1112 | if (list->head == list->tail) { | 1107 | add_wait_queue(&list->hdev->debug_wait, &wait); |
1113 | add_wait_queue(&list->hdev->debug_wait, &wait); | 1108 | set_current_state(TASK_INTERRUPTIBLE); |
1114 | set_current_state(TASK_INTERRUPTIBLE); | 1109 | |
1115 | 1110 | while (kfifo_is_empty(&list->hid_debug_fifo)) { | |
1116 | while (list->head == list->tail) { | 1111 | if (file->f_flags & O_NONBLOCK) { |
1117 | if (file->f_flags & O_NONBLOCK) { | 1112 | ret = -EAGAIN; |
1118 | ret = -EAGAIN; | 1113 | break; |
1119 | break; | 1114 | } |
1120 | } | ||
1121 | if (signal_pending(current)) { | ||
1122 | ret = -ERESTARTSYS; | ||
1123 | break; | ||
1124 | } | ||
1125 | 1115 | ||
1126 | if (!list->hdev || !list->hdev->debug) { | 1116 | if (signal_pending(current)) { |
1127 | ret = -EIO; | 1117 | ret = -ERESTARTSYS; |
1128 | set_current_state(TASK_RUNNING); | 1118 | break; |
1129 | goto out; | 1119 | } |
1130 | } | ||
1131 | 1120 | ||
1132 | /* allow O_NONBLOCK from other threads */ | 1121 | /* if list->hdev is NULL we cannot remove_wait_queue(). |
1133 | mutex_unlock(&list->read_mutex); | 1122 | * if list->hdev->debug is 0 then hid_debug_unregister() |
1134 | schedule(); | 1123 | * was already called and list->hdev is being destroyed. |
1135 | mutex_lock(&list->read_mutex); | 1124 | * if we add remove_wait_queue() here we can hit a race. |
1136 | set_current_state(TASK_INTERRUPTIBLE); | 1125 | */ |
1126 | if (!list->hdev || !list->hdev->debug) { | ||
1127 | ret = -EIO; | ||
1128 | set_current_state(TASK_RUNNING); | ||
1129 | goto out; | ||
1137 | } | 1130 | } |
1138 | 1131 | ||
1139 | set_current_state(TASK_RUNNING); | 1132 | /* allow O_NONBLOCK from other threads */ |
1140 | remove_wait_queue(&list->hdev->debug_wait, &wait); | 1133 | mutex_unlock(&list->read_mutex); |
1134 | schedule(); | ||
1135 | mutex_lock(&list->read_mutex); | ||
1136 | set_current_state(TASK_INTERRUPTIBLE); | ||
1141 | } | 1137 | } |
1142 | 1138 | ||
1143 | if (ret) | 1139 | __set_current_state(TASK_RUNNING); |
1144 | goto out; | 1140 | remove_wait_queue(&list->hdev->debug_wait, &wait); |
1145 | 1141 | ||
1146 | /* pass the ringbuffer contents to userspace */ | 1142 | if (ret) |
1147 | copy_rest: | ||
1148 | if (list->tail == list->head) | ||
1149 | goto out; | 1143 | goto out; |
1150 | if (list->tail > list->head) { | ||
1151 | len = list->tail - list->head; | ||
1152 | if (len > count) | ||
1153 | len = count; | ||
1154 | |||
1155 | if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) { | ||
1156 | ret = -EFAULT; | ||
1157 | goto out; | ||
1158 | } | ||
1159 | ret += len; | ||
1160 | list->head += len; | ||
1161 | } else { | ||
1162 | len = HID_DEBUG_BUFSIZE - list->head; | ||
1163 | if (len > count) | ||
1164 | len = count; | ||
1165 | |||
1166 | if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) { | ||
1167 | ret = -EFAULT; | ||
1168 | goto out; | ||
1169 | } | ||
1170 | list->head = 0; | ||
1171 | ret += len; | ||
1172 | count -= len; | ||
1173 | if (count > 0) | ||
1174 | goto copy_rest; | ||
1175 | } | ||
1176 | |||
1177 | } | 1144 | } |
1145 | |||
1146 | /* pass the fifo content to userspace, locking is not needed with only | ||
1147 | * one concurrent reader and one concurrent writer | ||
1148 | */ | ||
1149 | ret = kfifo_to_user(&list->hid_debug_fifo, buffer, count, &copied); | ||
1150 | if (ret) | ||
1151 | goto out; | ||
1152 | ret = copied; | ||
1178 | out: | 1153 | out: |
1179 | mutex_unlock(&list->read_mutex); | 1154 | mutex_unlock(&list->read_mutex); |
1180 | return ret; | 1155 | return ret; |
@@ -1185,7 +1160,7 @@ static __poll_t hid_debug_events_poll(struct file *file, poll_table *wait) | |||
1185 | struct hid_debug_list *list = file->private_data; | 1160 | struct hid_debug_list *list = file->private_data; |
1186 | 1161 | ||
1187 | poll_wait(file, &list->hdev->debug_wait, wait); | 1162 | poll_wait(file, &list->hdev->debug_wait, wait); |
1188 | if (list->head != list->tail) | 1163 | if (!kfifo_is_empty(&list->hid_debug_fifo)) |
1189 | return EPOLLIN | EPOLLRDNORM; | 1164 | return EPOLLIN | EPOLLRDNORM; |
1190 | if (!list->hdev->debug) | 1165 | if (!list->hdev->debug) |
1191 | return EPOLLERR | EPOLLHUP; | 1166 | return EPOLLERR | EPOLLHUP; |
@@ -1200,7 +1175,7 @@ static int hid_debug_events_release(struct inode *inode, struct file *file) | |||
1200 | spin_lock_irqsave(&list->hdev->debug_list_lock, flags); | 1175 | spin_lock_irqsave(&list->hdev->debug_list_lock, flags); |
1201 | list_del(&list->node); | 1176 | list_del(&list->node); |
1202 | spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); | 1177 | spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); |
1203 | kfree(list->hid_debug_buf); | 1178 | kfifo_free(&list->hid_debug_fifo); |
1204 | kfree(list); | 1179 | kfree(list); |
1205 | 1180 | ||
1206 | return 0; | 1181 | return 0; |
@@ -1246,4 +1221,3 @@ void hid_debug_exit(void) | |||
1246 | { | 1221 | { |
1247 | debugfs_remove_recursive(hid_debug_root); | 1222 | debugfs_remove_recursive(hid_debug_root); |
1248 | } | 1223 | } |
1249 | |||
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c index 4adec4ab7d06..59ee01f3d022 100644 --- a/drivers/hwmon/nct6775.c +++ b/drivers/hwmon/nct6775.c | |||
@@ -3594,7 +3594,8 @@ nct6775_check_fan_inputs(struct nct6775_data *data) | |||
3594 | fan5pin |= cr1b & BIT(5); | 3594 | fan5pin |= cr1b & BIT(5); |
3595 | fan5pin |= creb & BIT(5); | 3595 | fan5pin |= creb & BIT(5); |
3596 | 3596 | ||
3597 | fan6pin = creb & BIT(3); | 3597 | fan6pin = !dsw_en && (cr2d & BIT(1)); |
3598 | fan6pin |= creb & BIT(3); | ||
3598 | 3599 | ||
3599 | pwm5pin |= cr2d & BIT(7); | 3600 | pwm5pin |= cr2d & BIT(7); |
3600 | pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); | 3601 | pwm5pin |= (creb & BIT(4)) && !(cr2a & BIT(0)); |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b1086bfb0465..cd9c65f3d404 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -1500,8 +1500,7 @@ static int omap_i2c_remove(struct platform_device *pdev) | |||
1500 | return 0; | 1500 | return 0; |
1501 | } | 1501 | } |
1502 | 1502 | ||
1503 | #ifdef CONFIG_PM | 1503 | static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev) |
1504 | static int omap_i2c_runtime_suspend(struct device *dev) | ||
1505 | { | 1504 | { |
1506 | struct omap_i2c_dev *omap = dev_get_drvdata(dev); | 1505 | struct omap_i2c_dev *omap = dev_get_drvdata(dev); |
1507 | 1506 | ||
@@ -1527,7 +1526,7 @@ static int omap_i2c_runtime_suspend(struct device *dev) | |||
1527 | return 0; | 1526 | return 0; |
1528 | } | 1527 | } |
1529 | 1528 | ||
1530 | static int omap_i2c_runtime_resume(struct device *dev) | 1529 | static int __maybe_unused omap_i2c_runtime_resume(struct device *dev) |
1531 | { | 1530 | { |
1532 | struct omap_i2c_dev *omap = dev_get_drvdata(dev); | 1531 | struct omap_i2c_dev *omap = dev_get_drvdata(dev); |
1533 | 1532 | ||
@@ -1542,20 +1541,18 @@ static int omap_i2c_runtime_resume(struct device *dev) | |||
1542 | } | 1541 | } |
1543 | 1542 | ||
1544 | static const struct dev_pm_ops omap_i2c_pm_ops = { | 1543 | static const struct dev_pm_ops omap_i2c_pm_ops = { |
1544 | SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, | ||
1545 | pm_runtime_force_resume) | ||
1545 | SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, | 1546 | SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend, |
1546 | omap_i2c_runtime_resume, NULL) | 1547 | omap_i2c_runtime_resume, NULL) |
1547 | }; | 1548 | }; |
1548 | #define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) | ||
1549 | #else | ||
1550 | #define OMAP_I2C_PM_OPS NULL | ||
1551 | #endif /* CONFIG_PM */ | ||
1552 | 1549 | ||
1553 | static struct platform_driver omap_i2c_driver = { | 1550 | static struct platform_driver omap_i2c_driver = { |
1554 | .probe = omap_i2c_probe, | 1551 | .probe = omap_i2c_probe, |
1555 | .remove = omap_i2c_remove, | 1552 | .remove = omap_i2c_remove, |
1556 | .driver = { | 1553 | .driver = { |
1557 | .name = "omap_i2c", | 1554 | .name = "omap_i2c", |
1558 | .pm = OMAP_I2C_PM_OPS, | 1555 | .pm = &omap_i2c_pm_ops, |
1559 | .of_match_table = of_match_ptr(omap_i2c_of_match), | 1556 | .of_match_table = of_match_ptr(omap_i2c_of_match), |
1560 | }, | 1557 | }, |
1561 | }; | 1558 | }; |
diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index c39f89d2deba..2dc628d4f1ae 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c | |||
@@ -1828,7 +1828,7 @@ int i3c_master_add_i3c_dev_locked(struct i3c_master_controller *master, | |||
1828 | 1828 | ||
1829 | ret = i3c_master_retrieve_dev_info(newdev); | 1829 | ret = i3c_master_retrieve_dev_info(newdev); |
1830 | if (ret) | 1830 | if (ret) |
1831 | goto err_free_dev; | 1831 | goto err_detach_dev; |
1832 | 1832 | ||
1833 | olddev = i3c_master_search_i3c_dev_duplicate(newdev); | 1833 | olddev = i3c_master_search_i3c_dev_duplicate(newdev); |
1834 | if (olddev) { | 1834 | if (olddev) { |
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index f8c00b94817f..bb03079fbade 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c | |||
@@ -419,12 +419,9 @@ static void dw_i3c_master_enqueue_xfer(struct dw_i3c_master *master, | |||
419 | spin_unlock_irqrestore(&master->xferqueue.lock, flags); | 419 | spin_unlock_irqrestore(&master->xferqueue.lock, flags); |
420 | } | 420 | } |
421 | 421 | ||
422 | static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, | 422 | static void dw_i3c_master_dequeue_xfer_locked(struct dw_i3c_master *master, |
423 | struct dw_i3c_xfer *xfer) | 423 | struct dw_i3c_xfer *xfer) |
424 | { | 424 | { |
425 | unsigned long flags; | ||
426 | |||
427 | spin_lock_irqsave(&master->xferqueue.lock, flags); | ||
428 | if (master->xferqueue.cur == xfer) { | 425 | if (master->xferqueue.cur == xfer) { |
429 | u32 status; | 426 | u32 status; |
430 | 427 | ||
@@ -439,6 +436,15 @@ static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, | |||
439 | } else { | 436 | } else { |
440 | list_del_init(&xfer->node); | 437 | list_del_init(&xfer->node); |
441 | } | 438 | } |
439 | } | ||
440 | |||
441 | static void dw_i3c_master_dequeue_xfer(struct dw_i3c_master *master, | ||
442 | struct dw_i3c_xfer *xfer) | ||
443 | { | ||
444 | unsigned long flags; | ||
445 | |||
446 | spin_lock_irqsave(&master->xferqueue.lock, flags); | ||
447 | dw_i3c_master_dequeue_xfer_locked(master, xfer); | ||
442 | spin_unlock_irqrestore(&master->xferqueue.lock, flags); | 448 | spin_unlock_irqrestore(&master->xferqueue.lock, flags); |
443 | } | 449 | } |
444 | 450 | ||
@@ -494,7 +500,7 @@ static void dw_i3c_master_end_xfer_locked(struct dw_i3c_master *master, u32 isr) | |||
494 | complete(&xfer->comp); | 500 | complete(&xfer->comp); |
495 | 501 | ||
496 | if (ret < 0) { | 502 | if (ret < 0) { |
497 | dw_i3c_master_dequeue_xfer(master, xfer); | 503 | dw_i3c_master_dequeue_xfer_locked(master, xfer); |
498 | writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME, | 504 | writel(readl(master->regs + DEVICE_CTRL) | DEV_CTRL_RESUME, |
499 | master->regs + DEVICE_CTRL); | 505 | master->regs + DEVICE_CTRL); |
500 | } | 506 | } |
diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index 031d568b4972..4e339cfd0c54 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c | |||
@@ -27,9 +27,18 @@ | |||
27 | #include <linux/iio/machine.h> | 27 | #include <linux/iio/machine.h> |
28 | #include <linux/iio/driver.h> | 28 | #include <linux/iio/driver.h> |
29 | 29 | ||
30 | #define AXP288_ADC_EN_MASK 0xF1 | 30 | /* |
31 | #define AXP288_ADC_TS_PIN_GPADC 0xF2 | 31 | * This mask enables all ADCs except for the battery temp-sensor (TS), that is |
32 | #define AXP288_ADC_TS_PIN_ON 0xF3 | 32 | * left as-is to avoid breaking charging on devices without a temp-sensor. |
33 | */ | ||
34 | #define AXP288_ADC_EN_MASK 0xF0 | ||
35 | #define AXP288_ADC_TS_ENABLE 0x01 | ||
36 | |||
37 | #define AXP288_ADC_TS_CURRENT_ON_OFF_MASK GENMASK(1, 0) | ||
38 | #define AXP288_ADC_TS_CURRENT_OFF (0 << 0) | ||
39 | #define AXP288_ADC_TS_CURRENT_ON_WHEN_CHARGING (1 << 0) | ||
40 | #define AXP288_ADC_TS_CURRENT_ON_ONDEMAND (2 << 0) | ||
41 | #define AXP288_ADC_TS_CURRENT_ON (3 << 0) | ||
33 | 42 | ||
34 | enum axp288_adc_id { | 43 | enum axp288_adc_id { |
35 | AXP288_ADC_TS, | 44 | AXP288_ADC_TS, |
@@ -44,6 +53,7 @@ enum axp288_adc_id { | |||
44 | struct axp288_adc_info { | 53 | struct axp288_adc_info { |
45 | int irq; | 54 | int irq; |
46 | struct regmap *regmap; | 55 | struct regmap *regmap; |
56 | bool ts_enabled; | ||
47 | }; | 57 | }; |
48 | 58 | ||
49 | static const struct iio_chan_spec axp288_adc_channels[] = { | 59 | static const struct iio_chan_spec axp288_adc_channels[] = { |
@@ -115,21 +125,33 @@ static int axp288_adc_read_channel(int *val, unsigned long address, | |||
115 | return IIO_VAL_INT; | 125 | return IIO_VAL_INT; |
116 | } | 126 | } |
117 | 127 | ||
118 | static int axp288_adc_set_ts(struct regmap *regmap, unsigned int mode, | 128 | /* |
119 | unsigned long address) | 129 | * The current-source used for the battery temp-sensor (TS) is shared |
130 | * with the GPADC. For proper fuel-gauge and charger operation the TS | ||
131 | * current-source needs to be permanently on. But to read the GPADC we | ||
132 | * need to temporary switch the TS current-source to ondemand, so that | ||
133 | * the GPADC can use it, otherwise we will always read an all 0 value. | ||
134 | */ | ||
135 | static int axp288_adc_set_ts(struct axp288_adc_info *info, | ||
136 | unsigned int mode, unsigned long address) | ||
120 | { | 137 | { |
121 | int ret; | 138 | int ret; |
122 | 139 | ||
123 | /* channels other than GPADC do not need to switch TS pin */ | 140 | /* No need to switch the current-source if the TS pin is disabled */ |
141 | if (!info->ts_enabled) | ||
142 | return 0; | ||
143 | |||
144 | /* Channels other than GPADC do not need the current source */ | ||
124 | if (address != AXP288_GP_ADC_H) | 145 | if (address != AXP288_GP_ADC_H) |
125 | return 0; | 146 | return 0; |
126 | 147 | ||
127 | ret = regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, mode); | 148 | ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, |
149 | AXP288_ADC_TS_CURRENT_ON_OFF_MASK, mode); | ||
128 | if (ret) | 150 | if (ret) |
129 | return ret; | 151 | return ret; |
130 | 152 | ||
131 | /* When switching to the GPADC pin give things some time to settle */ | 153 | /* When switching to the GPADC pin give things some time to settle */ |
132 | if (mode == AXP288_ADC_TS_PIN_GPADC) | 154 | if (mode == AXP288_ADC_TS_CURRENT_ON_ONDEMAND) |
133 | usleep_range(6000, 10000); | 155 | usleep_range(6000, 10000); |
134 | 156 | ||
135 | return 0; | 157 | return 0; |
@@ -145,14 +167,14 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, | |||
145 | mutex_lock(&indio_dev->mlock); | 167 | mutex_lock(&indio_dev->mlock); |
146 | switch (mask) { | 168 | switch (mask) { |
147 | case IIO_CHAN_INFO_RAW: | 169 | case IIO_CHAN_INFO_RAW: |
148 | if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_GPADC, | 170 | if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON_ONDEMAND, |
149 | chan->address)) { | 171 | chan->address)) { |
150 | dev_err(&indio_dev->dev, "GPADC mode\n"); | 172 | dev_err(&indio_dev->dev, "GPADC mode\n"); |
151 | ret = -EINVAL; | 173 | ret = -EINVAL; |
152 | break; | 174 | break; |
153 | } | 175 | } |
154 | ret = axp288_adc_read_channel(val, chan->address, info->regmap); | 176 | ret = axp288_adc_read_channel(val, chan->address, info->regmap); |
155 | if (axp288_adc_set_ts(info->regmap, AXP288_ADC_TS_PIN_ON, | 177 | if (axp288_adc_set_ts(info, AXP288_ADC_TS_CURRENT_ON, |
156 | chan->address)) | 178 | chan->address)) |
157 | dev_err(&indio_dev->dev, "TS pin restore\n"); | 179 | dev_err(&indio_dev->dev, "TS pin restore\n"); |
158 | break; | 180 | break; |
@@ -164,13 +186,35 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev, | |||
164 | return ret; | 186 | return ret; |
165 | } | 187 | } |
166 | 188 | ||
167 | static int axp288_adc_set_state(struct regmap *regmap) | 189 | static int axp288_adc_initialize(struct axp288_adc_info *info) |
168 | { | 190 | { |
169 | /* ADC should be always enabled for internal FG to function */ | 191 | int ret, adc_enable_val; |
170 | if (regmap_write(regmap, AXP288_ADC_TS_PIN_CTRL, AXP288_ADC_TS_PIN_ON)) | 192 | |
171 | return -EIO; | 193 | /* |
194 | * Determine if the TS pin is enabled and set the TS current-source | ||
195 | * accordingly. | ||
196 | */ | ||
197 | ret = regmap_read(info->regmap, AXP20X_ADC_EN1, &adc_enable_val); | ||
198 | if (ret) | ||
199 | return ret; | ||
200 | |||
201 | if (adc_enable_val & AXP288_ADC_TS_ENABLE) { | ||
202 | info->ts_enabled = true; | ||
203 | ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, | ||
204 | AXP288_ADC_TS_CURRENT_ON_OFF_MASK, | ||
205 | AXP288_ADC_TS_CURRENT_ON); | ||
206 | } else { | ||
207 | info->ts_enabled = false; | ||
208 | ret = regmap_update_bits(info->regmap, AXP288_ADC_TS_PIN_CTRL, | ||
209 | AXP288_ADC_TS_CURRENT_ON_OFF_MASK, | ||
210 | AXP288_ADC_TS_CURRENT_OFF); | ||
211 | } | ||
212 | if (ret) | ||
213 | return ret; | ||
172 | 214 | ||
173 | return regmap_write(regmap, AXP20X_ADC_EN1, AXP288_ADC_EN_MASK); | 215 | /* Turn on the ADC for all channels except TS, leave TS as is */ |
216 | return regmap_update_bits(info->regmap, AXP20X_ADC_EN1, | ||
217 | AXP288_ADC_EN_MASK, AXP288_ADC_EN_MASK); | ||
174 | } | 218 | } |
175 | 219 | ||
176 | static const struct iio_info axp288_adc_iio_info = { | 220 | static const struct iio_info axp288_adc_iio_info = { |
@@ -200,7 +244,7 @@ static int axp288_adc_probe(struct platform_device *pdev) | |||
200 | * Set ADC to enabled state at all time, including system suspend. | 244 | * Set ADC to enabled state at all time, including system suspend. |
201 | * otherwise internal fuel gauge functionality may be affected. | 245 | * otherwise internal fuel gauge functionality may be affected. |
202 | */ | 246 | */ |
203 | ret = axp288_adc_set_state(axp20x->regmap); | 247 | ret = axp288_adc_initialize(info); |
204 | if (ret) { | 248 | if (ret) { |
205 | dev_err(&pdev->dev, "unable to enable ADC device\n"); | 249 | dev_err(&pdev->dev, "unable to enable ADC device\n"); |
206 | return ret; | 250 | return ret; |
diff --git a/drivers/iio/adc/ti-ads8688.c b/drivers/iio/adc/ti-ads8688.c index 184d686ebd99..8b4568edd5cb 100644 --- a/drivers/iio/adc/ti-ads8688.c +++ b/drivers/iio/adc/ti-ads8688.c | |||
@@ -41,6 +41,7 @@ | |||
41 | 41 | ||
42 | #define ADS8688_VREF_MV 4096 | 42 | #define ADS8688_VREF_MV 4096 |
43 | #define ADS8688_REALBITS 16 | 43 | #define ADS8688_REALBITS 16 |
44 | #define ADS8688_MAX_CHANNELS 8 | ||
44 | 45 | ||
45 | /* | 46 | /* |
46 | * enum ads8688_range - ADS8688 reference voltage range | 47 | * enum ads8688_range - ADS8688 reference voltage range |
@@ -385,7 +386,7 @@ static irqreturn_t ads8688_trigger_handler(int irq, void *p) | |||
385 | { | 386 | { |
386 | struct iio_poll_func *pf = p; | 387 | struct iio_poll_func *pf = p; |
387 | struct iio_dev *indio_dev = pf->indio_dev; | 388 | struct iio_dev *indio_dev = pf->indio_dev; |
388 | u16 buffer[8]; | 389 | u16 buffer[ADS8688_MAX_CHANNELS + sizeof(s64)/sizeof(u16)]; |
389 | int i, j = 0; | 390 | int i, j = 0; |
390 | 391 | ||
391 | for (i = 0; i < indio_dev->masklength; i++) { | 392 | for (i = 0; i < indio_dev->masklength; i++) { |
diff --git a/drivers/iio/chemical/atlas-ph-sensor.c b/drivers/iio/chemical/atlas-ph-sensor.c index a406ad31b096..3a20cb5d9bff 100644 --- a/drivers/iio/chemical/atlas-ph-sensor.c +++ b/drivers/iio/chemical/atlas-ph-sensor.c | |||
@@ -444,9 +444,8 @@ static int atlas_read_raw(struct iio_dev *indio_dev, | |||
444 | case IIO_CHAN_INFO_SCALE: | 444 | case IIO_CHAN_INFO_SCALE: |
445 | switch (chan->type) { | 445 | switch (chan->type) { |
446 | case IIO_TEMP: | 446 | case IIO_TEMP: |
447 | *val = 1; /* 0.01 */ | 447 | *val = 10; |
448 | *val2 = 100; | 448 | return IIO_VAL_INT; |
449 | break; | ||
450 | case IIO_PH: | 449 | case IIO_PH: |
451 | *val = 1; /* 0.001 */ | 450 | *val = 1; /* 0.001 */ |
452 | *val2 = 1000; | 451 | *val2 = 1000; |
@@ -477,7 +476,7 @@ static int atlas_write_raw(struct iio_dev *indio_dev, | |||
477 | int val, int val2, long mask) | 476 | int val, int val2, long mask) |
478 | { | 477 | { |
479 | struct atlas_data *data = iio_priv(indio_dev); | 478 | struct atlas_data *data = iio_priv(indio_dev); |
480 | __be32 reg = cpu_to_be32(val); | 479 | __be32 reg = cpu_to_be32(val / 10); |
481 | 480 | ||
482 | if (val2 != 0 || val < 0 || val > 20000) | 481 | if (val2 != 0 || val < 0 || val > 20000) |
483 | return -EINVAL; | 482 | return -EINVAL; |
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h index 3cd830d52967..616734313f0c 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h | |||
@@ -267,7 +267,6 @@ static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, | |||
267 | #endif | 267 | #endif |
268 | 268 | ||
269 | struct ib_device *ib_device_get_by_index(u32 ifindex); | 269 | struct ib_device *ib_device_get_by_index(u32 ifindex); |
270 | void ib_device_put(struct ib_device *device); | ||
271 | /* RDMA device netlink */ | 270 | /* RDMA device netlink */ |
272 | void nldev_init(void); | 271 | void nldev_init(void); |
273 | void nldev_exit(void); | 272 | void nldev_exit(void); |
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 8872453e26c0..238ec42778ef 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
@@ -156,19 +156,26 @@ struct ib_device *ib_device_get_by_index(u32 index) | |||
156 | down_read(&lists_rwsem); | 156 | down_read(&lists_rwsem); |
157 | device = __ib_device_get_by_index(index); | 157 | device = __ib_device_get_by_index(index); |
158 | if (device) { | 158 | if (device) { |
159 | /* Do not return a device if unregistration has started. */ | 159 | if (!ib_device_try_get(device)) |
160 | if (!refcount_inc_not_zero(&device->refcount)) | ||
161 | device = NULL; | 160 | device = NULL; |
162 | } | 161 | } |
163 | up_read(&lists_rwsem); | 162 | up_read(&lists_rwsem); |
164 | return device; | 163 | return device; |
165 | } | 164 | } |
166 | 165 | ||
166 | /** | ||
167 | * ib_device_put - Release IB device reference | ||
168 | * @device: device whose reference to be released | ||
169 | * | ||
170 | * ib_device_put() releases reference to the IB device to allow it to be | ||
171 | * unregistered and eventually free. | ||
172 | */ | ||
167 | void ib_device_put(struct ib_device *device) | 173 | void ib_device_put(struct ib_device *device) |
168 | { | 174 | { |
169 | if (refcount_dec_and_test(&device->refcount)) | 175 | if (refcount_dec_and_test(&device->refcount)) |
170 | complete(&device->unreg_completion); | 176 | complete(&device->unreg_completion); |
171 | } | 177 | } |
178 | EXPORT_SYMBOL(ib_device_put); | ||
172 | 179 | ||
173 | static struct ib_device *__ib_device_get_by_name(const char *name) | 180 | static struct ib_device *__ib_device_get_by_name(const char *name) |
174 | { | 181 | { |
@@ -303,7 +310,6 @@ struct ib_device *ib_alloc_device(size_t size) | |||
303 | rwlock_init(&device->client_data_lock); | 310 | rwlock_init(&device->client_data_lock); |
304 | INIT_LIST_HEAD(&device->client_data_list); | 311 | INIT_LIST_HEAD(&device->client_data_list); |
305 | INIT_LIST_HEAD(&device->port_list); | 312 | INIT_LIST_HEAD(&device->port_list); |
306 | refcount_set(&device->refcount, 1); | ||
307 | init_completion(&device->unreg_completion); | 313 | init_completion(&device->unreg_completion); |
308 | 314 | ||
309 | return device; | 315 | return device; |
@@ -620,6 +626,7 @@ int ib_register_device(struct ib_device *device, const char *name, | |||
620 | goto cg_cleanup; | 626 | goto cg_cleanup; |
621 | } | 627 | } |
622 | 628 | ||
629 | refcount_set(&device->refcount, 1); | ||
623 | device->reg_state = IB_DEV_REGISTERED; | 630 | device->reg_state = IB_DEV_REGISTERED; |
624 | 631 | ||
625 | list_for_each_entry(client, &client_list, list) | 632 | list_for_each_entry(client, &client_list, list) |
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index a4ec43093cb3..acb882f279cb 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c | |||
@@ -352,6 +352,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm, | |||
352 | umem->writable = 1; | 352 | umem->writable = 1; |
353 | umem->is_odp = 1; | 353 | umem->is_odp = 1; |
354 | odp_data->per_mm = per_mm; | 354 | odp_data->per_mm = per_mm; |
355 | umem->owning_mm = per_mm->mm; | ||
356 | mmgrab(umem->owning_mm); | ||
355 | 357 | ||
356 | mutex_init(&odp_data->umem_mutex); | 358 | mutex_init(&odp_data->umem_mutex); |
357 | init_completion(&odp_data->notifier_completion); | 359 | init_completion(&odp_data->notifier_completion); |
@@ -384,6 +386,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm, | |||
384 | out_page_list: | 386 | out_page_list: |
385 | vfree(odp_data->page_list); | 387 | vfree(odp_data->page_list); |
386 | out_odp_data: | 388 | out_odp_data: |
389 | mmdrop(umem->owning_mm); | ||
387 | kfree(odp_data); | 390 | kfree(odp_data); |
388 | return ERR_PTR(ret); | 391 | return ERR_PTR(ret); |
389 | } | 392 | } |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 2890a77339e1..5f366838b7ff 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -204,6 +204,9 @@ void ib_uverbs_release_file(struct kref *ref) | |||
204 | if (atomic_dec_and_test(&file->device->refcount)) | 204 | if (atomic_dec_and_test(&file->device->refcount)) |
205 | ib_uverbs_comp_dev(file->device); | 205 | ib_uverbs_comp_dev(file->device); |
206 | 206 | ||
207 | if (file->async_file) | ||
208 | kref_put(&file->async_file->ref, | ||
209 | ib_uverbs_release_async_event_file); | ||
207 | put_device(&file->device->dev); | 210 | put_device(&file->device->dev); |
208 | kfree(file); | 211 | kfree(file); |
209 | } | 212 | } |
@@ -964,11 +967,19 @@ void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile) | |||
964 | 967 | ||
965 | /* Get an arbitrary mm pointer that hasn't been cleaned yet */ | 968 | /* Get an arbitrary mm pointer that hasn't been cleaned yet */ |
966 | mutex_lock(&ufile->umap_lock); | 969 | mutex_lock(&ufile->umap_lock); |
967 | if (!list_empty(&ufile->umaps)) { | 970 | while (!list_empty(&ufile->umaps)) { |
968 | mm = list_first_entry(&ufile->umaps, | 971 | int ret; |
969 | struct rdma_umap_priv, list) | 972 | |
970 | ->vma->vm_mm; | 973 | priv = list_first_entry(&ufile->umaps, |
971 | mmget(mm); | 974 | struct rdma_umap_priv, list); |
975 | mm = priv->vma->vm_mm; | ||
976 | ret = mmget_not_zero(mm); | ||
977 | if (!ret) { | ||
978 | list_del_init(&priv->list); | ||
979 | mm = NULL; | ||
980 | continue; | ||
981 | } | ||
982 | break; | ||
972 | } | 983 | } |
973 | mutex_unlock(&ufile->umap_lock); | 984 | mutex_unlock(&ufile->umap_lock); |
974 | if (!mm) | 985 | if (!mm) |
@@ -1096,10 +1107,6 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp) | |||
1096 | list_del_init(&file->list); | 1107 | list_del_init(&file->list); |
1097 | mutex_unlock(&file->device->lists_mutex); | 1108 | mutex_unlock(&file->device->lists_mutex); |
1098 | 1109 | ||
1099 | if (file->async_file) | ||
1100 | kref_put(&file->async_file->ref, | ||
1101 | ib_uverbs_release_async_event_file); | ||
1102 | |||
1103 | kref_put(&file->ref, ib_uverbs_release_file); | 1110 | kref_put(&file->ref, ib_uverbs_release_file); |
1104 | 1111 | ||
1105 | return 0; | 1112 | return 0; |
diff --git a/drivers/infiniband/core/uverbs_std_types_device.c b/drivers/infiniband/core/uverbs_std_types_device.c index 5030ec480370..2a3f2f01028d 100644 --- a/drivers/infiniband/core/uverbs_std_types_device.c +++ b/drivers/infiniband/core/uverbs_std_types_device.c | |||
@@ -168,12 +168,18 @@ void copy_port_attr_to_resp(struct ib_port_attr *attr, | |||
168 | static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)( | 168 | static int UVERBS_HANDLER(UVERBS_METHOD_QUERY_PORT)( |
169 | struct uverbs_attr_bundle *attrs) | 169 | struct uverbs_attr_bundle *attrs) |
170 | { | 170 | { |
171 | struct ib_device *ib_dev = attrs->ufile->device->ib_dev; | 171 | struct ib_device *ib_dev; |
172 | struct ib_port_attr attr = {}; | 172 | struct ib_port_attr attr = {}; |
173 | struct ib_uverbs_query_port_resp_ex resp = {}; | 173 | struct ib_uverbs_query_port_resp_ex resp = {}; |
174 | struct ib_ucontext *ucontext; | ||
174 | int ret; | 175 | int ret; |
175 | u8 port_num; | 176 | u8 port_num; |
176 | 177 | ||
178 | ucontext = ib_uverbs_get_ucontext(attrs); | ||
179 | if (IS_ERR(ucontext)) | ||
180 | return PTR_ERR(ucontext); | ||
181 | ib_dev = ucontext->device; | ||
182 | |||
177 | /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */ | 183 | /* FIXME: Extend the UAPI_DEF_OBJ_NEEDS_FN stuff.. */ |
178 | if (!ib_dev->ops.query_port) | 184 | if (!ib_dev->ops.query_port) |
179 | return -EOPNOTSUPP; | 185 | return -EOPNOTSUPP; |
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index c22ebc774a6a..f9a7e9d29c8b 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
@@ -488,7 +488,7 @@ static int hfi1_file_mmap(struct file *fp, struct vm_area_struct *vma) | |||
488 | vmf = 1; | 488 | vmf = 1; |
489 | break; | 489 | break; |
490 | case STATUS: | 490 | case STATUS: |
491 | if (flags & (unsigned long)(VM_WRITE | VM_EXEC)) { | 491 | if (flags & VM_WRITE) { |
492 | ret = -EPERM; | 492 | ret = -EPERM; |
493 | goto done; | 493 | goto done; |
494 | } | 494 | } |
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 88242fe95eaa..bf96067876c9 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c | |||
@@ -987,7 +987,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet) | |||
987 | opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { | 987 | opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { |
988 | wc.ex.imm_data = packet->ohdr->u.ud.imm_data; | 988 | wc.ex.imm_data = packet->ohdr->u.ud.imm_data; |
989 | wc.wc_flags = IB_WC_WITH_IMM; | 989 | wc.wc_flags = IB_WC_WITH_IMM; |
990 | tlen -= sizeof(u32); | ||
991 | } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { | 990 | } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { |
992 | wc.ex.imm_data = 0; | 991 | wc.ex.imm_data = 0; |
993 | wc.wc_flags = 0; | 992 | wc.wc_flags = 0; |
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c index 960b1946c365..12deacf442cf 100644 --- a/drivers/infiniband/hw/hns/hns_roce_srq.c +++ b/drivers/infiniband/hw/hns/hns_roce_srq.c | |||
@@ -210,6 +210,7 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, | |||
210 | struct ib_udata *udata) | 210 | struct ib_udata *udata) |
211 | { | 211 | { |
212 | struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); | 212 | struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); |
213 | struct hns_roce_ib_create_srq_resp resp = {}; | ||
213 | struct hns_roce_srq *srq; | 214 | struct hns_roce_srq *srq; |
214 | int srq_desc_size; | 215 | int srq_desc_size; |
215 | int srq_buf_size; | 216 | int srq_buf_size; |
@@ -378,16 +379,21 @@ struct ib_srq *hns_roce_create_srq(struct ib_pd *pd, | |||
378 | 379 | ||
379 | srq->event = hns_roce_ib_srq_event; | 380 | srq->event = hns_roce_ib_srq_event; |
380 | srq->ibsrq.ext.xrc.srq_num = srq->srqn; | 381 | srq->ibsrq.ext.xrc.srq_num = srq->srqn; |
382 | resp.srqn = srq->srqn; | ||
381 | 383 | ||
382 | if (udata) { | 384 | if (udata) { |
383 | if (ib_copy_to_udata(udata, &srq->srqn, sizeof(__u32))) { | 385 | if (ib_copy_to_udata(udata, &resp, |
386 | min(udata->outlen, sizeof(resp)))) { | ||
384 | ret = -EFAULT; | 387 | ret = -EFAULT; |
385 | goto err_wrid; | 388 | goto err_srqc_alloc; |
386 | } | 389 | } |
387 | } | 390 | } |
388 | 391 | ||
389 | return &srq->ibsrq; | 392 | return &srq->ibsrq; |
390 | 393 | ||
394 | err_srqc_alloc: | ||
395 | hns_roce_srq_free(hr_dev, srq); | ||
396 | |||
391 | err_wrid: | 397 | err_wrid: |
392 | kvfree(srq->wrid); | 398 | kvfree(srq->wrid); |
393 | 399 | ||
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 25439da8976c..936ee1314bcd 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c | |||
@@ -1411,7 +1411,7 @@ int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port, | |||
1411 | 1411 | ||
1412 | sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); | 1412 | sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr); |
1413 | if (sqp->tx_ring[wire_tx_ix].ah) | 1413 | if (sqp->tx_ring[wire_tx_ix].ah) |
1414 | rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); | 1414 | mlx4_ib_destroy_ah(sqp->tx_ring[wire_tx_ix].ah, 0); |
1415 | sqp->tx_ring[wire_tx_ix].ah = ah; | 1415 | sqp->tx_ring[wire_tx_ix].ah = ah; |
1416 | ib_dma_sync_single_for_cpu(&dev->ib_dev, | 1416 | ib_dma_sync_single_for_cpu(&dev->ib_dev, |
1417 | sqp->tx_ring[wire_tx_ix].buf.map, | 1417 | sqp->tx_ring[wire_tx_ix].buf.map, |
@@ -1902,7 +1902,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) | |||
1902 | if (wc.status == IB_WC_SUCCESS) { | 1902 | if (wc.status == IB_WC_SUCCESS) { |
1903 | switch (wc.opcode) { | 1903 | switch (wc.opcode) { |
1904 | case IB_WC_SEND: | 1904 | case IB_WC_SEND: |
1905 | rdma_destroy_ah(sqp->tx_ring[wc.wr_id & | 1905 | mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & |
1906 | (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); | 1906 | (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); |
1907 | sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah | 1907 | sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah |
1908 | = NULL; | 1908 | = NULL; |
@@ -1931,7 +1931,7 @@ static void mlx4_ib_sqp_comp_worker(struct work_struct *work) | |||
1931 | " status = %d, wrid = 0x%llx\n", | 1931 | " status = %d, wrid = 0x%llx\n", |
1932 | ctx->slave, wc.status, wc.wr_id); | 1932 | ctx->slave, wc.status, wc.wr_id); |
1933 | if (!MLX4_TUN_IS_RECV(wc.wr_id)) { | 1933 | if (!MLX4_TUN_IS_RECV(wc.wr_id)) { |
1934 | rdma_destroy_ah(sqp->tx_ring[wc.wr_id & | 1934 | mlx4_ib_destroy_ah(sqp->tx_ring[wc.wr_id & |
1935 | (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); | 1935 | (MLX4_NUM_TUNNEL_BUFS - 1)].ah, 0); |
1936 | sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah | 1936 | sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah |
1937 | = NULL; | 1937 | = NULL; |
diff --git a/drivers/infiniband/hw/mlx5/flow.c b/drivers/infiniband/hw/mlx5/flow.c index e8a1e4498e3f..798591a18484 100644 --- a/drivers/infiniband/hw/mlx5/flow.c +++ b/drivers/infiniband/hw/mlx5/flow.c | |||
@@ -630,8 +630,7 @@ const struct uapi_definition mlx5_ib_flow_defs[] = { | |||
630 | UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), | 630 | UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), |
631 | UAPI_DEF_CHAIN_OBJ_TREE( | 631 | UAPI_DEF_CHAIN_OBJ_TREE( |
632 | UVERBS_OBJECT_FLOW, | 632 | UVERBS_OBJECT_FLOW, |
633 | &mlx5_ib_fs, | 633 | &mlx5_ib_fs), |
634 | UAPI_DEF_IS_OBJ_SUPPORTED(flow_is_supported)), | ||
635 | UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, | 634 | UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_FLOW_ACTION, |
636 | &mlx5_ib_flow_actions), | 635 | &mlx5_ib_flow_actions), |
637 | {}, | 636 | {}, |
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index 01e0f6200631..4ee32964e1dd 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c | |||
@@ -1595,10 +1595,12 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *work) | |||
1595 | struct prefetch_mr_work *w = | 1595 | struct prefetch_mr_work *w = |
1596 | container_of(work, struct prefetch_mr_work, work); | 1596 | container_of(work, struct prefetch_mr_work, work); |
1597 | 1597 | ||
1598 | if (w->dev->ib_dev.reg_state == IB_DEV_REGISTERED) | 1598 | if (ib_device_try_get(&w->dev->ib_dev)) { |
1599 | mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list, | 1599 | mlx5_ib_prefetch_sg_list(w->dev, w->pf_flags, w->sg_list, |
1600 | w->num_sge); | 1600 | w->num_sge); |
1601 | 1601 | ib_device_put(&w->dev->ib_dev); | |
1602 | } | ||
1603 | put_device(&w->dev->ib_dev.dev); | ||
1602 | kfree(w); | 1604 | kfree(w); |
1603 | } | 1605 | } |
1604 | 1606 | ||
@@ -1617,15 +1619,13 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, | |||
1617 | return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list, | 1619 | return mlx5_ib_prefetch_sg_list(dev, pf_flags, sg_list, |
1618 | num_sge); | 1620 | num_sge); |
1619 | 1621 | ||
1620 | if (dev->ib_dev.reg_state != IB_DEV_REGISTERED) | ||
1621 | return -ENODEV; | ||
1622 | |||
1623 | work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL); | 1622 | work = kvzalloc(struct_size(work, sg_list, num_sge), GFP_KERNEL); |
1624 | if (!work) | 1623 | if (!work) |
1625 | return -ENOMEM; | 1624 | return -ENOMEM; |
1626 | 1625 | ||
1627 | memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge)); | 1626 | memcpy(work->sg_list, sg_list, num_sge * sizeof(struct ib_sge)); |
1628 | 1627 | ||
1628 | get_device(&dev->ib_dev.dev); | ||
1629 | work->dev = dev; | 1629 | work->dev = dev; |
1630 | work->pf_flags = pf_flags; | 1630 | work->pf_flags = pf_flags; |
1631 | work->num_sge = num_sge; | 1631 | work->num_sge = num_sge; |
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index dd2ae640bc84..7db778d96ef5 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -1912,14 +1912,16 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd, | |||
1912 | } | 1912 | } |
1913 | 1913 | ||
1914 | if (!check_flags_mask(ucmd.flags, | 1914 | if (!check_flags_mask(ucmd.flags, |
1915 | MLX5_QP_FLAG_ALLOW_SCATTER_CQE | | ||
1916 | MLX5_QP_FLAG_BFREG_INDEX | | ||
1917 | MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE | | ||
1918 | MLX5_QP_FLAG_SCATTER_CQE | | ||
1915 | MLX5_QP_FLAG_SIGNATURE | | 1919 | MLX5_QP_FLAG_SIGNATURE | |
1916 | MLX5_QP_FLAG_SCATTER_CQE | | 1920 | MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC | |
1917 | MLX5_QP_FLAG_TUNNEL_OFFLOADS | | 1921 | MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC | |
1918 | MLX5_QP_FLAG_BFREG_INDEX | | 1922 | MLX5_QP_FLAG_TUNNEL_OFFLOADS | |
1919 | MLX5_QP_FLAG_TYPE_DCT | | 1923 | MLX5_QP_FLAG_TYPE_DCI | |
1920 | MLX5_QP_FLAG_TYPE_DCI | | 1924 | MLX5_QP_FLAG_TYPE_DCT)) |
1921 | MLX5_QP_FLAG_ALLOW_SCATTER_CQE | | ||
1922 | MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE)) | ||
1923 | return -EINVAL; | 1925 | return -EINVAL; |
1924 | 1926 | ||
1925 | err = get_qp_user_index(to_mucontext(pd->uobject->context), | 1927 | err = get_qp_user_index(to_mucontext(pd->uobject->context), |
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index 868da0ece7ba..445ea19a2ec8 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c | |||
@@ -512,7 +512,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr, | |||
512 | opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { | 512 | opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) { |
513 | wc.ex.imm_data = ohdr->u.ud.imm_data; | 513 | wc.ex.imm_data = ohdr->u.ud.imm_data; |
514 | wc.wc_flags = IB_WC_WITH_IMM; | 514 | wc.wc_flags = IB_WC_WITH_IMM; |
515 | tlen -= sizeof(u32); | ||
516 | } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { | 515 | } else if (opcode == IB_OPCODE_UD_SEND_ONLY) { |
517 | wc.ex.imm_data = 0; | 516 | wc.ex.imm_data = 0; |
518 | wc.wc_flags = 0; | 517 | wc.wc_flags = 0; |
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index a1bd8cfc2c25..c6cc3e4ab71d 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c | |||
@@ -2910,6 +2910,8 @@ send: | |||
2910 | goto op_err; | 2910 | goto op_err; |
2911 | if (!ret) | 2911 | if (!ret) |
2912 | goto rnr_nak; | 2912 | goto rnr_nak; |
2913 | if (wqe->length > qp->r_len) | ||
2914 | goto inv_err; | ||
2913 | break; | 2915 | break; |
2914 | 2916 | ||
2915 | case IB_WR_RDMA_WRITE_WITH_IMM: | 2917 | case IB_WR_RDMA_WRITE_WITH_IMM: |
@@ -3078,7 +3080,10 @@ op_err: | |||
3078 | goto err; | 3080 | goto err; |
3079 | 3081 | ||
3080 | inv_err: | 3082 | inv_err: |
3081 | send_status = IB_WC_REM_INV_REQ_ERR; | 3083 | send_status = |
3084 | sqp->ibqp.qp_type == IB_QPT_RC ? | ||
3085 | IB_WC_REM_INV_REQ_ERR : | ||
3086 | IB_WC_SUCCESS; | ||
3082 | wc.status = IB_WC_LOC_QP_OP_ERR; | 3087 | wc.status = IB_WC_LOC_QP_OP_ERR; |
3083 | goto err; | 3088 | goto err; |
3084 | 3089 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index 1da119d901a9..73e808c1e6ad 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -248,7 +248,6 @@ struct ipoib_cm_tx { | |||
248 | struct list_head list; | 248 | struct list_head list; |
249 | struct net_device *dev; | 249 | struct net_device *dev; |
250 | struct ipoib_neigh *neigh; | 250 | struct ipoib_neigh *neigh; |
251 | struct ipoib_path *path; | ||
252 | struct ipoib_tx_buf *tx_ring; | 251 | struct ipoib_tx_buf *tx_ring; |
253 | unsigned int tx_head; | 252 | unsigned int tx_head; |
254 | unsigned int tx_tail; | 253 | unsigned int tx_tail; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 0428e01e8f69..aa9dcfc36cd3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path | |||
1312 | 1312 | ||
1313 | neigh->cm = tx; | 1313 | neigh->cm = tx; |
1314 | tx->neigh = neigh; | 1314 | tx->neigh = neigh; |
1315 | tx->path = path; | ||
1316 | tx->dev = dev; | 1315 | tx->dev = dev; |
1317 | list_add(&tx->list, &priv->cm.start_list); | 1316 | list_add(&tx->list, &priv->cm.start_list); |
1318 | set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); | 1317 | set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); |
@@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work) | |||
1371 | neigh->daddr + QPN_AND_OPTIONS_OFFSET); | 1370 | neigh->daddr + QPN_AND_OPTIONS_OFFSET); |
1372 | goto free_neigh; | 1371 | goto free_neigh; |
1373 | } | 1372 | } |
1374 | memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec)); | 1373 | memcpy(&pathrec, &path->pathrec, sizeof(pathrec)); |
1375 | 1374 | ||
1376 | spin_unlock_irqrestore(&priv->lock, flags); | 1375 | spin_unlock_irqrestore(&priv->lock, flags); |
1377 | netif_tx_unlock_bh(dev); | 1376 | netif_tx_unlock_bh(dev); |
diff --git a/drivers/input/serio/olpc_apsp.c b/drivers/input/serio/olpc_apsp.c index bae08226e3d9..a7cfab3db9ee 100644 --- a/drivers/input/serio/olpc_apsp.c +++ b/drivers/input/serio/olpc_apsp.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/of.h> | 23 | #include <linux/of.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/delay.h> | 25 | #include <linux/delay.h> |
26 | #include <linux/clk.h> | ||
27 | 26 | ||
28 | /* | 27 | /* |
29 | * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller. | 28 | * The OLPC XO-1.75 and XO-4 laptops do not have a hardware PS/2 controller. |
@@ -75,7 +74,6 @@ struct olpc_apsp { | |||
75 | struct serio *kbio; | 74 | struct serio *kbio; |
76 | struct serio *padio; | 75 | struct serio *padio; |
77 | void __iomem *base; | 76 | void __iomem *base; |
78 | struct clk *clk; | ||
79 | int open_count; | 77 | int open_count; |
80 | int irq; | 78 | int irq; |
81 | }; | 79 | }; |
@@ -148,17 +146,11 @@ static int olpc_apsp_open(struct serio *port) | |||
148 | struct olpc_apsp *priv = port->port_data; | 146 | struct olpc_apsp *priv = port->port_data; |
149 | unsigned int tmp; | 147 | unsigned int tmp; |
150 | unsigned long l; | 148 | unsigned long l; |
151 | int error; | ||
152 | 149 | ||
153 | if (priv->open_count++ == 0) { | 150 | if (priv->open_count++ == 0) { |
154 | error = clk_prepare_enable(priv->clk); | ||
155 | if (error) | ||
156 | return error; | ||
157 | |||
158 | l = readl(priv->base + COMMAND_FIFO_STATUS); | 151 | l = readl(priv->base + COMMAND_FIFO_STATUS); |
159 | if (!(l & CMD_STS_MASK)) { | 152 | if (!(l & CMD_STS_MASK)) { |
160 | dev_err(priv->dev, "SP cannot accept commands.\n"); | 153 | dev_err(priv->dev, "SP cannot accept commands.\n"); |
161 | clk_disable_unprepare(priv->clk); | ||
162 | return -EIO; | 154 | return -EIO; |
163 | } | 155 | } |
164 | 156 | ||
@@ -179,8 +171,6 @@ static void olpc_apsp_close(struct serio *port) | |||
179 | /* Disable interrupt 0 */ | 171 | /* Disable interrupt 0 */ |
180 | tmp = readl(priv->base + PJ_INTERRUPT_MASK); | 172 | tmp = readl(priv->base + PJ_INTERRUPT_MASK); |
181 | writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK); | 173 | writel(tmp | INT_0, priv->base + PJ_INTERRUPT_MASK); |
182 | |||
183 | clk_disable_unprepare(priv->clk); | ||
184 | } | 174 | } |
185 | } | 175 | } |
186 | 176 | ||
@@ -208,10 +198,6 @@ static int olpc_apsp_probe(struct platform_device *pdev) | |||
208 | if (priv->irq < 0) | 198 | if (priv->irq < 0) |
209 | return priv->irq; | 199 | return priv->irq; |
210 | 200 | ||
211 | priv->clk = devm_clk_get(&pdev->dev, "sp"); | ||
212 | if (IS_ERR(priv->clk)) | ||
213 | return PTR_ERR(priv->clk); | ||
214 | |||
215 | /* KEYBOARD */ | 201 | /* KEYBOARD */ |
216 | kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL); | 202 | kb_serio = kzalloc(sizeof(struct serio), GFP_KERNEL); |
217 | if (!kb_serio) | 203 | if (!kb_serio) |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 87ba23a75b38..2a7b78bb98b4 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -1991,16 +1991,13 @@ static void do_attach(struct iommu_dev_data *dev_data, | |||
1991 | 1991 | ||
1992 | static void do_detach(struct iommu_dev_data *dev_data) | 1992 | static void do_detach(struct iommu_dev_data *dev_data) |
1993 | { | 1993 | { |
1994 | struct protection_domain *domain = dev_data->domain; | ||
1994 | struct amd_iommu *iommu; | 1995 | struct amd_iommu *iommu; |
1995 | u16 alias; | 1996 | u16 alias; |
1996 | 1997 | ||
1997 | iommu = amd_iommu_rlookup_table[dev_data->devid]; | 1998 | iommu = amd_iommu_rlookup_table[dev_data->devid]; |
1998 | alias = dev_data->alias; | 1999 | alias = dev_data->alias; |
1999 | 2000 | ||
2000 | /* decrease reference counters */ | ||
2001 | dev_data->domain->dev_iommu[iommu->index] -= 1; | ||
2002 | dev_data->domain->dev_cnt -= 1; | ||
2003 | |||
2004 | /* Update data structures */ | 2001 | /* Update data structures */ |
2005 | dev_data->domain = NULL; | 2002 | dev_data->domain = NULL; |
2006 | list_del(&dev_data->list); | 2003 | list_del(&dev_data->list); |
@@ -2010,6 +2007,16 @@ static void do_detach(struct iommu_dev_data *dev_data) | |||
2010 | 2007 | ||
2011 | /* Flush the DTE entry */ | 2008 | /* Flush the DTE entry */ |
2012 | device_flush_dte(dev_data); | 2009 | device_flush_dte(dev_data); |
2010 | |||
2011 | /* Flush IOTLB */ | ||
2012 | domain_flush_tlb_pde(domain); | ||
2013 | |||
2014 | /* Wait for the flushes to finish */ | ||
2015 | domain_flush_complete(domain); | ||
2016 | |||
2017 | /* decrease reference counters - needs to happen after the flushes */ | ||
2018 | domain->dev_iommu[iommu->index] -= 1; | ||
2019 | domain->dev_cnt -= 1; | ||
2013 | } | 2020 | } |
2014 | 2021 | ||
2015 | /* | 2022 | /* |
@@ -2617,13 +2624,13 @@ out_unmap: | |||
2617 | bus_addr = address + s->dma_address + (j << PAGE_SHIFT); | 2624 | bus_addr = address + s->dma_address + (j << PAGE_SHIFT); |
2618 | iommu_unmap_page(domain, bus_addr, PAGE_SIZE); | 2625 | iommu_unmap_page(domain, bus_addr, PAGE_SIZE); |
2619 | 2626 | ||
2620 | if (--mapped_pages) | 2627 | if (--mapped_pages == 0) |
2621 | goto out_free_iova; | 2628 | goto out_free_iova; |
2622 | } | 2629 | } |
2623 | } | 2630 | } |
2624 | 2631 | ||
2625 | out_free_iova: | 2632 | out_free_iova: |
2626 | free_iova_fast(&dma_dom->iovad, address, npages); | 2633 | free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages); |
2627 | 2634 | ||
2628 | out_err: | 2635 | out_err: |
2629 | return 0; | 2636 | return 0; |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 2bd9ac285c0d..78188bf7e90d 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -363,7 +363,7 @@ static int dmar_map_gfx = 1; | |||
363 | static int dmar_forcedac; | 363 | static int dmar_forcedac; |
364 | static int intel_iommu_strict; | 364 | static int intel_iommu_strict; |
365 | static int intel_iommu_superpage = 1; | 365 | static int intel_iommu_superpage = 1; |
366 | static int intel_iommu_sm = 1; | 366 | static int intel_iommu_sm; |
367 | static int iommu_identity_mapping; | 367 | static int iommu_identity_mapping; |
368 | 368 | ||
369 | #define IDENTMAP_ALL 1 | 369 | #define IDENTMAP_ALL 1 |
@@ -456,9 +456,9 @@ static int __init intel_iommu_setup(char *str) | |||
456 | } else if (!strncmp(str, "sp_off", 6)) { | 456 | } else if (!strncmp(str, "sp_off", 6)) { |
457 | pr_info("Disable supported super page\n"); | 457 | pr_info("Disable supported super page\n"); |
458 | intel_iommu_superpage = 0; | 458 | intel_iommu_superpage = 0; |
459 | } else if (!strncmp(str, "sm_off", 6)) { | 459 | } else if (!strncmp(str, "sm_on", 5)) { |
460 | pr_info("Intel-IOMMU: disable scalable mode support\n"); | 460 | pr_info("Intel-IOMMU: scalable mode supported\n"); |
461 | intel_iommu_sm = 0; | 461 | intel_iommu_sm = 1; |
462 | } else if (!strncmp(str, "tboot_noforce", 13)) { | 462 | } else if (!strncmp(str, "tboot_noforce", 13)) { |
463 | printk(KERN_INFO | 463 | printk(KERN_INFO |
464 | "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); | 464 | "Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n"); |
@@ -5294,7 +5294,7 @@ static void intel_iommu_put_resv_regions(struct device *dev, | |||
5294 | struct iommu_resv_region *entry, *next; | 5294 | struct iommu_resv_region *entry, *next; |
5295 | 5295 | ||
5296 | list_for_each_entry_safe(entry, next, head, list) { | 5296 | list_for_each_entry_safe(entry, next, head, list) { |
5297 | if (entry->type == IOMMU_RESV_RESERVED) | 5297 | if (entry->type == IOMMU_RESV_MSI) |
5298 | kfree(entry); | 5298 | kfree(entry); |
5299 | } | 5299 | } |
5300 | } | 5300 | } |
diff --git a/drivers/iommu/mtk_iommu_v1.c b/drivers/iommu/mtk_iommu_v1.c index 730f7dabcf37..7e0df67bd3e9 100644 --- a/drivers/iommu/mtk_iommu_v1.c +++ b/drivers/iommu/mtk_iommu_v1.c | |||
@@ -441,6 +441,10 @@ static int mtk_iommu_add_device(struct device *dev) | |||
441 | iommu_spec.args_count = count; | 441 | iommu_spec.args_count = count; |
442 | 442 | ||
443 | mtk_iommu_create_mapping(dev, &iommu_spec); | 443 | mtk_iommu_create_mapping(dev, &iommu_spec); |
444 | |||
445 | /* dev->iommu_fwspec might have changed */ | ||
446 | fwspec = dev_iommu_fwspec_get(dev); | ||
447 | |||
444 | of_node_put(iommu_spec.np); | 448 | of_node_put(iommu_spec.np); |
445 | } | 449 | } |
446 | 450 | ||
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 7f2a45445b00..c3aba3fc818d 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -97,9 +97,14 @@ struct its_device; | |||
97 | * The ITS structure - contains most of the infrastructure, with the | 97 | * The ITS structure - contains most of the infrastructure, with the |
98 | * top-level MSI domain, the command queue, the collections, and the | 98 | * top-level MSI domain, the command queue, the collections, and the |
99 | * list of devices writing to it. | 99 | * list of devices writing to it. |
100 | * | ||
101 | * dev_alloc_lock has to be taken for device allocations, while the | ||
102 | * spinlock must be taken to parse data structures such as the device | ||
103 | * list. | ||
100 | */ | 104 | */ |
101 | struct its_node { | 105 | struct its_node { |
102 | raw_spinlock_t lock; | 106 | raw_spinlock_t lock; |
107 | struct mutex dev_alloc_lock; | ||
103 | struct list_head entry; | 108 | struct list_head entry; |
104 | void __iomem *base; | 109 | void __iomem *base; |
105 | phys_addr_t phys_base; | 110 | phys_addr_t phys_base; |
@@ -156,6 +161,7 @@ struct its_device { | |||
156 | void *itt; | 161 | void *itt; |
157 | u32 nr_ites; | 162 | u32 nr_ites; |
158 | u32 device_id; | 163 | u32 device_id; |
164 | bool shared; | ||
159 | }; | 165 | }; |
160 | 166 | ||
161 | static struct { | 167 | static struct { |
@@ -1580,6 +1586,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) | |||
1580 | nr_irqs /= 2; | 1586 | nr_irqs /= 2; |
1581 | } while (nr_irqs > 0); | 1587 | } while (nr_irqs > 0); |
1582 | 1588 | ||
1589 | if (!nr_irqs) | ||
1590 | err = -ENOSPC; | ||
1591 | |||
1583 | if (err) | 1592 | if (err) |
1584 | goto out; | 1593 | goto out; |
1585 | 1594 | ||
@@ -2059,6 +2068,29 @@ static int __init allocate_lpi_tables(void) | |||
2059 | return 0; | 2068 | return 0; |
2060 | } | 2069 | } |
2061 | 2070 | ||
2071 | static u64 its_clear_vpend_valid(void __iomem *vlpi_base) | ||
2072 | { | ||
2073 | u32 count = 1000000; /* 1s! */ | ||
2074 | bool clean; | ||
2075 | u64 val; | ||
2076 | |||
2077 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | ||
2078 | val &= ~GICR_VPENDBASER_Valid; | ||
2079 | gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); | ||
2080 | |||
2081 | do { | ||
2082 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | ||
2083 | clean = !(val & GICR_VPENDBASER_Dirty); | ||
2084 | if (!clean) { | ||
2085 | count--; | ||
2086 | cpu_relax(); | ||
2087 | udelay(1); | ||
2088 | } | ||
2089 | } while (!clean && count); | ||
2090 | |||
2091 | return val; | ||
2092 | } | ||
2093 | |||
2062 | static void its_cpu_init_lpis(void) | 2094 | static void its_cpu_init_lpis(void) |
2063 | { | 2095 | { |
2064 | void __iomem *rbase = gic_data_rdist_rd_base(); | 2096 | void __iomem *rbase = gic_data_rdist_rd_base(); |
@@ -2144,6 +2176,30 @@ static void its_cpu_init_lpis(void) | |||
2144 | val |= GICR_CTLR_ENABLE_LPIS; | 2176 | val |= GICR_CTLR_ENABLE_LPIS; |
2145 | writel_relaxed(val, rbase + GICR_CTLR); | 2177 | writel_relaxed(val, rbase + GICR_CTLR); |
2146 | 2178 | ||
2179 | if (gic_rdists->has_vlpis) { | ||
2180 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); | ||
2181 | |||
2182 | /* | ||
2183 | * It's possible for CPU to receive VLPIs before it is | ||
2184 | * sheduled as a vPE, especially for the first CPU, and the | ||
2185 | * VLPI with INTID larger than 2^(IDbits+1) will be considered | ||
2186 | * as out of range and dropped by GIC. | ||
2187 | * So we initialize IDbits to known value to avoid VLPI drop. | ||
2188 | */ | ||
2189 | val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; | ||
2190 | pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", | ||
2191 | smp_processor_id(), val); | ||
2192 | gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); | ||
2193 | |||
2194 | /* | ||
2195 | * Also clear Valid bit of GICR_VPENDBASER, in case some | ||
2196 | * ancient programming gets left in and has possibility of | ||
2197 | * corrupting memory. | ||
2198 | */ | ||
2199 | val = its_clear_vpend_valid(vlpi_base); | ||
2200 | WARN_ON(val & GICR_VPENDBASER_Dirty); | ||
2201 | } | ||
2202 | |||
2147 | /* Make sure the GIC has seen the above */ | 2203 | /* Make sure the GIC has seen the above */ |
2148 | dsb(sy); | 2204 | dsb(sy); |
2149 | out: | 2205 | out: |
@@ -2422,6 +2478,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | |||
2422 | struct its_device *its_dev; | 2478 | struct its_device *its_dev; |
2423 | struct msi_domain_info *msi_info; | 2479 | struct msi_domain_info *msi_info; |
2424 | u32 dev_id; | 2480 | u32 dev_id; |
2481 | int err = 0; | ||
2425 | 2482 | ||
2426 | /* | 2483 | /* |
2427 | * We ignore "dev" entierely, and rely on the dev_id that has | 2484 | * We ignore "dev" entierely, and rely on the dev_id that has |
@@ -2444,6 +2501,7 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | |||
2444 | return -EINVAL; | 2501 | return -EINVAL; |
2445 | } | 2502 | } |
2446 | 2503 | ||
2504 | mutex_lock(&its->dev_alloc_lock); | ||
2447 | its_dev = its_find_device(its, dev_id); | 2505 | its_dev = its_find_device(its, dev_id); |
2448 | if (its_dev) { | 2506 | if (its_dev) { |
2449 | /* | 2507 | /* |
@@ -2451,18 +2509,22 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, | |||
2451 | * another alias (PCI bridge of some sort). No need to | 2509 | * another alias (PCI bridge of some sort). No need to |
2452 | * create the device. | 2510 | * create the device. |
2453 | */ | 2511 | */ |
2512 | its_dev->shared = true; | ||
2454 | pr_debug("Reusing ITT for devID %x\n", dev_id); | 2513 | pr_debug("Reusing ITT for devID %x\n", dev_id); |
2455 | goto out; | 2514 | goto out; |
2456 | } | 2515 | } |
2457 | 2516 | ||
2458 | its_dev = its_create_device(its, dev_id, nvec, true); | 2517 | its_dev = its_create_device(its, dev_id, nvec, true); |
2459 | if (!its_dev) | 2518 | if (!its_dev) { |
2460 | return -ENOMEM; | 2519 | err = -ENOMEM; |
2520 | goto out; | ||
2521 | } | ||
2461 | 2522 | ||
2462 | pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); | 2523 | pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); |
2463 | out: | 2524 | out: |
2525 | mutex_unlock(&its->dev_alloc_lock); | ||
2464 | info->scratchpad[0].ptr = its_dev; | 2526 | info->scratchpad[0].ptr = its_dev; |
2465 | return 0; | 2527 | return err; |
2466 | } | 2528 | } |
2467 | 2529 | ||
2468 | static struct msi_domain_ops its_msi_domain_ops = { | 2530 | static struct msi_domain_ops its_msi_domain_ops = { |
@@ -2566,6 +2628,7 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, | |||
2566 | { | 2628 | { |
2567 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); | 2629 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); |
2568 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); | 2630 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
2631 | struct its_node *its = its_dev->its; | ||
2569 | int i; | 2632 | int i; |
2570 | 2633 | ||
2571 | for (i = 0; i < nr_irqs; i++) { | 2634 | for (i = 0; i < nr_irqs; i++) { |
@@ -2580,8 +2643,14 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, | |||
2580 | irq_domain_reset_irq_data(data); | 2643 | irq_domain_reset_irq_data(data); |
2581 | } | 2644 | } |
2582 | 2645 | ||
2583 | /* If all interrupts have been freed, start mopping the floor */ | 2646 | mutex_lock(&its->dev_alloc_lock); |
2584 | if (bitmap_empty(its_dev->event_map.lpi_map, | 2647 | |
2648 | /* | ||
2649 | * If all interrupts have been freed, start mopping the | ||
2650 | * floor. This is conditionned on the device not being shared. | ||
2651 | */ | ||
2652 | if (!its_dev->shared && | ||
2653 | bitmap_empty(its_dev->event_map.lpi_map, | ||
2585 | its_dev->event_map.nr_lpis)) { | 2654 | its_dev->event_map.nr_lpis)) { |
2586 | its_lpi_free(its_dev->event_map.lpi_map, | 2655 | its_lpi_free(its_dev->event_map.lpi_map, |
2587 | its_dev->event_map.lpi_base, | 2656 | its_dev->event_map.lpi_base, |
@@ -2593,6 +2662,8 @@ static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, | |||
2593 | its_free_device(its_dev); | 2662 | its_free_device(its_dev); |
2594 | } | 2663 | } |
2595 | 2664 | ||
2665 | mutex_unlock(&its->dev_alloc_lock); | ||
2666 | |||
2596 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); | 2667 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); |
2597 | } | 2668 | } |
2598 | 2669 | ||
@@ -2755,26 +2826,11 @@ static void its_vpe_schedule(struct its_vpe *vpe) | |||
2755 | static void its_vpe_deschedule(struct its_vpe *vpe) | 2826 | static void its_vpe_deschedule(struct its_vpe *vpe) |
2756 | { | 2827 | { |
2757 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); | 2828 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
2758 | u32 count = 1000000; /* 1s! */ | ||
2759 | bool clean; | ||
2760 | u64 val; | 2829 | u64 val; |
2761 | 2830 | ||
2762 | /* We're being scheduled out */ | 2831 | val = its_clear_vpend_valid(vlpi_base); |
2763 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | ||
2764 | val &= ~GICR_VPENDBASER_Valid; | ||
2765 | gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); | ||
2766 | |||
2767 | do { | ||
2768 | val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER); | ||
2769 | clean = !(val & GICR_VPENDBASER_Dirty); | ||
2770 | if (!clean) { | ||
2771 | count--; | ||
2772 | cpu_relax(); | ||
2773 | udelay(1); | ||
2774 | } | ||
2775 | } while (!clean && count); | ||
2776 | 2832 | ||
2777 | if (unlikely(!clean && !count)) { | 2833 | if (unlikely(val & GICR_VPENDBASER_Dirty)) { |
2778 | pr_err_ratelimited("ITS virtual pending table not cleaning\n"); | 2834 | pr_err_ratelimited("ITS virtual pending table not cleaning\n"); |
2779 | vpe->idai = false; | 2835 | vpe->idai = false; |
2780 | vpe->pending_last = true; | 2836 | vpe->pending_last = true; |
@@ -3517,6 +3573,7 @@ static int __init its_probe_one(struct resource *res, | |||
3517 | } | 3573 | } |
3518 | 3574 | ||
3519 | raw_spin_lock_init(&its->lock); | 3575 | raw_spin_lock_init(&its->lock); |
3576 | mutex_init(&its->dev_alloc_lock); | ||
3520 | INIT_LIST_HEAD(&its->entry); | 3577 | INIT_LIST_HEAD(&its->entry); |
3521 | INIT_LIST_HEAD(&its->its_device_list); | 3578 | INIT_LIST_HEAD(&its->its_device_list); |
3522 | typer = gic_read_typer(its_base + GITS_TYPER); | 3579 | typer = gic_read_typer(its_base + GITS_TYPER); |
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c index 25f32e1d7764..3496b61a312a 100644 --- a/drivers/irqchip/irq-mmp.c +++ b/drivers/irqchip/irq-mmp.c | |||
@@ -34,6 +34,9 @@ | |||
34 | #define SEL_INT_PENDING (1 << 6) | 34 | #define SEL_INT_PENDING (1 << 6) |
35 | #define SEL_INT_NUM_MASK 0x3f | 35 | #define SEL_INT_NUM_MASK 0x3f |
36 | 36 | ||
37 | #define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5) | ||
38 | #define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6) | ||
39 | |||
37 | struct icu_chip_data { | 40 | struct icu_chip_data { |
38 | int nr_irqs; | 41 | int nr_irqs; |
39 | unsigned int virq_base; | 42 | unsigned int virq_base; |
@@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = { | |||
190 | static const struct mmp_intc_conf mmp2_conf = { | 193 | static const struct mmp_intc_conf mmp2_conf = { |
191 | .conf_enable = 0x20, | 194 | .conf_enable = 0x20, |
192 | .conf_disable = 0x0, | 195 | .conf_disable = 0x0, |
193 | .conf_mask = 0x7f, | 196 | .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ | |
197 | MMP2_ICU_INT_ROUTE_PJ4_FIQ, | ||
194 | }; | 198 | }; |
195 | 199 | ||
196 | static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) | 200 | static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs) |
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c index 5385f5768345..27933338f7b3 100644 --- a/drivers/irqchip/irq-xtensa-mx.c +++ b/drivers/irqchip/irq-xtensa-mx.c | |||
@@ -71,14 +71,17 @@ static void xtensa_mx_irq_mask(struct irq_data *d) | |||
71 | unsigned int mask = 1u << d->hwirq; | 71 | unsigned int mask = 1u << d->hwirq; |
72 | 72 | ||
73 | if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | | 73 | if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | |
74 | XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { | 74 | XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { |
75 | set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - | 75 | unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq); |
76 | HW_IRQ_MX_BASE), MIENG); | 76 | |
77 | } else { | 77 | if (ext_irq >= HW_IRQ_MX_BASE) { |
78 | mask = __this_cpu_read(cached_irq_mask) & ~mask; | 78 | set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG); |
79 | __this_cpu_write(cached_irq_mask, mask); | 79 | return; |
80 | xtensa_set_sr(mask, intenable); | 80 | } |
81 | } | 81 | } |
82 | mask = __this_cpu_read(cached_irq_mask) & ~mask; | ||
83 | __this_cpu_write(cached_irq_mask, mask); | ||
84 | xtensa_set_sr(mask, intenable); | ||
82 | } | 85 | } |
83 | 86 | ||
84 | static void xtensa_mx_irq_unmask(struct irq_data *d) | 87 | static void xtensa_mx_irq_unmask(struct irq_data *d) |
@@ -86,14 +89,17 @@ static void xtensa_mx_irq_unmask(struct irq_data *d) | |||
86 | unsigned int mask = 1u << d->hwirq; | 89 | unsigned int mask = 1u << d->hwirq; |
87 | 90 | ||
88 | if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | | 91 | if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE | |
89 | XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { | 92 | XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) { |
90 | set_er(1u << (xtensa_get_ext_irq_no(d->hwirq) - | 93 | unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq); |
91 | HW_IRQ_MX_BASE), MIENGSET); | 94 | |
92 | } else { | 95 | if (ext_irq >= HW_IRQ_MX_BASE) { |
93 | mask |= __this_cpu_read(cached_irq_mask); | 96 | set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET); |
94 | __this_cpu_write(cached_irq_mask, mask); | 97 | return; |
95 | xtensa_set_sr(mask, intenable); | 98 | } |
96 | } | 99 | } |
100 | mask |= __this_cpu_read(cached_irq_mask); | ||
101 | __this_cpu_write(cached_irq_mask, mask); | ||
102 | xtensa_set_sr(mask, intenable); | ||
97 | } | 103 | } |
98 | 104 | ||
99 | static void xtensa_mx_irq_enable(struct irq_data *d) | 105 | static void xtensa_mx_irq_enable(struct irq_data *d) |
@@ -113,7 +119,11 @@ static void xtensa_mx_irq_ack(struct irq_data *d) | |||
113 | 119 | ||
114 | static int xtensa_mx_irq_retrigger(struct irq_data *d) | 120 | static int xtensa_mx_irq_retrigger(struct irq_data *d) |
115 | { | 121 | { |
116 | xtensa_set_sr(1 << d->hwirq, intset); | 122 | unsigned int mask = 1u << d->hwirq; |
123 | |||
124 | if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) | ||
125 | return 0; | ||
126 | xtensa_set_sr(mask, intset); | ||
117 | return 1; | 127 | return 1; |
118 | } | 128 | } |
119 | 129 | ||
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c index c200234dd2c9..ab12328be5ee 100644 --- a/drivers/irqchip/irq-xtensa-pic.c +++ b/drivers/irqchip/irq-xtensa-pic.c | |||
@@ -70,7 +70,11 @@ static void xtensa_irq_ack(struct irq_data *d) | |||
70 | 70 | ||
71 | static int xtensa_irq_retrigger(struct irq_data *d) | 71 | static int xtensa_irq_retrigger(struct irq_data *d) |
72 | { | 72 | { |
73 | xtensa_set_sr(1 << d->hwirq, intset); | 73 | unsigned int mask = 1u << d->hwirq; |
74 | |||
75 | if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE)) | ||
76 | return 0; | ||
77 | xtensa_set_sr(mask, intset); | ||
74 | return 1; | 78 | return 1; |
75 | } | 79 | } |
76 | 80 | ||
diff --git a/drivers/isdn/mISDN/timerdev.c b/drivers/isdn/mISDN/timerdev.c index 211ed6cffd10..578978711887 100644 --- a/drivers/isdn/mISDN/timerdev.c +++ b/drivers/isdn/mISDN/timerdev.c | |||
@@ -170,8 +170,8 @@ dev_expire_timer(struct timer_list *t) | |||
170 | spin_lock_irqsave(&timer->dev->lock, flags); | 170 | spin_lock_irqsave(&timer->dev->lock, flags); |
171 | if (timer->id >= 0) | 171 | if (timer->id >= 0) |
172 | list_move_tail(&timer->list, &timer->dev->expired); | 172 | list_move_tail(&timer->list, &timer->dev->expired); |
173 | spin_unlock_irqrestore(&timer->dev->lock, flags); | ||
174 | wake_up_interruptible(&timer->dev->wait); | 173 | wake_up_interruptible(&timer->dev->wait); |
174 | spin_unlock_irqrestore(&timer->dev->lock, flags); | ||
175 | } | 175 | } |
176 | 176 | ||
177 | static int | 177 | static int |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 47d4e0d30bf0..dd538e6b2748 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -932,7 +932,7 @@ static int dm_crypt_integrity_io_alloc(struct dm_crypt_io *io, struct bio *bio) | |||
932 | if (IS_ERR(bip)) | 932 | if (IS_ERR(bip)) |
933 | return PTR_ERR(bip); | 933 | return PTR_ERR(bip); |
934 | 934 | ||
935 | tag_len = io->cc->on_disk_tag_size * bio_sectors(bio); | 935 | tag_len = io->cc->on_disk_tag_size * (bio_sectors(bio) >> io->cc->sector_shift); |
936 | 936 | ||
937 | bip->bip_iter.bi_size = tag_len; | 937 | bip->bip_iter.bi_size = tag_len; |
938 | bip->bip_iter.bi_sector = io->cc->start + io->sector; | 938 | bip->bip_iter.bi_sector = io->cc->start + io->sector; |
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 4eb5f8c56535..a20531e5f3b4 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c | |||
@@ -131,7 +131,7 @@ static void rq_end_stats(struct mapped_device *md, struct request *orig) | |||
131 | static void rq_completed(struct mapped_device *md) | 131 | static void rq_completed(struct mapped_device *md) |
132 | { | 132 | { |
133 | /* nudge anyone waiting on suspend queue */ | 133 | /* nudge anyone waiting on suspend queue */ |
134 | if (unlikely(waitqueue_active(&md->wait))) | 134 | if (unlikely(wq_has_sleeper(&md->wait))) |
135 | wake_up(&md->wait); | 135 | wake_up(&md->wait); |
136 | 136 | ||
137 | /* | 137 | /* |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index ca8af21bf644..e83b63608262 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -257,6 +257,7 @@ struct pool { | |||
257 | 257 | ||
258 | spinlock_t lock; | 258 | spinlock_t lock; |
259 | struct bio_list deferred_flush_bios; | 259 | struct bio_list deferred_flush_bios; |
260 | struct bio_list deferred_flush_completions; | ||
260 | struct list_head prepared_mappings; | 261 | struct list_head prepared_mappings; |
261 | struct list_head prepared_discards; | 262 | struct list_head prepared_discards; |
262 | struct list_head prepared_discards_pt2; | 263 | struct list_head prepared_discards_pt2; |
@@ -956,6 +957,39 @@ static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) | |||
956 | mempool_free(m, &m->tc->pool->mapping_pool); | 957 | mempool_free(m, &m->tc->pool->mapping_pool); |
957 | } | 958 | } |
958 | 959 | ||
960 | static void complete_overwrite_bio(struct thin_c *tc, struct bio *bio) | ||
961 | { | ||
962 | struct pool *pool = tc->pool; | ||
963 | unsigned long flags; | ||
964 | |||
965 | /* | ||
966 | * If the bio has the REQ_FUA flag set we must commit the metadata | ||
967 | * before signaling its completion. | ||
968 | */ | ||
969 | if (!bio_triggers_commit(tc, bio)) { | ||
970 | bio_endio(bio); | ||
971 | return; | ||
972 | } | ||
973 | |||
974 | /* | ||
975 | * Complete bio with an error if earlier I/O caused changes to the | ||
976 | * metadata that can't be committed, e.g, due to I/O errors on the | ||
977 | * metadata device. | ||
978 | */ | ||
979 | if (dm_thin_aborted_changes(tc->td)) { | ||
980 | bio_io_error(bio); | ||
981 | return; | ||
982 | } | ||
983 | |||
984 | /* | ||
985 | * Batch together any bios that trigger commits and then issue a | ||
986 | * single commit for them in process_deferred_bios(). | ||
987 | */ | ||
988 | spin_lock_irqsave(&pool->lock, flags); | ||
989 | bio_list_add(&pool->deferred_flush_completions, bio); | ||
990 | spin_unlock_irqrestore(&pool->lock, flags); | ||
991 | } | ||
992 | |||
959 | static void process_prepared_mapping(struct dm_thin_new_mapping *m) | 993 | static void process_prepared_mapping(struct dm_thin_new_mapping *m) |
960 | { | 994 | { |
961 | struct thin_c *tc = m->tc; | 995 | struct thin_c *tc = m->tc; |
@@ -988,7 +1022,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
988 | */ | 1022 | */ |
989 | if (bio) { | 1023 | if (bio) { |
990 | inc_remap_and_issue_cell(tc, m->cell, m->data_block); | 1024 | inc_remap_and_issue_cell(tc, m->cell, m->data_block); |
991 | bio_endio(bio); | 1025 | complete_overwrite_bio(tc, bio); |
992 | } else { | 1026 | } else { |
993 | inc_all_io_entry(tc->pool, m->cell->holder); | 1027 | inc_all_io_entry(tc->pool, m->cell->holder); |
994 | remap_and_issue(tc, m->cell->holder, m->data_block); | 1028 | remap_and_issue(tc, m->cell->holder, m->data_block); |
@@ -2317,7 +2351,7 @@ static void process_deferred_bios(struct pool *pool) | |||
2317 | { | 2351 | { |
2318 | unsigned long flags; | 2352 | unsigned long flags; |
2319 | struct bio *bio; | 2353 | struct bio *bio; |
2320 | struct bio_list bios; | 2354 | struct bio_list bios, bio_completions; |
2321 | struct thin_c *tc; | 2355 | struct thin_c *tc; |
2322 | 2356 | ||
2323 | tc = get_first_thin(pool); | 2357 | tc = get_first_thin(pool); |
@@ -2328,26 +2362,36 @@ static void process_deferred_bios(struct pool *pool) | |||
2328 | } | 2362 | } |
2329 | 2363 | ||
2330 | /* | 2364 | /* |
2331 | * If there are any deferred flush bios, we must commit | 2365 | * If there are any deferred flush bios, we must commit the metadata |
2332 | * the metadata before issuing them. | 2366 | * before issuing them or signaling their completion. |
2333 | */ | 2367 | */ |
2334 | bio_list_init(&bios); | 2368 | bio_list_init(&bios); |
2369 | bio_list_init(&bio_completions); | ||
2370 | |||
2335 | spin_lock_irqsave(&pool->lock, flags); | 2371 | spin_lock_irqsave(&pool->lock, flags); |
2336 | bio_list_merge(&bios, &pool->deferred_flush_bios); | 2372 | bio_list_merge(&bios, &pool->deferred_flush_bios); |
2337 | bio_list_init(&pool->deferred_flush_bios); | 2373 | bio_list_init(&pool->deferred_flush_bios); |
2374 | |||
2375 | bio_list_merge(&bio_completions, &pool->deferred_flush_completions); | ||
2376 | bio_list_init(&pool->deferred_flush_completions); | ||
2338 | spin_unlock_irqrestore(&pool->lock, flags); | 2377 | spin_unlock_irqrestore(&pool->lock, flags); |
2339 | 2378 | ||
2340 | if (bio_list_empty(&bios) && | 2379 | if (bio_list_empty(&bios) && bio_list_empty(&bio_completions) && |
2341 | !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) | 2380 | !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) |
2342 | return; | 2381 | return; |
2343 | 2382 | ||
2344 | if (commit(pool)) { | 2383 | if (commit(pool)) { |
2384 | bio_list_merge(&bios, &bio_completions); | ||
2385 | |||
2345 | while ((bio = bio_list_pop(&bios))) | 2386 | while ((bio = bio_list_pop(&bios))) |
2346 | bio_io_error(bio); | 2387 | bio_io_error(bio); |
2347 | return; | 2388 | return; |
2348 | } | 2389 | } |
2349 | pool->last_commit_jiffies = jiffies; | 2390 | pool->last_commit_jiffies = jiffies; |
2350 | 2391 | ||
2392 | while ((bio = bio_list_pop(&bio_completions))) | ||
2393 | bio_endio(bio); | ||
2394 | |||
2351 | while ((bio = bio_list_pop(&bios))) | 2395 | while ((bio = bio_list_pop(&bios))) |
2352 | generic_make_request(bio); | 2396 | generic_make_request(bio); |
2353 | } | 2397 | } |
@@ -2954,6 +2998,7 @@ static struct pool *pool_create(struct mapped_device *pool_md, | |||
2954 | INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); | 2998 | INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); |
2955 | spin_lock_init(&pool->lock); | 2999 | spin_lock_init(&pool->lock); |
2956 | bio_list_init(&pool->deferred_flush_bios); | 3000 | bio_list_init(&pool->deferred_flush_bios); |
3001 | bio_list_init(&pool->deferred_flush_completions); | ||
2957 | INIT_LIST_HEAD(&pool->prepared_mappings); | 3002 | INIT_LIST_HEAD(&pool->prepared_mappings); |
2958 | INIT_LIST_HEAD(&pool->prepared_discards); | 3003 | INIT_LIST_HEAD(&pool->prepared_discards); |
2959 | INIT_LIST_HEAD(&pool->prepared_discards_pt2); | 3004 | INIT_LIST_HEAD(&pool->prepared_discards_pt2); |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2b53c3841b53..515e6af9bed2 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -699,7 +699,7 @@ static void end_io_acct(struct dm_io *io) | |||
699 | true, duration, &io->stats_aux); | 699 | true, duration, &io->stats_aux); |
700 | 700 | ||
701 | /* nudge anyone waiting on suspend queue */ | 701 | /* nudge anyone waiting on suspend queue */ |
702 | if (unlikely(waitqueue_active(&md->wait))) | 702 | if (unlikely(wq_has_sleeper(&md->wait))) |
703 | wake_up(&md->wait); | 703 | wake_up(&md->wait); |
704 | } | 704 | } |
705 | 705 | ||
@@ -1336,7 +1336,11 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, | |||
1336 | return r; | 1336 | return r; |
1337 | } | 1337 | } |
1338 | 1338 | ||
1339 | bio_trim(clone, sector - clone->bi_iter.bi_sector, len); | 1339 | bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); |
1340 | clone->bi_iter.bi_size = to_bytes(len); | ||
1341 | |||
1342 | if (bio_integrity(bio)) | ||
1343 | bio_integrity_trim(clone); | ||
1340 | 1344 | ||
1341 | return 0; | 1345 | return 0; |
1342 | } | 1346 | } |
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index f461460a2aeb..76f9909cf396 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
@@ -1419,7 +1419,7 @@ config MFD_TPS65217 | |||
1419 | 1419 | ||
1420 | config MFD_TPS68470 | 1420 | config MFD_TPS68470 |
1421 | bool "TI TPS68470 Power Management / LED chips" | 1421 | bool "TI TPS68470 Power Management / LED chips" |
1422 | depends on ACPI && I2C=y | 1422 | depends on ACPI && PCI && I2C=y |
1423 | select MFD_CORE | 1423 | select MFD_CORE |
1424 | select REGMAP_I2C | 1424 | select REGMAP_I2C |
1425 | select I2C_DESIGNWARE_PLATFORM | 1425 | select I2C_DESIGNWARE_PLATFORM |
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c index 1fc8ea0f519b..ca4c9cc218a2 100644 --- a/drivers/misc/mei/client.c +++ b/drivers/misc/mei/client.c | |||
@@ -401,8 +401,11 @@ static void mei_io_list_flush_cl(struct list_head *head, | |||
401 | struct mei_cl_cb *cb, *next; | 401 | struct mei_cl_cb *cb, *next; |
402 | 402 | ||
403 | list_for_each_entry_safe(cb, next, head, list) { | 403 | list_for_each_entry_safe(cb, next, head, list) { |
404 | if (cl == cb->cl) | 404 | if (cl == cb->cl) { |
405 | list_del_init(&cb->list); | 405 | list_del_init(&cb->list); |
406 | if (cb->fop_type == MEI_FOP_READ) | ||
407 | mei_io_cb_free(cb); | ||
408 | } | ||
406 | } | 409 | } |
407 | } | 410 | } |
408 | 411 | ||
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 23739a60517f..bb1ee9834a02 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
@@ -139,6 +139,8 @@ | |||
139 | #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ | 139 | #define MEI_DEV_ID_CNP_H 0xA360 /* Cannon Point H */ |
140 | #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ | 140 | #define MEI_DEV_ID_CNP_H_4 0xA364 /* Cannon Point H 4 (iTouch) */ |
141 | 141 | ||
142 | #define MEI_DEV_ID_ICP_LP 0x34E0 /* Ice Lake Point LP */ | ||
143 | |||
142 | /* | 144 | /* |
143 | * MEI HW Section | 145 | * MEI HW Section |
144 | */ | 146 | */ |
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index e89497f858ae..3ab946ad3257 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -105,6 +105,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
105 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, | 105 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H, MEI_ME_PCH12_CFG)}, |
106 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, | 106 | {MEI_PCI_DEVICE(MEI_DEV_ID_CNP_H_4, MEI_ME_PCH8_CFG)}, |
107 | 107 | ||
108 | {MEI_PCI_DEVICE(MEI_DEV_ID_ICP_LP, MEI_ME_PCH12_CFG)}, | ||
109 | |||
108 | /* required last entry */ | 110 | /* required last entry */ |
109 | {0, } | 111 | {0, } |
110 | }; | 112 | }; |
diff --git a/drivers/misc/mic/vop/vop_main.c b/drivers/misc/mic/vop/vop_main.c index 2bfa3a903bf9..744757f541be 100644 --- a/drivers/misc/mic/vop/vop_main.c +++ b/drivers/misc/mic/vop/vop_main.c | |||
@@ -47,7 +47,8 @@ | |||
47 | * @dc: Virtio device control | 47 | * @dc: Virtio device control |
48 | * @vpdev: VOP device which is the parent for this virtio device | 48 | * @vpdev: VOP device which is the parent for this virtio device |
49 | * @vr: Buffer for accessing the VRING | 49 | * @vr: Buffer for accessing the VRING |
50 | * @used: Buffer for used | 50 | * @used_virt: Virtual address of used ring |
51 | * @used: DMA address of used ring | ||
51 | * @used_size: Size of the used buffer | 52 | * @used_size: Size of the used buffer |
52 | * @reset_done: Track whether VOP reset is complete | 53 | * @reset_done: Track whether VOP reset is complete |
53 | * @virtio_cookie: Cookie returned upon requesting a interrupt | 54 | * @virtio_cookie: Cookie returned upon requesting a interrupt |
@@ -61,6 +62,7 @@ struct _vop_vdev { | |||
61 | struct mic_device_ctrl __iomem *dc; | 62 | struct mic_device_ctrl __iomem *dc; |
62 | struct vop_device *vpdev; | 63 | struct vop_device *vpdev; |
63 | void __iomem *vr[VOP_MAX_VRINGS]; | 64 | void __iomem *vr[VOP_MAX_VRINGS]; |
65 | void *used_virt[VOP_MAX_VRINGS]; | ||
64 | dma_addr_t used[VOP_MAX_VRINGS]; | 66 | dma_addr_t used[VOP_MAX_VRINGS]; |
65 | int used_size[VOP_MAX_VRINGS]; | 67 | int used_size[VOP_MAX_VRINGS]; |
66 | struct completion reset_done; | 68 | struct completion reset_done; |
@@ -260,12 +262,12 @@ static bool vop_notify(struct virtqueue *vq) | |||
260 | static void vop_del_vq(struct virtqueue *vq, int n) | 262 | static void vop_del_vq(struct virtqueue *vq, int n) |
261 | { | 263 | { |
262 | struct _vop_vdev *vdev = to_vopvdev(vq->vdev); | 264 | struct _vop_vdev *vdev = to_vopvdev(vq->vdev); |
263 | struct vring *vr = (struct vring *)(vq + 1); | ||
264 | struct vop_device *vpdev = vdev->vpdev; | 265 | struct vop_device *vpdev = vdev->vpdev; |
265 | 266 | ||
266 | dma_unmap_single(&vpdev->dev, vdev->used[n], | 267 | dma_unmap_single(&vpdev->dev, vdev->used[n], |
267 | vdev->used_size[n], DMA_BIDIRECTIONAL); | 268 | vdev->used_size[n], DMA_BIDIRECTIONAL); |
268 | free_pages((unsigned long)vr->used, get_order(vdev->used_size[n])); | 269 | free_pages((unsigned long)vdev->used_virt[n], |
270 | get_order(vdev->used_size[n])); | ||
269 | vring_del_virtqueue(vq); | 271 | vring_del_virtqueue(vq); |
270 | vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); | 272 | vpdev->hw_ops->iounmap(vpdev, vdev->vr[n]); |
271 | vdev->vr[n] = NULL; | 273 | vdev->vr[n] = NULL; |
@@ -283,6 +285,26 @@ static void vop_del_vqs(struct virtio_device *dev) | |||
283 | vop_del_vq(vq, idx++); | 285 | vop_del_vq(vq, idx++); |
284 | } | 286 | } |
285 | 287 | ||
288 | static struct virtqueue *vop_new_virtqueue(unsigned int index, | ||
289 | unsigned int num, | ||
290 | struct virtio_device *vdev, | ||
291 | bool context, | ||
292 | void *pages, | ||
293 | bool (*notify)(struct virtqueue *vq), | ||
294 | void (*callback)(struct virtqueue *vq), | ||
295 | const char *name, | ||
296 | void *used) | ||
297 | { | ||
298 | bool weak_barriers = false; | ||
299 | struct vring vring; | ||
300 | |||
301 | vring_init(&vring, num, pages, MIC_VIRTIO_RING_ALIGN); | ||
302 | vring.used = used; | ||
303 | |||
304 | return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context, | ||
305 | notify, callback, name); | ||
306 | } | ||
307 | |||
286 | /* | 308 | /* |
287 | * This routine will assign vring's allocated in host/io memory. Code in | 309 | * This routine will assign vring's allocated in host/io memory. Code in |
288 | * virtio_ring.c however continues to access this io memory as if it were local | 310 | * virtio_ring.c however continues to access this io memory as if it were local |
@@ -302,7 +324,6 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, | |||
302 | struct _mic_vring_info __iomem *info; | 324 | struct _mic_vring_info __iomem *info; |
303 | void *used; | 325 | void *used; |
304 | int vr_size, _vr_size, err, magic; | 326 | int vr_size, _vr_size, err, magic; |
305 | struct vring *vr; | ||
306 | u8 type = ioread8(&vdev->desc->type); | 327 | u8 type = ioread8(&vdev->desc->type); |
307 | 328 | ||
308 | if (index >= ioread8(&vdev->desc->num_vq)) | 329 | if (index >= ioread8(&vdev->desc->num_vq)) |
@@ -322,17 +343,7 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, | |||
322 | return ERR_PTR(-ENOMEM); | 343 | return ERR_PTR(-ENOMEM); |
323 | vdev->vr[index] = va; | 344 | vdev->vr[index] = va; |
324 | memset_io(va, 0x0, _vr_size); | 345 | memset_io(va, 0x0, _vr_size); |
325 | vq = vring_new_virtqueue( | 346 | |
326 | index, | ||
327 | le16_to_cpu(config.num), MIC_VIRTIO_RING_ALIGN, | ||
328 | dev, | ||
329 | false, | ||
330 | ctx, | ||
331 | (void __force *)va, vop_notify, callback, name); | ||
332 | if (!vq) { | ||
333 | err = -ENOMEM; | ||
334 | goto unmap; | ||
335 | } | ||
336 | info = va + _vr_size; | 347 | info = va + _vr_size; |
337 | magic = ioread32(&info->magic); | 348 | magic = ioread32(&info->magic); |
338 | 349 | ||
@@ -341,18 +352,27 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, | |||
341 | goto unmap; | 352 | goto unmap; |
342 | } | 353 | } |
343 | 354 | ||
344 | /* Allocate and reassign used ring now */ | ||
345 | vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + | 355 | vdev->used_size[index] = PAGE_ALIGN(sizeof(__u16) * 3 + |
346 | sizeof(struct vring_used_elem) * | 356 | sizeof(struct vring_used_elem) * |
347 | le16_to_cpu(config.num)); | 357 | le16_to_cpu(config.num)); |
348 | used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, | 358 | used = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, |
349 | get_order(vdev->used_size[index])); | 359 | get_order(vdev->used_size[index])); |
360 | vdev->used_virt[index] = used; | ||
350 | if (!used) { | 361 | if (!used) { |
351 | err = -ENOMEM; | 362 | err = -ENOMEM; |
352 | dev_err(_vop_dev(vdev), "%s %d err %d\n", | 363 | dev_err(_vop_dev(vdev), "%s %d err %d\n", |
353 | __func__, __LINE__, err); | 364 | __func__, __LINE__, err); |
354 | goto del_vq; | 365 | goto unmap; |
366 | } | ||
367 | |||
368 | vq = vop_new_virtqueue(index, le16_to_cpu(config.num), dev, ctx, | ||
369 | (void __force *)va, vop_notify, callback, | ||
370 | name, used); | ||
371 | if (!vq) { | ||
372 | err = -ENOMEM; | ||
373 | goto free_used; | ||
355 | } | 374 | } |
375 | |||
356 | vdev->used[index] = dma_map_single(&vpdev->dev, used, | 376 | vdev->used[index] = dma_map_single(&vpdev->dev, used, |
357 | vdev->used_size[index], | 377 | vdev->used_size[index], |
358 | DMA_BIDIRECTIONAL); | 378 | DMA_BIDIRECTIONAL); |
@@ -360,26 +380,17 @@ static struct virtqueue *vop_find_vq(struct virtio_device *dev, | |||
360 | err = -ENOMEM; | 380 | err = -ENOMEM; |
361 | dev_err(_vop_dev(vdev), "%s %d err %d\n", | 381 | dev_err(_vop_dev(vdev), "%s %d err %d\n", |
362 | __func__, __LINE__, err); | 382 | __func__, __LINE__, err); |
363 | goto free_used; | 383 | goto del_vq; |
364 | } | 384 | } |
365 | writeq(vdev->used[index], &vqconfig->used_address); | 385 | writeq(vdev->used[index], &vqconfig->used_address); |
366 | /* | ||
367 | * To reassign the used ring here we are directly accessing | ||
368 | * struct vring_virtqueue which is a private data structure | ||
369 | * in virtio_ring.c. At the minimum, a BUILD_BUG_ON() in | ||
370 | * vring_new_virtqueue() would ensure that | ||
371 | * (&vq->vring == (struct vring *) (&vq->vq + 1)); | ||
372 | */ | ||
373 | vr = (struct vring *)(vq + 1); | ||
374 | vr->used = used; | ||
375 | 386 | ||
376 | vq->priv = vdev; | 387 | vq->priv = vdev; |
377 | return vq; | 388 | return vq; |
389 | del_vq: | ||
390 | vring_del_virtqueue(vq); | ||
378 | free_used: | 391 | free_used: |
379 | free_pages((unsigned long)used, | 392 | free_pages((unsigned long)used, |
380 | get_order(vdev->used_size[index])); | 393 | get_order(vdev->used_size[index])); |
381 | del_vq: | ||
382 | vring_del_virtqueue(vq); | ||
383 | unmap: | 394 | unmap: |
384 | vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); | 395 | vpdev->hw_ops->iounmap(vpdev, vdev->vr[index]); |
385 | return ERR_PTR(err); | 396 | return ERR_PTR(err); |
@@ -581,6 +592,8 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, | |||
581 | int ret = -1; | 592 | int ret = -1; |
582 | 593 | ||
583 | if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { | 594 | if (ioread8(&dc->config_change) == MIC_VIRTIO_PARAM_DEV_REMOVE) { |
595 | struct device *dev = get_device(&vdev->vdev.dev); | ||
596 | |||
584 | dev_dbg(&vpdev->dev, | 597 | dev_dbg(&vpdev->dev, |
585 | "%s %d config_change %d type %d vdev %p\n", | 598 | "%s %d config_change %d type %d vdev %p\n", |
586 | __func__, __LINE__, | 599 | __func__, __LINE__, |
@@ -592,7 +605,7 @@ static int _vop_remove_device(struct mic_device_desc __iomem *d, | |||
592 | iowrite8(-1, &dc->h2c_vdev_db); | 605 | iowrite8(-1, &dc->h2c_vdev_db); |
593 | if (status & VIRTIO_CONFIG_S_DRIVER_OK) | 606 | if (status & VIRTIO_CONFIG_S_DRIVER_OK) |
594 | wait_for_completion(&vdev->reset_done); | 607 | wait_for_completion(&vdev->reset_done); |
595 | put_device(&vdev->vdev.dev); | 608 | put_device(dev); |
596 | iowrite8(1, &dc->guest_ack); | 609 | iowrite8(1, &dc->guest_ack); |
597 | dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", | 610 | dev_dbg(&vpdev->dev, "%s %d guest_ack %d\n", |
598 | __func__, __LINE__, ioread8(&dc->guest_ack)); | 611 | __func__, __LINE__, ioread8(&dc->guest_ack)); |
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index aef1185f383d..14f3fdb8c6bb 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c | |||
@@ -2112,7 +2112,7 @@ static void mmc_blk_mq_req_done(struct mmc_request *mrq) | |||
2112 | if (waiting) | 2112 | if (waiting) |
2113 | wake_up(&mq->wait); | 2113 | wake_up(&mq->wait); |
2114 | else | 2114 | else |
2115 | kblockd_schedule_work(&mq->complete_work); | 2115 | queue_work(mq->card->complete_wq, &mq->complete_work); |
2116 | 2116 | ||
2117 | return; | 2117 | return; |
2118 | } | 2118 | } |
@@ -2924,6 +2924,13 @@ static int mmc_blk_probe(struct mmc_card *card) | |||
2924 | 2924 | ||
2925 | mmc_fixup_device(card, mmc_blk_fixups); | 2925 | mmc_fixup_device(card, mmc_blk_fixups); |
2926 | 2926 | ||
2927 | card->complete_wq = alloc_workqueue("mmc_complete", | ||
2928 | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); | ||
2929 | if (unlikely(!card->complete_wq)) { | ||
2930 | pr_err("Failed to create mmc completion workqueue"); | ||
2931 | return -ENOMEM; | ||
2932 | } | ||
2933 | |||
2927 | md = mmc_blk_alloc(card); | 2934 | md = mmc_blk_alloc(card); |
2928 | if (IS_ERR(md)) | 2935 | if (IS_ERR(md)) |
2929 | return PTR_ERR(md); | 2936 | return PTR_ERR(md); |
@@ -2987,6 +2994,7 @@ static void mmc_blk_remove(struct mmc_card *card) | |||
2987 | pm_runtime_put_noidle(&card->dev); | 2994 | pm_runtime_put_noidle(&card->dev); |
2988 | mmc_blk_remove_req(md); | 2995 | mmc_blk_remove_req(md); |
2989 | dev_set_drvdata(&card->dev, NULL); | 2996 | dev_set_drvdata(&card->dev, NULL); |
2997 | destroy_workqueue(card->complete_wq); | ||
2990 | } | 2998 | } |
2991 | 2999 | ||
2992 | static int _mmc_blk_suspend(struct mmc_card *card) | 3000 | static int _mmc_blk_suspend(struct mmc_card *card) |
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 50293529d6de..c9e7aa50bb0a 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c | |||
@@ -1431,6 +1431,8 @@ static int bcm2835_probe(struct platform_device *pdev) | |||
1431 | 1431 | ||
1432 | err: | 1432 | err: |
1433 | dev_dbg(dev, "%s -> err %d\n", __func__, ret); | 1433 | dev_dbg(dev, "%s -> err %d\n", __func__, ret); |
1434 | if (host->dma_chan_rxtx) | ||
1435 | dma_release_channel(host->dma_chan_rxtx); | ||
1434 | mmc_free_host(mmc); | 1436 | mmc_free_host(mmc); |
1435 | 1437 | ||
1436 | return ret; | 1438 | return ret; |
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index f19ec60bcbdc..2eba507790e4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c | |||
@@ -1338,7 +1338,8 @@ static int meson_mmc_probe(struct platform_device *pdev) | |||
1338 | host->regs + SD_EMMC_IRQ_EN); | 1338 | host->regs + SD_EMMC_IRQ_EN); |
1339 | 1339 | ||
1340 | ret = request_threaded_irq(host->irq, meson_mmc_irq, | 1340 | ret = request_threaded_irq(host->irq, meson_mmc_irq, |
1341 | meson_mmc_irq_thread, IRQF_SHARED, NULL, host); | 1341 | meson_mmc_irq_thread, IRQF_SHARED, |
1342 | dev_name(&pdev->dev), host); | ||
1342 | if (ret) | 1343 | if (ret) |
1343 | goto err_init_clk; | 1344 | goto err_init_clk; |
1344 | 1345 | ||
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 8afeaf81ae66..833ef0590af8 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c | |||
@@ -846,7 +846,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) | |||
846 | 846 | ||
847 | if (timing == MMC_TIMING_MMC_HS400 && | 847 | if (timing == MMC_TIMING_MMC_HS400 && |
848 | host->dev_comp->hs400_tune) | 848 | host->dev_comp->hs400_tune) |
849 | sdr_set_field(host->base + PAD_CMD_TUNE, | 849 | sdr_set_field(host->base + tune_reg, |
850 | MSDC_PAD_TUNE_CMDRRDLY, | 850 | MSDC_PAD_TUNE_CMDRRDLY, |
851 | host->hs400_cmd_int_delay); | 851 | host->hs400_cmd_int_delay); |
852 | dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock, | 852 | dev_dbg(host->dev, "sclk: %d, timing: %d\n", host->mmc->actual_clock, |
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 279e326e397e..70fadc976795 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c | |||
@@ -1399,13 +1399,37 @@ static int sunxi_mmc_probe(struct platform_device *pdev) | |||
1399 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | | 1399 | mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED | |
1400 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; | 1400 | MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ; |
1401 | 1401 | ||
1402 | if (host->cfg->clk_delays || host->use_new_timings) | 1402 | /* |
1403 | * Some H5 devices do not have signal traces precise enough to | ||
1404 | * use HS DDR mode for their eMMC chips. | ||
1405 | * | ||
1406 | * We still enable HS DDR modes for all the other controller | ||
1407 | * variants that support them. | ||
1408 | */ | ||
1409 | if ((host->cfg->clk_delays || host->use_new_timings) && | ||
1410 | !of_device_is_compatible(pdev->dev.of_node, | ||
1411 | "allwinner,sun50i-h5-emmc")) | ||
1403 | mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; | 1412 | mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; |
1404 | 1413 | ||
1405 | ret = mmc_of_parse(mmc); | 1414 | ret = mmc_of_parse(mmc); |
1406 | if (ret) | 1415 | if (ret) |
1407 | goto error_free_dma; | 1416 | goto error_free_dma; |
1408 | 1417 | ||
1418 | /* | ||
1419 | * If we don't support delay chains in the SoC, we can't use any | ||
1420 | * of the higher speed modes. Mask them out in case the device | ||
1421 | * tree specifies the properties for them, which gets added to | ||
1422 | * the caps by mmc_of_parse() above. | ||
1423 | */ | ||
1424 | if (!(host->cfg->clk_delays || host->use_new_timings)) { | ||
1425 | mmc->caps &= ~(MMC_CAP_3_3V_DDR | MMC_CAP_1_8V_DDR | | ||
1426 | MMC_CAP_1_2V_DDR | MMC_CAP_UHS); | ||
1427 | mmc->caps2 &= ~MMC_CAP2_HS200; | ||
1428 | } | ||
1429 | |||
1430 | /* TODO: This driver doesn't support HS400 mode yet */ | ||
1431 | mmc->caps2 &= ~MMC_CAP2_HS400; | ||
1432 | |||
1409 | ret = sunxi_mmc_init_host(host); | 1433 | ret = sunxi_mmc_init_host(host); |
1410 | if (ret) | 1434 | if (ret) |
1411 | goto error_free_dma; | 1435 | goto error_free_dma; |
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index 60104e1079c5..37f174ccbcec 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c | |||
@@ -480,6 +480,10 @@ static struct mtd_part *allocate_partition(struct mtd_info *parent, | |||
480 | /* let's register it anyway to preserve ordering */ | 480 | /* let's register it anyway to preserve ordering */ |
481 | slave->offset = 0; | 481 | slave->offset = 0; |
482 | slave->mtd.size = 0; | 482 | slave->mtd.size = 0; |
483 | |||
484 | /* Initialize ->erasesize to make add_mtd_device() happy. */ | ||
485 | slave->mtd.erasesize = parent->erasesize; | ||
486 | |||
483 | printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", | 487 | printk(KERN_ERR"mtd: partition \"%s\" is out of reach -- disabled\n", |
484 | part->name); | 488 | part->name); |
485 | goto out_register; | 489 | goto out_register; |
@@ -632,7 +636,6 @@ err_remove_part: | |||
632 | mutex_unlock(&mtd_partitions_mutex); | 636 | mutex_unlock(&mtd_partitions_mutex); |
633 | 637 | ||
634 | free_partition(new); | 638 | free_partition(new); |
635 | pr_info("%s:%i\n", __func__, __LINE__); | ||
636 | 639 | ||
637 | return ret; | 640 | return ret; |
638 | } | 641 | } |
diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c index bd4cfac6b5aa..a4768df5083f 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-lib.c | |||
@@ -155,9 +155,10 @@ int gpmi_init(struct gpmi_nand_data *this) | |||
155 | 155 | ||
156 | /* | 156 | /* |
157 | * Reset BCH here, too. We got failures otherwise :( | 157 | * Reset BCH here, too. We got failures otherwise :( |
158 | * See later BCH reset for explanation of MX23 handling | 158 | * See later BCH reset for explanation of MX23 and MX28 handling |
159 | */ | 159 | */ |
160 | ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); | 160 | ret = gpmi_reset_block(r->bch_regs, |
161 | GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); | ||
161 | if (ret) | 162 | if (ret) |
162 | goto err_out; | 163 | goto err_out; |
163 | 164 | ||
@@ -263,12 +264,10 @@ int bch_set_geometry(struct gpmi_nand_data *this) | |||
263 | /* | 264 | /* |
264 | * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this | 265 | * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this |
265 | * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. | 266 | * chip, otherwise it will lock up. So we skip resetting BCH on the MX23. |
266 | * On the other hand, the MX28 needs the reset, because one case has been | 267 | * and MX28. |
267 | * seen where the BCH produced ECC errors constantly after 10000 | ||
268 | * consecutive reboots. The latter case has not been seen on the MX23 | ||
269 | * yet, still we don't know if it could happen there as well. | ||
270 | */ | 268 | */ |
271 | ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this)); | 269 | ret = gpmi_reset_block(r->bch_regs, |
270 | GPMI_IS_MX23(this) || GPMI_IS_MX28(this)); | ||
272 | if (ret) | 271 | if (ret) |
273 | goto err_out; | 272 | goto err_out; |
274 | 273 | ||
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index cca4b24d2ffa..839494ac457c 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c | |||
@@ -410,6 +410,7 @@ static int nand_check_wp(struct nand_chip *chip) | |||
410 | 410 | ||
411 | /** | 411 | /** |
412 | * nand_fill_oob - [INTERN] Transfer client buffer to oob | 412 | * nand_fill_oob - [INTERN] Transfer client buffer to oob |
413 | * @chip: NAND chip object | ||
413 | * @oob: oob data buffer | 414 | * @oob: oob data buffer |
414 | * @len: oob data write length | 415 | * @len: oob data write length |
415 | * @ops: oob ops structure | 416 | * @ops: oob ops structure |
diff --git a/drivers/mtd/nand/raw/nand_bbt.c b/drivers/mtd/nand/raw/nand_bbt.c index 1b722fe9213c..19a2b563acdf 100644 --- a/drivers/mtd/nand/raw/nand_bbt.c +++ b/drivers/mtd/nand/raw/nand_bbt.c | |||
@@ -158,7 +158,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td) | |||
158 | 158 | ||
159 | /** | 159 | /** |
160 | * read_bbt - [GENERIC] Read the bad block table starting from page | 160 | * read_bbt - [GENERIC] Read the bad block table starting from page |
161 | * @chip: NAND chip object | 161 | * @this: NAND chip object |
162 | * @buf: temporary buffer | 162 | * @buf: temporary buffer |
163 | * @page: the starting page | 163 | * @page: the starting page |
164 | * @num: the number of bbt descriptors to read | 164 | * @num: the number of bbt descriptors to read |
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 479c2f2cf17f..fa87ae28cdfe 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c | |||
@@ -304,24 +304,30 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, | |||
304 | struct nand_device *nand = spinand_to_nand(spinand); | 304 | struct nand_device *nand = spinand_to_nand(spinand); |
305 | struct mtd_info *mtd = nanddev_to_mtd(nand); | 305 | struct mtd_info *mtd = nanddev_to_mtd(nand); |
306 | struct nand_page_io_req adjreq = *req; | 306 | struct nand_page_io_req adjreq = *req; |
307 | unsigned int nbytes = 0; | 307 | void *buf = spinand->databuf; |
308 | void *buf = NULL; | 308 | unsigned int nbytes; |
309 | u16 column = 0; | 309 | u16 column = 0; |
310 | int ret; | 310 | int ret; |
311 | 311 | ||
312 | memset(spinand->databuf, 0xff, | 312 | /* |
313 | nanddev_page_size(nand) + | 313 | * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset |
314 | nanddev_per_page_oobsize(nand)); | 314 | * the cache content to 0xFF (depends on vendor implementation), so we |
315 | * must fill the page cache entirely even if we only want to program | ||
316 | * the data portion of the page, otherwise we might corrupt the BBM or | ||
317 | * user data previously programmed in OOB area. | ||
318 | */ | ||
319 | nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand); | ||
320 | memset(spinand->databuf, 0xff, nbytes); | ||
321 | adjreq.dataoffs = 0; | ||
322 | adjreq.datalen = nanddev_page_size(nand); | ||
323 | adjreq.databuf.out = spinand->databuf; | ||
324 | adjreq.ooblen = nanddev_per_page_oobsize(nand); | ||
325 | adjreq.ooboffs = 0; | ||
326 | adjreq.oobbuf.out = spinand->oobbuf; | ||
315 | 327 | ||
316 | if (req->datalen) { | 328 | if (req->datalen) |
317 | memcpy(spinand->databuf + req->dataoffs, req->databuf.out, | 329 | memcpy(spinand->databuf + req->dataoffs, req->databuf.out, |
318 | req->datalen); | 330 | req->datalen); |
319 | adjreq.dataoffs = 0; | ||
320 | adjreq.datalen = nanddev_page_size(nand); | ||
321 | adjreq.databuf.out = spinand->databuf; | ||
322 | nbytes = adjreq.datalen; | ||
323 | buf = spinand->databuf; | ||
324 | } | ||
325 | 331 | ||
326 | if (req->ooblen) { | 332 | if (req->ooblen) { |
327 | if (req->mode == MTD_OPS_AUTO_OOB) | 333 | if (req->mode == MTD_OPS_AUTO_OOB) |
@@ -332,14 +338,6 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, | |||
332 | else | 338 | else |
333 | memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, | 339 | memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out, |
334 | req->ooblen); | 340 | req->ooblen); |
335 | |||
336 | adjreq.ooblen = nanddev_per_page_oobsize(nand); | ||
337 | adjreq.ooboffs = 0; | ||
338 | nbytes += nanddev_per_page_oobsize(nand); | ||
339 | if (!buf) { | ||
340 | buf = spinand->oobbuf; | ||
341 | column = nanddev_page_size(nand); | ||
342 | } | ||
343 | } | 341 | } |
344 | 342 | ||
345 | spinand_cache_op_adjust_colum(spinand, &adjreq, &column); | 343 | spinand_cache_op_adjust_colum(spinand, &adjreq, &column); |
@@ -370,8 +368,8 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand, | |||
370 | 368 | ||
371 | /* | 369 | /* |
372 | * We need to use the RANDOM LOAD CACHE operation if there's | 370 | * We need to use the RANDOM LOAD CACHE operation if there's |
373 | * more than one iteration, because the LOAD operation resets | 371 | * more than one iteration, because the LOAD operation might |
374 | * the cache to 0xff. | 372 | * reset the cache to 0xff. |
375 | */ | 373 | */ |
376 | if (nbytes) { | 374 | if (nbytes) { |
377 | column = op.addr.val; | 375 | column = op.addr.val; |
@@ -1018,11 +1016,11 @@ static int spinand_init(struct spinand_device *spinand) | |||
1018 | for (i = 0; i < nand->memorg.ntargets; i++) { | 1016 | for (i = 0; i < nand->memorg.ntargets; i++) { |
1019 | ret = spinand_select_target(spinand, i); | 1017 | ret = spinand_select_target(spinand, i); |
1020 | if (ret) | 1018 | if (ret) |
1021 | goto err_free_bufs; | 1019 | goto err_manuf_cleanup; |
1022 | 1020 | ||
1023 | ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); | 1021 | ret = spinand_lock_block(spinand, BL_ALL_UNLOCKED); |
1024 | if (ret) | 1022 | if (ret) |
1025 | goto err_free_bufs; | 1023 | goto err_manuf_cleanup; |
1026 | } | 1024 | } |
1027 | 1025 | ||
1028 | ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); | 1026 | ret = nanddev_init(nand, &spinand_ops, THIS_MODULE); |
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index edb1c023a753..21bf8ac78380 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -197,9 +197,9 @@ config VXLAN | |||
197 | 197 | ||
198 | config GENEVE | 198 | config GENEVE |
199 | tristate "Generic Network Virtualization Encapsulation" | 199 | tristate "Generic Network Virtualization Encapsulation" |
200 | depends on INET && NET_UDP_TUNNEL | 200 | depends on INET |
201 | depends on IPV6 || !IPV6 | 201 | depends on IPV6 || !IPV6 |
202 | select NET_IP_TUNNEL | 202 | select NET_UDP_TUNNEL |
203 | select GRO_CELLS | 203 | select GRO_CELLS |
204 | ---help--- | 204 | ---help--- |
205 | This allows one to create geneve virtual interfaces that provide | 205 | This allows one to create geneve virtual interfaces that provide |
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c index a0f954f36c09..44e6c7b1b222 100644 --- a/drivers/net/caif/caif_serial.c +++ b/drivers/net/caif/caif_serial.c | |||
@@ -257,10 +257,7 @@ static int handle_tx(struct ser_device *ser) | |||
257 | if (skb->len == 0) { | 257 | if (skb->len == 0) { |
258 | struct sk_buff *tmp = skb_dequeue(&ser->head); | 258 | struct sk_buff *tmp = skb_dequeue(&ser->head); |
259 | WARN_ON(tmp != skb); | 259 | WARN_ON(tmp != skb); |
260 | if (in_interrupt()) | 260 | dev_consume_skb_any(skb); |
261 | dev_kfree_skb_irq(skb); | ||
262 | else | ||
263 | kfree_skb(skb); | ||
264 | } | 261 | } |
265 | } | 262 | } |
266 | /* Send flow off if queue is empty */ | 263 | /* Send flow off if queue is empty */ |
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c index 90f514252987..d9c56a779c08 100644 --- a/drivers/net/dsa/b53/b53_srab.c +++ b/drivers/net/dsa/b53/b53_srab.c | |||
@@ -511,9 +511,6 @@ static void b53_srab_prepare_irq(struct platform_device *pdev) | |||
511 | /* Clear all pending interrupts */ | 511 | /* Clear all pending interrupts */ |
512 | writel(0xffffffff, priv->regs + B53_SRAB_INTR); | 512 | writel(0xffffffff, priv->regs + B53_SRAB_INTR); |
513 | 513 | ||
514 | if (dev->pdata && dev->pdata->chip_id != BCM58XX_DEVICE_ID) | ||
515 | return; | ||
516 | |||
517 | for (i = 0; i < B53_N_PORTS; i++) { | 514 | for (i = 0; i < B53_N_PORTS; i++) { |
518 | port = &priv->port_intrs[i]; | 515 | port = &priv->port_intrs[i]; |
519 | 516 | ||
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 361fbde76654..17ec32b0a1cc 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c | |||
@@ -690,7 +690,7 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds) | |||
690 | * port, the other ones have already been disabled during | 690 | * port, the other ones have already been disabled during |
691 | * bcm_sf2_sw_setup | 691 | * bcm_sf2_sw_setup |
692 | */ | 692 | */ |
693 | for (port = 0; port < DSA_MAX_PORTS; port++) { | 693 | for (port = 0; port < ds->num_ports; port++) { |
694 | if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) | 694 | if (dsa_is_user_port(ds, port) || dsa_is_cpu_port(ds, port)) |
695 | bcm_sf2_port_disable(ds, port, NULL); | 695 | bcm_sf2_port_disable(ds, port, NULL); |
696 | } | 696 | } |
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 8dca2c949e73..12fd7ce3f1ff 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c | |||
@@ -261,6 +261,7 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) | |||
261 | unsigned int sub_irq; | 261 | unsigned int sub_irq; |
262 | unsigned int n; | 262 | unsigned int n; |
263 | u16 reg; | 263 | u16 reg; |
264 | u16 ctl1; | ||
264 | int err; | 265 | int err; |
265 | 266 | ||
266 | mutex_lock(&chip->reg_lock); | 267 | mutex_lock(&chip->reg_lock); |
@@ -270,13 +271,28 @@ static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip) | |||
270 | if (err) | 271 | if (err) |
271 | goto out; | 272 | goto out; |
272 | 273 | ||
273 | for (n = 0; n < chip->g1_irq.nirqs; ++n) { | 274 | do { |
274 | if (reg & (1 << n)) { | 275 | for (n = 0; n < chip->g1_irq.nirqs; ++n) { |
275 | sub_irq = irq_find_mapping(chip->g1_irq.domain, n); | 276 | if (reg & (1 << n)) { |
276 | handle_nested_irq(sub_irq); | 277 | sub_irq = irq_find_mapping(chip->g1_irq.domain, |
277 | ++nhandled; | 278 | n); |
279 | handle_nested_irq(sub_irq); | ||
280 | ++nhandled; | ||
281 | } | ||
278 | } | 282 | } |
279 | } | 283 | |
284 | mutex_lock(&chip->reg_lock); | ||
285 | err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_CTL1, &ctl1); | ||
286 | if (err) | ||
287 | goto unlock; | ||
288 | err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, ®); | ||
289 | unlock: | ||
290 | mutex_unlock(&chip->reg_lock); | ||
291 | if (err) | ||
292 | goto out; | ||
293 | ctl1 &= GENMASK(chip->g1_irq.nirqs, 0); | ||
294 | } while (reg & ctl1); | ||
295 | |||
280 | out: | 296 | out: |
281 | return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); | 297 | return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); |
282 | } | 298 | } |
diff --git a/drivers/net/dsa/mv88e6xxx/global1_atu.c b/drivers/net/dsa/mv88e6xxx/global1_atu.c index 5200e4bdce93..ea243840ee0f 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_atu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_atu.c | |||
@@ -314,6 +314,7 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
314 | { | 314 | { |
315 | struct mv88e6xxx_chip *chip = dev_id; | 315 | struct mv88e6xxx_chip *chip = dev_id; |
316 | struct mv88e6xxx_atu_entry entry; | 316 | struct mv88e6xxx_atu_entry entry; |
317 | int spid; | ||
317 | int err; | 318 | int err; |
318 | u16 val; | 319 | u16 val; |
319 | 320 | ||
@@ -336,6 +337,8 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
336 | if (err) | 337 | if (err) |
337 | goto out; | 338 | goto out; |
338 | 339 | ||
340 | spid = entry.state; | ||
341 | |||
339 | if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { | 342 | if (val & MV88E6XXX_G1_ATU_OP_AGE_OUT_VIOLATION) { |
340 | dev_err_ratelimited(chip->dev, | 343 | dev_err_ratelimited(chip->dev, |
341 | "ATU age out violation for %pM\n", | 344 | "ATU age out violation for %pM\n", |
@@ -344,23 +347,23 @@ static irqreturn_t mv88e6xxx_g1_atu_prob_irq_thread_fn(int irq, void *dev_id) | |||
344 | 347 | ||
345 | if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { | 348 | if (val & MV88E6XXX_G1_ATU_OP_MEMBER_VIOLATION) { |
346 | dev_err_ratelimited(chip->dev, | 349 | dev_err_ratelimited(chip->dev, |
347 | "ATU member violation for %pM portvec %x\n", | 350 | "ATU member violation for %pM portvec %x spid %d\n", |
348 | entry.mac, entry.portvec); | 351 | entry.mac, entry.portvec, spid); |
349 | chip->ports[entry.portvec].atu_member_violation++; | 352 | chip->ports[spid].atu_member_violation++; |
350 | } | 353 | } |
351 | 354 | ||
352 | if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { | 355 | if (val & MV88E6XXX_G1_ATU_OP_MISS_VIOLATION) { |
353 | dev_err_ratelimited(chip->dev, | 356 | dev_err_ratelimited(chip->dev, |
354 | "ATU miss violation for %pM portvec %x\n", | 357 | "ATU miss violation for %pM portvec %x spid %d\n", |
355 | entry.mac, entry.portvec); | 358 | entry.mac, entry.portvec, spid); |
356 | chip->ports[entry.portvec].atu_miss_violation++; | 359 | chip->ports[spid].atu_miss_violation++; |
357 | } | 360 | } |
358 | 361 | ||
359 | if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { | 362 | if (val & MV88E6XXX_G1_ATU_OP_FULL_VIOLATION) { |
360 | dev_err_ratelimited(chip->dev, | 363 | dev_err_ratelimited(chip->dev, |
361 | "ATU full violation for %pM portvec %x\n", | 364 | "ATU full violation for %pM portvec %x spid %d\n", |
362 | entry.mac, entry.portvec); | 365 | entry.mac, entry.portvec, spid); |
363 | chip->ports[entry.portvec].atu_full_violation++; | 366 | chip->ports[spid].atu_full_violation++; |
364 | } | 367 | } |
365 | mutex_unlock(&chip->reg_lock); | 368 | mutex_unlock(&chip->reg_lock); |
366 | 369 | ||
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 2caa8c8b4b55..1bfc5ff8d81d 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c | |||
@@ -664,7 +664,7 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) | |||
664 | if (port < 9) | 664 | if (port < 9) |
665 | return 0; | 665 | return 0; |
666 | 666 | ||
667 | return mv88e6390_serdes_irq_setup(chip, port); | 667 | return mv88e6390x_serdes_irq_setup(chip, port); |
668 | } | 668 | } |
669 | 669 | ||
670 | void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) | 670 | void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) |
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c index 4f11f98347ed..1827ef1f6d55 100644 --- a/drivers/net/ethernet/alteon/acenic.c +++ b/drivers/net/ethernet/alteon/acenic.c | |||
@@ -2059,7 +2059,7 @@ static inline void ace_tx_int(struct net_device *dev, | |||
2059 | if (skb) { | 2059 | if (skb) { |
2060 | dev->stats.tx_packets++; | 2060 | dev->stats.tx_packets++; |
2061 | dev->stats.tx_bytes += skb->len; | 2061 | dev->stats.tx_bytes += skb->len; |
2062 | dev_kfree_skb_irq(skb); | 2062 | dev_consume_skb_irq(skb); |
2063 | info->skb = NULL; | 2063 | info->skb = NULL; |
2064 | } | 2064 | } |
2065 | 2065 | ||
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c index 0fb986ba3290..0ae723f75341 100644 --- a/drivers/net/ethernet/altera/altera_msgdma.c +++ b/drivers/net/ethernet/altera/altera_msgdma.c | |||
@@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv) | |||
145 | & 0xffff; | 145 | & 0xffff; |
146 | 146 | ||
147 | if (inuse) { /* Tx FIFO is not empty */ | 147 | if (inuse) { /* Tx FIFO is not empty */ |
148 | ready = priv->tx_prod - priv->tx_cons - inuse - 1; | 148 | ready = max_t(int, |
149 | priv->tx_prod - priv->tx_cons - inuse - 1, 0); | ||
149 | } else { | 150 | } else { |
150 | /* Check for buffered last packet */ | 151 | /* Check for buffered last packet */ |
151 | status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); | 152 | status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status)); |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index a70bb1bb90e7..a6eacf2099c3 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |||
@@ -2663,11 +2663,6 @@ static int ena_restore_device(struct ena_adapter *adapter) | |||
2663 | goto err_device_destroy; | 2663 | goto err_device_destroy; |
2664 | } | 2664 | } |
2665 | 2665 | ||
2666 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); | ||
2667 | /* Make sure we don't have a race with AENQ Links state handler */ | ||
2668 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) | ||
2669 | netif_carrier_on(adapter->netdev); | ||
2670 | |||
2671 | rc = ena_enable_msix_and_set_admin_interrupts(adapter, | 2666 | rc = ena_enable_msix_and_set_admin_interrupts(adapter, |
2672 | adapter->num_queues); | 2667 | adapter->num_queues); |
2673 | if (rc) { | 2668 | if (rc) { |
@@ -2684,6 +2679,11 @@ static int ena_restore_device(struct ena_adapter *adapter) | |||
2684 | } | 2679 | } |
2685 | 2680 | ||
2686 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); | 2681 | set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags); |
2682 | |||
2683 | clear_bit(ENA_FLAG_ONGOING_RESET, &adapter->flags); | ||
2684 | if (test_bit(ENA_FLAG_LINK_UP, &adapter->flags)) | ||
2685 | netif_carrier_on(adapter->netdev); | ||
2686 | |||
2687 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); | 2687 | mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ)); |
2688 | dev_err(&pdev->dev, | 2688 | dev_err(&pdev->dev, |
2689 | "Device reset completed successfully, Driver info: %s\n", | 2689 | "Device reset completed successfully, Driver info: %s\n", |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h index dc8b6173d8d8..63870072cbbd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.h +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h | |||
@@ -45,7 +45,7 @@ | |||
45 | 45 | ||
46 | #define DRV_MODULE_VER_MAJOR 2 | 46 | #define DRV_MODULE_VER_MAJOR 2 |
47 | #define DRV_MODULE_VER_MINOR 0 | 47 | #define DRV_MODULE_VER_MINOR 0 |
48 | #define DRV_MODULE_VER_SUBMINOR 2 | 48 | #define DRV_MODULE_VER_SUBMINOR 3 |
49 | 49 | ||
50 | #define DRV_MODULE_NAME "ena" | 50 | #define DRV_MODULE_NAME "ena" |
51 | #ifndef DRV_MODULE_VERSION | 51 | #ifndef DRV_MODULE_VERSION |
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index a90080f12e67..e548c0ae2e00 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c | |||
@@ -666,7 +666,7 @@ static int amd8111e_tx(struct net_device *dev) | |||
666 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], | 666 | pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[tx_index], |
667 | lp->tx_skbuff[tx_index]->len, | 667 | lp->tx_skbuff[tx_index]->len, |
668 | PCI_DMA_TODEVICE); | 668 | PCI_DMA_TODEVICE); |
669 | dev_kfree_skb_irq (lp->tx_skbuff[tx_index]); | 669 | dev_consume_skb_irq(lp->tx_skbuff[tx_index]); |
670 | lp->tx_skbuff[tx_index] = NULL; | 670 | lp->tx_skbuff[tx_index] = NULL; |
671 | lp->tx_dma_addr[tx_index] = 0; | 671 | lp->tx_dma_addr[tx_index] = 0; |
672 | } | 672 | } |
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c index 6a8e2567f2bd..4d3855ceb500 100644 --- a/drivers/net/ethernet/apple/bmac.c +++ b/drivers/net/ethernet/apple/bmac.c | |||
@@ -777,7 +777,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) | |||
777 | 777 | ||
778 | if (bp->tx_bufs[bp->tx_empty]) { | 778 | if (bp->tx_bufs[bp->tx_empty]) { |
779 | ++dev->stats.tx_packets; | 779 | ++dev->stats.tx_packets; |
780 | dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); | 780 | dev_consume_skb_irq(bp->tx_bufs[bp->tx_empty]); |
781 | } | 781 | } |
782 | bp->tx_bufs[bp->tx_empty] = NULL; | 782 | bp->tx_bufs[bp->tx_empty] = NULL; |
783 | bp->tx_fullup = 0; | 783 | bp->tx_fullup = 0; |
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index f44808959ff3..97ab0dd25552 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c | |||
@@ -638,7 +638,7 @@ static void b44_tx(struct b44 *bp) | |||
638 | bytes_compl += skb->len; | 638 | bytes_compl += skb->len; |
639 | pkts_compl++; | 639 | pkts_compl++; |
640 | 640 | ||
641 | dev_kfree_skb_irq(skb); | 641 | dev_consume_skb_irq(skb); |
642 | } | 642 | } |
643 | 643 | ||
644 | netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); | 644 | netdev_completed_queue(bp->dev, pkts_compl, bytes_compl); |
@@ -1012,7 +1012,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1012 | } | 1012 | } |
1013 | 1013 | ||
1014 | skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len); | 1014 | skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len); |
1015 | dev_kfree_skb_any(skb); | 1015 | dev_consume_skb_any(skb); |
1016 | skb = bounce_skb; | 1016 | skb = bounce_skb; |
1017 | } | 1017 | } |
1018 | 1018 | ||
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index f9521d0274b7..28c9b0bdf2f6 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c | |||
@@ -520,7 +520,6 @@ static void bcm_sysport_get_wol(struct net_device *dev, | |||
520 | struct ethtool_wolinfo *wol) | 520 | struct ethtool_wolinfo *wol) |
521 | { | 521 | { |
522 | struct bcm_sysport_priv *priv = netdev_priv(dev); | 522 | struct bcm_sysport_priv *priv = netdev_priv(dev); |
523 | u32 reg; | ||
524 | 523 | ||
525 | wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; | 524 | wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER; |
526 | wol->wolopts = priv->wolopts; | 525 | wol->wolopts = priv->wolopts; |
@@ -528,11 +527,7 @@ static void bcm_sysport_get_wol(struct net_device *dev, | |||
528 | if (!(priv->wolopts & WAKE_MAGICSECURE)) | 527 | if (!(priv->wolopts & WAKE_MAGICSECURE)) |
529 | return; | 528 | return; |
530 | 529 | ||
531 | /* Return the programmed SecureOn password */ | 530 | memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass)); |
532 | reg = umac_readl(priv, UMAC_PSW_MS); | ||
533 | put_unaligned_be16(reg, &wol->sopass[0]); | ||
534 | reg = umac_readl(priv, UMAC_PSW_LS); | ||
535 | put_unaligned_be32(reg, &wol->sopass[2]); | ||
536 | } | 531 | } |
537 | 532 | ||
538 | static int bcm_sysport_set_wol(struct net_device *dev, | 533 | static int bcm_sysport_set_wol(struct net_device *dev, |
@@ -548,13 +543,8 @@ static int bcm_sysport_set_wol(struct net_device *dev, | |||
548 | if (wol->wolopts & ~supported) | 543 | if (wol->wolopts & ~supported) |
549 | return -EINVAL; | 544 | return -EINVAL; |
550 | 545 | ||
551 | /* Program the SecureOn password */ | 546 | if (wol->wolopts & WAKE_MAGICSECURE) |
552 | if (wol->wolopts & WAKE_MAGICSECURE) { | 547 | memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass)); |
553 | umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), | ||
554 | UMAC_PSW_MS); | ||
555 | umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), | ||
556 | UMAC_PSW_LS); | ||
557 | } | ||
558 | 548 | ||
559 | /* Flag the device and relevant IRQ as wakeup capable */ | 549 | /* Flag the device and relevant IRQ as wakeup capable */ |
560 | if (wol->wolopts) { | 550 | if (wol->wolopts) { |
@@ -2649,13 +2639,18 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv) | |||
2649 | unsigned int index, i = 0; | 2639 | unsigned int index, i = 0; |
2650 | u32 reg; | 2640 | u32 reg; |
2651 | 2641 | ||
2652 | /* Password has already been programmed */ | ||
2653 | reg = umac_readl(priv, UMAC_MPD_CTRL); | 2642 | reg = umac_readl(priv, UMAC_MPD_CTRL); |
2654 | if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) | 2643 | if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE)) |
2655 | reg |= MPD_EN; | 2644 | reg |= MPD_EN; |
2656 | reg &= ~PSW_EN; | 2645 | reg &= ~PSW_EN; |
2657 | if (priv->wolopts & WAKE_MAGICSECURE) | 2646 | if (priv->wolopts & WAKE_MAGICSECURE) { |
2647 | /* Program the SecureOn password */ | ||
2648 | umac_writel(priv, get_unaligned_be16(&priv->sopass[0]), | ||
2649 | UMAC_PSW_MS); | ||
2650 | umac_writel(priv, get_unaligned_be32(&priv->sopass[2]), | ||
2651 | UMAC_PSW_LS); | ||
2658 | reg |= PSW_EN; | 2652 | reg |= PSW_EN; |
2653 | } | ||
2659 | umac_writel(priv, reg, UMAC_MPD_CTRL); | 2654 | umac_writel(priv, reg, UMAC_MPD_CTRL); |
2660 | 2655 | ||
2661 | if (priv->wolopts & WAKE_FILTER) { | 2656 | if (priv->wolopts & WAKE_FILTER) { |
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index 0887e6356649..0b192fea9c5d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #define __BCM_SYSPORT_H | 12 | #define __BCM_SYSPORT_H |
13 | 13 | ||
14 | #include <linux/bitmap.h> | 14 | #include <linux/bitmap.h> |
15 | #include <linux/ethtool.h> | ||
15 | #include <linux/if_vlan.h> | 16 | #include <linux/if_vlan.h> |
16 | #include <linux/net_dim.h> | 17 | #include <linux/net_dim.h> |
17 | 18 | ||
@@ -778,6 +779,7 @@ struct bcm_sysport_priv { | |||
778 | unsigned int crc_fwd:1; | 779 | unsigned int crc_fwd:1; |
779 | u16 rev; | 780 | u16 rev; |
780 | u32 wolopts; | 781 | u32 wolopts; |
782 | u8 sopass[SOPASS_MAX]; | ||
781 | unsigned int wol_irq_disabled:1; | 783 | unsigned int wol_irq_disabled:1; |
782 | 784 | ||
783 | /* MIB related fields */ | 785 | /* MIB related fields */ |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 6a512871176b..8bc7e495b027 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -4973,12 +4973,18 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) | |||
4973 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; | 4973 | struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; |
4974 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; | 4974 | struct bnxt_ring_struct *ring = &cpr->cp_ring_struct; |
4975 | u32 map_idx = ring->map_idx; | 4975 | u32 map_idx = ring->map_idx; |
4976 | unsigned int vector; | ||
4976 | 4977 | ||
4978 | vector = bp->irq_tbl[map_idx].vector; | ||
4979 | disable_irq_nosync(vector); | ||
4977 | rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); | 4980 | rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); |
4978 | if (rc) | 4981 | if (rc) { |
4982 | enable_irq(vector); | ||
4979 | goto err_out; | 4983 | goto err_out; |
4984 | } | ||
4980 | bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); | 4985 | bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id); |
4981 | bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); | 4986 | bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons); |
4987 | enable_irq(vector); | ||
4982 | bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; | 4988 | bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id; |
4983 | 4989 | ||
4984 | if (!i) { | 4990 | if (!i) { |
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 5db9f4158e62..134ae2862efa 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c | |||
@@ -1288,7 +1288,7 @@ static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, | |||
1288 | * for transmits, we just free buffers. | 1288 | * for transmits, we just free buffers. |
1289 | */ | 1289 | */ |
1290 | 1290 | ||
1291 | dev_kfree_skb_irq(sb); | 1291 | dev_consume_skb_irq(sb); |
1292 | 1292 | ||
1293 | /* | 1293 | /* |
1294 | * .. and advance to the next buffer. | 1294 | * .. and advance to the next buffer. |
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 3d45f4c92cf6..9bbaad9f3d63 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h | |||
@@ -643,6 +643,7 @@ | |||
643 | #define MACB_CAPS_JUMBO 0x00000020 | 643 | #define MACB_CAPS_JUMBO 0x00000020 |
644 | #define MACB_CAPS_GEM_HAS_PTP 0x00000040 | 644 | #define MACB_CAPS_GEM_HAS_PTP 0x00000040 |
645 | #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 | 645 | #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 |
646 | #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 | ||
646 | #define MACB_CAPS_FIFO_MODE 0x10000000 | 647 | #define MACB_CAPS_FIFO_MODE 0x10000000 |
647 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 | 648 | #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 |
648 | #define MACB_CAPS_SG_DISABLED 0x40000000 | 649 | #define MACB_CAPS_SG_DISABLED 0x40000000 |
@@ -1214,6 +1215,8 @@ struct macb { | |||
1214 | 1215 | ||
1215 | int rx_bd_rd_prefetch; | 1216 | int rx_bd_rd_prefetch; |
1216 | int tx_bd_rd_prefetch; | 1217 | int tx_bd_rd_prefetch; |
1218 | |||
1219 | u32 rx_intr_mask; | ||
1217 | }; | 1220 | }; |
1218 | 1221 | ||
1219 | #ifdef CONFIG_MACB_USE_HWSTAMP | 1222 | #ifdef CONFIG_MACB_USE_HWSTAMP |
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 66cc7927061a..2b2882615e8b 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c | |||
@@ -56,8 +56,7 @@ | |||
56 | /* level of occupied TX descriptors under which we wake up TX process */ | 56 | /* level of occupied TX descriptors under which we wake up TX process */ |
57 | #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) | 57 | #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4) |
58 | 58 | ||
59 | #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ | 59 | #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR)) |
60 | | MACB_BIT(ISR_ROVR)) | ||
61 | #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ | 60 | #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \ |
62 | | MACB_BIT(ISR_RLE) \ | 61 | | MACB_BIT(ISR_RLE) \ |
63 | | MACB_BIT(TXERR)) | 62 | | MACB_BIT(TXERR)) |
@@ -1270,7 +1269,7 @@ static int macb_poll(struct napi_struct *napi, int budget) | |||
1270 | queue_writel(queue, ISR, MACB_BIT(RCOMP)); | 1269 | queue_writel(queue, ISR, MACB_BIT(RCOMP)); |
1271 | napi_reschedule(napi); | 1270 | napi_reschedule(napi); |
1272 | } else { | 1271 | } else { |
1273 | queue_writel(queue, IER, MACB_RX_INT_FLAGS); | 1272 | queue_writel(queue, IER, bp->rx_intr_mask); |
1274 | } | 1273 | } |
1275 | } | 1274 | } |
1276 | 1275 | ||
@@ -1288,7 +1287,7 @@ static void macb_hresp_error_task(unsigned long data) | |||
1288 | u32 ctrl; | 1287 | u32 ctrl; |
1289 | 1288 | ||
1290 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { | 1289 | for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { |
1291 | queue_writel(queue, IDR, MACB_RX_INT_FLAGS | | 1290 | queue_writel(queue, IDR, bp->rx_intr_mask | |
1292 | MACB_TX_INT_FLAGS | | 1291 | MACB_TX_INT_FLAGS | |
1293 | MACB_BIT(HRESP)); | 1292 | MACB_BIT(HRESP)); |
1294 | } | 1293 | } |
@@ -1318,7 +1317,7 @@ static void macb_hresp_error_task(unsigned long data) | |||
1318 | 1317 | ||
1319 | /* Enable interrupts */ | 1318 | /* Enable interrupts */ |
1320 | queue_writel(queue, IER, | 1319 | queue_writel(queue, IER, |
1321 | MACB_RX_INT_FLAGS | | 1320 | bp->rx_intr_mask | |
1322 | MACB_TX_INT_FLAGS | | 1321 | MACB_TX_INT_FLAGS | |
1323 | MACB_BIT(HRESP)); | 1322 | MACB_BIT(HRESP)); |
1324 | } | 1323 | } |
@@ -1372,14 +1371,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
1372 | (unsigned int)(queue - bp->queues), | 1371 | (unsigned int)(queue - bp->queues), |
1373 | (unsigned long)status); | 1372 | (unsigned long)status); |
1374 | 1373 | ||
1375 | if (status & MACB_RX_INT_FLAGS) { | 1374 | if (status & bp->rx_intr_mask) { |
1376 | /* There's no point taking any more interrupts | 1375 | /* There's no point taking any more interrupts |
1377 | * until we have processed the buffers. The | 1376 | * until we have processed the buffers. The |
1378 | * scheduling call may fail if the poll routine | 1377 | * scheduling call may fail if the poll routine |
1379 | * is already scheduled, so disable interrupts | 1378 | * is already scheduled, so disable interrupts |
1380 | * now. | 1379 | * now. |
1381 | */ | 1380 | */ |
1382 | queue_writel(queue, IDR, MACB_RX_INT_FLAGS); | 1381 | queue_writel(queue, IDR, bp->rx_intr_mask); |
1383 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) | 1382 | if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) |
1384 | queue_writel(queue, ISR, MACB_BIT(RCOMP)); | 1383 | queue_writel(queue, ISR, MACB_BIT(RCOMP)); |
1385 | 1384 | ||
@@ -1412,8 +1411,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) | |||
1412 | /* There is a hardware issue under heavy load where DMA can | 1411 | /* There is a hardware issue under heavy load where DMA can |
1413 | * stop, this causes endless "used buffer descriptor read" | 1412 | * stop, this causes endless "used buffer descriptor read" |
1414 | * interrupts but it can be cleared by re-enabling RX. See | 1413 | * interrupts but it can be cleared by re-enabling RX. See |
1415 | * the at91 manual, section 41.3.1 or the Zynq manual | 1414 | * the at91rm9200 manual, section 41.3.1 or the Zynq manual |
1416 | * section 16.7.4 for details. | 1415 | * section 16.7.4 for details. RXUBR is only enabled for |
1416 | * these two versions. | ||
1417 | */ | 1417 | */ |
1418 | if (status & MACB_BIT(RXUBR)) { | 1418 | if (status & MACB_BIT(RXUBR)) { |
1419 | ctrl = macb_readl(bp, NCR); | 1419 | ctrl = macb_readl(bp, NCR); |
@@ -2259,7 +2259,7 @@ static void macb_init_hw(struct macb *bp) | |||
2259 | 2259 | ||
2260 | /* Enable interrupts */ | 2260 | /* Enable interrupts */ |
2261 | queue_writel(queue, IER, | 2261 | queue_writel(queue, IER, |
2262 | MACB_RX_INT_FLAGS | | 2262 | bp->rx_intr_mask | |
2263 | MACB_TX_INT_FLAGS | | 2263 | MACB_TX_INT_FLAGS | |
2264 | MACB_BIT(HRESP)); | 2264 | MACB_BIT(HRESP)); |
2265 | } | 2265 | } |
@@ -3907,6 +3907,7 @@ static const struct macb_config sama5d4_config = { | |||
3907 | }; | 3907 | }; |
3908 | 3908 | ||
3909 | static const struct macb_config emac_config = { | 3909 | static const struct macb_config emac_config = { |
3910 | .caps = MACB_CAPS_NEEDS_RSTONUBR, | ||
3910 | .clk_init = at91ether_clk_init, | 3911 | .clk_init = at91ether_clk_init, |
3911 | .init = at91ether_init, | 3912 | .init = at91ether_init, |
3912 | }; | 3913 | }; |
@@ -3928,7 +3929,8 @@ static const struct macb_config zynqmp_config = { | |||
3928 | }; | 3929 | }; |
3929 | 3930 | ||
3930 | static const struct macb_config zynq_config = { | 3931 | static const struct macb_config zynq_config = { |
3931 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF, | 3932 | .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF | |
3933 | MACB_CAPS_NEEDS_RSTONUBR, | ||
3932 | .dma_burst_length = 16, | 3934 | .dma_burst_length = 16, |
3933 | .clk_init = macb_clk_init, | 3935 | .clk_init = macb_clk_init, |
3934 | .init = macb_init, | 3936 | .init = macb_init, |
@@ -4083,6 +4085,10 @@ static int macb_probe(struct platform_device *pdev) | |||
4083 | macb_dma_desc_get_size(bp); | 4085 | macb_dma_desc_get_size(bp); |
4084 | } | 4086 | } |
4085 | 4087 | ||
4088 | bp->rx_intr_mask = MACB_RX_INT_FLAGS; | ||
4089 | if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR) | ||
4090 | bp->rx_intr_mask |= MACB_BIT(RXUBR); | ||
4091 | |||
4086 | mac = of_get_mac_address(np); | 4092 | mac = of_get_mac_address(np); |
4087 | if (mac) { | 4093 | if (mac) { |
4088 | ether_addr_copy(bp->dev->dev_addr, mac); | 4094 | ether_addr_copy(bp->dev->dev_addr, mac); |
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 5f03199a3acf..05f4a3b21e29 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig | |||
@@ -54,7 +54,6 @@ config CAVIUM_PTP | |||
54 | tristate "Cavium PTP coprocessor as PTP clock" | 54 | tristate "Cavium PTP coprocessor as PTP clock" |
55 | depends on 64BIT && PCI | 55 | depends on 64BIT && PCI |
56 | imply PTP_1588_CLOCK | 56 | imply PTP_1588_CLOCK |
57 | default y | ||
58 | ---help--- | 57 | ---help--- |
59 | This driver adds support for the Precision Time Protocol Clocks and | 58 | This driver adds support for the Precision Time Protocol Clocks and |
60 | Timestamping coprocessor (PTP) found on Cavium processors. | 59 | Timestamping coprocessor (PTP) found on Cavium processors. |
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 60641e202534..9a7f70db20c7 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c | |||
@@ -1434,7 +1434,8 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq, | |||
1434 | * csum is correct or is zero. | 1434 | * csum is correct or is zero. |
1435 | */ | 1435 | */ |
1436 | if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && | 1436 | if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc && |
1437 | tcp_udp_csum_ok && ipv4_csum_ok && outer_csum_ok) { | 1437 | tcp_udp_csum_ok && outer_csum_ok && |
1438 | (ipv4_csum_ok || ipv6)) { | ||
1438 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 1439 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
1439 | skb->csum_level = encap; | 1440 | skb->csum_level = encap; |
1440 | } | 1441 | } |
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index 13430f75496c..f1a2da15dd0a 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c | |||
@@ -585,7 +585,7 @@ static void de_tx (struct de_private *de) | |||
585 | netif_dbg(de, tx_done, de->dev, | 585 | netif_dbg(de, tx_done, de->dev, |
586 | "tx done, slot %d\n", tx_tail); | 586 | "tx done, slot %d\n", tx_tail); |
587 | } | 587 | } |
588 | dev_kfree_skb_irq(skb); | 588 | dev_consume_skb_irq(skb); |
589 | } | 589 | } |
590 | 590 | ||
591 | next: | 591 | next: |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 2370dc204202..697c2427f2b7 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -2098,6 +2098,7 @@ static int fec_enet_get_regs_len(struct net_device *ndev) | |||
2098 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ | 2098 | #if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ |
2099 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ | 2099 | defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ |
2100 | defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) | 2100 | defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) |
2101 | static __u32 fec_enet_register_version = 2; | ||
2101 | static u32 fec_enet_register_offset[] = { | 2102 | static u32 fec_enet_register_offset[] = { |
2102 | FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, | 2103 | FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, |
2103 | FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, | 2104 | FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, |
@@ -2128,6 +2129,7 @@ static u32 fec_enet_register_offset[] = { | |||
2128 | IEEE_R_FDXFC, IEEE_R_OCTETS_OK | 2129 | IEEE_R_FDXFC, IEEE_R_OCTETS_OK |
2129 | }; | 2130 | }; |
2130 | #else | 2131 | #else |
2132 | static __u32 fec_enet_register_version = 1; | ||
2131 | static u32 fec_enet_register_offset[] = { | 2133 | static u32 fec_enet_register_offset[] = { |
2132 | FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, | 2134 | FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0, |
2133 | FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, | 2135 | FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0, |
@@ -2149,6 +2151,8 @@ static void fec_enet_get_regs(struct net_device *ndev, | |||
2149 | u32 *buf = (u32 *)regbuf; | 2151 | u32 *buf = (u32 *)regbuf; |
2150 | u32 i, off; | 2152 | u32 i, off; |
2151 | 2153 | ||
2154 | regs->version = fec_enet_register_version; | ||
2155 | |||
2152 | memset(buf, 0, regs->len); | 2156 | memset(buf, 0, regs->len); |
2153 | 2157 | ||
2154 | for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { | 2158 | for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { |
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index b90bab72efdb..c1968b3ecec8 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c | |||
@@ -369,7 +369,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id) | |||
369 | dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, | 369 | dma_unmap_single(dev->dev.parent, bd->skb_pa, skb->len, |
370 | DMA_TO_DEVICE); | 370 | DMA_TO_DEVICE); |
371 | 371 | ||
372 | dev_kfree_skb_irq(skb); | 372 | dev_consume_skb_irq(skb); |
373 | } | 373 | } |
374 | spin_unlock(&priv->lock); | 374 | spin_unlock(&priv->lock); |
375 | 375 | ||
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index c3d539e209ed..eb3e65e8868f 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c | |||
@@ -1879,6 +1879,8 @@ static void ucc_geth_free_tx(struct ucc_geth_private *ugeth) | |||
1879 | u16 i, j; | 1879 | u16 i, j; |
1880 | u8 __iomem *bd; | 1880 | u8 __iomem *bd; |
1881 | 1881 | ||
1882 | netdev_reset_queue(ugeth->ndev); | ||
1883 | |||
1882 | ug_info = ugeth->ug_info; | 1884 | ug_info = ugeth->ug_info; |
1883 | uf_info = &ug_info->uf_info; | 1885 | uf_info = &ug_info->uf_info; |
1884 | 1886 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c index 3b9e74be5fbd..b8155f5e71b4 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | |||
@@ -3081,6 +3081,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) | |||
3081 | dsaf_dev = dev_get_drvdata(&pdev->dev); | 3081 | dsaf_dev = dev_get_drvdata(&pdev->dev); |
3082 | if (!dsaf_dev) { | 3082 | if (!dsaf_dev) { |
3083 | dev_err(&pdev->dev, "dsaf_dev is NULL\n"); | 3083 | dev_err(&pdev->dev, "dsaf_dev is NULL\n"); |
3084 | put_device(&pdev->dev); | ||
3084 | return -ENODEV; | 3085 | return -ENODEV; |
3085 | } | 3086 | } |
3086 | 3087 | ||
@@ -3088,6 +3089,7 @@ int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset) | |||
3088 | if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { | 3089 | if (AE_IS_VER1(dsaf_dev->dsaf_ver)) { |
3089 | dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", | 3090 | dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n", |
3090 | dsaf_dev->ae_dev.name); | 3091 | dsaf_dev->ae_dev.name); |
3092 | put_device(&pdev->dev); | ||
3091 | return -ENODEV; | 3093 | return -ENODEV; |
3092 | } | 3094 | } |
3093 | 3095 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 5b33238c6680..60e7d7ae3787 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c | |||
@@ -2418,6 +2418,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev) | |||
2418 | out_notify_fail: | 2418 | out_notify_fail: |
2419 | (void)cancel_work_sync(&priv->service_task); | 2419 | (void)cancel_work_sync(&priv->service_task); |
2420 | out_read_prop_fail: | 2420 | out_read_prop_fail: |
2421 | /* safe for ACPI FW */ | ||
2422 | of_node_put(to_of_node(priv->fwnode)); | ||
2421 | free_netdev(ndev); | 2423 | free_netdev(ndev); |
2422 | return ret; | 2424 | return ret; |
2423 | } | 2425 | } |
@@ -2447,6 +2449,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev) | |||
2447 | set_bit(NIC_STATE_REMOVING, &priv->state); | 2449 | set_bit(NIC_STATE_REMOVING, &priv->state); |
2448 | (void)cancel_work_sync(&priv->service_task); | 2450 | (void)cancel_work_sync(&priv->service_task); |
2449 | 2451 | ||
2452 | /* safe for ACPI FW */ | ||
2453 | of_node_put(to_of_node(priv->fwnode)); | ||
2454 | |||
2450 | free_netdev(ndev); | 2455 | free_netdev(ndev); |
2451 | return 0; | 2456 | return 0; |
2452 | } | 2457 | } |
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 8e9b95871d30..ce15d2350db9 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | |||
@@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev) | |||
1157 | */ | 1157 | */ |
1158 | static int hns_nic_nway_reset(struct net_device *netdev) | 1158 | static int hns_nic_nway_reset(struct net_device *netdev) |
1159 | { | 1159 | { |
1160 | int ret = 0; | ||
1161 | struct phy_device *phy = netdev->phydev; | 1160 | struct phy_device *phy = netdev->phydev; |
1162 | 1161 | ||
1163 | if (netif_running(netdev)) { | 1162 | if (!netif_running(netdev)) |
1164 | /* if autoneg is disabled, don't restart auto-negotiation */ | 1163 | return 0; |
1165 | if (phy && phy->autoneg == AUTONEG_ENABLE) | ||
1166 | ret = genphy_restart_aneg(phy); | ||
1167 | } | ||
1168 | 1164 | ||
1169 | return ret; | 1165 | if (!phy) |
1166 | return -EOPNOTSUPP; | ||
1167 | |||
1168 | if (phy->autoneg != AUTONEG_ENABLE) | ||
1169 | return -EINVAL; | ||
1170 | |||
1171 | return genphy_restart_aneg(phy); | ||
1170 | } | 1172 | } |
1171 | 1173 | ||
1172 | static u32 | 1174 | static u32 |
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c index 017e08452d8c..baf5cc251f32 100644 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c | |||
@@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum) | |||
321 | } | 321 | } |
322 | 322 | ||
323 | hns_mdio_cmd_write(mdio_dev, is_c45, | 323 | hns_mdio_cmd_write(mdio_dev, is_c45, |
324 | MDIO_C45_WRITE_ADDR, phy_id, devad); | 324 | MDIO_C45_READ, phy_id, devad); |
325 | } | 325 | } |
326 | 326 | ||
327 | /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/ | 327 | /* Step 5: waitting for MDIO_COMMAND_REG 's mdio_start==0,*/ |
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index d719668a6684..92929750f832 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c | |||
@@ -1310,7 +1310,7 @@ static irqreturn_t i596_interrupt(int irq, void *dev_id) | |||
1310 | dev->stats.tx_aborted_errors++; | 1310 | dev->stats.tx_aborted_errors++; |
1311 | } | 1311 | } |
1312 | 1312 | ||
1313 | dev_kfree_skb_irq(skb); | 1313 | dev_consume_skb_irq(skb); |
1314 | 1314 | ||
1315 | tx_cmd->cmd.command = 0; /* Mark free */ | 1315 | tx_cmd->cmd.command = 0; /* Mark free */ |
1316 | break; | 1316 | break; |
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 04fd1f135011..654ac534b10e 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -152,8 +152,10 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs, | |||
152 | memset(p, 0, regs->len); | 152 | memset(p, 0, regs->len); |
153 | memcpy_fromio(p, io, B3_RAM_ADDR); | 153 | memcpy_fromio(p, io, B3_RAM_ADDR); |
154 | 154 | ||
155 | memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, | 155 | if (regs->len > B3_RI_WTO_R1) { |
156 | regs->len - B3_RI_WTO_R1); | 156 | memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, |
157 | regs->len - B3_RI_WTO_R1); | ||
158 | } | ||
157 | } | 159 | } |
158 | 160 | ||
159 | /* Wake on Lan only supported on Yukon chips with rev 1 or above */ | 161 | /* Wake on Lan only supported on Yukon chips with rev 1 or above */ |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 9a0881cb7f51..6c01314e87b0 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c | |||
@@ -617,6 +617,8 @@ static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb, | |||
617 | } | 617 | } |
618 | #endif | 618 | #endif |
619 | 619 | ||
620 | #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN) | ||
621 | |||
620 | /* We reach this function only after checking that any of | 622 | /* We reach this function only after checking that any of |
621 | * the (IPv4 | IPv6) bits are set in cqe->status. | 623 | * the (IPv4 | IPv6) bits are set in cqe->status. |
622 | */ | 624 | */ |
@@ -624,9 +626,20 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, | |||
624 | netdev_features_t dev_features) | 626 | netdev_features_t dev_features) |
625 | { | 627 | { |
626 | __wsum hw_checksum = 0; | 628 | __wsum hw_checksum = 0; |
629 | void *hdr; | ||
630 | |||
631 | /* CQE csum doesn't cover padding octets in short ethernet | ||
632 | * frames. And the pad field is appended prior to calculating | ||
633 | * and appending the FCS field. | ||
634 | * | ||
635 | * Detecting these padded frames requires to verify and parse | ||
636 | * IP headers, so we simply force all those small frames to skip | ||
637 | * checksum complete. | ||
638 | */ | ||
639 | if (short_frame(skb->len)) | ||
640 | return -EINVAL; | ||
627 | 641 | ||
628 | void *hdr = (u8 *)va + sizeof(struct ethhdr); | 642 | hdr = (u8 *)va + sizeof(struct ethhdr); |
629 | |||
630 | hw_checksum = csum_unfold((__force __sum16)cqe->checksum); | 643 | hw_checksum = csum_unfold((__force __sum16)cqe->checksum); |
631 | 644 | ||
632 | if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && | 645 | if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) && |
@@ -819,6 +832,11 @@ xdp_drop_no_cnt: | |||
819 | skb_record_rx_queue(skb, cq_ring); | 832 | skb_record_rx_queue(skb, cq_ring); |
820 | 833 | ||
821 | if (likely(dev->features & NETIF_F_RXCSUM)) { | 834 | if (likely(dev->features & NETIF_F_RXCSUM)) { |
835 | /* TODO: For IP non TCP/UDP packets when csum complete is | ||
836 | * not an option (not supported or any other reason) we can | ||
837 | * actually check cqe IPOK status bit and report | ||
838 | * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE | ||
839 | */ | ||
822 | if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | | 840 | if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP | |
823 | MLX4_CQE_STATUS_UDP)) && | 841 | MLX4_CQE_STATUS_UDP)) && |
824 | (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && | 842 | (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) && |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 3e0fa8a8077b..e267ff93e8a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -1583,6 +1583,24 @@ no_trig: | |||
1583 | spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); | 1583 | spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); |
1584 | } | 1584 | } |
1585 | 1585 | ||
1586 | void mlx5_cmd_flush(struct mlx5_core_dev *dev) | ||
1587 | { | ||
1588 | struct mlx5_cmd *cmd = &dev->cmd; | ||
1589 | int i; | ||
1590 | |||
1591 | for (i = 0; i < cmd->max_reg_cmds; i++) | ||
1592 | while (down_trylock(&cmd->sem)) | ||
1593 | mlx5_cmd_trigger_completions(dev); | ||
1594 | |||
1595 | while (down_trylock(&cmd->pages_sem)) | ||
1596 | mlx5_cmd_trigger_completions(dev); | ||
1597 | |||
1598 | /* Unlock cmdif */ | ||
1599 | up(&cmd->pages_sem); | ||
1600 | for (i = 0; i < cmd->max_reg_cmds; i++) | ||
1601 | up(&cmd->sem); | ||
1602 | } | ||
1603 | |||
1586 | static int status_to_err(u8 status) | 1604 | static int status_to_err(u8 status) |
1587 | { | 1605 | { |
1588 | return status ? -1 : 0; /* TBD more meaningful codes */ | 1606 | return status ? -1 : 0; /* TBD more meaningful codes */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 8fa8fdd30b85..448a92561567 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h | |||
@@ -657,6 +657,7 @@ struct mlx5e_channel_stats { | |||
657 | enum { | 657 | enum { |
658 | MLX5E_STATE_OPENED, | 658 | MLX5E_STATE_OPENED, |
659 | MLX5E_STATE_DESTROYING, | 659 | MLX5E_STATE_DESTROYING, |
660 | MLX5E_STATE_XDP_TX_ENABLED, | ||
660 | }; | 661 | }; |
661 | 662 | ||
662 | struct mlx5e_rqt { | 663 | struct mlx5e_rqt { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 046948ead152..f3c7ab6faea5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | |||
@@ -256,6 +256,7 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, | |||
256 | e->m_neigh.family = n->ops->family; | 256 | e->m_neigh.family = n->ops->family; |
257 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); | 257 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); |
258 | e->out_dev = out_dev; | 258 | e->out_dev = out_dev; |
259 | e->route_dev = route_dev; | ||
259 | 260 | ||
260 | /* It's important to add the neigh to the hash table before checking | 261 | /* It's important to add the neigh to the hash table before checking |
261 | * the neigh validity state. So if we'll get a notification, in case the | 262 | * the neigh validity state. So if we'll get a notification, in case the |
@@ -369,6 +370,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, | |||
369 | e->m_neigh.family = n->ops->family; | 370 | e->m_neigh.family = n->ops->family; |
370 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); | 371 | memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); |
371 | e->out_dev = out_dev; | 372 | e->out_dev = out_dev; |
373 | e->route_dev = route_dev; | ||
372 | 374 | ||
373 | /* It's importent to add the neigh to the hash table before checking | 375 | /* It's importent to add the neigh to the hash table before checking |
374 | * the neigh validity state. So if we'll get a notification, in case the | 376 | * the neigh validity state. So if we'll get a notification, in case the |
@@ -612,16 +614,18 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, | |||
612 | struct mlx5_flow_spec *spec, | 614 | struct mlx5_flow_spec *spec, |
613 | struct tc_cls_flower_offload *f, | 615 | struct tc_cls_flower_offload *f, |
614 | void *headers_c, | 616 | void *headers_c, |
615 | void *headers_v) | 617 | void *headers_v, u8 *match_level) |
616 | { | 618 | { |
617 | int tunnel_type; | 619 | int tunnel_type; |
618 | int err = 0; | 620 | int err = 0; |
619 | 621 | ||
620 | tunnel_type = mlx5e_tc_tun_get_type(filter_dev); | 622 | tunnel_type = mlx5e_tc_tun_get_type(filter_dev); |
621 | if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { | 623 | if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { |
624 | *match_level = MLX5_MATCH_L4; | ||
622 | err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, | 625 | err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, |
623 | headers_c, headers_v); | 626 | headers_c, headers_v); |
624 | } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { | 627 | } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { |
628 | *match_level = MLX5_MATCH_L3; | ||
625 | err = mlx5e_tc_tun_parse_gretap(priv, spec, f, | 629 | err = mlx5e_tc_tun_parse_gretap(priv, spec, f, |
626 | headers_c, headers_v); | 630 | headers_c, headers_v); |
627 | } else { | 631 | } else { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h index 706ce7bf15e7..b63f15de899d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h | |||
@@ -39,6 +39,6 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, | |||
39 | struct mlx5_flow_spec *spec, | 39 | struct mlx5_flow_spec *spec, |
40 | struct tc_cls_flower_offload *f, | 40 | struct tc_cls_flower_offload *f, |
41 | void *headers_c, | 41 | void *headers_c, |
42 | void *headers_v); | 42 | void *headers_v, u8 *match_level); |
43 | 43 | ||
44 | #endif //__MLX5_EN_TC_TUNNEL_H__ | 44 | #endif //__MLX5_EN_TC_TUNNEL_H__ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 3740177eed09..03b2a9f9c589 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | |||
@@ -365,7 +365,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | |||
365 | int sq_num; | 365 | int sq_num; |
366 | int i; | 366 | int i; |
367 | 367 | ||
368 | if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) | 368 | /* this flag is sufficient, no need to test internal sq state */ |
369 | if (unlikely(!mlx5e_xdp_tx_is_enabled(priv))) | ||
369 | return -ENETDOWN; | 370 | return -ENETDOWN; |
370 | 371 | ||
371 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) | 372 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) |
@@ -378,9 +379,6 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | |||
378 | 379 | ||
379 | sq = &priv->channels.c[sq_num]->xdpsq; | 380 | sq = &priv->channels.c[sq_num]->xdpsq; |
380 | 381 | ||
381 | if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) | ||
382 | return -ENETDOWN; | ||
383 | |||
384 | for (i = 0; i < n; i++) { | 382 | for (i = 0; i < n; i++) { |
385 | struct xdp_frame *xdpf = frames[i]; | 383 | struct xdp_frame *xdpf = frames[i]; |
386 | struct mlx5e_xdp_info xdpi; | 384 | struct mlx5e_xdp_info xdpi; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 3a67cb3cd179..ee27a7c8cd87 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h | |||
@@ -50,6 +50,23 @@ void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq); | |||
50 | int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, | 50 | int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, |
51 | u32 flags); | 51 | u32 flags); |
52 | 52 | ||
53 | static inline void mlx5e_xdp_tx_enable(struct mlx5e_priv *priv) | ||
54 | { | ||
55 | set_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); | ||
56 | } | ||
57 | |||
58 | static inline void mlx5e_xdp_tx_disable(struct mlx5e_priv *priv) | ||
59 | { | ||
60 | clear_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); | ||
61 | /* let other device's napi(s) see our new state */ | ||
62 | synchronize_rcu(); | ||
63 | } | ||
64 | |||
65 | static inline bool mlx5e_xdp_tx_is_enabled(struct mlx5e_priv *priv) | ||
66 | { | ||
67 | return test_bit(MLX5E_STATE_XDP_TX_ENABLED, &priv->state); | ||
68 | } | ||
69 | |||
53 | static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) | 70 | static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) |
54 | { | 71 | { |
55 | if (sq->doorbell_cseg) { | 72 | if (sq->doorbell_cseg) { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3bbccead2f63..47233b9a4f81 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | |||
@@ -354,9 +354,6 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, | |||
354 | 354 | ||
355 | new_channels.params = priv->channels.params; | 355 | new_channels.params = priv->channels.params; |
356 | new_channels.params.num_channels = count; | 356 | new_channels.params.num_channels = count; |
357 | if (!netif_is_rxfh_configured(priv->netdev)) | ||
358 | mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, | ||
359 | MLX5E_INDIR_RQT_SIZE, count); | ||
360 | 357 | ||
361 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { | 358 | if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { |
362 | priv->channels.params = new_channels.params; | 359 | priv->channels.params = new_channels.params; |
@@ -372,6 +369,10 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, | |||
372 | if (arfs_enabled) | 369 | if (arfs_enabled) |
373 | mlx5e_arfs_disable(priv); | 370 | mlx5e_arfs_disable(priv); |
374 | 371 | ||
372 | if (!netif_is_rxfh_configured(priv->netdev)) | ||
373 | mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, | ||
374 | MLX5E_INDIR_RQT_SIZE, count); | ||
375 | |||
375 | /* Switch to new channels, set new parameters and close old ones */ | 376 | /* Switch to new channels, set new parameters and close old ones */ |
376 | mlx5e_switch_priv_channels(priv, &new_channels, NULL); | 377 | mlx5e_switch_priv_channels(priv, &new_channels, NULL); |
377 | 378 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 8cfd2ec7c0a2..93e50ccd44c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -950,7 +950,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c, | |||
950 | if (params->rx_dim_enabled) | 950 | if (params->rx_dim_enabled) |
951 | __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); | 951 | __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state); |
952 | 952 | ||
953 | if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) | 953 | if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)) |
954 | __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); | 954 | __set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state); |
955 | 955 | ||
956 | return 0; | 956 | return 0; |
@@ -2938,6 +2938,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) | |||
2938 | 2938 | ||
2939 | mlx5e_build_tx2sq_maps(priv); | 2939 | mlx5e_build_tx2sq_maps(priv); |
2940 | mlx5e_activate_channels(&priv->channels); | 2940 | mlx5e_activate_channels(&priv->channels); |
2941 | mlx5e_xdp_tx_enable(priv); | ||
2941 | netif_tx_start_all_queues(priv->netdev); | 2942 | netif_tx_start_all_queues(priv->netdev); |
2942 | 2943 | ||
2943 | if (mlx5e_is_vport_rep(priv)) | 2944 | if (mlx5e_is_vport_rep(priv)) |
@@ -2959,6 +2960,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) | |||
2959 | */ | 2960 | */ |
2960 | netif_tx_stop_all_queues(priv->netdev); | 2961 | netif_tx_stop_all_queues(priv->netdev); |
2961 | netif_tx_disable(priv->netdev); | 2962 | netif_tx_disable(priv->netdev); |
2963 | mlx5e_xdp_tx_disable(priv); | ||
2962 | mlx5e_deactivate_channels(&priv->channels); | 2964 | mlx5e_deactivate_channels(&priv->channels); |
2963 | } | 2965 | } |
2964 | 2966 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 04736212a21c..ef9e472daffb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -596,6 +596,10 @@ static void mlx5e_rep_update_flows(struct mlx5e_priv *priv, | |||
596 | if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { | 596 | if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) { |
597 | ether_addr_copy(e->h_dest, ha); | 597 | ether_addr_copy(e->h_dest, ha); |
598 | ether_addr_copy(eth->h_dest, ha); | 598 | ether_addr_copy(eth->h_dest, ha); |
599 | /* Update the encap source mac, in case that we delete | ||
600 | * the flows when encap source mac changed. | ||
601 | */ | ||
602 | ether_addr_copy(eth->h_source, e->route_dev->dev_addr); | ||
599 | 603 | ||
600 | mlx5e_tc_encap_flows_add(priv, e); | 604 | mlx5e_tc_encap_flows_add(priv, e); |
601 | } | 605 | } |
@@ -1126,9 +1130,17 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev, | |||
1126 | struct mlx5e_priv *priv = netdev_priv(dev); | 1130 | struct mlx5e_priv *priv = netdev_priv(dev); |
1127 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | 1131 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
1128 | struct mlx5_eswitch_rep *rep = rpriv->rep; | 1132 | struct mlx5_eswitch_rep *rep = rpriv->rep; |
1129 | int ret; | 1133 | int ret, pf_num; |
1134 | |||
1135 | ret = mlx5_lag_get_pf_num(priv->mdev, &pf_num); | ||
1136 | if (ret) | ||
1137 | return ret; | ||
1138 | |||
1139 | if (rep->vport == FDB_UPLINK_VPORT) | ||
1140 | ret = snprintf(buf, len, "p%d", pf_num); | ||
1141 | else | ||
1142 | ret = snprintf(buf, len, "pf%dvf%d", pf_num, rep->vport - 1); | ||
1130 | 1143 | ||
1131 | ret = snprintf(buf, len, "%d", rep->vport - 1); | ||
1132 | if (ret >= len) | 1144 | if (ret >= len) |
1133 | return -EOPNOTSUPP; | 1145 | return -EOPNOTSUPP; |
1134 | 1146 | ||
@@ -1285,6 +1297,18 @@ static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr) | |||
1285 | return 0; | 1297 | return 0; |
1286 | } | 1298 | } |
1287 | 1299 | ||
1300 | static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos, | ||
1301 | __be16 vlan_proto) | ||
1302 | { | ||
1303 | netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n"); | ||
1304 | |||
1305 | if (vlan != 0) | ||
1306 | return -EOPNOTSUPP; | ||
1307 | |||
1308 | /* allow setting 0-vid for compatibility with libvirt */ | ||
1309 | return 0; | ||
1310 | } | ||
1311 | |||
1288 | static const struct switchdev_ops mlx5e_rep_switchdev_ops = { | 1312 | static const struct switchdev_ops mlx5e_rep_switchdev_ops = { |
1289 | .switchdev_port_attr_get = mlx5e_attr_get, | 1313 | .switchdev_port_attr_get = mlx5e_attr_get, |
1290 | }; | 1314 | }; |
@@ -1319,6 +1343,7 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { | |||
1319 | .ndo_set_vf_rate = mlx5e_set_vf_rate, | 1343 | .ndo_set_vf_rate = mlx5e_set_vf_rate, |
1320 | .ndo_get_vf_config = mlx5e_get_vf_config, | 1344 | .ndo_get_vf_config = mlx5e_get_vf_config, |
1321 | .ndo_get_vf_stats = mlx5e_get_vf_stats, | 1345 | .ndo_get_vf_stats = mlx5e_get_vf_stats, |
1346 | .ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan, | ||
1322 | }; | 1347 | }; |
1323 | 1348 | ||
1324 | bool mlx5e_eswitch_rep(struct net_device *netdev) | 1349 | bool mlx5e_eswitch_rep(struct net_device *netdev) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index edd722824697..36eafc877e6b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | |||
@@ -148,6 +148,7 @@ struct mlx5e_encap_entry { | |||
148 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ | 148 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ |
149 | 149 | ||
150 | struct net_device *out_dev; | 150 | struct net_device *out_dev; |
151 | struct net_device *route_dev; | ||
151 | int tunnel_type; | 152 | int tunnel_type; |
152 | int tunnel_hlen; | 153 | int tunnel_hlen; |
153 | int reformat_type; | 154 | int reformat_type; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index cae6c6d48984..b5c1b039375a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -128,6 +128,7 @@ struct mlx5e_tc_flow_parse_attr { | |||
128 | struct net_device *filter_dev; | 128 | struct net_device *filter_dev; |
129 | struct mlx5_flow_spec spec; | 129 | struct mlx5_flow_spec spec; |
130 | int num_mod_hdr_actions; | 130 | int num_mod_hdr_actions; |
131 | int max_mod_hdr_actions; | ||
131 | void *mod_hdr_actions; | 132 | void *mod_hdr_actions; |
132 | int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; | 133 | int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; |
133 | }; | 134 | }; |
@@ -1302,7 +1303,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, | |||
1302 | static int parse_tunnel_attr(struct mlx5e_priv *priv, | 1303 | static int parse_tunnel_attr(struct mlx5e_priv *priv, |
1303 | struct mlx5_flow_spec *spec, | 1304 | struct mlx5_flow_spec *spec, |
1304 | struct tc_cls_flower_offload *f, | 1305 | struct tc_cls_flower_offload *f, |
1305 | struct net_device *filter_dev) | 1306 | struct net_device *filter_dev, u8 *match_level) |
1306 | { | 1307 | { |
1307 | struct netlink_ext_ack *extack = f->common.extack; | 1308 | struct netlink_ext_ack *extack = f->common.extack; |
1308 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, | 1309 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, |
@@ -1317,7 +1318,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, | |||
1317 | int err = 0; | 1318 | int err = 0; |
1318 | 1319 | ||
1319 | err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, | 1320 | err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, |
1320 | headers_c, headers_v); | 1321 | headers_c, headers_v, match_level); |
1321 | if (err) { | 1322 | if (err) { |
1322 | NL_SET_ERR_MSG_MOD(extack, | 1323 | NL_SET_ERR_MSG_MOD(extack, |
1323 | "failed to parse tunnel attributes"); | 1324 | "failed to parse tunnel attributes"); |
@@ -1426,7 +1427,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
1426 | struct mlx5_flow_spec *spec, | 1427 | struct mlx5_flow_spec *spec, |
1427 | struct tc_cls_flower_offload *f, | 1428 | struct tc_cls_flower_offload *f, |
1428 | struct net_device *filter_dev, | 1429 | struct net_device *filter_dev, |
1429 | u8 *match_level) | 1430 | u8 *match_level, u8 *tunnel_match_level) |
1430 | { | 1431 | { |
1431 | struct netlink_ext_ack *extack = f->common.extack; | 1432 | struct netlink_ext_ack *extack = f->common.extack; |
1432 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, | 1433 | void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, |
@@ -1477,7 +1478,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, | |||
1477 | switch (key->addr_type) { | 1478 | switch (key->addr_type) { |
1478 | case FLOW_DISSECTOR_KEY_IPV4_ADDRS: | 1479 | case FLOW_DISSECTOR_KEY_IPV4_ADDRS: |
1479 | case FLOW_DISSECTOR_KEY_IPV6_ADDRS: | 1480 | case FLOW_DISSECTOR_KEY_IPV6_ADDRS: |
1480 | if (parse_tunnel_attr(priv, spec, f, filter_dev)) | 1481 | if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level)) |
1481 | return -EOPNOTSUPP; | 1482 | return -EOPNOTSUPP; |
1482 | break; | 1483 | break; |
1483 | default: | 1484 | default: |
@@ -1826,11 +1827,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv, | |||
1826 | struct mlx5_core_dev *dev = priv->mdev; | 1827 | struct mlx5_core_dev *dev = priv->mdev; |
1827 | struct mlx5_eswitch *esw = dev->priv.eswitch; | 1828 | struct mlx5_eswitch *esw = dev->priv.eswitch; |
1828 | struct mlx5e_rep_priv *rpriv = priv->ppriv; | 1829 | struct mlx5e_rep_priv *rpriv = priv->ppriv; |
1830 | u8 match_level, tunnel_match_level = MLX5_MATCH_NONE; | ||
1829 | struct mlx5_eswitch_rep *rep; | 1831 | struct mlx5_eswitch_rep *rep; |
1830 | u8 match_level; | ||
1831 | int err; | 1832 | int err; |
1832 | 1833 | ||
1833 | err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level); | 1834 | err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level); |
1834 | 1835 | ||
1835 | if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { | 1836 | if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { |
1836 | rep = rpriv->rep; | 1837 | rep = rpriv->rep; |
@@ -1846,10 +1847,12 @@ static int parse_cls_flower(struct mlx5e_priv *priv, | |||
1846 | } | 1847 | } |
1847 | } | 1848 | } |
1848 | 1849 | ||
1849 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) | 1850 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { |
1850 | flow->esw_attr->match_level = match_level; | 1851 | flow->esw_attr->match_level = match_level; |
1851 | else | 1852 | flow->esw_attr->tunnel_match_level = tunnel_match_level; |
1853 | } else { | ||
1852 | flow->nic_attr->match_level = match_level; | 1854 | flow->nic_attr->match_level = match_level; |
1855 | } | ||
1853 | 1856 | ||
1854 | return err; | 1857 | return err; |
1855 | } | 1858 | } |
@@ -1934,9 +1937,9 @@ static struct mlx5_fields fields[] = { | |||
1934 | OFFLOAD(UDP_DPORT, 2, udp.dest, 0), | 1937 | OFFLOAD(UDP_DPORT, 2, udp.dest, 0), |
1935 | }; | 1938 | }; |
1936 | 1939 | ||
1937 | /* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at | 1940 | /* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at |
1938 | * max from the SW pedit action. On success, it says how many HW actions were | 1941 | * max from the SW pedit action. On success, attr->num_mod_hdr_actions |
1939 | * actually parsed. | 1942 | * says how many HW actions were actually parsed. |
1940 | */ | 1943 | */ |
1941 | static int offload_pedit_fields(struct pedit_headers *masks, | 1944 | static int offload_pedit_fields(struct pedit_headers *masks, |
1942 | struct pedit_headers *vals, | 1945 | struct pedit_headers *vals, |
@@ -1960,9 +1963,11 @@ static int offload_pedit_fields(struct pedit_headers *masks, | |||
1960 | add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; | 1963 | add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD]; |
1961 | 1964 | ||
1962 | action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); | 1965 | action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto); |
1963 | action = parse_attr->mod_hdr_actions; | 1966 | action = parse_attr->mod_hdr_actions + |
1964 | max_actions = parse_attr->num_mod_hdr_actions; | 1967 | parse_attr->num_mod_hdr_actions * action_size; |
1965 | nactions = 0; | 1968 | |
1969 | max_actions = parse_attr->max_mod_hdr_actions; | ||
1970 | nactions = parse_attr->num_mod_hdr_actions; | ||
1966 | 1971 | ||
1967 | for (i = 0; i < ARRAY_SIZE(fields); i++) { | 1972 | for (i = 0; i < ARRAY_SIZE(fields); i++) { |
1968 | f = &fields[i]; | 1973 | f = &fields[i]; |
@@ -2073,7 +2078,7 @@ static int alloc_mod_hdr_actions(struct mlx5e_priv *priv, | |||
2073 | if (!parse_attr->mod_hdr_actions) | 2078 | if (!parse_attr->mod_hdr_actions) |
2074 | return -ENOMEM; | 2079 | return -ENOMEM; |
2075 | 2080 | ||
2076 | parse_attr->num_mod_hdr_actions = max_actions; | 2081 | parse_attr->max_mod_hdr_actions = max_actions; |
2077 | return 0; | 2082 | return 0; |
2078 | } | 2083 | } |
2079 | 2084 | ||
@@ -2119,9 +2124,11 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv, | |||
2119 | goto out_err; | 2124 | goto out_err; |
2120 | } | 2125 | } |
2121 | 2126 | ||
2122 | err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); | 2127 | if (!parse_attr->mod_hdr_actions) { |
2123 | if (err) | 2128 | err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr); |
2124 | goto out_err; | 2129 | if (err) |
2130 | goto out_err; | ||
2131 | } | ||
2125 | 2132 | ||
2126 | err = offload_pedit_fields(masks, vals, parse_attr, extack); | 2133 | err = offload_pedit_fields(masks, vals, parse_attr, extack); |
2127 | if (err < 0) | 2134 | if (err < 0) |
@@ -2179,6 +2186,7 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, | |||
2179 | 2186 | ||
2180 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | 2187 | static bool modify_header_match_supported(struct mlx5_flow_spec *spec, |
2181 | struct tcf_exts *exts, | 2188 | struct tcf_exts *exts, |
2189 | u32 actions, | ||
2182 | struct netlink_ext_ack *extack) | 2190 | struct netlink_ext_ack *extack) |
2183 | { | 2191 | { |
2184 | const struct tc_action *a; | 2192 | const struct tc_action *a; |
@@ -2188,7 +2196,11 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, | |||
2188 | u16 ethertype; | 2196 | u16 ethertype; |
2189 | int nkeys, i; | 2197 | int nkeys, i; |
2190 | 2198 | ||
2191 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); | 2199 | if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) |
2200 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers); | ||
2201 | else | ||
2202 | headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); | ||
2203 | |||
2192 | ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); | 2204 | ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype); |
2193 | 2205 | ||
2194 | /* for non-IP we only re-write MACs, so we're okay */ | 2206 | /* for non-IP we only re-write MACs, so we're okay */ |
@@ -2245,7 +2257,7 @@ static bool actions_match_supported(struct mlx5e_priv *priv, | |||
2245 | 2257 | ||
2246 | if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) | 2258 | if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) |
2247 | return modify_header_match_supported(&parse_attr->spec, exts, | 2259 | return modify_header_match_supported(&parse_attr->spec, exts, |
2248 | extack); | 2260 | actions, extack); |
2249 | 2261 | ||
2250 | return true; | 2262 | return true; |
2251 | } | 2263 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 598ad7e4d5c9..0e55cd1f2e98 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
@@ -387,8 +387,14 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
387 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); | 387 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); |
388 | contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); | 388 | contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); |
389 | if (unlikely(contig_wqebbs_room < num_wqebbs)) { | 389 | if (unlikely(contig_wqebbs_room < num_wqebbs)) { |
390 | #ifdef CONFIG_MLX5_EN_IPSEC | ||
391 | struct mlx5_wqe_eth_seg cur_eth = wqe->eth; | ||
392 | #endif | ||
390 | mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); | 393 | mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); |
391 | mlx5e_sq_fetch_wqe(sq, &wqe, &pi); | 394 | mlx5e_sq_fetch_wqe(sq, &wqe, &pi); |
395 | #ifdef CONFIG_MLX5_EN_IPSEC | ||
396 | wqe->eth = cur_eth; | ||
397 | #endif | ||
392 | } | 398 | } |
393 | 399 | ||
394 | /* fill wqe */ | 400 | /* fill wqe */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index a44ea7b85614..5b492b67f4e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | |||
@@ -1134,13 +1134,6 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw, | |||
1134 | int err = 0; | 1134 | int err = 0; |
1135 | u8 *smac_v; | 1135 | u8 *smac_v; |
1136 | 1136 | ||
1137 | if (vport->info.spoofchk && !is_valid_ether_addr(vport->info.mac)) { | ||
1138 | mlx5_core_warn(esw->dev, | ||
1139 | "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n", | ||
1140 | vport->vport); | ||
1141 | return -EPERM; | ||
1142 | } | ||
1143 | |||
1144 | esw_vport_cleanup_ingress_rules(esw, vport); | 1137 | esw_vport_cleanup_ingress_rules(esw, vport); |
1145 | 1138 | ||
1146 | if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { | 1139 | if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) { |
@@ -1728,7 +1721,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) | |||
1728 | int vport_num; | 1721 | int vport_num; |
1729 | int err; | 1722 | int err; |
1730 | 1723 | ||
1731 | if (!MLX5_ESWITCH_MANAGER(dev)) | 1724 | if (!MLX5_VPORT_MANAGER(dev)) |
1732 | return 0; | 1725 | return 0; |
1733 | 1726 | ||
1734 | esw_info(dev, | 1727 | esw_info(dev, |
@@ -1797,7 +1790,7 @@ abort: | |||
1797 | 1790 | ||
1798 | void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) | 1791 | void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) |
1799 | { | 1792 | { |
1800 | if (!esw || !MLX5_ESWITCH_MANAGER(esw->dev)) | 1793 | if (!esw || !MLX5_VPORT_MANAGER(esw->dev)) |
1801 | return; | 1794 | return; |
1802 | 1795 | ||
1803 | esw_info(esw->dev, "cleanup\n"); | 1796 | esw_info(esw->dev, "cleanup\n"); |
@@ -1827,13 +1820,10 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, | |||
1827 | mutex_lock(&esw->state_lock); | 1820 | mutex_lock(&esw->state_lock); |
1828 | evport = &esw->vports[vport]; | 1821 | evport = &esw->vports[vport]; |
1829 | 1822 | ||
1830 | if (evport->info.spoofchk && !is_valid_ether_addr(mac)) { | 1823 | if (evport->info.spoofchk && !is_valid_ether_addr(mac)) |
1831 | mlx5_core_warn(esw->dev, | 1824 | mlx5_core_warn(esw->dev, |
1832 | "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n", | 1825 | "Set invalid MAC while spoofchk is on, vport(%d)\n", |
1833 | vport); | 1826 | vport); |
1834 | err = -EPERM; | ||
1835 | goto unlock; | ||
1836 | } | ||
1837 | 1827 | ||
1838 | err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); | 1828 | err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac); |
1839 | if (err) { | 1829 | if (err) { |
@@ -1979,6 +1969,10 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, | |||
1979 | evport = &esw->vports[vport]; | 1969 | evport = &esw->vports[vport]; |
1980 | pschk = evport->info.spoofchk; | 1970 | pschk = evport->info.spoofchk; |
1981 | evport->info.spoofchk = spoofchk; | 1971 | evport->info.spoofchk = spoofchk; |
1972 | if (pschk && !is_valid_ether_addr(evport->info.mac)) | ||
1973 | mlx5_core_warn(esw->dev, | ||
1974 | "Spoofchk in set while MAC is invalid, vport(%d)\n", | ||
1975 | evport->vport); | ||
1982 | if (evport->enabled && esw->mode == SRIOV_LEGACY) | 1976 | if (evport->enabled && esw->mode == SRIOV_LEGACY) |
1983 | err = esw_vport_ingress_config(esw, evport); | 1977 | err = esw_vport_ingress_config(esw, evport); |
1984 | if (err) | 1978 | if (err) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 9c89eea9b2c3..748ff178a1d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | |||
@@ -312,6 +312,7 @@ struct mlx5_esw_flow_attr { | |||
312 | } dests[MLX5_MAX_FLOW_FWD_VPORTS]; | 312 | } dests[MLX5_MAX_FLOW_FWD_VPORTS]; |
313 | u32 mod_hdr_id; | 313 | u32 mod_hdr_id; |
314 | u8 match_level; | 314 | u8 match_level; |
315 | u8 tunnel_match_level; | ||
315 | struct mlx5_fc *counter; | 316 | struct mlx5_fc *counter; |
316 | u32 chain; | 317 | u32 chain; |
317 | u16 prio; | 318 | u16 prio; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 53065b6ae593..d4e6fe5b9300 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -160,14 +160,15 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, | |||
160 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, | 160 | MLX5_SET_TO_ONES(fte_match_set_misc, misc, |
161 | source_eswitch_owner_vhca_id); | 161 | source_eswitch_owner_vhca_id); |
162 | 162 | ||
163 | if (attr->match_level == MLX5_MATCH_NONE) | 163 | spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; |
164 | spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; | 164 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { |
165 | else | 165 | if (attr->tunnel_match_level != MLX5_MATCH_NONE) |
166 | spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | | 166 | spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; |
167 | MLX5_MATCH_MISC_PARAMETERS; | 167 | if (attr->match_level != MLX5_MATCH_NONE) |
168 | 168 | spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; | |
169 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) | 169 | } else if (attr->match_level != MLX5_MATCH_NONE) { |
170 | spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS; | 170 | spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS; |
171 | } | ||
171 | 172 | ||
172 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) | 173 | if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) |
173 | flow_act.modify_id = attr->mod_hdr_id; | 174 | flow_act.modify_id = attr->mod_hdr_id; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index fbc42b7252a9..503035469d2d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c | |||
@@ -211,11 +211,10 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data | |||
211 | enum port_module_event_status_type module_status; | 211 | enum port_module_event_status_type module_status; |
212 | enum port_module_event_error_type error_type; | 212 | enum port_module_event_error_type error_type; |
213 | struct mlx5_eqe_port_module *module_event_eqe; | 213 | struct mlx5_eqe_port_module *module_event_eqe; |
214 | const char *status_str, *error_str; | 214 | const char *status_str; |
215 | u8 module_num; | 215 | u8 module_num; |
216 | 216 | ||
217 | module_event_eqe = &eqe->data.port_module; | 217 | module_event_eqe = &eqe->data.port_module; |
218 | module_num = module_event_eqe->module; | ||
219 | module_status = module_event_eqe->module_status & | 218 | module_status = module_event_eqe->module_status & |
220 | PORT_MODULE_EVENT_MODULE_STATUS_MASK; | 219 | PORT_MODULE_EVENT_MODULE_STATUS_MASK; |
221 | error_type = module_event_eqe->error_type & | 220 | error_type = module_event_eqe->error_type & |
@@ -223,25 +222,27 @@ static int port_module(struct notifier_block *nb, unsigned long type, void *data | |||
223 | 222 | ||
224 | if (module_status < MLX5_MODULE_STATUS_NUM) | 223 | if (module_status < MLX5_MODULE_STATUS_NUM) |
225 | events->pme_stats.status_counters[module_status]++; | 224 | events->pme_stats.status_counters[module_status]++; |
226 | status_str = mlx5_pme_status_to_string(module_status); | ||
227 | 225 | ||
228 | if (module_status == MLX5_MODULE_STATUS_ERROR) { | 226 | if (module_status == MLX5_MODULE_STATUS_ERROR) |
229 | if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) | 227 | if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) |
230 | events->pme_stats.error_counters[error_type]++; | 228 | events->pme_stats.error_counters[error_type]++; |
231 | error_str = mlx5_pme_error_to_string(error_type); | ||
232 | } | ||
233 | 229 | ||
234 | if (!printk_ratelimit()) | 230 | if (!printk_ratelimit()) |
235 | return NOTIFY_OK; | 231 | return NOTIFY_OK; |
236 | 232 | ||
237 | if (module_status == MLX5_MODULE_STATUS_ERROR) | 233 | module_num = module_event_eqe->module; |
234 | status_str = mlx5_pme_status_to_string(module_status); | ||
235 | if (module_status == MLX5_MODULE_STATUS_ERROR) { | ||
236 | const char *error_str = mlx5_pme_error_to_string(error_type); | ||
237 | |||
238 | mlx5_core_err(events->dev, | 238 | mlx5_core_err(events->dev, |
239 | "Port module event[error]: module %u, %s, %s\n", | 239 | "Port module event[error]: module %u, %s, %s\n", |
240 | module_num, status_str, error_str); | 240 | module_num, status_str, error_str); |
241 | else | 241 | } else { |
242 | mlx5_core_info(events->dev, | 242 | mlx5_core_info(events->dev, |
243 | "Port module event: module %u, %s\n", | 243 | "Port module event: module %u, %s\n", |
244 | module_num, status_str); | 244 | module_num, status_str); |
245 | } | ||
245 | 246 | ||
246 | return NOTIFY_OK; | 247 | return NOTIFY_OK; |
247 | } | 248 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 196c07383082..cb9fa3430c53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -103,7 +103,7 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) | |||
103 | mlx5_core_err(dev, "start\n"); | 103 | mlx5_core_err(dev, "start\n"); |
104 | if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { | 104 | if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { |
105 | dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; | 105 | dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; |
106 | mlx5_cmd_trigger_completions(dev); | 106 | mlx5_cmd_flush(dev); |
107 | } | 107 | } |
108 | 108 | ||
109 | mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); | 109 | mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 3a6baed722d8..2d223385dc81 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c | |||
@@ -616,6 +616,27 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev) | |||
616 | } | 616 | } |
617 | } | 617 | } |
618 | 618 | ||
619 | int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num) | ||
620 | { | ||
621 | struct mlx5_lag *ldev; | ||
622 | int n; | ||
623 | |||
624 | ldev = mlx5_lag_dev_get(dev); | ||
625 | if (!ldev) { | ||
626 | mlx5_core_warn(dev, "no lag device, can't get pf num\n"); | ||
627 | return -EINVAL; | ||
628 | } | ||
629 | |||
630 | for (n = 0; n < MLX5_MAX_PORTS; n++) | ||
631 | if (ldev->pf[n].dev == dev) { | ||
632 | *pf_num = n; | ||
633 | return 0; | ||
634 | } | ||
635 | |||
636 | mlx5_core_warn(dev, "wasn't able to locate pf in the lag device\n"); | ||
637 | return -EINVAL; | ||
638 | } | ||
639 | |||
619 | /* Must be called with intf_mutex held */ | 640 | /* Must be called with intf_mutex held */ |
620 | void mlx5_lag_remove(struct mlx5_core_dev *dev) | 641 | void mlx5_lag_remove(struct mlx5_core_dev *dev) |
621 | { | 642 | { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index c68dcea5985b..4fdac020b795 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | |||
@@ -126,6 +126,7 @@ u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, | |||
126 | struct ptp_system_timestamp *sts); | 126 | struct ptp_system_timestamp *sts); |
127 | 127 | ||
128 | void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); | 128 | void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); |
129 | void mlx5_cmd_flush(struct mlx5_core_dev *dev); | ||
129 | int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); | 130 | int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); |
130 | void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); | 131 | void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); |
131 | 132 | ||
@@ -187,6 +188,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) | |||
187 | MLX5_CAP_GEN(dev, lag_master); | 188 | MLX5_CAP_GEN(dev, lag_master); |
188 | } | 189 | } |
189 | 190 | ||
191 | int mlx5_lag_get_pf_num(struct mlx5_core_dev *dev, int *pf_num); | ||
192 | |||
190 | void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); | 193 | void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); |
191 | void mlx5_lag_update(struct mlx5_core_dev *dev); | 194 | void mlx5_lag_update(struct mlx5_core_dev *dev); |
192 | 195 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 388f205a497f..370ca94b6775 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c | |||
@@ -44,14 +44,15 @@ static struct mlx5_core_rsc_common * | |||
44 | mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) | 44 | mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) |
45 | { | 45 | { |
46 | struct mlx5_core_rsc_common *common; | 46 | struct mlx5_core_rsc_common *common; |
47 | unsigned long flags; | ||
47 | 48 | ||
48 | spin_lock(&table->lock); | 49 | spin_lock_irqsave(&table->lock, flags); |
49 | 50 | ||
50 | common = radix_tree_lookup(&table->tree, rsn); | 51 | common = radix_tree_lookup(&table->tree, rsn); |
51 | if (common) | 52 | if (common) |
52 | atomic_inc(&common->refcount); | 53 | atomic_inc(&common->refcount); |
53 | 54 | ||
54 | spin_unlock(&table->lock); | 55 | spin_unlock_irqrestore(&table->lock, flags); |
55 | 56 | ||
56 | return common; | 57 | return common; |
57 | } | 58 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index 24a90163775e..2d8a77cc156b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h | |||
@@ -53,7 +53,7 @@ | |||
53 | extern const struct qed_common_ops qed_common_ops_pass; | 53 | extern const struct qed_common_ops qed_common_ops_pass; |
54 | 54 | ||
55 | #define QED_MAJOR_VERSION 8 | 55 | #define QED_MAJOR_VERSION 8 |
56 | #define QED_MINOR_VERSION 33 | 56 | #define QED_MINOR_VERSION 37 |
57 | #define QED_REVISION_VERSION 0 | 57 | #define QED_REVISION_VERSION 0 |
58 | #define QED_ENGINEERING_VERSION 20 | 58 | #define QED_ENGINEERING_VERSION 20 |
59 | 59 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 8f6551421945..2ecaaaa4469a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c | |||
@@ -795,19 +795,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn, | |||
795 | 795 | ||
796 | /* get pq index according to PQ_FLAGS */ | 796 | /* get pq index according to PQ_FLAGS */ |
797 | static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, | 797 | static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn, |
798 | u32 pq_flags) | 798 | unsigned long pq_flags) |
799 | { | 799 | { |
800 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; | 800 | struct qed_qm_info *qm_info = &p_hwfn->qm_info; |
801 | 801 | ||
802 | /* Can't have multiple flags set here */ | 802 | /* Can't have multiple flags set here */ |
803 | if (bitmap_weight((unsigned long *)&pq_flags, | 803 | if (bitmap_weight(&pq_flags, |
804 | sizeof(pq_flags) * BITS_PER_BYTE) > 1) { | 804 | sizeof(pq_flags) * BITS_PER_BYTE) > 1) { |
805 | DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags); | 805 | DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags); |
806 | goto err; | 806 | goto err; |
807 | } | 807 | } |
808 | 808 | ||
809 | if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { | 809 | if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) { |
810 | DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags); | 810 | DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags); |
811 | goto err; | 811 | goto err; |
812 | } | 812 | } |
813 | 813 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 67c02ea93906..58be1c4c6668 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c | |||
@@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn, | |||
609 | (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && | 609 | (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) && |
610 | !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); | 610 | !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED))); |
611 | 611 | ||
612 | SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, | ||
613 | (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) && | ||
614 | !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED))); | ||
615 | |||
612 | SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, | 616 | SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, |
613 | !!(accept_filter & QED_ACCEPT_BCAST)); | 617 | !!(accept_filter & QED_ACCEPT_BCAST)); |
614 | 618 | ||
@@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, | |||
744 | return rc; | 748 | return rc; |
745 | } | 749 | } |
746 | 750 | ||
751 | if (p_params->update_ctl_frame_check) { | ||
752 | p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en; | ||
753 | p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en; | ||
754 | } | ||
755 | |||
747 | /* Update mcast bins for VFs, PF doesn't use this functionality */ | 756 | /* Update mcast bins for VFs, PF doesn't use this functionality */ |
748 | qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); | 757 | qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); |
749 | 758 | ||
@@ -2207,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, | |||
2207 | u16 num_queues = 0; | 2216 | u16 num_queues = 0; |
2208 | 2217 | ||
2209 | /* Since the feature controls only queue-zones, | 2218 | /* Since the feature controls only queue-zones, |
2210 | * make sure we have the contexts [rx, tx, xdp] to | 2219 | * make sure we have the contexts [rx, xdp, tcs] to |
2211 | * match. | 2220 | * match. |
2212 | */ | 2221 | */ |
2213 | for_each_hwfn(cdev, i) { | 2222 | for_each_hwfn(cdev, i) { |
@@ -2217,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev, | |||
2217 | u16 cids; | 2226 | u16 cids; |
2218 | 2227 | ||
2219 | cids = hwfn->pf_params.eth_pf_params.num_cons; | 2228 | cids = hwfn->pf_params.eth_pf_params.num_cons; |
2220 | num_queues += min_t(u16, l2_queues, cids / 3); | 2229 | cids /= (2 + info->num_tc); |
2230 | num_queues += min_t(u16, l2_queues, cids); | ||
2221 | } | 2231 | } |
2222 | 2232 | ||
2223 | /* queues might theoretically be >256, but interrupts' | 2233 | /* queues might theoretically be >256, but interrupts' |
@@ -2688,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev, | |||
2688 | if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { | 2698 | if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { |
2689 | accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | | 2699 | accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | |
2690 | QED_ACCEPT_MCAST_UNMATCHED; | 2700 | QED_ACCEPT_MCAST_UNMATCHED; |
2691 | accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; | 2701 | accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED | |
2702 | QED_ACCEPT_MCAST_UNMATCHED; | ||
2692 | } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { | 2703 | } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { |
2693 | accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; | 2704 | accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; |
2694 | accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; | 2705 | accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED; |
@@ -2860,7 +2871,8 @@ static int qed_get_coalesce(struct qed_dev *cdev, u16 *coal, void *handle) | |||
2860 | p_hwfn = p_cid->p_owner; | 2871 | p_hwfn = p_cid->p_owner; |
2861 | rc = qed_get_queue_coalesce(p_hwfn, coal, handle); | 2872 | rc = qed_get_queue_coalesce(p_hwfn, coal, handle); |
2862 | if (rc) | 2873 | if (rc) |
2863 | DP_NOTICE(p_hwfn, "Unable to read queue coalescing\n"); | 2874 | DP_VERBOSE(cdev, QED_MSG_DEBUG, |
2875 | "Unable to read queue coalescing\n"); | ||
2864 | 2876 | ||
2865 | return rc; | 2877 | return rc; |
2866 | } | 2878 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8d80f1095d17..7127d5aaac42 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h | |||
@@ -219,6 +219,9 @@ struct qed_sp_vport_update_params { | |||
219 | struct qed_rss_params *rss_params; | 219 | struct qed_rss_params *rss_params; |
220 | struct qed_filter_accept_flags accept_flags; | 220 | struct qed_filter_accept_flags accept_flags; |
221 | struct qed_sge_tpa_params *sge_tpa_params; | 221 | struct qed_sge_tpa_params *sge_tpa_params; |
222 | u8 update_ctl_frame_check; | ||
223 | u8 mac_chk_en; | ||
224 | u8 ethtype_chk_en; | ||
222 | }; | 225 | }; |
223 | 226 | ||
224 | int qed_sp_vport_update(struct qed_hwfn *p_hwfn, | 227 | int qed_sp_vport_update(struct qed_hwfn *p_hwfn, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index d9237c65a838..b5f419b71287 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
@@ -2451,19 +2451,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, | |||
2451 | { | 2451 | { |
2452 | struct qed_ll2_tx_pkt_info pkt; | 2452 | struct qed_ll2_tx_pkt_info pkt; |
2453 | const skb_frag_t *frag; | 2453 | const skb_frag_t *frag; |
2454 | u8 flags = 0, nr_frags; | ||
2454 | int rc = -EINVAL, i; | 2455 | int rc = -EINVAL, i; |
2455 | dma_addr_t mapping; | 2456 | dma_addr_t mapping; |
2456 | u16 vlan = 0; | 2457 | u16 vlan = 0; |
2457 | u8 flags = 0; | ||
2458 | 2458 | ||
2459 | if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { | 2459 | if (unlikely(skb->ip_summed != CHECKSUM_NONE)) { |
2460 | DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); | 2460 | DP_INFO(cdev, "Cannot transmit a checksummed packet\n"); |
2461 | return -EINVAL; | 2461 | return -EINVAL; |
2462 | } | 2462 | } |
2463 | 2463 | ||
2464 | if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { | 2464 | /* Cache number of fragments from SKB since SKB may be freed by |
2465 | * the completion routine after calling qed_ll2_prepare_tx_packet() | ||
2466 | */ | ||
2467 | nr_frags = skb_shinfo(skb)->nr_frags; | ||
2468 | |||
2469 | if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) { | ||
2465 | DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", | 2470 | DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n", |
2466 | 1 + skb_shinfo(skb)->nr_frags); | 2471 | 1 + nr_frags); |
2467 | return -EINVAL; | 2472 | return -EINVAL; |
2468 | } | 2473 | } |
2469 | 2474 | ||
@@ -2485,7 +2490,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, | |||
2485 | } | 2490 | } |
2486 | 2491 | ||
2487 | memset(&pkt, 0, sizeof(pkt)); | 2492 | memset(&pkt, 0, sizeof(pkt)); |
2488 | pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags; | 2493 | pkt.num_of_bds = 1 + nr_frags; |
2489 | pkt.vlan = vlan; | 2494 | pkt.vlan = vlan; |
2490 | pkt.bd_flags = flags; | 2495 | pkt.bd_flags = flags; |
2491 | pkt.tx_dest = QED_LL2_TX_DEST_NW; | 2496 | pkt.tx_dest = QED_LL2_TX_DEST_NW; |
@@ -2496,12 +2501,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb, | |||
2496 | test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) | 2501 | test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags)) |
2497 | pkt.remove_stag = true; | 2502 | pkt.remove_stag = true; |
2498 | 2503 | ||
2504 | /* qed_ll2_prepare_tx_packet() may actually send the packet if | ||
2505 | * there are no fragments in the skb and subsequently the completion | ||
2506 | * routine may run and free the SKB, so no dereferencing the SKB | ||
2507 | * beyond this point unless skb has any fragments. | ||
2508 | */ | ||
2499 | rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, | 2509 | rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle, |
2500 | &pkt, 1); | 2510 | &pkt, 1); |
2501 | if (rc) | 2511 | if (rc) |
2502 | goto err; | 2512 | goto err; |
2503 | 2513 | ||
2504 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 2514 | for (i = 0; i < nr_frags; i++) { |
2505 | frag = &skb_shinfo(skb)->frags[i]; | 2515 | frag = &skb_shinfo(skb)->frags[i]; |
2506 | 2516 | ||
2507 | mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, | 2517 | mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0, |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 4179c9013fc6..96ab77ae6af5 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h | |||
@@ -382,6 +382,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn); | |||
382 | * @param p_hwfn | 382 | * @param p_hwfn |
383 | */ | 383 | */ |
384 | void qed_consq_free(struct qed_hwfn *p_hwfn); | 384 | void qed_consq_free(struct qed_hwfn *p_hwfn); |
385 | int qed_spq_pend_post(struct qed_hwfn *p_hwfn); | ||
385 | 386 | ||
386 | /** | 387 | /** |
387 | * @file | 388 | * @file |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c index 888274fa208b..5a495fda9e9d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | |||
@@ -604,6 +604,9 @@ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn) | |||
604 | 604 | ||
605 | p_ent->ramrod.pf_update.update_mf_vlan_flag = true; | 605 | p_ent->ramrod.pf_update.update_mf_vlan_flag = true; |
606 | p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); | 606 | p_ent->ramrod.pf_update.mf_vlan = cpu_to_le16(p_hwfn->hw_info.ovlan); |
607 | if (test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits)) | ||
608 | p_ent->ramrod.pf_update.mf_vlan |= | ||
609 | cpu_to_le16(((u16)p_hwfn->ufp_info.tc << 13)); | ||
607 | 610 | ||
608 | return qed_spq_post(p_hwfn, p_ent, NULL); | 611 | return qed_spq_post(p_hwfn, p_ent, NULL); |
609 | } | 612 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index eb88bbc6b193..ba64ff9bedbd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c | |||
@@ -397,6 +397,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie) | |||
397 | 397 | ||
398 | qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); | 398 | qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain)); |
399 | 399 | ||
400 | /* Attempt to post pending requests */ | ||
401 | spin_lock_bh(&p_hwfn->p_spq->lock); | ||
402 | rc = qed_spq_pend_post(p_hwfn); | ||
403 | spin_unlock_bh(&p_hwfn->p_spq->lock); | ||
404 | |||
400 | return rc; | 405 | return rc; |
401 | } | 406 | } |
402 | 407 | ||
@@ -767,7 +772,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn, | |||
767 | return 0; | 772 | return 0; |
768 | } | 773 | } |
769 | 774 | ||
770 | static int qed_spq_pend_post(struct qed_hwfn *p_hwfn) | 775 | int qed_spq_pend_post(struct qed_hwfn *p_hwfn) |
771 | { | 776 | { |
772 | struct qed_spq *p_spq = p_hwfn->p_spq; | 777 | struct qed_spq *p_spq = p_hwfn->p_spq; |
773 | struct qed_spq_entry *p_ent = NULL; | 778 | struct qed_spq_entry *p_ent = NULL; |
@@ -905,7 +910,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, | |||
905 | struct qed_spq_entry *p_ent = NULL; | 910 | struct qed_spq_entry *p_ent = NULL; |
906 | struct qed_spq_entry *tmp; | 911 | struct qed_spq_entry *tmp; |
907 | struct qed_spq_entry *found = NULL; | 912 | struct qed_spq_entry *found = NULL; |
908 | int rc; | ||
909 | 913 | ||
910 | if (!p_hwfn) | 914 | if (!p_hwfn) |
911 | return -EINVAL; | 915 | return -EINVAL; |
@@ -963,12 +967,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, | |||
963 | */ | 967 | */ |
964 | qed_spq_return_entry(p_hwfn, found); | 968 | qed_spq_return_entry(p_hwfn, found); |
965 | 969 | ||
966 | /* Attempt to post pending requests */ | 970 | return 0; |
967 | spin_lock_bh(&p_spq->lock); | ||
968 | rc = qed_spq_pend_post(p_hwfn); | ||
969 | spin_unlock_bh(&p_spq->lock); | ||
970 | |||
971 | return rc; | ||
972 | } | 971 | } |
973 | 972 | ||
974 | int qed_consq_alloc(struct qed_hwfn *p_hwfn) | 973 | int qed_consq_alloc(struct qed_hwfn *p_hwfn) |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index ca6290fa0f30..71a7af134dd8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c | |||
@@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn, | |||
1969 | params.vport_id = vf->vport_id; | 1969 | params.vport_id = vf->vport_id; |
1970 | params.max_buffers_per_cqe = start->max_buffers_per_cqe; | 1970 | params.max_buffers_per_cqe = start->max_buffers_per_cqe; |
1971 | params.mtu = vf->mtu; | 1971 | params.mtu = vf->mtu; |
1972 | params.check_mac = true; | 1972 | |
1973 | /* Non trusted VFs should enable control frame filtering */ | ||
1974 | params.check_mac = !vf->p_vf_info.is_trusted_configured; | ||
1973 | 1975 | ||
1974 | rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); | 1976 | rc = qed_sp_eth_vport_start(p_hwfn, ¶ms); |
1975 | if (rc) { | 1977 | if (rc) { |
@@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) | |||
5130 | params.opaque_fid = vf->opaque_fid; | 5132 | params.opaque_fid = vf->opaque_fid; |
5131 | params.vport_id = vf->vport_id; | 5133 | params.vport_id = vf->vport_id; |
5132 | 5134 | ||
5135 | params.update_ctl_frame_check = 1; | ||
5136 | params.mac_chk_en = !vf_info->is_trusted_configured; | ||
5137 | |||
5133 | if (vf_info->rx_accept_mode & mask) { | 5138 | if (vf_info->rx_accept_mode & mask) { |
5134 | flags->update_rx_mode_config = 1; | 5139 | flags->update_rx_mode_config = 1; |
5135 | flags->rx_accept_filter = vf_info->rx_accept_mode; | 5140 | flags->rx_accept_filter = vf_info->rx_accept_mode; |
@@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn) | |||
5147 | } | 5152 | } |
5148 | 5153 | ||
5149 | if (flags->update_rx_mode_config || | 5154 | if (flags->update_rx_mode_config || |
5150 | flags->update_tx_mode_config) | 5155 | flags->update_tx_mode_config || |
5156 | params.update_ctl_frame_check) | ||
5151 | qed_sp_vport_update(hwfn, ¶ms, | 5157 | qed_sp_vport_update(hwfn, ¶ms, |
5152 | QED_SPQ_MODE_EBLOCK, NULL); | 5158 | QED_SPQ_MODE_EBLOCK, NULL); |
5153 | } | 5159 | } |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c index b6cccf44bf40..5dda547772c1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c | |||
@@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) | |||
261 | struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; | 261 | struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; |
262 | struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; | 262 | struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; |
263 | struct vf_pf_resc_request *p_resc; | 263 | struct vf_pf_resc_request *p_resc; |
264 | u8 retry_cnt = VF_ACQUIRE_THRESH; | ||
264 | bool resources_acquired = false; | 265 | bool resources_acquired = false; |
265 | struct vfpf_acquire_tlv *req; | 266 | struct vfpf_acquire_tlv *req; |
266 | int rc = 0, attempts = 0; | 267 | int rc = 0, attempts = 0; |
@@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn) | |||
314 | 315 | ||
315 | /* send acquire request */ | 316 | /* send acquire request */ |
316 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); | 317 | rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); |
318 | |||
319 | /* Re-try acquire in case of vf-pf hw channel timeout */ | ||
320 | if (retry_cnt && rc == -EBUSY) { | ||
321 | DP_VERBOSE(p_hwfn, QED_MSG_IOV, | ||
322 | "VF retrying to acquire due to VPC timeout\n"); | ||
323 | retry_cnt--; | ||
324 | continue; | ||
325 | } | ||
326 | |||
317 | if (rc) | 327 | if (rc) |
318 | goto exit; | 328 | goto exit; |
319 | 329 | ||
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 613249d1e967..730997b13747 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h | |||
@@ -56,7 +56,7 @@ | |||
56 | #include <net/tc_act/tc_gact.h> | 56 | #include <net/tc_act/tc_gact.h> |
57 | 57 | ||
58 | #define QEDE_MAJOR_VERSION 8 | 58 | #define QEDE_MAJOR_VERSION 8 |
59 | #define QEDE_MINOR_VERSION 33 | 59 | #define QEDE_MINOR_VERSION 37 |
60 | #define QEDE_REVISION_VERSION 0 | 60 | #define QEDE_REVISION_VERSION 0 |
61 | #define QEDE_ENGINEERING_VERSION 20 | 61 | #define QEDE_ENGINEERING_VERSION 20 |
62 | #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ | 62 | #define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ |
@@ -494,6 +494,9 @@ struct qede_reload_args { | |||
494 | 494 | ||
495 | /* Datapath functions definition */ | 495 | /* Datapath functions definition */ |
496 | netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); | 496 | netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev); |
497 | u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, | ||
498 | struct net_device *sb_dev, | ||
499 | select_queue_fallback_t fallback); | ||
497 | netdev_features_t qede_features_check(struct sk_buff *skb, | 500 | netdev_features_t qede_features_check(struct sk_buff *skb, |
498 | struct net_device *dev, | 501 | struct net_device *dev, |
499 | netdev_features_t features); | 502 | netdev_features_t features); |
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index bdf816fe5a16..31b046e24565 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c | |||
@@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1695 | return NETDEV_TX_OK; | 1695 | return NETDEV_TX_OK; |
1696 | } | 1696 | } |
1697 | 1697 | ||
1698 | u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb, | ||
1699 | struct net_device *sb_dev, | ||
1700 | select_queue_fallback_t fallback) | ||
1701 | { | ||
1702 | struct qede_dev *edev = netdev_priv(dev); | ||
1703 | int total_txq; | ||
1704 | |||
1705 | total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; | ||
1706 | |||
1707 | return QEDE_TSS_COUNT(edev) ? | ||
1708 | fallback(dev, skb, NULL) % total_txq : 0; | ||
1709 | } | ||
1710 | |||
1698 | /* 8B udp header + 8B base tunnel header + 32B option length */ | 1711 | /* 8B udp header + 8B base tunnel header + 32B option length */ |
1699 | #define QEDE_MAX_TUN_HDR_LEN 48 | 1712 | #define QEDE_MAX_TUN_HDR_LEN 48 |
1700 | 1713 | ||
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 5a74fcbdbc2b..9790f26d17c4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
@@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = { | |||
631 | .ndo_open = qede_open, | 631 | .ndo_open = qede_open, |
632 | .ndo_stop = qede_close, | 632 | .ndo_stop = qede_close, |
633 | .ndo_start_xmit = qede_start_xmit, | 633 | .ndo_start_xmit = qede_start_xmit, |
634 | .ndo_select_queue = qede_select_queue, | ||
634 | .ndo_set_rx_mode = qede_set_rx_mode, | 635 | .ndo_set_rx_mode = qede_set_rx_mode, |
635 | .ndo_set_mac_address = qede_set_mac_addr, | 636 | .ndo_set_mac_address = qede_set_mac_addr, |
636 | .ndo_validate_addr = eth_validate_addr, | 637 | .ndo_validate_addr = eth_validate_addr, |
@@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = { | |||
666 | .ndo_open = qede_open, | 667 | .ndo_open = qede_open, |
667 | .ndo_stop = qede_close, | 668 | .ndo_stop = qede_close, |
668 | .ndo_start_xmit = qede_start_xmit, | 669 | .ndo_start_xmit = qede_start_xmit, |
670 | .ndo_select_queue = qede_select_queue, | ||
669 | .ndo_set_rx_mode = qede_set_rx_mode, | 671 | .ndo_set_rx_mode = qede_set_rx_mode, |
670 | .ndo_set_mac_address = qede_set_mac_addr, | 672 | .ndo_set_mac_address = qede_set_mac_addr, |
671 | .ndo_validate_addr = eth_validate_addr, | 673 | .ndo_validate_addr = eth_validate_addr, |
@@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = { | |||
684 | .ndo_open = qede_open, | 686 | .ndo_open = qede_open, |
685 | .ndo_stop = qede_close, | 687 | .ndo_stop = qede_close, |
686 | .ndo_start_xmit = qede_start_xmit, | 688 | .ndo_start_xmit = qede_start_xmit, |
689 | .ndo_select_queue = qede_select_queue, | ||
687 | .ndo_set_rx_mode = qede_set_rx_mode, | 690 | .ndo_set_rx_mode = qede_set_rx_mode, |
688 | .ndo_set_mac_address = qede_set_mac_addr, | 691 | .ndo_set_mac_address = qede_set_mac_addr, |
689 | .ndo_validate_addr = eth_validate_addr, | 692 | .ndo_validate_addr = eth_validate_addr, |
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 44f6e4873aad..4f910c4f67b0 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
@@ -691,7 +691,7 @@ static void cp_tx (struct cp_private *cp) | |||
691 | } | 691 | } |
692 | bytes_compl += skb->len; | 692 | bytes_compl += skb->len; |
693 | pkts_compl++; | 693 | pkts_compl++; |
694 | dev_kfree_skb_irq(skb); | 694 | dev_consume_skb_irq(skb); |
695 | } | 695 | } |
696 | 696 | ||
697 | cp->tx_skb[tx_tail] = NULL; | 697 | cp->tx_skb[tx_tail] = NULL; |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index abb94c543aa2..6e36b88ca7c9 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -1286,11 +1286,13 @@ static u16 rtl_get_events(struct rtl8169_private *tp) | |||
1286 | static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) | 1286 | static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) |
1287 | { | 1287 | { |
1288 | RTL_W16(tp, IntrStatus, bits); | 1288 | RTL_W16(tp, IntrStatus, bits); |
1289 | mmiowb(); | ||
1289 | } | 1290 | } |
1290 | 1291 | ||
1291 | static void rtl_irq_disable(struct rtl8169_private *tp) | 1292 | static void rtl_irq_disable(struct rtl8169_private *tp) |
1292 | { | 1293 | { |
1293 | RTL_W16(tp, IntrMask, 0); | 1294 | RTL_W16(tp, IntrMask, 0); |
1295 | mmiowb(); | ||
1294 | } | 1296 | } |
1295 | 1297 | ||
1296 | #define RTL_EVENT_NAPI_RX (RxOK | RxErr) | 1298 | #define RTL_EVENT_NAPI_RX (RxOK | RxErr) |
@@ -6072,7 +6074,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
6072 | struct device *d = tp_to_dev(tp); | 6074 | struct device *d = tp_to_dev(tp); |
6073 | dma_addr_t mapping; | 6075 | dma_addr_t mapping; |
6074 | u32 opts[2], len; | 6076 | u32 opts[2], len; |
6075 | bool stop_queue; | ||
6076 | int frags; | 6077 | int frags; |
6077 | 6078 | ||
6078 | if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { | 6079 | if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { |
@@ -6114,6 +6115,8 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
6114 | 6115 | ||
6115 | txd->opts2 = cpu_to_le32(opts[1]); | 6116 | txd->opts2 = cpu_to_le32(opts[1]); |
6116 | 6117 | ||
6118 | netdev_sent_queue(dev, skb->len); | ||
6119 | |||
6117 | skb_tx_timestamp(skb); | 6120 | skb_tx_timestamp(skb); |
6118 | 6121 | ||
6119 | /* Force memory writes to complete before releasing descriptor */ | 6122 | /* Force memory writes to complete before releasing descriptor */ |
@@ -6126,14 +6129,16 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, | |||
6126 | 6129 | ||
6127 | tp->cur_tx += frags + 1; | 6130 | tp->cur_tx += frags + 1; |
6128 | 6131 | ||
6129 | stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); | 6132 | RTL_W8(tp, TxPoll, NPQ); |
6130 | if (unlikely(stop_queue)) | ||
6131 | netif_stop_queue(dev); | ||
6132 | 6133 | ||
6133 | if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) | 6134 | mmiowb(); |
6134 | RTL_W8(tp, TxPoll, NPQ); | ||
6135 | 6135 | ||
6136 | if (unlikely(stop_queue)) { | 6136 | if (!rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { |
6137 | /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must | ||
6138 | * not miss a ring update when it notices a stopped queue. | ||
6139 | */ | ||
6140 | smp_wmb(); | ||
6141 | netif_stop_queue(dev); | ||
6137 | /* Sync with rtl_tx: | 6142 | /* Sync with rtl_tx: |
6138 | * - publish queue status and cur_tx ring index (write barrier) | 6143 | * - publish queue status and cur_tx ring index (write barrier) |
6139 | * - refresh dirty_tx ring index (read barrier). | 6144 | * - refresh dirty_tx ring index (read barrier). |
@@ -6483,7 +6488,9 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) | |||
6483 | 6488 | ||
6484 | if (work_done < budget) { | 6489 | if (work_done < budget) { |
6485 | napi_complete_done(napi, work_done); | 6490 | napi_complete_done(napi, work_done); |
6491 | |||
6486 | rtl_irq_enable(tp); | 6492 | rtl_irq_enable(tp); |
6493 | mmiowb(); | ||
6487 | } | 6494 | } |
6488 | 6495 | ||
6489 | return work_done; | 6496 | return work_done; |
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 2f2bda68d861..c08034154a9a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c | |||
@@ -6115,7 +6115,7 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, | |||
6115 | static int efx_ef10_mtd_probe(struct efx_nic *efx) | 6115 | static int efx_ef10_mtd_probe(struct efx_nic *efx) |
6116 | { | 6116 | { |
6117 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); | 6117 | MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX); |
6118 | DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT); | 6118 | DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 }; |
6119 | struct efx_mcdi_mtd_partition *parts; | 6119 | struct efx_mcdi_mtd_partition *parts; |
6120 | size_t outlen, n_parts_total, i, n_parts; | 6120 | size_t outlen, n_parts_total, i, n_parts; |
6121 | unsigned int type; | 6121 | unsigned int type; |
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 15c62c160953..be47d864f8b9 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c | |||
@@ -1037,7 +1037,7 @@ static void epic_tx(struct net_device *dev, struct epic_private *ep) | |||
1037 | skb = ep->tx_skbuff[entry]; | 1037 | skb = ep->tx_skbuff[entry]; |
1038 | pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, | 1038 | pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, |
1039 | skb->len, PCI_DMA_TODEVICE); | 1039 | skb->len, PCI_DMA_TODEVICE); |
1040 | dev_kfree_skb_irq(skb); | 1040 | dev_consume_skb_irq(skb); |
1041 | ep->tx_skbuff[entry] = NULL; | 1041 | ep->tx_skbuff[entry] = NULL; |
1042 | } | 1042 | } |
1043 | 1043 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 7b923362ee55..3b174eae77c1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | |||
@@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) | |||
1342 | } | 1342 | } |
1343 | 1343 | ||
1344 | ret = phy_power_on(bsp_priv, true); | 1344 | ret = phy_power_on(bsp_priv, true); |
1345 | if (ret) | 1345 | if (ret) { |
1346 | gmac_clk_enable(bsp_priv, false); | ||
1346 | return ret; | 1347 | return ret; |
1348 | } | ||
1347 | 1349 | ||
1348 | pm_runtime_enable(dev); | 1350 | pm_runtime_enable(dev); |
1349 | pm_runtime_get_sync(dev); | 1351 | pm_runtime_get_sync(dev); |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d1f61c25d82b..5d85742a2be0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | |||
@@ -721,8 +721,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv) | |||
721 | { | 721 | { |
722 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); | 722 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); |
723 | 723 | ||
724 | if (!clk) | 724 | if (!clk) { |
725 | return 0; | 725 | clk = priv->plat->clk_ref_rate; |
726 | if (!clk) | ||
727 | return 0; | ||
728 | } | ||
726 | 729 | ||
727 | return (usec * (clk / 1000000)) / 256; | 730 | return (usec * (clk / 1000000)) / 256; |
728 | } | 731 | } |
@@ -731,8 +734,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv) | |||
731 | { | 734 | { |
732 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); | 735 | unsigned long clk = clk_get_rate(priv->plat->stmmac_clk); |
733 | 736 | ||
734 | if (!clk) | 737 | if (!clk) { |
735 | return 0; | 738 | clk = priv->plat->clk_ref_rate; |
739 | if (!clk) | ||
740 | return 0; | ||
741 | } | ||
736 | 742 | ||
737 | return (riwt * 256) / (clk / 1000000); | 743 | return (riwt * 256) / (clk / 1000000); |
738 | } | 744 | } |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 5afba69981cf..685d20472358 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -3023,10 +3023,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3023 | 3023 | ||
3024 | tx_q = &priv->tx_queue[queue]; | 3024 | tx_q = &priv->tx_queue[queue]; |
3025 | 3025 | ||
3026 | if (priv->tx_path_in_lpi_mode) | ||
3027 | stmmac_disable_eee_mode(priv); | ||
3028 | |||
3026 | /* Manage oversized TCP frames for GMAC4 device */ | 3029 | /* Manage oversized TCP frames for GMAC4 device */ |
3027 | if (skb_is_gso(skb) && priv->tso) { | 3030 | if (skb_is_gso(skb) && priv->tso) { |
3028 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) | 3031 | if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { |
3032 | /* | ||
3033 | * There is no way to determine the number of TSO | ||
3034 | * capable Queues. Let's use always the Queue 0 | ||
3035 | * because if TSO is supported then at least this | ||
3036 | * one will be capable. | ||
3037 | */ | ||
3038 | skb_set_queue_mapping(skb, 0); | ||
3039 | |||
3029 | return stmmac_tso_xmit(skb, dev); | 3040 | return stmmac_tso_xmit(skb, dev); |
3041 | } | ||
3030 | } | 3042 | } |
3031 | 3043 | ||
3032 | if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { | 3044 | if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { |
@@ -3041,9 +3053,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3041 | return NETDEV_TX_BUSY; | 3053 | return NETDEV_TX_BUSY; |
3042 | } | 3054 | } |
3043 | 3055 | ||
3044 | if (priv->tx_path_in_lpi_mode) | ||
3045 | stmmac_disable_eee_mode(priv); | ||
3046 | |||
3047 | entry = tx_q->cur_tx; | 3056 | entry = tx_q->cur_tx; |
3048 | first_entry = entry; | 3057 | first_entry = entry; |
3049 | WARN_ON(tx_q->tx_skbuff[first_entry]); | 3058 | WARN_ON(tx_q->tx_skbuff[first_entry]); |
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 7ec4eb74fe21..6fc05c106afc 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c | |||
@@ -1898,7 +1898,7 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit) | |||
1898 | cp->net_stats[ring].tx_packets++; | 1898 | cp->net_stats[ring].tx_packets++; |
1899 | cp->net_stats[ring].tx_bytes += skb->len; | 1899 | cp->net_stats[ring].tx_bytes += skb->len; |
1900 | spin_unlock(&cp->stat_lock[ring]); | 1900 | spin_unlock(&cp->stat_lock[ring]); |
1901 | dev_kfree_skb_irq(skb); | 1901 | dev_consume_skb_irq(skb); |
1902 | } | 1902 | } |
1903 | cp->tx_old[ring] = entry; | 1903 | cp->tx_old[ring] = entry; |
1904 | 1904 | ||
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index 720b7ac77f3b..e9b757b03b56 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c | |||
@@ -781,7 +781,7 @@ static void bigmac_tx(struct bigmac *bp) | |||
781 | 781 | ||
782 | DTX(("skb(%p) ", skb)); | 782 | DTX(("skb(%p) ", skb)); |
783 | bp->tx_skbs[elem] = NULL; | 783 | bp->tx_skbs[elem] = NULL; |
784 | dev_kfree_skb_irq(skb); | 784 | dev_consume_skb_irq(skb); |
785 | 785 | ||
786 | elem = NEXT_TX(elem); | 786 | elem = NEXT_TX(elem); |
787 | } | 787 | } |
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index ff641cf30a4e..d007dfeba5c3 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c | |||
@@ -1962,7 +1962,7 @@ static void happy_meal_tx(struct happy_meal *hp) | |||
1962 | this = &txbase[elem]; | 1962 | this = &txbase[elem]; |
1963 | } | 1963 | } |
1964 | 1964 | ||
1965 | dev_kfree_skb_irq(skb); | 1965 | dev_consume_skb_irq(skb); |
1966 | dev->stats.tx_packets++; | 1966 | dev->stats.tx_packets++; |
1967 | } | 1967 | } |
1968 | hp->tx_old = elem; | 1968 | hp->tx_old = elem; |
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index dc966ddb6d81..b24c11187017 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ b/drivers/net/ethernet/tehuti/tehuti.c | |||
@@ -1739,7 +1739,7 @@ static void bdx_tx_cleanup(struct bdx_priv *priv) | |||
1739 | tx_level -= db->rptr->len; /* '-' koz len is negative */ | 1739 | tx_level -= db->rptr->len; /* '-' koz len is negative */ |
1740 | 1740 | ||
1741 | /* now should come skb pointer - free it */ | 1741 | /* now should come skb pointer - free it */ |
1742 | dev_kfree_skb_irq(db->rptr->addr.skb); | 1742 | dev_consume_skb_irq(db->rptr->addr.skb); |
1743 | bdx_tx_db_inc_rptr(db); | 1743 | bdx_tx_db_inc_rptr(db); |
1744 | } | 1744 | } |
1745 | 1745 | ||
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 810dfc7de1f9..e2d47b24a869 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c | |||
@@ -608,7 +608,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue) | |||
608 | netdev_dbg(dev, "sent 0x%p, len=%d\n", | 608 | netdev_dbg(dev, "sent 0x%p, len=%d\n", |
609 | desc->skb, desc->skb->len); | 609 | desc->skb, desc->skb->len); |
610 | 610 | ||
611 | dev_kfree_skb_irq(desc->skb); | 611 | dev_consume_skb_irq(desc->skb); |
612 | desc->skb = NULL; | 612 | desc->skb = NULL; |
613 | if (__netif_subqueue_stopped(dev, queue)) | 613 | if (__netif_subqueue_stopped(dev, queue)) |
614 | netif_wake_subqueue(dev, queue); | 614 | netif_wake_subqueue(dev, queue); |
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 82412691ee66..27f6cf140845 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
@@ -1740,7 +1740,7 @@ static void velocity_free_tx_buf(struct velocity_info *vptr, | |||
1740 | dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], | 1740 | dma_unmap_single(vptr->dev, tdinfo->skb_dma[i], |
1741 | le16_to_cpu(pktlen), DMA_TO_DEVICE); | 1741 | le16_to_cpu(pktlen), DMA_TO_DEVICE); |
1742 | } | 1742 | } |
1743 | dev_kfree_skb_irq(skb); | 1743 | dev_consume_skb_irq(skb); |
1744 | tdinfo->skb = NULL; | 1744 | tdinfo->skb = NULL; |
1745 | } | 1745 | } |
1746 | 1746 | ||
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c index 38ac8ef41f5f..56b7791911bf 100644 --- a/drivers/net/fddi/defxx.c +++ b/drivers/net/fddi/defxx.c | |||
@@ -3512,7 +3512,7 @@ static int dfx_xmt_done(DFX_board_t *bp) | |||
3512 | bp->descr_block_virt->xmt_data[comp].long_1, | 3512 | bp->descr_block_virt->xmt_data[comp].long_1, |
3513 | p_xmt_drv_descr->p_skb->len, | 3513 | p_xmt_drv_descr->p_skb->len, |
3514 | DMA_TO_DEVICE); | 3514 | DMA_TO_DEVICE); |
3515 | dev_kfree_skb_irq(p_xmt_drv_descr->p_skb); | 3515 | dev_consume_skb_irq(p_xmt_drv_descr->p_skb); |
3516 | 3516 | ||
3517 | /* | 3517 | /* |
3518 | * Move to start of next packet by updating completion index | 3518 | * Move to start of next packet by updating completion index |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 58bbba8582b0..3377ac66a347 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -1512,9 +1512,13 @@ static void geneve_link_config(struct net_device *dev, | |||
1512 | } | 1512 | } |
1513 | #if IS_ENABLED(CONFIG_IPV6) | 1513 | #if IS_ENABLED(CONFIG_IPV6) |
1514 | case AF_INET6: { | 1514 | case AF_INET6: { |
1515 | struct rt6_info *rt = rt6_lookup(geneve->net, | 1515 | struct rt6_info *rt; |
1516 | &info->key.u.ipv6.dst, NULL, 0, | 1516 | |
1517 | NULL, 0); | 1517 | if (!__in6_dev_get(dev)) |
1518 | break; | ||
1519 | |||
1520 | rt = rt6_lookup(geneve->net, &info->key.u.ipv6.dst, NULL, 0, | ||
1521 | NULL, 0); | ||
1518 | 1522 | ||
1519 | if (rt && rt->dst.dev) | 1523 | if (rt && rt->dst.dev) |
1520 | ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; | 1524 | ldev_mtu = rt->dst.dev->mtu - GENEVE_IPV6_HLEN; |
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 44de81e5f140..c589f5ae75bb 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c | |||
@@ -905,9 +905,9 @@ mcr20a_irq_clean_complete(void *context) | |||
905 | } | 905 | } |
906 | break; | 906 | break; |
907 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): | 907 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_SEQIRQ): |
908 | /* rx is starting */ | 908 | /* rx is starting */ |
909 | dev_dbg(printdev(lp), "RX is starting\n"); | 909 | dev_dbg(printdev(lp), "RX is starting\n"); |
910 | mcr20a_handle_rx(lp); | 910 | mcr20a_handle_rx(lp); |
911 | break; | 911 | break; |
912 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): | 912 | case (DAR_IRQSTS1_RXIRQ | DAR_IRQSTS1_TXIRQ | DAR_IRQSTS1_SEQIRQ): |
913 | if (lp->is_tx) { | 913 | if (lp->is_tx) { |
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 19bdde60680c..7cdac77d0c68 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -100,12 +100,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval, | |||
100 | err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); | 100 | err = ipvlan_register_nf_hook(read_pnet(&port->pnet)); |
101 | if (!err) { | 101 | if (!err) { |
102 | mdev->l3mdev_ops = &ipvl_l3mdev_ops; | 102 | mdev->l3mdev_ops = &ipvl_l3mdev_ops; |
103 | mdev->priv_flags |= IFF_L3MDEV_MASTER; | 103 | mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER; |
104 | } else | 104 | } else |
105 | goto fail; | 105 | goto fail; |
106 | } else if (port->mode == IPVLAN_MODE_L3S) { | 106 | } else if (port->mode == IPVLAN_MODE_L3S) { |
107 | /* Old mode was L3S */ | 107 | /* Old mode was L3S */ |
108 | mdev->priv_flags &= ~IFF_L3MDEV_MASTER; | 108 | mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; |
109 | ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); | 109 | ipvlan_unregister_nf_hook(read_pnet(&port->pnet)); |
110 | mdev->l3mdev_ops = NULL; | 110 | mdev->l3mdev_ops = NULL; |
111 | } | 111 | } |
@@ -167,7 +167,7 @@ static void ipvlan_port_destroy(struct net_device *dev) | |||
167 | struct sk_buff *skb; | 167 | struct sk_buff *skb; |
168 | 168 | ||
169 | if (port->mode == IPVLAN_MODE_L3S) { | 169 | if (port->mode == IPVLAN_MODE_L3S) { |
170 | dev->priv_flags &= ~IFF_L3MDEV_MASTER; | 170 | dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER; |
171 | ipvlan_unregister_nf_hook(dev_net(dev)); | 171 | ipvlan_unregister_nf_hook(dev_net(dev)); |
172 | dev->l3mdev_ops = NULL; | 172 | dev->l3mdev_ops = NULL; |
173 | } | 173 | } |
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 18b41bc345ab..6e8807212aa3 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -898,14 +898,14 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
898 | struct phy_txts *phy_txts) | 898 | struct phy_txts *phy_txts) |
899 | { | 899 | { |
900 | struct skb_shared_hwtstamps shhwtstamps; | 900 | struct skb_shared_hwtstamps shhwtstamps; |
901 | struct dp83640_skb_info *skb_info; | ||
901 | struct sk_buff *skb; | 902 | struct sk_buff *skb; |
902 | u64 ns; | ||
903 | u8 overflow; | 903 | u8 overflow; |
904 | u64 ns; | ||
904 | 905 | ||
905 | /* We must already have the skb that triggered this. */ | 906 | /* We must already have the skb that triggered this. */ |
906 | 907 | again: | |
907 | skb = skb_dequeue(&dp83640->tx_queue); | 908 | skb = skb_dequeue(&dp83640->tx_queue); |
908 | |||
909 | if (!skb) { | 909 | if (!skb) { |
910 | pr_debug("have timestamp but tx_queue empty\n"); | 910 | pr_debug("have timestamp but tx_queue empty\n"); |
911 | return; | 911 | return; |
@@ -920,6 +920,11 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
920 | } | 920 | } |
921 | return; | 921 | return; |
922 | } | 922 | } |
923 | skb_info = (struct dp83640_skb_info *)skb->cb; | ||
924 | if (time_after(jiffies, skb_info->tmo)) { | ||
925 | kfree_skb(skb); | ||
926 | goto again; | ||
927 | } | ||
923 | 928 | ||
924 | ns = phy2txts(phy_txts); | 929 | ns = phy2txts(phy_txts); |
925 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); | 930 | memset(&shhwtstamps, 0, sizeof(shhwtstamps)); |
@@ -1472,6 +1477,7 @@ static bool dp83640_rxtstamp(struct phy_device *phydev, | |||
1472 | static void dp83640_txtstamp(struct phy_device *phydev, | 1477 | static void dp83640_txtstamp(struct phy_device *phydev, |
1473 | struct sk_buff *skb, int type) | 1478 | struct sk_buff *skb, int type) |
1474 | { | 1479 | { |
1480 | struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb; | ||
1475 | struct dp83640_private *dp83640 = phydev->priv; | 1481 | struct dp83640_private *dp83640 = phydev->priv; |
1476 | 1482 | ||
1477 | switch (dp83640->hwts_tx_en) { | 1483 | switch (dp83640->hwts_tx_en) { |
@@ -1484,6 +1490,7 @@ static void dp83640_txtstamp(struct phy_device *phydev, | |||
1484 | /* fall through */ | 1490 | /* fall through */ |
1485 | case HWTSTAMP_TX_ON: | 1491 | case HWTSTAMP_TX_ON: |
1486 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; | 1492 | skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; |
1493 | skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT; | ||
1487 | skb_queue_tail(&dp83640->tx_queue, skb); | 1494 | skb_queue_tail(&dp83640->tx_queue, skb); |
1488 | break; | 1495 | break; |
1489 | 1496 | ||
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 2e12f982534f..abb7876a8776 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
@@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev) | |||
847 | 847 | ||
848 | /* SGMII-to-Copper mode initialization */ | 848 | /* SGMII-to-Copper mode initialization */ |
849 | if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { | 849 | if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { |
850 | |||
851 | /* Select page 18 */ | 850 | /* Select page 18 */ |
852 | err = marvell_set_page(phydev, 18); | 851 | err = marvell_set_page(phydev, 18); |
853 | if (err < 0) | 852 | if (err < 0) |
@@ -870,21 +869,6 @@ static int m88e1510_config_init(struct phy_device *phydev) | |||
870 | err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); | 869 | err = marvell_set_page(phydev, MII_MARVELL_COPPER_PAGE); |
871 | if (err < 0) | 870 | if (err < 0) |
872 | return err; | 871 | return err; |
873 | |||
874 | /* There appears to be a bug in the 88e1512 when used in | ||
875 | * SGMII to copper mode, where the AN advertisement register | ||
876 | * clears the pause bits each time a negotiation occurs. | ||
877 | * This means we can never be truely sure what was advertised, | ||
878 | * so disable Pause support. | ||
879 | */ | ||
880 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, | ||
881 | phydev->supported); | ||
882 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, | ||
883 | phydev->supported); | ||
884 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, | ||
885 | phydev->advertising); | ||
886 | linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, | ||
887 | phydev->advertising); | ||
888 | } | 872 | } |
889 | 873 | ||
890 | return m88e1318_config_init(phydev); | 874 | return m88e1318_config_init(phydev); |
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 189cd2048c3a..c5675df5fc6f 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
@@ -553,7 +553,7 @@ int phy_start_aneg(struct phy_device *phydev) | |||
553 | if (err < 0) | 553 | if (err < 0) |
554 | goto out_unlock; | 554 | goto out_unlock; |
555 | 555 | ||
556 | if (__phy_is_started(phydev)) { | 556 | if (phy_is_started(phydev)) { |
557 | if (phydev->autoneg == AUTONEG_ENABLE) { | 557 | if (phydev->autoneg == AUTONEG_ENABLE) { |
558 | err = phy_check_link_status(phydev); | 558 | err = phy_check_link_status(phydev); |
559 | } else { | 559 | } else { |
@@ -709,7 +709,7 @@ void phy_stop_machine(struct phy_device *phydev) | |||
709 | cancel_delayed_work_sync(&phydev->state_queue); | 709 | cancel_delayed_work_sync(&phydev->state_queue); |
710 | 710 | ||
711 | mutex_lock(&phydev->lock); | 711 | mutex_lock(&phydev->lock); |
712 | if (__phy_is_started(phydev)) | 712 | if (phy_is_started(phydev)) |
713 | phydev->state = PHY_UP; | 713 | phydev->state = PHY_UP; |
714 | mutex_unlock(&phydev->lock); | 714 | mutex_unlock(&phydev->lock); |
715 | } | 715 | } |
@@ -762,9 +762,6 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) | |||
762 | { | 762 | { |
763 | struct phy_device *phydev = phy_dat; | 763 | struct phy_device *phydev = phy_dat; |
764 | 764 | ||
765 | if (!phy_is_started(phydev)) | ||
766 | return IRQ_NONE; /* It can't be ours. */ | ||
767 | |||
768 | if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) | 765 | if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) |
769 | return IRQ_NONE; | 766 | return IRQ_NONE; |
770 | 767 | ||
@@ -842,15 +839,14 @@ EXPORT_SYMBOL(phy_stop_interrupts); | |||
842 | */ | 839 | */ |
843 | void phy_stop(struct phy_device *phydev) | 840 | void phy_stop(struct phy_device *phydev) |
844 | { | 841 | { |
845 | mutex_lock(&phydev->lock); | 842 | if (!phy_is_started(phydev)) { |
846 | |||
847 | if (!__phy_is_started(phydev)) { | ||
848 | WARN(1, "called from state %s\n", | 843 | WARN(1, "called from state %s\n", |
849 | phy_state_to_str(phydev->state)); | 844 | phy_state_to_str(phydev->state)); |
850 | mutex_unlock(&phydev->lock); | ||
851 | return; | 845 | return; |
852 | } | 846 | } |
853 | 847 | ||
848 | mutex_lock(&phydev->lock); | ||
849 | |||
854 | if (phy_interrupt_is_valid(phydev)) | 850 | if (phy_interrupt_is_valid(phydev)) |
855 | phy_disable_interrupts(phydev); | 851 | phy_disable_interrupts(phydev); |
856 | 852 | ||
@@ -989,8 +985,10 @@ void phy_state_machine(struct work_struct *work) | |||
989 | * state machine would be pointless and possibly error prone when | 985 | * state machine would be pointless and possibly error prone when |
990 | * called from phy_disconnect() synchronously. | 986 | * called from phy_disconnect() synchronously. |
991 | */ | 987 | */ |
988 | mutex_lock(&phydev->lock); | ||
992 | if (phy_polling_mode(phydev) && phy_is_started(phydev)) | 989 | if (phy_polling_mode(phydev) && phy_is_started(phydev)) |
993 | phy_queue_state_machine(phydev, PHY_STATE_TIME); | 990 | phy_queue_state_machine(phydev, PHY_STATE_TIME); |
991 | mutex_unlock(&phydev->lock); | ||
994 | } | 992 | } |
995 | 993 | ||
996 | /** | 994 | /** |
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index e7becc7379d7..938803237d7f 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c | |||
@@ -474,6 +474,17 @@ static void phylink_run_resolve(struct phylink *pl) | |||
474 | queue_work(system_power_efficient_wq, &pl->resolve); | 474 | queue_work(system_power_efficient_wq, &pl->resolve); |
475 | } | 475 | } |
476 | 476 | ||
477 | static void phylink_run_resolve_and_disable(struct phylink *pl, int bit) | ||
478 | { | ||
479 | unsigned long state = pl->phylink_disable_state; | ||
480 | |||
481 | set_bit(bit, &pl->phylink_disable_state); | ||
482 | if (state == 0) { | ||
483 | queue_work(system_power_efficient_wq, &pl->resolve); | ||
484 | flush_work(&pl->resolve); | ||
485 | } | ||
486 | } | ||
487 | |||
477 | static void phylink_fixed_poll(struct timer_list *t) | 488 | static void phylink_fixed_poll(struct timer_list *t) |
478 | { | 489 | { |
479 | struct phylink *pl = container_of(t, struct phylink, link_poll); | 490 | struct phylink *pl = container_of(t, struct phylink, link_poll); |
@@ -924,9 +935,7 @@ void phylink_stop(struct phylink *pl) | |||
924 | if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) | 935 | if (pl->link_an_mode == MLO_AN_FIXED && !IS_ERR(pl->link_gpio)) |
925 | del_timer_sync(&pl->link_poll); | 936 | del_timer_sync(&pl->link_poll); |
926 | 937 | ||
927 | set_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state); | 938 | phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_STOPPED); |
928 | queue_work(system_power_efficient_wq, &pl->resolve); | ||
929 | flush_work(&pl->resolve); | ||
930 | } | 939 | } |
931 | EXPORT_SYMBOL_GPL(phylink_stop); | 940 | EXPORT_SYMBOL_GPL(phylink_stop); |
932 | 941 | ||
@@ -1632,9 +1641,7 @@ static void phylink_sfp_link_down(void *upstream) | |||
1632 | 1641 | ||
1633 | ASSERT_RTNL(); | 1642 | ASSERT_RTNL(); |
1634 | 1643 | ||
1635 | set_bit(PHYLINK_DISABLE_LINK, &pl->phylink_disable_state); | 1644 | phylink_run_resolve_and_disable(pl, PHYLINK_DISABLE_LINK); |
1636 | queue_work(system_power_efficient_wq, &pl->resolve); | ||
1637 | flush_work(&pl->resolve); | ||
1638 | } | 1645 | } |
1639 | 1646 | ||
1640 | static void phylink_sfp_link_up(void *upstream) | 1647 | static void phylink_sfp_link_up(void *upstream) |
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c index ad9db652874d..fef701bfad62 100644 --- a/drivers/net/phy/sfp-bus.c +++ b/drivers/net/phy/sfp-bus.c | |||
@@ -347,6 +347,7 @@ static int sfp_register_bus(struct sfp_bus *bus) | |||
347 | return ret; | 347 | return ret; |
348 | } | 348 | } |
349 | } | 349 | } |
350 | bus->socket_ops->attach(bus->sfp); | ||
350 | if (bus->started) | 351 | if (bus->started) |
351 | bus->socket_ops->start(bus->sfp); | 352 | bus->socket_ops->start(bus->sfp); |
352 | bus->netdev->sfp_bus = bus; | 353 | bus->netdev->sfp_bus = bus; |
@@ -362,6 +363,7 @@ static void sfp_unregister_bus(struct sfp_bus *bus) | |||
362 | if (bus->registered) { | 363 | if (bus->registered) { |
363 | if (bus->started) | 364 | if (bus->started) |
364 | bus->socket_ops->stop(bus->sfp); | 365 | bus->socket_ops->stop(bus->sfp); |
366 | bus->socket_ops->detach(bus->sfp); | ||
365 | if (bus->phydev && ops && ops->disconnect_phy) | 367 | if (bus->phydev && ops && ops->disconnect_phy) |
366 | ops->disconnect_phy(bus->upstream); | 368 | ops->disconnect_phy(bus->upstream); |
367 | } | 369 | } |
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c index fd8bb998ae52..68c8fbf099f8 100644 --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c | |||
@@ -184,6 +184,7 @@ struct sfp { | |||
184 | 184 | ||
185 | struct gpio_desc *gpio[GPIO_MAX]; | 185 | struct gpio_desc *gpio[GPIO_MAX]; |
186 | 186 | ||
187 | bool attached; | ||
187 | unsigned int state; | 188 | unsigned int state; |
188 | struct delayed_work poll; | 189 | struct delayed_work poll; |
189 | struct delayed_work timeout; | 190 | struct delayed_work timeout; |
@@ -1475,7 +1476,7 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) | |||
1475 | */ | 1476 | */ |
1476 | switch (sfp->sm_mod_state) { | 1477 | switch (sfp->sm_mod_state) { |
1477 | default: | 1478 | default: |
1478 | if (event == SFP_E_INSERT) { | 1479 | if (event == SFP_E_INSERT && sfp->attached) { |
1479 | sfp_module_tx_disable(sfp); | 1480 | sfp_module_tx_disable(sfp); |
1480 | sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); | 1481 | sfp_sm_ins_next(sfp, SFP_MOD_PROBE, T_PROBE_INIT); |
1481 | } | 1482 | } |
@@ -1607,6 +1608,19 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event) | |||
1607 | mutex_unlock(&sfp->sm_mutex); | 1608 | mutex_unlock(&sfp->sm_mutex); |
1608 | } | 1609 | } |
1609 | 1610 | ||
1611 | static void sfp_attach(struct sfp *sfp) | ||
1612 | { | ||
1613 | sfp->attached = true; | ||
1614 | if (sfp->state & SFP_F_PRESENT) | ||
1615 | sfp_sm_event(sfp, SFP_E_INSERT); | ||
1616 | } | ||
1617 | |||
1618 | static void sfp_detach(struct sfp *sfp) | ||
1619 | { | ||
1620 | sfp->attached = false; | ||
1621 | sfp_sm_event(sfp, SFP_E_REMOVE); | ||
1622 | } | ||
1623 | |||
1610 | static void sfp_start(struct sfp *sfp) | 1624 | static void sfp_start(struct sfp *sfp) |
1611 | { | 1625 | { |
1612 | sfp_sm_event(sfp, SFP_E_DEV_UP); | 1626 | sfp_sm_event(sfp, SFP_E_DEV_UP); |
@@ -1667,6 +1681,8 @@ static int sfp_module_eeprom(struct sfp *sfp, struct ethtool_eeprom *ee, | |||
1667 | } | 1681 | } |
1668 | 1682 | ||
1669 | static const struct sfp_socket_ops sfp_module_ops = { | 1683 | static const struct sfp_socket_ops sfp_module_ops = { |
1684 | .attach = sfp_attach, | ||
1685 | .detach = sfp_detach, | ||
1670 | .start = sfp_start, | 1686 | .start = sfp_start, |
1671 | .stop = sfp_stop, | 1687 | .stop = sfp_stop, |
1672 | .module_info = sfp_module_info, | 1688 | .module_info = sfp_module_info, |
@@ -1834,10 +1850,6 @@ static int sfp_probe(struct platform_device *pdev) | |||
1834 | dev_info(sfp->dev, "Host maximum power %u.%uW\n", | 1850 | dev_info(sfp->dev, "Host maximum power %u.%uW\n", |
1835 | sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); | 1851 | sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10); |
1836 | 1852 | ||
1837 | sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); | ||
1838 | if (!sfp->sfp_bus) | ||
1839 | return -ENOMEM; | ||
1840 | |||
1841 | /* Get the initial state, and always signal TX disable, | 1853 | /* Get the initial state, and always signal TX disable, |
1842 | * since the network interface will not be up. | 1854 | * since the network interface will not be up. |
1843 | */ | 1855 | */ |
@@ -1848,10 +1860,6 @@ static int sfp_probe(struct platform_device *pdev) | |||
1848 | sfp->state |= SFP_F_RATE_SELECT; | 1860 | sfp->state |= SFP_F_RATE_SELECT; |
1849 | sfp_set_state(sfp, sfp->state); | 1861 | sfp_set_state(sfp, sfp->state); |
1850 | sfp_module_tx_disable(sfp); | 1862 | sfp_module_tx_disable(sfp); |
1851 | rtnl_lock(); | ||
1852 | if (sfp->state & SFP_F_PRESENT) | ||
1853 | sfp_sm_event(sfp, SFP_E_INSERT); | ||
1854 | rtnl_unlock(); | ||
1855 | 1863 | ||
1856 | for (i = 0; i < GPIO_MAX; i++) { | 1864 | for (i = 0; i < GPIO_MAX; i++) { |
1857 | if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) | 1865 | if (gpio_flags[i] != GPIOD_IN || !sfp->gpio[i]) |
@@ -1884,6 +1892,10 @@ static int sfp_probe(struct platform_device *pdev) | |||
1884 | dev_warn(sfp->dev, | 1892 | dev_warn(sfp->dev, |
1885 | "No tx_disable pin: SFP modules will always be emitting.\n"); | 1893 | "No tx_disable pin: SFP modules will always be emitting.\n"); |
1886 | 1894 | ||
1895 | sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops); | ||
1896 | if (!sfp->sfp_bus) | ||
1897 | return -ENOMEM; | ||
1898 | |||
1887 | return 0; | 1899 | return 0; |
1888 | } | 1900 | } |
1889 | 1901 | ||
diff --git a/drivers/net/phy/sfp.h b/drivers/net/phy/sfp.h index 31b0acf337e2..64f54b0bbd8c 100644 --- a/drivers/net/phy/sfp.h +++ b/drivers/net/phy/sfp.h | |||
@@ -7,6 +7,8 @@ | |||
7 | struct sfp; | 7 | struct sfp; |
8 | 8 | ||
9 | struct sfp_socket_ops { | 9 | struct sfp_socket_ops { |
10 | void (*attach)(struct sfp *sfp); | ||
11 | void (*detach)(struct sfp *sfp); | ||
10 | void (*start)(struct sfp *sfp); | 12 | void (*start)(struct sfp *sfp); |
11 | void (*stop)(struct sfp *sfp); | 13 | void (*stop)(struct sfp *sfp); |
12 | int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); | 14 | int (*module_info)(struct sfp *sfp, struct ethtool_modinfo *modinfo); |
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index afd9d25d1992..958f1cf67282 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c | |||
@@ -256,17 +256,6 @@ static void __team_option_inst_mark_removed_port(struct team *team, | |||
256 | } | 256 | } |
257 | } | 257 | } |
258 | 258 | ||
259 | static bool __team_option_inst_tmp_find(const struct list_head *opts, | ||
260 | const struct team_option_inst *needle) | ||
261 | { | ||
262 | struct team_option_inst *opt_inst; | ||
263 | |||
264 | list_for_each_entry(opt_inst, opts, tmp_list) | ||
265 | if (opt_inst == needle) | ||
266 | return true; | ||
267 | return false; | ||
268 | } | ||
269 | |||
270 | static int __team_options_register(struct team *team, | 259 | static int __team_options_register(struct team *team, |
271 | const struct team_option *option, | 260 | const struct team_option *option, |
272 | size_t option_count) | 261 | size_t option_count) |
@@ -2460,7 +2449,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
2460 | int err = 0; | 2449 | int err = 0; |
2461 | int i; | 2450 | int i; |
2462 | struct nlattr *nl_option; | 2451 | struct nlattr *nl_option; |
2463 | LIST_HEAD(opt_inst_list); | ||
2464 | 2452 | ||
2465 | rtnl_lock(); | 2453 | rtnl_lock(); |
2466 | 2454 | ||
@@ -2480,6 +2468,7 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
2480 | struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; | 2468 | struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; |
2481 | struct nlattr *attr; | 2469 | struct nlattr *attr; |
2482 | struct nlattr *attr_data; | 2470 | struct nlattr *attr_data; |
2471 | LIST_HEAD(opt_inst_list); | ||
2483 | enum team_option_type opt_type; | 2472 | enum team_option_type opt_type; |
2484 | int opt_port_ifindex = 0; /* != 0 for per-port options */ | 2473 | int opt_port_ifindex = 0; /* != 0 for per-port options */ |
2485 | u32 opt_array_index = 0; | 2474 | u32 opt_array_index = 0; |
@@ -2584,23 +2573,17 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) | |||
2584 | if (err) | 2573 | if (err) |
2585 | goto team_put; | 2574 | goto team_put; |
2586 | opt_inst->changed = true; | 2575 | opt_inst->changed = true; |
2587 | |||
2588 | /* dumb/evil user-space can send us duplicate opt, | ||
2589 | * keep only the last one | ||
2590 | */ | ||
2591 | if (__team_option_inst_tmp_find(&opt_inst_list, | ||
2592 | opt_inst)) | ||
2593 | continue; | ||
2594 | |||
2595 | list_add(&opt_inst->tmp_list, &opt_inst_list); | 2576 | list_add(&opt_inst->tmp_list, &opt_inst_list); |
2596 | } | 2577 | } |
2597 | if (!opt_found) { | 2578 | if (!opt_found) { |
2598 | err = -ENOENT; | 2579 | err = -ENOENT; |
2599 | goto team_put; | 2580 | goto team_put; |
2600 | } | 2581 | } |
2601 | } | ||
2602 | 2582 | ||
2603 | err = team_nl_send_event_options_get(team, &opt_inst_list); | 2583 | err = team_nl_send_event_options_get(team, &opt_inst_list); |
2584 | if (err) | ||
2585 | break; | ||
2586 | } | ||
2604 | 2587 | ||
2605 | team_put: | 2588 | team_put: |
2606 | team_nl_team_put(team); | 2589 | team_nl_team_put(team); |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 18656c4094b3..fed298c0cb39 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -866,8 +866,6 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
866 | if (rtnl_dereference(tun->xdp_prog)) | 866 | if (rtnl_dereference(tun->xdp_prog)) |
867 | sock_set_flag(&tfile->sk, SOCK_XDP); | 867 | sock_set_flag(&tfile->sk, SOCK_XDP); |
868 | 868 | ||
869 | tun_set_real_num_queues(tun); | ||
870 | |||
871 | /* device is allowed to go away first, so no need to hold extra | 869 | /* device is allowed to go away first, so no need to hold extra |
872 | * refcnt. | 870 | * refcnt. |
873 | */ | 871 | */ |
@@ -879,6 +877,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file, | |||
879 | rcu_assign_pointer(tfile->tun, tun); | 877 | rcu_assign_pointer(tfile->tun, tun); |
880 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); | 878 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); |
881 | tun->numqueues++; | 879 | tun->numqueues++; |
880 | tun_set_real_num_queues(tun); | ||
882 | out: | 881 | out: |
883 | return err; | 882 | return err; |
884 | } | 883 | } |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 8fadd8eaf601..4cfceb789eea 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -57,6 +57,8 @@ module_param(napi_tx, bool, 0644); | |||
57 | #define VIRTIO_XDP_TX BIT(0) | 57 | #define VIRTIO_XDP_TX BIT(0) |
58 | #define VIRTIO_XDP_REDIR BIT(1) | 58 | #define VIRTIO_XDP_REDIR BIT(1) |
59 | 59 | ||
60 | #define VIRTIO_XDP_FLAG BIT(0) | ||
61 | |||
60 | /* RX packet size EWMA. The average packet size is used to determine the packet | 62 | /* RX packet size EWMA. The average packet size is used to determine the packet |
61 | * buffer size when refilling RX rings. As the entire RX ring may be refilled | 63 | * buffer size when refilling RX rings. As the entire RX ring may be refilled |
62 | * at once, the weight is chosen so that the EWMA will be insensitive to short- | 64 | * at once, the weight is chosen so that the EWMA will be insensitive to short- |
@@ -252,6 +254,21 @@ struct padded_vnet_hdr { | |||
252 | char padding[4]; | 254 | char padding[4]; |
253 | }; | 255 | }; |
254 | 256 | ||
257 | static bool is_xdp_frame(void *ptr) | ||
258 | { | ||
259 | return (unsigned long)ptr & VIRTIO_XDP_FLAG; | ||
260 | } | ||
261 | |||
262 | static void *xdp_to_ptr(struct xdp_frame *ptr) | ||
263 | { | ||
264 | return (void *)((unsigned long)ptr | VIRTIO_XDP_FLAG); | ||
265 | } | ||
266 | |||
267 | static struct xdp_frame *ptr_to_xdp(void *ptr) | ||
268 | { | ||
269 | return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); | ||
270 | } | ||
271 | |||
255 | /* Converting between virtqueue no. and kernel tx/rx queue no. | 272 | /* Converting between virtqueue no. and kernel tx/rx queue no. |
256 | * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq | 273 | * 0:rx0 1:tx0 2:rx1 3:tx1 ... 2N:rxN 2N+1:txN 2N+2:cvq |
257 | */ | 274 | */ |
@@ -462,7 +479,8 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, | |||
462 | 479 | ||
463 | sg_init_one(sq->sg, xdpf->data, xdpf->len); | 480 | sg_init_one(sq->sg, xdpf->data, xdpf->len); |
464 | 481 | ||
465 | err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdpf, GFP_ATOMIC); | 482 | err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp_to_ptr(xdpf), |
483 | GFP_ATOMIC); | ||
466 | if (unlikely(err)) | 484 | if (unlikely(err)) |
467 | return -ENOSPC; /* Caller handle free/refcnt */ | 485 | return -ENOSPC; /* Caller handle free/refcnt */ |
468 | 486 | ||
@@ -482,36 +500,47 @@ static int virtnet_xdp_xmit(struct net_device *dev, | |||
482 | { | 500 | { |
483 | struct virtnet_info *vi = netdev_priv(dev); | 501 | struct virtnet_info *vi = netdev_priv(dev); |
484 | struct receive_queue *rq = vi->rq; | 502 | struct receive_queue *rq = vi->rq; |
485 | struct xdp_frame *xdpf_sent; | ||
486 | struct bpf_prog *xdp_prog; | 503 | struct bpf_prog *xdp_prog; |
487 | struct send_queue *sq; | 504 | struct send_queue *sq; |
488 | unsigned int len; | 505 | unsigned int len; |
506 | int packets = 0; | ||
507 | int bytes = 0; | ||
489 | int drops = 0; | 508 | int drops = 0; |
490 | int kicks = 0; | 509 | int kicks = 0; |
491 | int ret, err; | 510 | int ret, err; |
511 | void *ptr; | ||
492 | int i; | 512 | int i; |
493 | 513 | ||
494 | sq = virtnet_xdp_sq(vi); | ||
495 | |||
496 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { | ||
497 | ret = -EINVAL; | ||
498 | drops = n; | ||
499 | goto out; | ||
500 | } | ||
501 | |||
502 | /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this | 514 | /* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this |
503 | * indicate XDP resources have been successfully allocated. | 515 | * indicate XDP resources have been successfully allocated. |
504 | */ | 516 | */ |
505 | xdp_prog = rcu_dereference(rq->xdp_prog); | 517 | xdp_prog = rcu_dereference(rq->xdp_prog); |
506 | if (!xdp_prog) { | 518 | if (!xdp_prog) |
507 | ret = -ENXIO; | 519 | return -ENXIO; |
520 | |||
521 | sq = virtnet_xdp_sq(vi); | ||
522 | |||
523 | if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) { | ||
524 | ret = -EINVAL; | ||
508 | drops = n; | 525 | drops = n; |
509 | goto out; | 526 | goto out; |
510 | } | 527 | } |
511 | 528 | ||
512 | /* Free up any pending old buffers before queueing new ones. */ | 529 | /* Free up any pending old buffers before queueing new ones. */ |
513 | while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) | 530 | while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
514 | xdp_return_frame(xdpf_sent); | 531 | if (likely(is_xdp_frame(ptr))) { |
532 | struct xdp_frame *frame = ptr_to_xdp(ptr); | ||
533 | |||
534 | bytes += frame->len; | ||
535 | xdp_return_frame(frame); | ||
536 | } else { | ||
537 | struct sk_buff *skb = ptr; | ||
538 | |||
539 | bytes += skb->len; | ||
540 | napi_consume_skb(skb, false); | ||
541 | } | ||
542 | packets++; | ||
543 | } | ||
515 | 544 | ||
516 | for (i = 0; i < n; i++) { | 545 | for (i = 0; i < n; i++) { |
517 | struct xdp_frame *xdpf = frames[i]; | 546 | struct xdp_frame *xdpf = frames[i]; |
@@ -530,6 +559,8 @@ static int virtnet_xdp_xmit(struct net_device *dev, | |||
530 | } | 559 | } |
531 | out: | 560 | out: |
532 | u64_stats_update_begin(&sq->stats.syncp); | 561 | u64_stats_update_begin(&sq->stats.syncp); |
562 | sq->stats.bytes += bytes; | ||
563 | sq->stats.packets += packets; | ||
533 | sq->stats.xdp_tx += n; | 564 | sq->stats.xdp_tx += n; |
534 | sq->stats.xdp_tx_drops += drops; | 565 | sq->stats.xdp_tx_drops += drops; |
535 | sq->stats.kicks += kicks; | 566 | sq->stats.kicks += kicks; |
@@ -1332,18 +1363,26 @@ static int virtnet_receive(struct receive_queue *rq, int budget, | |||
1332 | 1363 | ||
1333 | static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) | 1364 | static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) |
1334 | { | 1365 | { |
1335 | struct sk_buff *skb; | ||
1336 | unsigned int len; | 1366 | unsigned int len; |
1337 | unsigned int packets = 0; | 1367 | unsigned int packets = 0; |
1338 | unsigned int bytes = 0; | 1368 | unsigned int bytes = 0; |
1369 | void *ptr; | ||
1339 | 1370 | ||
1340 | while ((skb = virtqueue_get_buf(sq->vq, &len)) != NULL) { | 1371 | while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { |
1341 | pr_debug("Sent skb %p\n", skb); | 1372 | if (likely(!is_xdp_frame(ptr))) { |
1373 | struct sk_buff *skb = ptr; | ||
1342 | 1374 | ||
1343 | bytes += skb->len; | 1375 | pr_debug("Sent skb %p\n", skb); |
1344 | packets++; | 1376 | |
1377 | bytes += skb->len; | ||
1378 | napi_consume_skb(skb, in_napi); | ||
1379 | } else { | ||
1380 | struct xdp_frame *frame = ptr_to_xdp(ptr); | ||
1345 | 1381 | ||
1346 | napi_consume_skb(skb, in_napi); | 1382 | bytes += frame->len; |
1383 | xdp_return_frame(frame); | ||
1384 | } | ||
1385 | packets++; | ||
1347 | } | 1386 | } |
1348 | 1387 | ||
1349 | /* Avoid overhead when no packets have been processed | 1388 | /* Avoid overhead when no packets have been processed |
@@ -1358,6 +1397,16 @@ static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) | |||
1358 | u64_stats_update_end(&sq->stats.syncp); | 1397 | u64_stats_update_end(&sq->stats.syncp); |
1359 | } | 1398 | } |
1360 | 1399 | ||
1400 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) | ||
1401 | { | ||
1402 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) | ||
1403 | return false; | ||
1404 | else if (q < vi->curr_queue_pairs) | ||
1405 | return true; | ||
1406 | else | ||
1407 | return false; | ||
1408 | } | ||
1409 | |||
1361 | static void virtnet_poll_cleantx(struct receive_queue *rq) | 1410 | static void virtnet_poll_cleantx(struct receive_queue *rq) |
1362 | { | 1411 | { |
1363 | struct virtnet_info *vi = rq->vq->vdev->priv; | 1412 | struct virtnet_info *vi = rq->vq->vdev->priv; |
@@ -1365,7 +1414,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) | |||
1365 | struct send_queue *sq = &vi->sq[index]; | 1414 | struct send_queue *sq = &vi->sq[index]; |
1366 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); | 1415 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index); |
1367 | 1416 | ||
1368 | if (!sq->napi.weight) | 1417 | if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index)) |
1369 | return; | 1418 | return; |
1370 | 1419 | ||
1371 | if (__netif_tx_trylock(txq)) { | 1420 | if (__netif_tx_trylock(txq)) { |
@@ -1442,8 +1491,16 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) | |||
1442 | { | 1491 | { |
1443 | struct send_queue *sq = container_of(napi, struct send_queue, napi); | 1492 | struct send_queue *sq = container_of(napi, struct send_queue, napi); |
1444 | struct virtnet_info *vi = sq->vq->vdev->priv; | 1493 | struct virtnet_info *vi = sq->vq->vdev->priv; |
1445 | struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, vq2txq(sq->vq)); | 1494 | unsigned int index = vq2txq(sq->vq); |
1495 | struct netdev_queue *txq; | ||
1446 | 1496 | ||
1497 | if (unlikely(is_xdp_raw_buffer_queue(vi, index))) { | ||
1498 | /* We don't need to enable cb for XDP */ | ||
1499 | napi_complete_done(napi, 0); | ||
1500 | return 0; | ||
1501 | } | ||
1502 | |||
1503 | txq = netdev_get_tx_queue(vi->dev, index); | ||
1447 | __netif_tx_lock(txq, raw_smp_processor_id()); | 1504 | __netif_tx_lock(txq, raw_smp_processor_id()); |
1448 | free_old_xmit_skbs(sq, true); | 1505 | free_old_xmit_skbs(sq, true); |
1449 | __netif_tx_unlock(txq); | 1506 | __netif_tx_unlock(txq); |
@@ -2395,6 +2452,10 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
2395 | return -ENOMEM; | 2452 | return -ENOMEM; |
2396 | } | 2453 | } |
2397 | 2454 | ||
2455 | old_prog = rtnl_dereference(vi->rq[0].xdp_prog); | ||
2456 | if (!prog && !old_prog) | ||
2457 | return 0; | ||
2458 | |||
2398 | if (prog) { | 2459 | if (prog) { |
2399 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); | 2460 | prog = bpf_prog_add(prog, vi->max_queue_pairs - 1); |
2400 | if (IS_ERR(prog)) | 2461 | if (IS_ERR(prog)) |
@@ -2402,36 +2463,62 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, | |||
2402 | } | 2463 | } |
2403 | 2464 | ||
2404 | /* Make sure NAPI is not using any XDP TX queues for RX. */ | 2465 | /* Make sure NAPI is not using any XDP TX queues for RX. */ |
2405 | if (netif_running(dev)) | 2466 | if (netif_running(dev)) { |
2406 | for (i = 0; i < vi->max_queue_pairs; i++) | 2467 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2407 | napi_disable(&vi->rq[i].napi); | 2468 | napi_disable(&vi->rq[i].napi); |
2469 | virtnet_napi_tx_disable(&vi->sq[i].napi); | ||
2470 | } | ||
2471 | } | ||
2472 | |||
2473 | if (!prog) { | ||
2474 | for (i = 0; i < vi->max_queue_pairs; i++) { | ||
2475 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); | ||
2476 | if (i == 0) | ||
2477 | virtnet_restore_guest_offloads(vi); | ||
2478 | } | ||
2479 | synchronize_net(); | ||
2480 | } | ||
2408 | 2481 | ||
2409 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); | ||
2410 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); | 2482 | err = _virtnet_set_queues(vi, curr_qp + xdp_qp); |
2411 | if (err) | 2483 | if (err) |
2412 | goto err; | 2484 | goto err; |
2485 | netif_set_real_num_rx_queues(dev, curr_qp + xdp_qp); | ||
2413 | vi->xdp_queue_pairs = xdp_qp; | 2486 | vi->xdp_queue_pairs = xdp_qp; |
2414 | 2487 | ||
2415 | for (i = 0; i < vi->max_queue_pairs; i++) { | 2488 | if (prog) { |
2416 | old_prog = rtnl_dereference(vi->rq[i].xdp_prog); | 2489 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2417 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); | 2490 | rcu_assign_pointer(vi->rq[i].xdp_prog, prog); |
2418 | if (i == 0) { | 2491 | if (i == 0 && !old_prog) |
2419 | if (!old_prog) | ||
2420 | virtnet_clear_guest_offloads(vi); | 2492 | virtnet_clear_guest_offloads(vi); |
2421 | if (!prog) | ||
2422 | virtnet_restore_guest_offloads(vi); | ||
2423 | } | 2493 | } |
2494 | } | ||
2495 | |||
2496 | for (i = 0; i < vi->max_queue_pairs; i++) { | ||
2424 | if (old_prog) | 2497 | if (old_prog) |
2425 | bpf_prog_put(old_prog); | 2498 | bpf_prog_put(old_prog); |
2426 | if (netif_running(dev)) | 2499 | if (netif_running(dev)) { |
2427 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | 2500 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); |
2501 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, | ||
2502 | &vi->sq[i].napi); | ||
2503 | } | ||
2428 | } | 2504 | } |
2429 | 2505 | ||
2430 | return 0; | 2506 | return 0; |
2431 | 2507 | ||
2432 | err: | 2508 | err: |
2433 | for (i = 0; i < vi->max_queue_pairs; i++) | 2509 | if (!prog) { |
2434 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | 2510 | virtnet_clear_guest_offloads(vi); |
2511 | for (i = 0; i < vi->max_queue_pairs; i++) | ||
2512 | rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog); | ||
2513 | } | ||
2514 | |||
2515 | if (netif_running(dev)) { | ||
2516 | for (i = 0; i < vi->max_queue_pairs; i++) { | ||
2517 | virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); | ||
2518 | virtnet_napi_tx_enable(vi, vi->sq[i].vq, | ||
2519 | &vi->sq[i].napi); | ||
2520 | } | ||
2521 | } | ||
2435 | if (prog) | 2522 | if (prog) |
2436 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); | 2523 | bpf_prog_sub(prog, vi->max_queue_pairs - 1); |
2437 | return err; | 2524 | return err; |
@@ -2613,16 +2700,6 @@ static void free_receive_page_frags(struct virtnet_info *vi) | |||
2613 | put_page(vi->rq[i].alloc_frag.page); | 2700 | put_page(vi->rq[i].alloc_frag.page); |
2614 | } | 2701 | } |
2615 | 2702 | ||
2616 | static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) | ||
2617 | { | ||
2618 | if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) | ||
2619 | return false; | ||
2620 | else if (q < vi->curr_queue_pairs) | ||
2621 | return true; | ||
2622 | else | ||
2623 | return false; | ||
2624 | } | ||
2625 | |||
2626 | static void free_unused_bufs(struct virtnet_info *vi) | 2703 | static void free_unused_bufs(struct virtnet_info *vi) |
2627 | { | 2704 | { |
2628 | void *buf; | 2705 | void *buf; |
@@ -2631,10 +2708,10 @@ static void free_unused_bufs(struct virtnet_info *vi) | |||
2631 | for (i = 0; i < vi->max_queue_pairs; i++) { | 2708 | for (i = 0; i < vi->max_queue_pairs; i++) { |
2632 | struct virtqueue *vq = vi->sq[i].vq; | 2709 | struct virtqueue *vq = vi->sq[i].vq; |
2633 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { | 2710 | while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) { |
2634 | if (!is_xdp_raw_buffer_queue(vi, i)) | 2711 | if (!is_xdp_frame(buf)) |
2635 | dev_kfree_skb(buf); | 2712 | dev_kfree_skb(buf); |
2636 | else | 2713 | else |
2637 | put_page(virt_to_head_page(buf)); | 2714 | xdp_return_frame(ptr_to_xdp(buf)); |
2638 | } | 2715 | } |
2639 | } | 2716 | } |
2640 | 2717 | ||
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 5209ee9aac47..2aae11feff0c 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -2219,7 +2219,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, | |||
2219 | struct pcpu_sw_netstats *tx_stats, *rx_stats; | 2219 | struct pcpu_sw_netstats *tx_stats, *rx_stats; |
2220 | union vxlan_addr loopback; | 2220 | union vxlan_addr loopback; |
2221 | union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; | 2221 | union vxlan_addr *remote_ip = &dst_vxlan->default_dst.remote_ip; |
2222 | struct net_device *dev = skb->dev; | 2222 | struct net_device *dev; |
2223 | int len = skb->len; | 2223 | int len = skb->len; |
2224 | 2224 | ||
2225 | tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); | 2225 | tx_stats = this_cpu_ptr(src_vxlan->dev->tstats); |
@@ -2239,9 +2239,15 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, | |||
2239 | #endif | 2239 | #endif |
2240 | } | 2240 | } |
2241 | 2241 | ||
2242 | rcu_read_lock(); | ||
2243 | dev = skb->dev; | ||
2244 | if (unlikely(!(dev->flags & IFF_UP))) { | ||
2245 | kfree_skb(skb); | ||
2246 | goto drop; | ||
2247 | } | ||
2248 | |||
2242 | if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) | 2249 | if (dst_vxlan->cfg.flags & VXLAN_F_LEARN) |
2243 | vxlan_snoop(skb->dev, &loopback, eth_hdr(skb)->h_source, 0, | 2250 | vxlan_snoop(dev, &loopback, eth_hdr(skb)->h_source, 0, vni); |
2244 | vni); | ||
2245 | 2251 | ||
2246 | u64_stats_update_begin(&tx_stats->syncp); | 2252 | u64_stats_update_begin(&tx_stats->syncp); |
2247 | tx_stats->tx_packets++; | 2253 | tx_stats->tx_packets++; |
@@ -2254,8 +2260,10 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan, | |||
2254 | rx_stats->rx_bytes += len; | 2260 | rx_stats->rx_bytes += len; |
2255 | u64_stats_update_end(&rx_stats->syncp); | 2261 | u64_stats_update_end(&rx_stats->syncp); |
2256 | } else { | 2262 | } else { |
2263 | drop: | ||
2257 | dev->stats.rx_dropped++; | 2264 | dev->stats.rx_dropped++; |
2258 | } | 2265 | } |
2266 | rcu_read_unlock(); | ||
2259 | } | 2267 | } |
2260 | 2268 | ||
2261 | static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, | 2269 | static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev, |
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index c0b0f525c87c..27decf8ae840 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c | |||
@@ -1575,7 +1575,7 @@ try: | |||
1575 | dev->stats.tx_packets++; | 1575 | dev->stats.tx_packets++; |
1576 | dev->stats.tx_bytes += skb->len; | 1576 | dev->stats.tx_bytes += skb->len; |
1577 | } | 1577 | } |
1578 | dev_kfree_skb_irq(skb); | 1578 | dev_consume_skb_irq(skb); |
1579 | dpriv->tx_skbuff[cur] = NULL; | 1579 | dpriv->tx_skbuff[cur] = NULL; |
1580 | ++dpriv->tx_dirty; | 1580 | ++dpriv->tx_dirty; |
1581 | } else { | 1581 | } else { |
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 66d889d54e58..a08f04c3f644 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c | |||
@@ -482,7 +482,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) | |||
482 | memset(priv->tx_buffer + | 482 | memset(priv->tx_buffer + |
483 | (be32_to_cpu(bd->buf) - priv->dma_tx_addr), | 483 | (be32_to_cpu(bd->buf) - priv->dma_tx_addr), |
484 | 0, skb->len); | 484 | 0, skb->len); |
485 | dev_kfree_skb_irq(skb); | 485 | dev_consume_skb_irq(skb); |
486 | 486 | ||
487 | priv->tx_skbuff[priv->skb_dirtytx] = NULL; | 487 | priv->tx_skbuff[priv->skb_dirtytx] = NULL; |
488 | priv->skb_dirtytx = | 488 | priv->skb_dirtytx = |
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index 399b501f3c3c..e8891f5fc83a 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c | |||
@@ -548,7 +548,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { | |||
548 | { | 548 | { |
549 | .id = WCN3990_HW_1_0_DEV_VERSION, | 549 | .id = WCN3990_HW_1_0_DEV_VERSION, |
550 | .dev_id = 0, | 550 | .dev_id = 0, |
551 | .bus = ATH10K_BUS_PCI, | 551 | .bus = ATH10K_BUS_SNOC, |
552 | .name = "wcn3990 hw1.0", | 552 | .name = "wcn3990 hw1.0", |
553 | .continuous_frag_desc = true, | 553 | .continuous_frag_desc = true, |
554 | .tx_chain_mask = 0x7, | 554 | .tx_chain_mask = 0x7, |
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index 491ca3c8b43c..83d5bceea08f 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config IWLWIFI | 1 | config IWLWIFI |
2 | tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " | 2 | tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " |
3 | depends on PCI && HAS_IOMEM | 3 | depends on PCI && HAS_IOMEM && CFG80211 |
4 | select FW_LOADER | 4 | select FW_LOADER |
5 | ---help--- | 5 | ---help--- |
6 | Select to build the driver supporting the: | 6 | Select to build the driver supporting the: |
@@ -47,6 +47,7 @@ if IWLWIFI | |||
47 | config IWLWIFI_LEDS | 47 | config IWLWIFI_LEDS |
48 | bool | 48 | bool |
49 | depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI | 49 | depends on LEDS_CLASS=y || LEDS_CLASS=IWLWIFI |
50 | depends on IWLMVM || IWLDVM | ||
50 | select LEDS_TRIGGERS | 51 | select LEDS_TRIGGERS |
51 | select MAC80211_LEDS | 52 | select MAC80211_LEDS |
52 | default y | 53 | default y |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index 497e762978cc..b2cabce1d74d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | |||
@@ -212,24 +212,24 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) | |||
212 | mt76x02_add_rate_power_offset(t, delta); | 212 | mt76x02_add_rate_power_offset(t, delta); |
213 | } | 213 | } |
214 | 214 | ||
215 | void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | 215 | void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp) |
216 | { | 216 | { |
217 | struct mt76x0_chan_map { | 217 | struct mt76x0_chan_map { |
218 | u8 chan; | 218 | u8 chan; |
219 | u8 offset; | 219 | u8 offset; |
220 | } chan_map[] = { | 220 | } chan_map[] = { |
221 | { 2, 0 }, { 4, 1 }, { 6, 2 }, { 8, 3 }, | 221 | { 2, 0 }, { 4, 2 }, { 6, 4 }, { 8, 6 }, |
222 | { 10, 4 }, { 12, 5 }, { 14, 6 }, { 38, 0 }, | 222 | { 10, 8 }, { 12, 10 }, { 14, 12 }, { 38, 0 }, |
223 | { 44, 1 }, { 48, 2 }, { 54, 3 }, { 60, 4 }, | 223 | { 44, 2 }, { 48, 4 }, { 54, 6 }, { 60, 8 }, |
224 | { 64, 5 }, { 102, 6 }, { 108, 7 }, { 112, 8 }, | 224 | { 64, 10 }, { 102, 12 }, { 108, 14 }, { 112, 16 }, |
225 | { 118, 9 }, { 124, 10 }, { 128, 11 }, { 134, 12 }, | 225 | { 118, 18 }, { 124, 20 }, { 128, 22 }, { 134, 24 }, |
226 | { 140, 13 }, { 151, 14 }, { 157, 15 }, { 161, 16 }, | 226 | { 140, 26 }, { 151, 28 }, { 157, 30 }, { 161, 32 }, |
227 | { 167, 17 }, { 171, 18 }, { 173, 19 }, | 227 | { 167, 34 }, { 171, 36 }, { 175, 38 }, |
228 | }; | 228 | }; |
229 | struct ieee80211_channel *chan = dev->mt76.chandef.chan; | 229 | struct ieee80211_channel *chan = dev->mt76.chandef.chan; |
230 | u8 offset, addr; | 230 | u8 offset, addr; |
231 | int i, idx = 0; | ||
231 | u16 data; | 232 | u16 data; |
232 | int i; | ||
233 | 233 | ||
234 | if (mt76x0_tssi_enabled(dev)) { | 234 | if (mt76x0_tssi_enabled(dev)) { |
235 | s8 target_power; | 235 | s8 target_power; |
@@ -239,14 +239,14 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | |||
239 | else | 239 | else |
240 | data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); | 240 | data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); |
241 | target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; | 241 | target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; |
242 | info[0] = target_power + mt76x0_get_delta(dev); | 242 | *tp = target_power + mt76x0_get_delta(dev); |
243 | info[1] = 0; | ||
244 | 243 | ||
245 | return; | 244 | return; |
246 | } | 245 | } |
247 | 246 | ||
248 | for (i = 0; i < ARRAY_SIZE(chan_map); i++) { | 247 | for (i = 0; i < ARRAY_SIZE(chan_map); i++) { |
249 | if (chan_map[i].chan <= chan->hw_value) { | 248 | if (chan->hw_value <= chan_map[i].chan) { |
249 | idx = (chan->hw_value == chan_map[i].chan); | ||
250 | offset = chan_map[i].offset; | 250 | offset = chan_map[i].offset; |
251 | break; | 251 | break; |
252 | } | 252 | } |
@@ -258,13 +258,16 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | |||
258 | addr = MT_EE_TX_POWER_DELTA_BW80 + offset; | 258 | addr = MT_EE_TX_POWER_DELTA_BW80 + offset; |
259 | } else { | 259 | } else { |
260 | switch (chan->hw_value) { | 260 | switch (chan->hw_value) { |
261 | case 42: | ||
262 | offset = 2; | ||
263 | break; | ||
261 | case 58: | 264 | case 58: |
262 | offset = 8; | 265 | offset = 8; |
263 | break; | 266 | break; |
264 | case 106: | 267 | case 106: |
265 | offset = 14; | 268 | offset = 14; |
266 | break; | 269 | break; |
267 | case 112: | 270 | case 122: |
268 | offset = 20; | 271 | offset = 20; |
269 | break; | 272 | break; |
270 | case 155: | 273 | case 155: |
@@ -277,14 +280,9 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) | |||
277 | } | 280 | } |
278 | 281 | ||
279 | data = mt76x02_eeprom_get(dev, addr); | 282 | data = mt76x02_eeprom_get(dev, addr); |
280 | 283 | *tp = data >> (8 * idx); | |
281 | info[0] = data; | 284 | if (*tp < 0 || *tp > 0x3f) |
282 | if (!info[0] || info[0] > 0x3f) | 285 | *tp = 5; |
283 | info[0] = 5; | ||
284 | |||
285 | info[1] = data >> 8; | ||
286 | if (!info[1] || info[1] > 0x3f) | ||
287 | info[1] = 5; | ||
288 | } | 286 | } |
289 | 287 | ||
290 | static int mt76x0_check_eeprom(struct mt76x02_dev *dev) | 288 | static int mt76x0_check_eeprom(struct mt76x02_dev *dev) |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h index ee9ade9f3c8b..42b259f90b6d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.h | |||
@@ -26,7 +26,7 @@ struct mt76x02_dev; | |||
26 | int mt76x0_eeprom_init(struct mt76x02_dev *dev); | 26 | int mt76x0_eeprom_init(struct mt76x02_dev *dev); |
27 | void mt76x0_read_rx_gain(struct mt76x02_dev *dev); | 27 | void mt76x0_read_rx_gain(struct mt76x02_dev *dev); |
28 | void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); | 28 | void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev); |
29 | void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info); | 29 | void mt76x0_get_power_info(struct mt76x02_dev *dev, s8 *tp); |
30 | 30 | ||
31 | static inline s8 s6_to_s8(u32 val) | 31 | static inline s8 s6_to_s8(u32 val) |
32 | { | 32 | { |
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index 1eb1a802ed20..b6166703ad76 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c | |||
@@ -845,17 +845,17 @@ static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev) | |||
845 | void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) | 845 | void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) |
846 | { | 846 | { |
847 | struct mt76_rate_power *t = &dev->mt76.rate_power; | 847 | struct mt76_rate_power *t = &dev->mt76.rate_power; |
848 | u8 info[2]; | 848 | s8 info; |
849 | 849 | ||
850 | mt76x0_get_tx_power_per_rate(dev); | 850 | mt76x0_get_tx_power_per_rate(dev); |
851 | mt76x0_get_power_info(dev, info); | 851 | mt76x0_get_power_info(dev, &info); |
852 | 852 | ||
853 | mt76x02_add_rate_power_offset(t, info[0]); | 853 | mt76x02_add_rate_power_offset(t, info); |
854 | mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); | 854 | mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); |
855 | dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); | 855 | dev->mt76.txpower_cur = mt76x02_get_max_rate_power(t); |
856 | mt76x02_add_rate_power_offset(t, -info[0]); | 856 | mt76x02_add_rate_power_offset(t, -info); |
857 | 857 | ||
858 | mt76x02_phy_set_txpower(dev, info[0], info[1]); | 858 | mt76x02_phy_set_txpower(dev, info, info); |
859 | } | 859 | } |
860 | 860 | ||
861 | void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) | 861 | void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) |
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c index bd10165d7eec..4d4b07701149 100644 --- a/drivers/net/wireless/ti/wlcore/sdio.c +++ b/drivers/net/wireless/ti/wlcore/sdio.c | |||
@@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | sdio_claim_host(func); | 166 | sdio_claim_host(func); |
167 | /* | ||
168 | * To guarantee that the SDIO card is power cycled, as required to make | ||
169 | * the FW programming to succeed, let's do a brute force HW reset. | ||
170 | */ | ||
171 | mmc_hw_reset(card->host); | ||
172 | |||
167 | sdio_enable_func(func); | 173 | sdio_enable_func(func); |
168 | sdio_release_host(func); | 174 | sdio_release_host(func); |
169 | 175 | ||
@@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue) | |||
174 | { | 180 | { |
175 | struct sdio_func *func = dev_to_sdio_func(glue->dev); | 181 | struct sdio_func *func = dev_to_sdio_func(glue->dev); |
176 | struct mmc_card *card = func->card; | 182 | struct mmc_card *card = func->card; |
177 | int error; | ||
178 | 183 | ||
179 | sdio_claim_host(func); | 184 | sdio_claim_host(func); |
180 | sdio_disable_func(func); | 185 | sdio_disable_func(func); |
181 | sdio_release_host(func); | 186 | sdio_release_host(func); |
182 | 187 | ||
183 | /* Let runtime PM know the card is powered off */ | 188 | /* Let runtime PM know the card is powered off */ |
184 | error = pm_runtime_put(&card->dev); | 189 | pm_runtime_put(&card->dev); |
185 | if (error < 0 && error != -EBUSY) { | ||
186 | dev_err(&card->dev, "%s failed: %i\n", __func__, error); | ||
187 | |||
188 | return error; | ||
189 | } | ||
190 | |||
191 | return 0; | 190 | return 0; |
192 | } | 191 | } |
193 | 192 | ||
diff --git a/drivers/pci/controller/dwc/pci-imx6.c b/drivers/pci/controller/dwc/pci-imx6.c index 52e47dac028f..80f843030e36 100644 --- a/drivers/pci/controller/dwc/pci-imx6.c +++ b/drivers/pci/controller/dwc/pci-imx6.c | |||
@@ -310,6 +310,9 @@ static int imx6_pcie_attach_pd(struct device *dev) | |||
310 | imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); | 310 | imx6_pcie->pd_pcie = dev_pm_domain_attach_by_name(dev, "pcie"); |
311 | if (IS_ERR(imx6_pcie->pd_pcie)) | 311 | if (IS_ERR(imx6_pcie->pd_pcie)) |
312 | return PTR_ERR(imx6_pcie->pd_pcie); | 312 | return PTR_ERR(imx6_pcie->pd_pcie); |
313 | /* Do nothing when power domain missing */ | ||
314 | if (!imx6_pcie->pd_pcie) | ||
315 | return 0; | ||
313 | link = device_link_add(dev, imx6_pcie->pd_pcie, | 316 | link = device_link_add(dev, imx6_pcie->pd_pcie, |
314 | DL_FLAG_STATELESS | | 317 | DL_FLAG_STATELESS | |
315 | DL_FLAG_PM_RUNTIME | | 318 | DL_FLAG_PM_RUNTIME | |
@@ -323,13 +326,13 @@ static int imx6_pcie_attach_pd(struct device *dev) | |||
323 | if (IS_ERR(imx6_pcie->pd_pcie_phy)) | 326 | if (IS_ERR(imx6_pcie->pd_pcie_phy)) |
324 | return PTR_ERR(imx6_pcie->pd_pcie_phy); | 327 | return PTR_ERR(imx6_pcie->pd_pcie_phy); |
325 | 328 | ||
326 | device_link_add(dev, imx6_pcie->pd_pcie_phy, | 329 | link = device_link_add(dev, imx6_pcie->pd_pcie_phy, |
327 | DL_FLAG_STATELESS | | 330 | DL_FLAG_STATELESS | |
328 | DL_FLAG_PM_RUNTIME | | 331 | DL_FLAG_PM_RUNTIME | |
329 | DL_FLAG_RPM_ACTIVE); | 332 | DL_FLAG_RPM_ACTIVE); |
330 | if (IS_ERR(link)) { | 333 | if (!link) { |
331 | dev_err(dev, "Failed to add device_link to pcie_phy pd: %ld\n", PTR_ERR(link)); | 334 | dev_err(dev, "Failed to add device_link to pcie_phy pd.\n"); |
332 | return PTR_ERR(link); | 335 | return -EINVAL; |
333 | } | 336 | } |
334 | 337 | ||
335 | return 0; | 338 | return 0; |
diff --git a/drivers/pci/controller/dwc/pcie-armada8k.c b/drivers/pci/controller/dwc/pcie-armada8k.c index b171b6bc15c8..0c389a30ef5d 100644 --- a/drivers/pci/controller/dwc/pcie-armada8k.c +++ b/drivers/pci/controller/dwc/pcie-armada8k.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/resource.h> | 22 | #include <linux/resource.h> |
23 | #include <linux/of_pci.h> | 23 | #include <linux/of_pci.h> |
24 | #include <linux/of_irq.h> | 24 | #include <linux/of_irq.h> |
25 | #include <linux/gpio/consumer.h> | ||
26 | 25 | ||
27 | #include "pcie-designware.h" | 26 | #include "pcie-designware.h" |
28 | 27 | ||
@@ -30,7 +29,6 @@ struct armada8k_pcie { | |||
30 | struct dw_pcie *pci; | 29 | struct dw_pcie *pci; |
31 | struct clk *clk; | 30 | struct clk *clk; |
32 | struct clk *clk_reg; | 31 | struct clk *clk_reg; |
33 | struct gpio_desc *reset_gpio; | ||
34 | }; | 32 | }; |
35 | 33 | ||
36 | #define PCIE_VENDOR_REGS_OFFSET 0x8000 | 34 | #define PCIE_VENDOR_REGS_OFFSET 0x8000 |
@@ -139,12 +137,6 @@ static int armada8k_pcie_host_init(struct pcie_port *pp) | |||
139 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); | 137 | struct dw_pcie *pci = to_dw_pcie_from_pp(pp); |
140 | struct armada8k_pcie *pcie = to_armada8k_pcie(pci); | 138 | struct armada8k_pcie *pcie = to_armada8k_pcie(pci); |
141 | 139 | ||
142 | if (pcie->reset_gpio) { | ||
143 | /* assert and then deassert the reset signal */ | ||
144 | gpiod_set_value_cansleep(pcie->reset_gpio, 1); | ||
145 | msleep(100); | ||
146 | gpiod_set_value_cansleep(pcie->reset_gpio, 0); | ||
147 | } | ||
148 | dw_pcie_setup_rc(pp); | 140 | dw_pcie_setup_rc(pp); |
149 | armada8k_pcie_establish_link(pcie); | 141 | armada8k_pcie_establish_link(pcie); |
150 | 142 | ||
@@ -257,14 +249,6 @@ static int armada8k_pcie_probe(struct platform_device *pdev) | |||
257 | goto fail_clkreg; | 249 | goto fail_clkreg; |
258 | } | 250 | } |
259 | 251 | ||
260 | /* Get reset gpio signal and hold asserted (logically high) */ | ||
261 | pcie->reset_gpio = devm_gpiod_get_optional(dev, "reset", | ||
262 | GPIOD_OUT_HIGH); | ||
263 | if (IS_ERR(pcie->reset_gpio)) { | ||
264 | ret = PTR_ERR(pcie->reset_gpio); | ||
265 | goto fail_clkreg; | ||
266 | } | ||
267 | |||
268 | platform_set_drvdata(pdev, pcie); | 252 | platform_set_drvdata(pdev, pcie); |
269 | 253 | ||
270 | ret = armada8k_add_pcie_port(pcie, pdev); | 254 | ret = armada8k_add_pcie_port(pcie, pdev); |
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index b0a413f3f7ca..e2a879e93d86 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -639,8 +639,9 @@ static void quirk_synopsys_haps(struct pci_dev *pdev) | |||
639 | break; | 639 | break; |
640 | } | 640 | } |
641 | } | 641 | } |
642 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, | 642 | DECLARE_PCI_FIXUP_CLASS_HEADER(PCI_VENDOR_ID_SYNOPSYS, PCI_ANY_ID, |
643 | quirk_synopsys_haps); | 643 | PCI_CLASS_SERIAL_USB_XHCI, 0, |
644 | quirk_synopsys_haps); | ||
644 | 645 | ||
645 | /* | 646 | /* |
646 | * Let's make the southbridge information explicit instead of having to | 647 | * Let's make the southbridge information explicit instead of having to |
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 05044e323ea5..03ec7a5d9d0b 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
@@ -1513,7 +1513,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { | |||
1513 | .matches = { | 1513 | .matches = { |
1514 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | 1514 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), |
1515 | DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), | 1515 | DMI_MATCH(DMI_PRODUCT_FAMILY, "Intel_Strago"), |
1516 | DMI_MATCH(DMI_BOARD_VERSION, "1.0"), | 1516 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), |
1517 | }, | 1517 | }, |
1518 | }, | 1518 | }, |
1519 | { | 1519 | { |
@@ -1521,7 +1521,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { | |||
1521 | .matches = { | 1521 | .matches = { |
1522 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), | 1522 | DMI_MATCH(DMI_SYS_VENDOR, "HP"), |
1523 | DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), | 1523 | DMI_MATCH(DMI_PRODUCT_NAME, "Setzer"), |
1524 | DMI_MATCH(DMI_BOARD_VERSION, "1.0"), | 1524 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), |
1525 | }, | 1525 | }, |
1526 | }, | 1526 | }, |
1527 | { | 1527 | { |
@@ -1529,7 +1529,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { | |||
1529 | .matches = { | 1529 | .matches = { |
1530 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | 1530 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), |
1531 | DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), | 1531 | DMI_MATCH(DMI_PRODUCT_NAME, "Cyan"), |
1532 | DMI_MATCH(DMI_BOARD_VERSION, "1.0"), | 1532 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), |
1533 | }, | 1533 | }, |
1534 | }, | 1534 | }, |
1535 | { | 1535 | { |
@@ -1537,7 +1537,7 @@ static const struct dmi_system_id chv_no_valid_mask[] = { | |||
1537 | .matches = { | 1537 | .matches = { |
1538 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), | 1538 | DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"), |
1539 | DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), | 1539 | DMI_MATCH(DMI_PRODUCT_NAME, "Celes"), |
1540 | DMI_MATCH(DMI_BOARD_VERSION, "1.0"), | 1540 | DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"), |
1541 | }, | 1541 | }, |
1542 | }, | 1542 | }, |
1543 | {} | 1543 | {} |
diff --git a/drivers/pinctrl/mediatek/Kconfig b/drivers/pinctrl/mediatek/Kconfig index 1817786ab6aa..a005cbccb4f7 100644 --- a/drivers/pinctrl/mediatek/Kconfig +++ b/drivers/pinctrl/mediatek/Kconfig | |||
@@ -45,12 +45,14 @@ config PINCTRL_MT2701 | |||
45 | config PINCTRL_MT7623 | 45 | config PINCTRL_MT7623 |
46 | bool "Mediatek MT7623 pin control with generic binding" | 46 | bool "Mediatek MT7623 pin control with generic binding" |
47 | depends on MACH_MT7623 || COMPILE_TEST | 47 | depends on MACH_MT7623 || COMPILE_TEST |
48 | depends on OF | ||
48 | default MACH_MT7623 | 49 | default MACH_MT7623 |
49 | select PINCTRL_MTK_MOORE | 50 | select PINCTRL_MTK_MOORE |
50 | 51 | ||
51 | config PINCTRL_MT7629 | 52 | config PINCTRL_MT7629 |
52 | bool "Mediatek MT7629 pin control" | 53 | bool "Mediatek MT7629 pin control" |
53 | depends on MACH_MT7629 || COMPILE_TEST | 54 | depends on MACH_MT7629 || COMPILE_TEST |
55 | depends on OF | ||
54 | default MACH_MT7629 | 56 | default MACH_MT7629 |
55 | select PINCTRL_MTK_MOORE | 57 | select PINCTRL_MTK_MOORE |
56 | 58 | ||
@@ -92,6 +94,7 @@ config PINCTRL_MT6797 | |||
92 | 94 | ||
93 | config PINCTRL_MT7622 | 95 | config PINCTRL_MT7622 |
94 | bool "MediaTek MT7622 pin control" | 96 | bool "MediaTek MT7622 pin control" |
97 | depends on OF | ||
95 | depends on ARM64 || COMPILE_TEST | 98 | depends on ARM64 || COMPILE_TEST |
96 | default ARM64 && ARCH_MEDIATEK | 99 | default ARM64 && ARCH_MEDIATEK |
97 | select PINCTRL_MTK_MOORE | 100 | select PINCTRL_MTK_MOORE |
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index b03481ef99a1..98905d4a79ca 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c | |||
@@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev, | |||
832 | break; | 832 | break; |
833 | 833 | ||
834 | case MCP_TYPE_S18: | 834 | case MCP_TYPE_S18: |
835 | one_regmap_config = | ||
836 | devm_kmemdup(dev, &mcp23x17_regmap, | ||
837 | sizeof(struct regmap_config), GFP_KERNEL); | ||
838 | if (!one_regmap_config) | ||
839 | return -ENOMEM; | ||
835 | mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, | 840 | mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp, |
836 | &mcp23x17_regmap); | 841 | one_regmap_config); |
837 | mcp->reg_shift = 1; | 842 | mcp->reg_shift = 1; |
838 | mcp->chip.ngpio = 16; | 843 | mcp->chip.ngpio = 16; |
839 | mcp->chip.label = "mcp23s18"; | 844 | mcp->chip.label = "mcp23s18"; |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c index aa8b58125568..ef4268cc6227 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h6.c | |||
@@ -588,7 +588,7 @@ static const unsigned int h6_irq_bank_map[] = { 1, 5, 6, 7 }; | |||
588 | static const struct sunxi_pinctrl_desc h6_pinctrl_data = { | 588 | static const struct sunxi_pinctrl_desc h6_pinctrl_data = { |
589 | .pins = h6_pins, | 589 | .pins = h6_pins, |
590 | .npins = ARRAY_SIZE(h6_pins), | 590 | .npins = ARRAY_SIZE(h6_pins), |
591 | .irq_banks = 3, | 591 | .irq_banks = 4, |
592 | .irq_bank_map = h6_irq_bank_map, | 592 | .irq_bank_map = h6_irq_bank_map, |
593 | .irq_read_needs_mux = true, | 593 | .irq_read_needs_mux = true, |
594 | }; | 594 | }; |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c index 5d9184d18c16..0e7fa69e93df 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c | |||
@@ -698,26 +698,24 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset) | |||
698 | { | 698 | { |
699 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); | 699 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); |
700 | unsigned short bank = offset / PINS_PER_BANK; | 700 | unsigned short bank = offset / PINS_PER_BANK; |
701 | struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; | 701 | unsigned short bank_offset = bank - pctl->desc->pin_base / |
702 | struct regulator *reg; | 702 | PINS_PER_BANK; |
703 | struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; | ||
704 | struct regulator *reg = s_reg->regulator; | ||
705 | char supply[16]; | ||
703 | int ret; | 706 | int ret; |
704 | 707 | ||
705 | reg = s_reg->regulator; | 708 | if (reg) { |
706 | if (!reg) { | ||
707 | char supply[16]; | ||
708 | |||
709 | snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); | ||
710 | reg = regulator_get(pctl->dev, supply); | ||
711 | if (IS_ERR(reg)) { | ||
712 | dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", | ||
713 | 'A' + bank); | ||
714 | return PTR_ERR(reg); | ||
715 | } | ||
716 | |||
717 | s_reg->regulator = reg; | ||
718 | refcount_set(&s_reg->refcount, 1); | ||
719 | } else { | ||
720 | refcount_inc(&s_reg->refcount); | 709 | refcount_inc(&s_reg->refcount); |
710 | return 0; | ||
711 | } | ||
712 | |||
713 | snprintf(supply, sizeof(supply), "vcc-p%c", 'a' + bank); | ||
714 | reg = regulator_get(pctl->dev, supply); | ||
715 | if (IS_ERR(reg)) { | ||
716 | dev_err(pctl->dev, "Couldn't get bank P%c regulator\n", | ||
717 | 'A' + bank); | ||
718 | return PTR_ERR(reg); | ||
721 | } | 719 | } |
722 | 720 | ||
723 | ret = regulator_enable(reg); | 721 | ret = regulator_enable(reg); |
@@ -727,13 +725,13 @@ static int sunxi_pmx_request(struct pinctrl_dev *pctldev, unsigned offset) | |||
727 | goto out; | 725 | goto out; |
728 | } | 726 | } |
729 | 727 | ||
728 | s_reg->regulator = reg; | ||
729 | refcount_set(&s_reg->refcount, 1); | ||
730 | |||
730 | return 0; | 731 | return 0; |
731 | 732 | ||
732 | out: | 733 | out: |
733 | if (refcount_dec_and_test(&s_reg->refcount)) { | 734 | regulator_put(s_reg->regulator); |
734 | regulator_put(s_reg->regulator); | ||
735 | s_reg->regulator = NULL; | ||
736 | } | ||
737 | 735 | ||
738 | return ret; | 736 | return ret; |
739 | } | 737 | } |
@@ -742,7 +740,9 @@ static int sunxi_pmx_free(struct pinctrl_dev *pctldev, unsigned offset) | |||
742 | { | 740 | { |
743 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); | 741 | struct sunxi_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev); |
744 | unsigned short bank = offset / PINS_PER_BANK; | 742 | unsigned short bank = offset / PINS_PER_BANK; |
745 | struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank]; | 743 | unsigned short bank_offset = bank - pctl->desc->pin_base / |
744 | PINS_PER_BANK; | ||
745 | struct sunxi_pinctrl_regulator *s_reg = &pctl->regulators[bank_offset]; | ||
746 | 746 | ||
747 | if (!refcount_dec_and_test(&s_reg->refcount)) | 747 | if (!refcount_dec_and_test(&s_reg->refcount)) |
748 | return 0; | 748 | return 0; |
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/drivers/pinctrl/sunxi/pinctrl-sunxi.h index e340d2a24b44..034c0317c8d6 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sunxi.h +++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.h | |||
@@ -136,7 +136,7 @@ struct sunxi_pinctrl { | |||
136 | struct gpio_chip *chip; | 136 | struct gpio_chip *chip; |
137 | const struct sunxi_pinctrl_desc *desc; | 137 | const struct sunxi_pinctrl_desc *desc; |
138 | struct device *dev; | 138 | struct device *dev; |
139 | struct sunxi_pinctrl_regulator regulators[12]; | 139 | struct sunxi_pinctrl_regulator regulators[9]; |
140 | struct irq_domain *domain; | 140 | struct irq_domain *domain; |
141 | struct sunxi_pinctrl_function *functions; | 141 | struct sunxi_pinctrl_function *functions; |
142 | unsigned nfunctions; | 142 | unsigned nfunctions; |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 5e2109c54c7c..b5e9db85e881 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -905,6 +905,7 @@ config TOSHIBA_WMI | |||
905 | config ACPI_CMPC | 905 | config ACPI_CMPC |
906 | tristate "CMPC Laptop Extras" | 906 | tristate "CMPC Laptop Extras" |
907 | depends on ACPI && INPUT | 907 | depends on ACPI && INPUT |
908 | depends on BACKLIGHT_LCD_SUPPORT | ||
908 | depends on RFKILL || RFKILL=n | 909 | depends on RFKILL || RFKILL=n |
909 | select BACKLIGHT_CLASS_DEVICE | 910 | select BACKLIGHT_CLASS_DEVICE |
910 | help | 911 | help |
@@ -1128,6 +1129,7 @@ config INTEL_OAKTRAIL | |||
1128 | config SAMSUNG_Q10 | 1129 | config SAMSUNG_Q10 |
1129 | tristate "Samsung Q10 Extras" | 1130 | tristate "Samsung Q10 Extras" |
1130 | depends on ACPI | 1131 | depends on ACPI |
1132 | depends on BACKLIGHT_LCD_SUPPORT | ||
1131 | select BACKLIGHT_CLASS_DEVICE | 1133 | select BACKLIGHT_CLASS_DEVICE |
1132 | ---help--- | 1134 | ---help--- |
1133 | This driver provides support for backlight control on Samsung Q10 | 1135 | This driver provides support for backlight control on Samsung Q10 |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 4e7b55a14b1a..6e294b4d3635 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -4469,6 +4469,14 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp) | |||
4469 | usrparm.psf_data &= 0x7fffffffULL; | 4469 | usrparm.psf_data &= 0x7fffffffULL; |
4470 | usrparm.rssd_result &= 0x7fffffffULL; | 4470 | usrparm.rssd_result &= 0x7fffffffULL; |
4471 | } | 4471 | } |
4472 | /* at least 2 bytes are accessed and should be allocated */ | ||
4473 | if (usrparm.psf_data_len < 2) { | ||
4474 | DBF_DEV_EVENT(DBF_WARNING, device, | ||
4475 | "Symmetrix ioctl invalid data length %d", | ||
4476 | usrparm.psf_data_len); | ||
4477 | rc = -EINVAL; | ||
4478 | goto out; | ||
4479 | } | ||
4472 | /* alloc I/O data area */ | 4480 | /* alloc I/O data area */ |
4473 | psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); | 4481 | psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA); |
4474 | rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); | 4482 | rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA); |
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 48ea0004a56d..5a699746c357 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -248,7 +248,8 @@ static inline int ap_test_config(unsigned int *field, unsigned int nr) | |||
248 | static inline int ap_test_config_card_id(unsigned int id) | 248 | static inline int ap_test_config_card_id(unsigned int id) |
249 | { | 249 | { |
250 | if (!ap_configuration) /* QCI not supported */ | 250 | if (!ap_configuration) /* QCI not supported */ |
251 | return 1; | 251 | /* only ids 0...3F may be probed */ |
252 | return id < 0x40 ? 1 : 0; | ||
252 | return ap_test_config(ap_configuration->apm, id); | 253 | return ap_test_config(ap_configuration->apm, id); |
253 | } | 254 | } |
254 | 255 | ||
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 0ee026947f20..122059ecad84 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/hashtable.h> | 22 | #include <linux/hashtable.h> |
23 | #include <linux/ip.h> | 23 | #include <linux/ip.h> |
24 | #include <linux/refcount.h> | 24 | #include <linux/refcount.h> |
25 | #include <linux/workqueue.h> | ||
25 | 26 | ||
26 | #include <net/ipv6.h> | 27 | #include <net/ipv6.h> |
27 | #include <net/if_inet6.h> | 28 | #include <net/if_inet6.h> |
@@ -789,6 +790,7 @@ struct qeth_card { | |||
789 | struct qeth_seqno seqno; | 790 | struct qeth_seqno seqno; |
790 | struct qeth_card_options options; | 791 | struct qeth_card_options options; |
791 | 792 | ||
793 | struct workqueue_struct *event_wq; | ||
792 | wait_queue_head_t wait_q; | 794 | wait_queue_head_t wait_q; |
793 | spinlock_t mclock; | 795 | spinlock_t mclock; |
794 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; | 796 | unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; |
@@ -962,7 +964,6 @@ extern const struct attribute_group *qeth_osn_attr_groups[]; | |||
962 | extern const struct attribute_group qeth_device_attr_group; | 964 | extern const struct attribute_group qeth_device_attr_group; |
963 | extern const struct attribute_group qeth_device_blkt_group; | 965 | extern const struct attribute_group qeth_device_blkt_group; |
964 | extern const struct device_type qeth_generic_devtype; | 966 | extern const struct device_type qeth_generic_devtype; |
965 | extern struct workqueue_struct *qeth_wq; | ||
966 | 967 | ||
967 | int qeth_card_hw_is_reachable(struct qeth_card *); | 968 | int qeth_card_hw_is_reachable(struct qeth_card *); |
968 | const char *qeth_get_cardname_short(struct qeth_card *); | 969 | const char *qeth_get_cardname_short(struct qeth_card *); |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e63e03143ca7..89f912213e62 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -74,8 +74,7 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue, | |||
74 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); | 74 | static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf); |
75 | static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); | 75 | static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int); |
76 | 76 | ||
77 | struct workqueue_struct *qeth_wq; | 77 | static struct workqueue_struct *qeth_wq; |
78 | EXPORT_SYMBOL_GPL(qeth_wq); | ||
79 | 78 | ||
80 | int qeth_card_hw_is_reachable(struct qeth_card *card) | 79 | int qeth_card_hw_is_reachable(struct qeth_card *card) |
81 | { | 80 | { |
@@ -566,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card) | |||
566 | QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", | 565 | QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n", |
567 | rc, CARD_DEVID(card)); | 566 | rc, CARD_DEVID(card)); |
568 | atomic_set(&channel->irq_pending, 0); | 567 | atomic_set(&channel->irq_pending, 0); |
568 | qeth_release_buffer(channel, iob); | ||
569 | card->read_or_write_problem = 1; | 569 | card->read_or_write_problem = 1; |
570 | qeth_schedule_recovery(card); | 570 | qeth_schedule_recovery(card); |
571 | wake_up(&card->wait_q); | 571 | wake_up(&card->wait_q); |
@@ -1127,6 +1127,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
1127 | rc = qeth_get_problem(card, cdev, irb); | 1127 | rc = qeth_get_problem(card, cdev, irb); |
1128 | if (rc) { | 1128 | if (rc) { |
1129 | card->read_or_write_problem = 1; | 1129 | card->read_or_write_problem = 1; |
1130 | if (iob) | ||
1131 | qeth_release_buffer(iob->channel, iob); | ||
1130 | qeth_clear_ipacmd_list(card); | 1132 | qeth_clear_ipacmd_list(card); |
1131 | qeth_schedule_recovery(card); | 1133 | qeth_schedule_recovery(card); |
1132 | goto out; | 1134 | goto out; |
@@ -1466,6 +1468,10 @@ static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev) | |||
1466 | CARD_RDEV(card) = gdev->cdev[0]; | 1468 | CARD_RDEV(card) = gdev->cdev[0]; |
1467 | CARD_WDEV(card) = gdev->cdev[1]; | 1469 | CARD_WDEV(card) = gdev->cdev[1]; |
1468 | CARD_DDEV(card) = gdev->cdev[2]; | 1470 | CARD_DDEV(card) = gdev->cdev[2]; |
1471 | |||
1472 | card->event_wq = alloc_ordered_workqueue("%s", 0, dev_name(&gdev->dev)); | ||
1473 | if (!card->event_wq) | ||
1474 | goto out_wq; | ||
1469 | if (qeth_setup_channel(&card->read, true)) | 1475 | if (qeth_setup_channel(&card->read, true)) |
1470 | goto out_ip; | 1476 | goto out_ip; |
1471 | if (qeth_setup_channel(&card->write, true)) | 1477 | if (qeth_setup_channel(&card->write, true)) |
@@ -1481,6 +1487,8 @@ out_data: | |||
1481 | out_channel: | 1487 | out_channel: |
1482 | qeth_clean_channel(&card->read); | 1488 | qeth_clean_channel(&card->read); |
1483 | out_ip: | 1489 | out_ip: |
1490 | destroy_workqueue(card->event_wq); | ||
1491 | out_wq: | ||
1484 | dev_set_drvdata(&gdev->dev, NULL); | 1492 | dev_set_drvdata(&gdev->dev, NULL); |
1485 | kfree(card); | 1493 | kfree(card); |
1486 | out: | 1494 | out: |
@@ -1809,6 +1817,7 @@ static int qeth_idx_activate_get_answer(struct qeth_card *card, | |||
1809 | QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); | 1817 | QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); |
1810 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 1818 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
1811 | atomic_set(&channel->irq_pending, 0); | 1819 | atomic_set(&channel->irq_pending, 0); |
1820 | qeth_release_buffer(channel, iob); | ||
1812 | wake_up(&card->wait_q); | 1821 | wake_up(&card->wait_q); |
1813 | return rc; | 1822 | return rc; |
1814 | } | 1823 | } |
@@ -1878,6 +1887,7 @@ static int qeth_idx_activate_channel(struct qeth_card *card, | |||
1878 | rc); | 1887 | rc); |
1879 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 1888 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1880 | atomic_set(&channel->irq_pending, 0); | 1889 | atomic_set(&channel->irq_pending, 0); |
1890 | qeth_release_buffer(channel, iob); | ||
1881 | wake_up(&card->wait_q); | 1891 | wake_up(&card->wait_q); |
1882 | return rc; | 1892 | return rc; |
1883 | } | 1893 | } |
@@ -2058,6 +2068,7 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
2058 | } | 2068 | } |
2059 | reply = qeth_alloc_reply(card); | 2069 | reply = qeth_alloc_reply(card); |
2060 | if (!reply) { | 2070 | if (!reply) { |
2071 | qeth_release_buffer(channel, iob); | ||
2061 | return -ENOMEM; | 2072 | return -ENOMEM; |
2062 | } | 2073 | } |
2063 | reply->callback = reply_cb; | 2074 | reply->callback = reply_cb; |
@@ -2389,11 +2400,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx) | |||
2389 | return 0; | 2400 | return 0; |
2390 | } | 2401 | } |
2391 | 2402 | ||
2392 | static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q) | 2403 | static void qeth_free_output_queue(struct qeth_qdio_out_q *q) |
2393 | { | 2404 | { |
2394 | if (!q) | 2405 | if (!q) |
2395 | return; | 2406 | return; |
2396 | 2407 | ||
2408 | qeth_clear_outq_buffers(q, 1); | ||
2397 | qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); | 2409 | qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q); |
2398 | kfree(q); | 2410 | kfree(q); |
2399 | } | 2411 | } |
@@ -2467,10 +2479,8 @@ out_freeoutqbufs: | |||
2467 | card->qdio.out_qs[i]->bufs[j] = NULL; | 2479 | card->qdio.out_qs[i]->bufs[j] = NULL; |
2468 | } | 2480 | } |
2469 | out_freeoutq: | 2481 | out_freeoutq: |
2470 | while (i > 0) { | 2482 | while (i > 0) |
2471 | qeth_free_qdio_out_buf(card->qdio.out_qs[--i]); | 2483 | qeth_free_output_queue(card->qdio.out_qs[--i]); |
2472 | qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); | ||
2473 | } | ||
2474 | kfree(card->qdio.out_qs); | 2484 | kfree(card->qdio.out_qs); |
2475 | card->qdio.out_qs = NULL; | 2485 | card->qdio.out_qs = NULL; |
2476 | out_freepool: | 2486 | out_freepool: |
@@ -2503,10 +2513,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card) | |||
2503 | qeth_free_buffer_pool(card); | 2513 | qeth_free_buffer_pool(card); |
2504 | /* free outbound qdio_qs */ | 2514 | /* free outbound qdio_qs */ |
2505 | if (card->qdio.out_qs) { | 2515 | if (card->qdio.out_qs) { |
2506 | for (i = 0; i < card->qdio.no_out_queues; ++i) { | 2516 | for (i = 0; i < card->qdio.no_out_queues; i++) |
2507 | qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); | 2517 | qeth_free_output_queue(card->qdio.out_qs[i]); |
2508 | qeth_free_qdio_out_buf(card->qdio.out_qs[i]); | ||
2509 | } | ||
2510 | kfree(card->qdio.out_qs); | 2518 | kfree(card->qdio.out_qs); |
2511 | card->qdio.out_qs = NULL; | 2519 | card->qdio.out_qs = NULL; |
2512 | } | 2520 | } |
@@ -5028,6 +5036,7 @@ static void qeth_core_free_card(struct qeth_card *card) | |||
5028 | qeth_clean_channel(&card->read); | 5036 | qeth_clean_channel(&card->read); |
5029 | qeth_clean_channel(&card->write); | 5037 | qeth_clean_channel(&card->write); |
5030 | qeth_clean_channel(&card->data); | 5038 | qeth_clean_channel(&card->data); |
5039 | destroy_workqueue(card->event_wq); | ||
5031 | qeth_free_qdio_buffers(card); | 5040 | qeth_free_qdio_buffers(card); |
5032 | unregister_service_level(&card->qeth_service_level); | 5041 | unregister_service_level(&card->qeth_service_level); |
5033 | dev_set_drvdata(&card->gdev->dev, NULL); | 5042 | dev_set_drvdata(&card->gdev->dev, NULL); |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index f108d4b44605..a43de2f9bcac 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -369,6 +369,8 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode) | |||
369 | qeth_clear_cmd_buffers(&card->read); | 369 | qeth_clear_cmd_buffers(&card->read); |
370 | qeth_clear_cmd_buffers(&card->write); | 370 | qeth_clear_cmd_buffers(&card->write); |
371 | } | 371 | } |
372 | |||
373 | flush_workqueue(card->event_wq); | ||
372 | } | 374 | } |
373 | 375 | ||
374 | static int qeth_l2_process_inbound_buffer(struct qeth_card *card, | 376 | static int qeth_l2_process_inbound_buffer(struct qeth_card *card, |
@@ -801,6 +803,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) | |||
801 | 803 | ||
802 | if (cgdev->state == CCWGROUP_ONLINE) | 804 | if (cgdev->state == CCWGROUP_ONLINE) |
803 | qeth_l2_set_offline(cgdev); | 805 | qeth_l2_set_offline(cgdev); |
806 | |||
807 | cancel_work_sync(&card->close_dev_work); | ||
804 | if (qeth_netdev_is_registered(card->dev)) | 808 | if (qeth_netdev_is_registered(card->dev)) |
805 | unregister_netdev(card->dev); | 809 | unregister_netdev(card->dev); |
806 | } | 810 | } |
@@ -1434,7 +1438,7 @@ static void qeth_bridge_state_change(struct qeth_card *card, | |||
1434 | data->card = card; | 1438 | data->card = card; |
1435 | memcpy(&data->qports, qports, | 1439 | memcpy(&data->qports, qports, |
1436 | sizeof(struct qeth_sbp_state_change) + extrasize); | 1440 | sizeof(struct qeth_sbp_state_change) + extrasize); |
1437 | queue_work(qeth_wq, &data->worker); | 1441 | queue_work(card->event_wq, &data->worker); |
1438 | } | 1442 | } |
1439 | 1443 | ||
1440 | struct qeth_bridge_host_data { | 1444 | struct qeth_bridge_host_data { |
@@ -1506,7 +1510,7 @@ static void qeth_bridge_host_event(struct qeth_card *card, | |||
1506 | data->card = card; | 1510 | data->card = card; |
1507 | memcpy(&data->hostevs, hostevs, | 1511 | memcpy(&data->hostevs, hostevs, |
1508 | sizeof(struct qeth_ipacmd_addr_change) + extrasize); | 1512 | sizeof(struct qeth_ipacmd_addr_change) + extrasize); |
1509 | queue_work(qeth_wq, &data->worker); | 1513 | queue_work(card->event_wq, &data->worker); |
1510 | } | 1514 | } |
1511 | 1515 | ||
1512 | /* SETBRIDGEPORT support; sending commands */ | 1516 | /* SETBRIDGEPORT support; sending commands */ |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 42a7cdc59b76..df34bff4ac31 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -1433,6 +1433,8 @@ static void qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) | |||
1433 | qeth_clear_cmd_buffers(&card->read); | 1433 | qeth_clear_cmd_buffers(&card->read); |
1434 | qeth_clear_cmd_buffers(&card->write); | 1434 | qeth_clear_cmd_buffers(&card->write); |
1435 | } | 1435 | } |
1436 | |||
1437 | flush_workqueue(card->event_wq); | ||
1436 | } | 1438 | } |
1437 | 1439 | ||
1438 | /* | 1440 | /* |
@@ -2338,6 +2340,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev) | |||
2338 | if (cgdev->state == CCWGROUP_ONLINE) | 2340 | if (cgdev->state == CCWGROUP_ONLINE) |
2339 | qeth_l3_set_offline(cgdev); | 2341 | qeth_l3_set_offline(cgdev); |
2340 | 2342 | ||
2343 | cancel_work_sync(&card->close_dev_work); | ||
2341 | if (qeth_netdev_is_registered(card->dev)) | 2344 | if (qeth_netdev_is_registered(card->dev)) |
2342 | unregister_netdev(card->dev); | 2345 | unregister_netdev(card->dev); |
2343 | qeth_l3_clear_ip_htable(card, 0); | 2346 | qeth_l3_clear_ip_htable(card, 0); |
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 9cf30d124b9e..e390f8c6d5f3 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -403,7 +403,6 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
403 | goto failed; | 403 | goto failed; |
404 | 404 | ||
405 | /* report size limit per scatter-gather segment */ | 405 | /* report size limit per scatter-gather segment */ |
406 | adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; | ||
407 | adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; | 406 | adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; |
408 | 407 | ||
409 | adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; | 408 | adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 00acc7144bbc..f4f6a07c5222 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -428,6 +428,8 @@ static struct scsi_host_template zfcp_scsi_host_template = { | |||
428 | .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) | 428 | .max_sectors = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1) |
429 | * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, | 429 | * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8, |
430 | /* GCD, adjusted later */ | 430 | /* GCD, adjusted later */ |
431 | /* report size limit per scatter-gather segment */ | ||
432 | .max_segment_size = ZFCP_QDIO_SBALE_LEN, | ||
431 | .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, | 433 | .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, |
432 | .shost_attrs = zfcp_sysfs_shost_attrs, | 434 | .shost_attrs = zfcp_sysfs_shost_attrs, |
433 | .sdev_attrs = zfcp_sysfs_sdev_attrs, | 435 | .sdev_attrs = zfcp_sysfs_sdev_attrs, |
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index 128d658d472a..16957d7ac414 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c | |||
@@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt, | |||
295 | if(tpnt->sdev_attrs == NULL) | 295 | if(tpnt->sdev_attrs == NULL) |
296 | tpnt->sdev_attrs = NCR_700_dev_attrs; | 296 | tpnt->sdev_attrs = NCR_700_dev_attrs; |
297 | 297 | ||
298 | memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript, | 298 | memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript, |
299 | GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); | 299 | GFP_KERNEL, DMA_ATTR_NON_CONSISTENT); |
300 | if(memory == NULL) { | 300 | if(memory == NULL) { |
301 | printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); | 301 | printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n"); |
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c index f83f79b07b50..07efcb9b5b94 100644 --- a/drivers/scsi/aic94xx/aic94xx_init.c +++ b/drivers/scsi/aic94xx/aic94xx_init.c | |||
@@ -280,7 +280,7 @@ static ssize_t asd_show_dev_rev(struct device *dev, | |||
280 | return snprintf(buf, PAGE_SIZE, "%s\n", | 280 | return snprintf(buf, PAGE_SIZE, "%s\n", |
281 | asd_dev_rev[asd_ha->revision_id]); | 281 | asd_dev_rev[asd_ha->revision_id]); |
282 | } | 282 | } |
283 | static DEVICE_ATTR(revision, S_IRUGO, asd_show_dev_rev, NULL); | 283 | static DEVICE_ATTR(aic_revision, S_IRUGO, asd_show_dev_rev, NULL); |
284 | 284 | ||
285 | static ssize_t asd_show_dev_bios_build(struct device *dev, | 285 | static ssize_t asd_show_dev_bios_build(struct device *dev, |
286 | struct device_attribute *attr,char *buf) | 286 | struct device_attribute *attr,char *buf) |
@@ -477,7 +477,7 @@ static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha) | |||
477 | { | 477 | { |
478 | int err; | 478 | int err; |
479 | 479 | ||
480 | err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_revision); | 480 | err = device_create_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); |
481 | if (err) | 481 | if (err) |
482 | return err; | 482 | return err; |
483 | 483 | ||
@@ -499,13 +499,13 @@ err_update_bios: | |||
499 | err_biosb: | 499 | err_biosb: |
500 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); | 500 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); |
501 | err_rev: | 501 | err_rev: |
502 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); | 502 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); |
503 | return err; | 503 | return err; |
504 | } | 504 | } |
505 | 505 | ||
506 | static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) | 506 | static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha) |
507 | { | 507 | { |
508 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_revision); | 508 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_aic_revision); |
509 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); | 509 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_bios_build); |
510 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); | 510 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_pcba_sn); |
511 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); | 511 | device_remove_file(&asd_ha->pcidev->dev, &dev_attr_update_bios); |
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 350257c13a5b..bc9f2a2365f4 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c | |||
@@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) | |||
240 | return NULL; | 240 | return NULL; |
241 | } | 241 | } |
242 | 242 | ||
243 | cmgr->hba = hba; | ||
243 | cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), | 244 | cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list), |
244 | GFP_KERNEL); | 245 | GFP_KERNEL); |
245 | if (!cmgr->free_list) { | 246 | if (!cmgr->free_list) { |
@@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) | |||
256 | goto mem_err; | 257 | goto mem_err; |
257 | } | 258 | } |
258 | 259 | ||
259 | cmgr->hba = hba; | ||
260 | cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); | 260 | cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1); |
261 | 261 | ||
262 | for (i = 0; i < arr_sz; i++) { | 262 | for (i = 0; i < arr_sz; i++) { |
@@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba) | |||
295 | 295 | ||
296 | /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ | 296 | /* Allocate pool of io_bdts - one for each bnx2fc_cmd */ |
297 | mem_size = num_ios * sizeof(struct io_bdt *); | 297 | mem_size = num_ios * sizeof(struct io_bdt *); |
298 | cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL); | 298 | cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL); |
299 | if (!cmgr->io_bdt_pool) { | 299 | if (!cmgr->io_bdt_pool) { |
300 | printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); | 300 | printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n"); |
301 | goto mem_err; | 301 | goto mem_err; |
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index bfa13e3b191c..c8bad2c093b8 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c | |||
@@ -3687,6 +3687,7 @@ static int cxlflash_probe(struct pci_dev *pdev, | |||
3687 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; | 3687 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; |
3688 | 3688 | ||
3689 | cfg = shost_priv(host); | 3689 | cfg = shost_priv(host); |
3690 | cfg->state = STATE_PROBING; | ||
3690 | cfg->host = host; | 3691 | cfg->host = host; |
3691 | rc = alloc_mem(cfg); | 3692 | rc = alloc_mem(cfg); |
3692 | if (rc) { | 3693 | if (rc) { |
@@ -3775,6 +3776,7 @@ out: | |||
3775 | return rc; | 3776 | return rc; |
3776 | 3777 | ||
3777 | out_remove: | 3778 | out_remove: |
3779 | cfg->state = STATE_PROBED; | ||
3778 | cxlflash_remove(pdev); | 3780 | cxlflash_remove(pdev); |
3779 | goto out; | 3781 | goto out; |
3780 | } | 3782 | } |
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index be83590ed955..ff943f477d6f 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
@@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1726 | fc_frame_payload_op(fp) != ELS_LS_ACC) { | 1726 | fc_frame_payload_op(fp) != ELS_LS_ACC) { |
1727 | FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); | 1727 | FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n"); |
1728 | fc_lport_error(lport, fp); | 1728 | fc_lport_error(lport, fp); |
1729 | goto err; | 1729 | goto out; |
1730 | } | 1730 | } |
1731 | 1731 | ||
1732 | flp = fc_frame_payload_get(fp, sizeof(*flp)); | 1732 | flp = fc_frame_payload_get(fp, sizeof(*flp)); |
1733 | if (!flp) { | 1733 | if (!flp) { |
1734 | FC_LPORT_DBG(lport, "FLOGI bad response\n"); | 1734 | FC_LPORT_DBG(lport, "FLOGI bad response\n"); |
1735 | fc_lport_error(lport, fp); | 1735 | fc_lport_error(lport, fp); |
1736 | goto err; | 1736 | goto out; |
1737 | } | 1737 | } |
1738 | 1738 | ||
1739 | mfs = ntohs(flp->fl_csp.sp_bb_data) & | 1739 | mfs = ntohs(flp->fl_csp.sp_bb_data) & |
@@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp, | |||
1743 | FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " | 1743 | FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, " |
1744 | "lport->mfs:%hu\n", mfs, lport->mfs); | 1744 | "lport->mfs:%hu\n", mfs, lport->mfs); |
1745 | fc_lport_error(lport, fp); | 1745 | fc_lport_error(lport, fp); |
1746 | goto err; | 1746 | goto out; |
1747 | } | 1747 | } |
1748 | 1748 | ||
1749 | if (mfs <= lport->mfs) { | 1749 | if (mfs <= lport->mfs) { |
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 9192a1d9dec6..dfba4921b265 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref) | |||
184 | struct fc_rport_priv *rdata; | 184 | struct fc_rport_priv *rdata; |
185 | 185 | ||
186 | rdata = container_of(kref, struct fc_rport_priv, kref); | 186 | rdata = container_of(kref, struct fc_rport_priv, kref); |
187 | WARN_ON(!list_empty(&rdata->peers)); | ||
188 | kfree_rcu(rdata, rcu); | 187 | kfree_rcu(rdata, rcu); |
189 | } | 188 | } |
190 | EXPORT_SYMBOL(fc_rport_destroy); | 189 | EXPORT_SYMBOL(fc_rport_destroy); |
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 661512bec3ac..e27f4df24021 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -62,7 +62,7 @@ | |||
62 | 62 | ||
63 | /* make sure inq_product_rev string corresponds to this version */ | 63 | /* make sure inq_product_rev string corresponds to this version */ |
64 | #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */ | 64 | #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */ |
65 | static const char *sdebug_version_date = "20180128"; | 65 | static const char *sdebug_version_date = "20190125"; |
66 | 66 | ||
67 | #define MY_NAME "scsi_debug" | 67 | #define MY_NAME "scsi_debug" |
68 | 68 | ||
@@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void) | |||
735 | (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); | 735 | (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10); |
736 | } | 736 | } |
737 | 737 | ||
738 | static void *fake_store(unsigned long long lba) | 738 | static void *lba2fake_store(unsigned long long lba) |
739 | { | 739 | { |
740 | lba = do_div(lba, sdebug_store_sectors); | 740 | lba = do_div(lba, sdebug_store_sectors); |
741 | 741 | ||
@@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba, | |||
2514 | return ret; | 2514 | return ret; |
2515 | } | 2515 | } |
2516 | 2516 | ||
2517 | /* If fake_store(lba,num) compares equal to arr(num), then copy top half of | 2517 | /* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of |
2518 | * arr into fake_store(lba,num) and return true. If comparison fails then | 2518 | * arr into lba2fake_store(lba,num) and return true. If comparison fails then |
2519 | * return false. */ | 2519 | * return false. */ |
2520 | static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) | 2520 | static bool comp_write_worker(u64 lba, u32 num, const u8 *arr) |
2521 | { | 2521 | { |
@@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec, | |||
2643 | if (sdt->app_tag == cpu_to_be16(0xffff)) | 2643 | if (sdt->app_tag == cpu_to_be16(0xffff)) |
2644 | continue; | 2644 | continue; |
2645 | 2645 | ||
2646 | ret = dif_verify(sdt, fake_store(sector), sector, ei_lba); | 2646 | ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba); |
2647 | if (ret) { | 2647 | if (ret) { |
2648 | dif_errors++; | 2648 | dif_errors++; |
2649 | return ret; | 2649 | return ret; |
@@ -3261,10 +3261,12 @@ err_out: | |||
3261 | static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, | 3261 | static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, |
3262 | u32 ei_lba, bool unmap, bool ndob) | 3262 | u32 ei_lba, bool unmap, bool ndob) |
3263 | { | 3263 | { |
3264 | int ret; | ||
3264 | unsigned long iflags; | 3265 | unsigned long iflags; |
3265 | unsigned long long i; | 3266 | unsigned long long i; |
3266 | int ret; | 3267 | u32 lb_size = sdebug_sector_size; |
3267 | u64 lba_off; | 3268 | u64 block, lbaa; |
3269 | u8 *fs1p; | ||
3268 | 3270 | ||
3269 | ret = check_device_access_params(scp, lba, num); | 3271 | ret = check_device_access_params(scp, lba, num); |
3270 | if (ret) | 3272 | if (ret) |
@@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num, | |||
3276 | unmap_region(lba, num); | 3278 | unmap_region(lba, num); |
3277 | goto out; | 3279 | goto out; |
3278 | } | 3280 | } |
3279 | 3281 | lbaa = lba; | |
3280 | lba_off = lba * sdebug_sector_size; | 3282 | block = do_div(lbaa, sdebug_store_sectors); |
3281 | /* if ndob then zero 1 logical block, else fetch 1 logical block */ | 3283 | /* if ndob then zero 1 logical block, else fetch 1 logical block */ |
3284 | fs1p = fake_storep + (block * lb_size); | ||
3282 | if (ndob) { | 3285 | if (ndob) { |
3283 | memset(fake_storep + lba_off, 0, sdebug_sector_size); | 3286 | memset(fs1p, 0, lb_size); |
3284 | ret = 0; | 3287 | ret = 0; |
3285 | } else | 3288 | } else |
3286 | ret = fetch_to_dev_buffer(scp, fake_storep + lba_off, | 3289 | ret = fetch_to_dev_buffer(scp, fs1p, lb_size); |
3287 | sdebug_sector_size); | ||
3288 | 3290 | ||
3289 | if (-1 == ret) { | 3291 | if (-1 == ret) { |
3290 | write_unlock_irqrestore(&atomic_rw, iflags); | 3292 | write_unlock_irqrestore(&atomic_rw, iflags); |
3291 | return DID_ERROR << 16; | 3293 | return DID_ERROR << 16; |
3292 | } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size)) | 3294 | } else if (sdebug_verbose && !ndob && (ret < lb_size)) |
3293 | sdev_printk(KERN_INFO, scp->device, | 3295 | sdev_printk(KERN_INFO, scp->device, |
3294 | "%s: %s: lb size=%u, IO sent=%d bytes\n", | 3296 | "%s: %s: lb size=%u, IO sent=%d bytes\n", |
3295 | my_name, "write same", | 3297 | my_name, "write same", lb_size, ret); |
3296 | sdebug_sector_size, ret); | ||
3297 | 3298 | ||
3298 | /* Copy first sector to remaining blocks */ | 3299 | /* Copy first sector to remaining blocks */ |
3299 | for (i = 1 ; i < num ; i++) | 3300 | for (i = 1 ; i < num ; i++) { |
3300 | memcpy(fake_storep + ((lba + i) * sdebug_sector_size), | 3301 | lbaa = lba + i; |
3301 | fake_storep + lba_off, | 3302 | block = do_div(lbaa, sdebug_store_sectors); |
3302 | sdebug_sector_size); | 3303 | memmove(fake_storep + (block * lb_size), fs1p, lb_size); |
3303 | 3304 | } | |
3304 | if (scsi_debug_lbp()) | 3305 | if (scsi_debug_lbp()) |
3305 | map_region(lba, num); | 3306 | map_region(lba, num); |
3306 | out: | 3307 | out: |
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 83365b29a4d8..fff86940388b 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c | |||
@@ -462,12 +462,16 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) | |||
462 | sdkp->device->use_10_for_rw = 0; | 462 | sdkp->device->use_10_for_rw = 0; |
463 | 463 | ||
464 | /* | 464 | /* |
465 | * If something changed, revalidate the disk zone bitmaps once we have | 465 | * Revalidate the disk zone bitmaps once the block device capacity is |
466 | * the capacity, that is on the second revalidate execution during disk | 466 | * set on the second revalidate execution during disk scan and if |
467 | * scan and always during normal revalidate. | 467 | * something changed when executing a normal revalidate. |
468 | */ | 468 | */ |
469 | if (sdkp->first_scan) | 469 | if (sdkp->first_scan) { |
470 | sdkp->zone_blocks = zone_blocks; | ||
471 | sdkp->nr_zones = nr_zones; | ||
470 | return 0; | 472 | return 0; |
473 | } | ||
474 | |||
471 | if (sdkp->zone_blocks != zone_blocks || | 475 | if (sdkp->zone_blocks != zone_blocks || |
472 | sdkp->nr_zones != nr_zones || | 476 | sdkp->nr_zones != nr_zones || |
473 | disk->queue->nr_zones != nr_zones) { | 477 | disk->queue->nr_zones != nr_zones) { |
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c index 52c153cd795a..636f83f781f5 100644 --- a/drivers/soc/fsl/qbman/qman.c +++ b/drivers/soc/fsl/qbman/qman.c | |||
@@ -1143,18 +1143,19 @@ static void qm_mr_process_task(struct work_struct *work); | |||
1143 | static irqreturn_t portal_isr(int irq, void *ptr) | 1143 | static irqreturn_t portal_isr(int irq, void *ptr) |
1144 | { | 1144 | { |
1145 | struct qman_portal *p = ptr; | 1145 | struct qman_portal *p = ptr; |
1146 | |||
1147 | u32 clear = QM_DQAVAIL_MASK | p->irq_sources; | ||
1148 | u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; | 1146 | u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources; |
1147 | u32 clear = 0; | ||
1149 | 1148 | ||
1150 | if (unlikely(!is)) | 1149 | if (unlikely(!is)) |
1151 | return IRQ_NONE; | 1150 | return IRQ_NONE; |
1152 | 1151 | ||
1153 | /* DQRR-handling if it's interrupt-driven */ | 1152 | /* DQRR-handling if it's interrupt-driven */ |
1154 | if (is & QM_PIRQ_DQRI) | 1153 | if (is & QM_PIRQ_DQRI) { |
1155 | __poll_portal_fast(p, QMAN_POLL_LIMIT); | 1154 | __poll_portal_fast(p, QMAN_POLL_LIMIT); |
1155 | clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI; | ||
1156 | } | ||
1156 | /* Handling of anything else that's interrupt-driven */ | 1157 | /* Handling of anything else that's interrupt-driven */ |
1157 | clear |= __poll_portal_slow(p, is); | 1158 | clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW; |
1158 | qm_out(&p->p, QM_REG_ISR, clear); | 1159 | qm_out(&p->p, QM_REG_ISR, clear); |
1159 | return IRQ_HANDLED; | 1160 | return IRQ_HANDLED; |
1160 | } | 1161 | } |
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c index 2848fa71a33d..d6248eecf123 100644 --- a/drivers/staging/octeon/ethernet-mdio.c +++ b/drivers/staging/octeon/ethernet-mdio.c | |||
@@ -170,7 +170,7 @@ int cvm_oct_phy_setup_device(struct net_device *dev) | |||
170 | return -ENODEV; | 170 | return -ENODEV; |
171 | 171 | ||
172 | priv->last_link = 0; | 172 | priv->last_link = 0; |
173 | phy_start_aneg(phydev); | 173 | phy_start(phydev); |
174 | 174 | ||
175 | return 0; | 175 | return 0; |
176 | no_phy: | 176 | no_phy: |
diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c index c92bbd05516e..005de0024dd4 100644 --- a/drivers/staging/speakup/spk_ttyio.c +++ b/drivers/staging/speakup/spk_ttyio.c | |||
@@ -265,7 +265,8 @@ static void spk_ttyio_send_xchar(char ch) | |||
265 | return; | 265 | return; |
266 | } | 266 | } |
267 | 267 | ||
268 | speakup_tty->ops->send_xchar(speakup_tty, ch); | 268 | if (speakup_tty->ops->send_xchar) |
269 | speakup_tty->ops->send_xchar(speakup_tty, ch); | ||
269 | mutex_unlock(&speakup_tty_mutex); | 270 | mutex_unlock(&speakup_tty_mutex); |
270 | } | 271 | } |
271 | 272 | ||
@@ -277,7 +278,8 @@ static void spk_ttyio_tiocmset(unsigned int set, unsigned int clear) | |||
277 | return; | 278 | return; |
278 | } | 279 | } |
279 | 280 | ||
280 | speakup_tty->ops->tiocmset(speakup_tty, set, clear); | 281 | if (speakup_tty->ops->tiocmset) |
282 | speakup_tty->ops->tiocmset(speakup_tty, set, clear); | ||
281 | mutex_unlock(&speakup_tty_mutex); | 283 | mutex_unlock(&speakup_tty_mutex); |
282 | } | 284 | } |
283 | 285 | ||
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 72016d0dfca5..8e7fffbb8802 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -852,6 +852,12 @@ static ssize_t pi_prot_type_store(struct config_item *item, | |||
852 | return count; | 852 | return count; |
853 | } | 853 | } |
854 | 854 | ||
855 | /* always zero, but attr needs to remain RW to avoid userspace breakage */ | ||
856 | static ssize_t pi_prot_format_show(struct config_item *item, char *page) | ||
857 | { | ||
858 | return snprintf(page, PAGE_SIZE, "0\n"); | ||
859 | } | ||
860 | |||
855 | static ssize_t pi_prot_format_store(struct config_item *item, | 861 | static ssize_t pi_prot_format_store(struct config_item *item, |
856 | const char *page, size_t count) | 862 | const char *page, size_t count) |
857 | { | 863 | { |
@@ -1132,7 +1138,7 @@ CONFIGFS_ATTR(, emulate_3pc); | |||
1132 | CONFIGFS_ATTR(, emulate_pr); | 1138 | CONFIGFS_ATTR(, emulate_pr); |
1133 | CONFIGFS_ATTR(, pi_prot_type); | 1139 | CONFIGFS_ATTR(, pi_prot_type); |
1134 | CONFIGFS_ATTR_RO(, hw_pi_prot_type); | 1140 | CONFIGFS_ATTR_RO(, hw_pi_prot_type); |
1135 | CONFIGFS_ATTR_WO(, pi_prot_format); | 1141 | CONFIGFS_ATTR(, pi_prot_format); |
1136 | CONFIGFS_ATTR(, pi_prot_verify); | 1142 | CONFIGFS_ATTR(, pi_prot_verify); |
1137 | CONFIGFS_ATTR(, enforce_pr_isids); | 1143 | CONFIGFS_ATTR(, enforce_pr_isids); |
1138 | CONFIGFS_ATTR(, is_nonrot); | 1144 | CONFIGFS_ATTR(, is_nonrot); |
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index dfd23245f778..6fff16113628 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c | |||
@@ -774,7 +774,7 @@ of_cpufreq_cooling_register(struct cpufreq_policy *policy) | |||
774 | 774 | ||
775 | cdev = __cpufreq_cooling_register(np, policy, capacitance); | 775 | cdev = __cpufreq_cooling_register(np, policy, capacitance); |
776 | if (IS_ERR(cdev)) { | 776 | if (IS_ERR(cdev)) { |
777 | pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n", | 777 | pr_err("cpu_cooling: cpu%d failed to register as cooling device: %ld\n", |
778 | policy->cpu, PTR_ERR(cdev)); | 778 | policy->cpu, PTR_ERR(cdev)); |
779 | cdev = NULL; | 779 | cdev = NULL; |
780 | } | 780 | } |
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index 4bfdb4a1e47d..2df059cc07e2 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c | |||
@@ -867,14 +867,14 @@ __init *thermal_of_build_thermal_zone(struct device_node *np) | |||
867 | 867 | ||
868 | ret = of_property_read_u32(np, "polling-delay-passive", &prop); | 868 | ret = of_property_read_u32(np, "polling-delay-passive", &prop); |
869 | if (ret < 0) { | 869 | if (ret < 0) { |
870 | pr_err("missing polling-delay-passive property\n"); | 870 | pr_err("%pOFn: missing polling-delay-passive property\n", np); |
871 | goto free_tz; | 871 | goto free_tz; |
872 | } | 872 | } |
873 | tz->passive_delay = prop; | 873 | tz->passive_delay = prop; |
874 | 874 | ||
875 | ret = of_property_read_u32(np, "polling-delay", &prop); | 875 | ret = of_property_read_u32(np, "polling-delay", &prop); |
876 | if (ret < 0) { | 876 | if (ret < 0) { |
877 | pr_err("missing polling-delay property\n"); | 877 | pr_err("%pOFn: missing polling-delay property\n", np); |
878 | goto free_tz; | 878 | goto free_tz; |
879 | } | 879 | } |
880 | tz->polling_delay = prop; | 880 | tz->polling_delay = prop; |
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c index e2c407656fa6..c1fdbc0b6840 100644 --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c | |||
@@ -357,6 +357,9 @@ static int mtk8250_probe_of(struct platform_device *pdev, struct uart_port *p, | |||
357 | if (dmacnt == 2) { | 357 | if (dmacnt == 2) { |
358 | data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), | 358 | data->dma = devm_kzalloc(&pdev->dev, sizeof(*data->dma), |
359 | GFP_KERNEL); | 359 | GFP_KERNEL); |
360 | if (!data->dma) | ||
361 | return -ENOMEM; | ||
362 | |||
360 | data->dma->fn = mtk8250_dma_filter; | 363 | data->dma->fn = mtk8250_dma_filter; |
361 | data->dma->rx_size = MTK_UART_RX_SIZE; | 364 | data->dma->rx_size = MTK_UART_RX_SIZE; |
362 | data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER; | 365 | data->dma->rxconf.src_maxburst = MTK_UART_RX_TRIGGER; |
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index f80a300b5d68..48bd694a5fa1 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c | |||
@@ -3420,6 +3420,11 @@ static int | |||
3420 | serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) | 3420 | serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) |
3421 | { | 3421 | { |
3422 | int num_iomem, num_port, first_port = -1, i; | 3422 | int num_iomem, num_port, first_port = -1, i; |
3423 | int rc; | ||
3424 | |||
3425 | rc = serial_pci_is_class_communication(dev); | ||
3426 | if (rc) | ||
3427 | return rc; | ||
3423 | 3428 | ||
3424 | /* | 3429 | /* |
3425 | * Should we try to make guesses for multiport serial devices later? | 3430 | * Should we try to make guesses for multiport serial devices later? |
@@ -3647,10 +3652,6 @@ pciserial_init_one(struct pci_dev *dev, const struct pci_device_id *ent) | |||
3647 | 3652 | ||
3648 | board = &pci_boards[ent->driver_data]; | 3653 | board = &pci_boards[ent->driver_data]; |
3649 | 3654 | ||
3650 | rc = serial_pci_is_class_communication(dev); | ||
3651 | if (rc) | ||
3652 | return rc; | ||
3653 | |||
3654 | rc = serial_pci_is_blacklisted(dev); | 3655 | rc = serial_pci_is_blacklisted(dev); |
3655 | if (rc) | 3656 | if (rc) |
3656 | return rc; | 3657 | return rc; |
diff --git a/drivers/tty/serial/earlycon-riscv-sbi.c b/drivers/tty/serial/earlycon-riscv-sbi.c index e1a551aae336..ce81523c3113 100644 --- a/drivers/tty/serial/earlycon-riscv-sbi.c +++ b/drivers/tty/serial/earlycon-riscv-sbi.c | |||
@@ -10,13 +10,16 @@ | |||
10 | #include <linux/serial_core.h> | 10 | #include <linux/serial_core.h> |
11 | #include <asm/sbi.h> | 11 | #include <asm/sbi.h> |
12 | 12 | ||
13 | static void sbi_console_write(struct console *con, | 13 | static void sbi_putc(struct uart_port *port, int c) |
14 | const char *s, unsigned int n) | ||
15 | { | 14 | { |
16 | int i; | 15 | sbi_console_putchar(c); |
16 | } | ||
17 | 17 | ||
18 | for (i = 0; i < n; ++i) | 18 | static void sbi_console_write(struct console *con, |
19 | sbi_console_putchar(s[i]); | 19 | const char *s, unsigned n) |
20 | { | ||
21 | struct earlycon_device *dev = con->data; | ||
22 | uart_console_write(&dev->port, s, n, sbi_putc); | ||
20 | } | 23 | } |
21 | 24 | ||
22 | static int __init early_sbi_setup(struct earlycon_device *device, | 25 | static int __init early_sbi_setup(struct earlycon_device *device, |
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 5c01bb6d1c24..556f50aa1b58 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c | |||
@@ -130,6 +130,9 @@ static void uart_start(struct tty_struct *tty) | |||
130 | struct uart_port *port; | 130 | struct uart_port *port; |
131 | unsigned long flags; | 131 | unsigned long flags; |
132 | 132 | ||
133 | if (!state) | ||
134 | return; | ||
135 | |||
133 | port = uart_port_lock(state, flags); | 136 | port = uart_port_lock(state, flags); |
134 | __uart_start(tty); | 137 | __uart_start(tty); |
135 | uart_port_unlock(port, flags); | 138 | uart_port_unlock(port, flags); |
@@ -727,6 +730,9 @@ static void uart_unthrottle(struct tty_struct *tty) | |||
727 | upstat_t mask = UPSTAT_SYNC_FIFO; | 730 | upstat_t mask = UPSTAT_SYNC_FIFO; |
728 | struct uart_port *port; | 731 | struct uart_port *port; |
729 | 732 | ||
733 | if (!state) | ||
734 | return; | ||
735 | |||
730 | port = uart_port_ref(state); | 736 | port = uart_port_ref(state); |
731 | if (!port) | 737 | if (!port) |
732 | return; | 738 | return; |
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index 8df0fd824520..64bbeb7d7e0c 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c | |||
@@ -1921,7 +1921,7 @@ out_nomem: | |||
1921 | 1921 | ||
1922 | static void sci_free_irq(struct sci_port *port) | 1922 | static void sci_free_irq(struct sci_port *port) |
1923 | { | 1923 | { |
1924 | int i; | 1924 | int i, j; |
1925 | 1925 | ||
1926 | /* | 1926 | /* |
1927 | * Intentionally in reverse order so we iterate over the muxed | 1927 | * Intentionally in reverse order so we iterate over the muxed |
@@ -1937,6 +1937,13 @@ static void sci_free_irq(struct sci_port *port) | |||
1937 | if (unlikely(irq < 0)) | 1937 | if (unlikely(irq < 0)) |
1938 | continue; | 1938 | continue; |
1939 | 1939 | ||
1940 | /* Check if already freed (irq was muxed) */ | ||
1941 | for (j = 0; j < i; j++) | ||
1942 | if (port->irqs[j] == irq) | ||
1943 | j = i + 1; | ||
1944 | if (j > i) | ||
1945 | continue; | ||
1946 | |||
1940 | free_irq(port->irqs[i], port); | 1947 | free_irq(port->irqs[i], port); |
1941 | kfree(port->irqstr[i]); | 1948 | kfree(port->irqstr[i]); |
1942 | 1949 | ||
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c index cb7fcd7c0ad8..c1e9ea621f41 100644 --- a/drivers/usb/dwc3/dwc3-exynos.c +++ b/drivers/usb/dwc3/dwc3-exynos.c | |||
@@ -78,7 +78,7 @@ static int dwc3_exynos_probe(struct platform_device *pdev) | |||
78 | for (i = 0; i < exynos->num_clks; i++) { | 78 | for (i = 0; i < exynos->num_clks; i++) { |
79 | ret = clk_prepare_enable(exynos->clks[i]); | 79 | ret = clk_prepare_enable(exynos->clks[i]); |
80 | if (ret) { | 80 | if (ret) { |
81 | while (--i > 0) | 81 | while (i-- > 0) |
82 | clk_disable_unprepare(exynos->clks[i]); | 82 | clk_disable_unprepare(exynos->clks[i]); |
83 | return ret; | 83 | return ret; |
84 | } | 84 | } |
@@ -223,7 +223,7 @@ static int dwc3_exynos_resume(struct device *dev) | |||
223 | for (i = 0; i < exynos->num_clks; i++) { | 223 | for (i = 0; i < exynos->num_clks; i++) { |
224 | ret = clk_prepare_enable(exynos->clks[i]); | 224 | ret = clk_prepare_enable(exynos->clks[i]); |
225 | if (ret) { | 225 | if (ret) { |
226 | while (--i > 0) | 226 | while (i-- > 0) |
227 | clk_disable_unprepare(exynos->clks[i]); | 227 | clk_disable_unprepare(exynos->clks[i]); |
228 | return ret; | 228 | return ret; |
229 | } | 229 | } |
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index bed2ff42780b..6c9b76bcc2e1 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
@@ -1119,7 +1119,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, | |||
1119 | unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); | 1119 | unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); |
1120 | unsigned int rem = length % maxp; | 1120 | unsigned int rem = length % maxp; |
1121 | 1121 | ||
1122 | if (rem && usb_endpoint_dir_out(dep->endpoint.desc)) { | 1122 | if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) { |
1123 | struct dwc3 *dwc = dep->dwc; | 1123 | struct dwc3 *dwc = dep->dwc; |
1124 | struct dwc3_trb *trb; | 1124 | struct dwc3_trb *trb; |
1125 | 1125 | ||
diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 660878a19505..b77f3126580e 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c | |||
@@ -2083,7 +2083,7 @@ static irqreturn_t net2272_irq(int irq, void *_dev) | |||
2083 | #if defined(PLX_PCI_RDK2) | 2083 | #if defined(PLX_PCI_RDK2) |
2084 | /* see if PCI int for us by checking irqstat */ | 2084 | /* see if PCI int for us by checking irqstat */ |
2085 | intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); | 2085 | intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT); |
2086 | if (!intcsr & (1 << NET2272_PCI_IRQ)) { | 2086 | if (!(intcsr & (1 << NET2272_PCI_IRQ))) { |
2087 | spin_unlock(&dev->lock); | 2087 | spin_unlock(&dev->lock); |
2088 | return IRQ_NONE; | 2088 | return IRQ_NONE; |
2089 | } | 2089 | } |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index eae8b1b1b45b..ffe462a657b1 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -452,13 +452,10 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
452 | } | 452 | } |
453 | 453 | ||
454 | if (request) { | 454 | if (request) { |
455 | u8 is_dma = 0; | ||
456 | bool short_packet = false; | ||
457 | 455 | ||
458 | trace_musb_req_tx(req); | 456 | trace_musb_req_tx(req); |
459 | 457 | ||
460 | if (dma && (csr & MUSB_TXCSR_DMAENAB)) { | 458 | if (dma && (csr & MUSB_TXCSR_DMAENAB)) { |
461 | is_dma = 1; | ||
462 | csr |= MUSB_TXCSR_P_WZC_BITS; | 459 | csr |= MUSB_TXCSR_P_WZC_BITS; |
463 | csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | | 460 | csr &= ~(MUSB_TXCSR_DMAENAB | MUSB_TXCSR_P_UNDERRUN | |
464 | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); | 461 | MUSB_TXCSR_TXPKTRDY | MUSB_TXCSR_AUTOSET); |
@@ -476,16 +473,8 @@ void musb_g_tx(struct musb *musb, u8 epnum) | |||
476 | */ | 473 | */ |
477 | if ((request->zero && request->length) | 474 | if ((request->zero && request->length) |
478 | && (request->length % musb_ep->packet_sz == 0) | 475 | && (request->length % musb_ep->packet_sz == 0) |
479 | && (request->actual == request->length)) | 476 | && (request->actual == request->length)) { |
480 | short_packet = true; | ||
481 | 477 | ||
482 | if ((musb_dma_inventra(musb) || musb_dma_ux500(musb)) && | ||
483 | (is_dma && (!dma->desired_mode || | ||
484 | (request->actual & | ||
485 | (musb_ep->packet_sz - 1))))) | ||
486 | short_packet = true; | ||
487 | |||
488 | if (short_packet) { | ||
489 | /* | 478 | /* |
490 | * On DMA completion, FIFO may not be | 479 | * On DMA completion, FIFO may not be |
491 | * available yet... | 480 | * available yet... |
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c index a688f7f87829..5fc6825745f2 100644 --- a/drivers/usb/musb/musbhsdma.c +++ b/drivers/usb/musb/musbhsdma.c | |||
@@ -346,12 +346,10 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) | |||
346 | channel->status = MUSB_DMA_STATUS_FREE; | 346 | channel->status = MUSB_DMA_STATUS_FREE; |
347 | 347 | ||
348 | /* completed */ | 348 | /* completed */ |
349 | if ((devctl & MUSB_DEVCTL_HM) | 349 | if (musb_channel->transmit && |
350 | && (musb_channel->transmit) | 350 | (!channel->desired_mode || |
351 | && ((channel->desired_mode == 0) | 351 | (channel->actual_len % |
352 | || (channel->actual_len & | 352 | musb_channel->max_packet_sz))) { |
353 | (musb_channel->max_packet_sz - 1))) | ||
354 | ) { | ||
355 | u8 epnum = musb_channel->epnum; | 353 | u8 epnum = musb_channel->epnum; |
356 | int offset = musb->io.ep_offset(epnum, | 354 | int offset = musb->io.ep_offset(epnum, |
357 | MUSB_TXCSR); | 355 | MUSB_TXCSR); |
@@ -363,11 +361,14 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data) | |||
363 | */ | 361 | */ |
364 | musb_ep_select(mbase, epnum); | 362 | musb_ep_select(mbase, epnum); |
365 | txcsr = musb_readw(mbase, offset); | 363 | txcsr = musb_readw(mbase, offset); |
366 | txcsr &= ~(MUSB_TXCSR_DMAENAB | 364 | if (channel->desired_mode == 1) { |
365 | txcsr &= ~(MUSB_TXCSR_DMAENAB | ||
367 | | MUSB_TXCSR_AUTOSET); | 366 | | MUSB_TXCSR_AUTOSET); |
368 | musb_writew(mbase, offset, txcsr); | 367 | musb_writew(mbase, offset, txcsr); |
369 | /* Send out the packet */ | 368 | /* Send out the packet */ |
370 | txcsr &= ~MUSB_TXCSR_DMAMODE; | 369 | txcsr &= ~MUSB_TXCSR_DMAMODE; |
370 | txcsr |= MUSB_TXCSR_DMAENAB; | ||
371 | } | ||
371 | txcsr |= MUSB_TXCSR_TXPKTRDY; | 372 | txcsr |= MUSB_TXCSR_TXPKTRDY; |
372 | musb_writew(mbase, offset, txcsr); | 373 | musb_writew(mbase, offset, txcsr); |
373 | } | 374 | } |
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index d7312eed6088..91ea3083e7ad 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig | |||
@@ -21,7 +21,7 @@ config AB8500_USB | |||
21 | 21 | ||
22 | config FSL_USB2_OTG | 22 | config FSL_USB2_OTG |
23 | bool "Freescale USB OTG Transceiver Driver" | 23 | bool "Freescale USB OTG Transceiver Driver" |
24 | depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM | 24 | depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM |
25 | depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' | 25 | depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y' |
26 | select USB_PHY | 26 | select USB_PHY |
27 | help | 27 | help |
diff --git a/drivers/usb/phy/phy-am335x.c b/drivers/usb/phy/phy-am335x.c index 27bdb7222527..f5f0568d8533 100644 --- a/drivers/usb/phy/phy-am335x.c +++ b/drivers/usb/phy/phy-am335x.c | |||
@@ -61,9 +61,6 @@ static int am335x_phy_probe(struct platform_device *pdev) | |||
61 | if (ret) | 61 | if (ret) |
62 | return ret; | 62 | return ret; |
63 | 63 | ||
64 | ret = usb_add_phy_dev(&am_phy->usb_phy_gen.phy); | ||
65 | if (ret) | ||
66 | return ret; | ||
67 | am_phy->usb_phy_gen.phy.init = am335x_init; | 64 | am_phy->usb_phy_gen.phy.init = am335x_init; |
68 | am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; | 65 | am_phy->usb_phy_gen.phy.shutdown = am335x_shutdown; |
69 | 66 | ||
@@ -82,7 +79,7 @@ static int am335x_phy_probe(struct platform_device *pdev) | |||
82 | device_set_wakeup_enable(dev, false); | 79 | device_set_wakeup_enable(dev, false); |
83 | phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); | 80 | phy_ctrl_power(am_phy->phy_ctrl, am_phy->id, am_phy->dr_mode, false); |
84 | 81 | ||
85 | return 0; | 82 | return usb_add_phy_dev(&am_phy->usb_phy_gen.phy); |
86 | } | 83 | } |
87 | 84 | ||
88 | static int am335x_phy_remove(struct platform_device *pdev) | 85 | static int am335x_phy_remove(struct platform_device *pdev) |
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index 4bc29b586698..f1c39a3c7534 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c | |||
@@ -2297,7 +2297,8 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port) | |||
2297 | pdo_pps_apdo_max_voltage(snk)); | 2297 | pdo_pps_apdo_max_voltage(snk)); |
2298 | port->pps_data.max_curr = min_pps_apdo_current(src, snk); | 2298 | port->pps_data.max_curr = min_pps_apdo_current(src, snk); |
2299 | port->pps_data.out_volt = min(port->pps_data.max_volt, | 2299 | port->pps_data.out_volt = min(port->pps_data.max_volt, |
2300 | port->pps_data.out_volt); | 2300 | max(port->pps_data.min_volt, |
2301 | port->pps_data.out_volt)); | ||
2301 | port->pps_data.op_curr = min(port->pps_data.max_curr, | 2302 | port->pps_data.op_curr = min(port->pps_data.max_curr, |
2302 | port->pps_data.op_curr); | 2303 | port->pps_data.op_curr); |
2303 | } | 2304 | } |
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index bca86bf7189f..df51a35cf537 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c | |||
@@ -1337,7 +1337,8 @@ static int vhost_net_open(struct inode *inode, struct file *f) | |||
1337 | n->vqs[i].rx_ring = NULL; | 1337 | n->vqs[i].rx_ring = NULL; |
1338 | vhost_net_buf_init(&n->vqs[i].rxq); | 1338 | vhost_net_buf_init(&n->vqs[i].rxq); |
1339 | } | 1339 | } |
1340 | vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX); | 1340 | vhost_dev_init(dev, vqs, VHOST_NET_VQ_MAX, |
1341 | UIO_MAXIOV + VHOST_NET_BATCH); | ||
1341 | 1342 | ||
1342 | vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); | 1343 | vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, EPOLLOUT, dev); |
1343 | vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); | 1344 | vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, EPOLLIN, dev); |
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 344684f3e2e4..23593cb23dd0 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c | |||
@@ -1627,7 +1627,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) | |||
1627 | vqs[i] = &vs->vqs[i].vq; | 1627 | vqs[i] = &vs->vqs[i].vq; |
1628 | vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; | 1628 | vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick; |
1629 | } | 1629 | } |
1630 | vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); | 1630 | vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ, UIO_MAXIOV); |
1631 | 1631 | ||
1632 | vhost_scsi_init_inflight(vs, NULL); | 1632 | vhost_scsi_init_inflight(vs, NULL); |
1633 | 1633 | ||
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 15a216cdd507..24a129fcdd61 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -390,9 +390,9 @@ static long vhost_dev_alloc_iovecs(struct vhost_dev *dev) | |||
390 | vq->indirect = kmalloc_array(UIO_MAXIOV, | 390 | vq->indirect = kmalloc_array(UIO_MAXIOV, |
391 | sizeof(*vq->indirect), | 391 | sizeof(*vq->indirect), |
392 | GFP_KERNEL); | 392 | GFP_KERNEL); |
393 | vq->log = kmalloc_array(UIO_MAXIOV, sizeof(*vq->log), | 393 | vq->log = kmalloc_array(dev->iov_limit, sizeof(*vq->log), |
394 | GFP_KERNEL); | 394 | GFP_KERNEL); |
395 | vq->heads = kmalloc_array(UIO_MAXIOV, sizeof(*vq->heads), | 395 | vq->heads = kmalloc_array(dev->iov_limit, sizeof(*vq->heads), |
396 | GFP_KERNEL); | 396 | GFP_KERNEL); |
397 | if (!vq->indirect || !vq->log || !vq->heads) | 397 | if (!vq->indirect || !vq->log || !vq->heads) |
398 | goto err_nomem; | 398 | goto err_nomem; |
@@ -414,7 +414,7 @@ static void vhost_dev_free_iovecs(struct vhost_dev *dev) | |||
414 | } | 414 | } |
415 | 415 | ||
416 | void vhost_dev_init(struct vhost_dev *dev, | 416 | void vhost_dev_init(struct vhost_dev *dev, |
417 | struct vhost_virtqueue **vqs, int nvqs) | 417 | struct vhost_virtqueue **vqs, int nvqs, int iov_limit) |
418 | { | 418 | { |
419 | struct vhost_virtqueue *vq; | 419 | struct vhost_virtqueue *vq; |
420 | int i; | 420 | int i; |
@@ -427,6 +427,7 @@ void vhost_dev_init(struct vhost_dev *dev, | |||
427 | dev->iotlb = NULL; | 427 | dev->iotlb = NULL; |
428 | dev->mm = NULL; | 428 | dev->mm = NULL; |
429 | dev->worker = NULL; | 429 | dev->worker = NULL; |
430 | dev->iov_limit = iov_limit; | ||
430 | init_llist_head(&dev->work_list); | 431 | init_llist_head(&dev->work_list); |
431 | init_waitqueue_head(&dev->wait); | 432 | init_waitqueue_head(&dev->wait); |
432 | INIT_LIST_HEAD(&dev->read_list); | 433 | INIT_LIST_HEAD(&dev->read_list); |
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h index 1b675dad5e05..9490e7ddb340 100644 --- a/drivers/vhost/vhost.h +++ b/drivers/vhost/vhost.h | |||
@@ -170,9 +170,11 @@ struct vhost_dev { | |||
170 | struct list_head read_list; | 170 | struct list_head read_list; |
171 | struct list_head pending_list; | 171 | struct list_head pending_list; |
172 | wait_queue_head_t wait; | 172 | wait_queue_head_t wait; |
173 | int iov_limit; | ||
173 | }; | 174 | }; |
174 | 175 | ||
175 | void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs); | 176 | void vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, |
177 | int nvqs, int iov_limit); | ||
176 | long vhost_dev_set_owner(struct vhost_dev *dev); | 178 | long vhost_dev_set_owner(struct vhost_dev *dev); |
177 | bool vhost_dev_has_owner(struct vhost_dev *dev); | 179 | bool vhost_dev_has_owner(struct vhost_dev *dev); |
178 | long vhost_dev_check_owner(struct vhost_dev *); | 180 | long vhost_dev_check_owner(struct vhost_dev *); |
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 3fbc068eaa9b..bb5fc0e9fbc2 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c | |||
@@ -531,7 +531,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) | |||
531 | vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; | 531 | vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick; |
532 | vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; | 532 | vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick; |
533 | 533 | ||
534 | vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs)); | 534 | vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs), UIO_MAXIOV); |
535 | 535 | ||
536 | file->private_data = vsock; | 536 | file->private_data = vsock; |
537 | spin_lock_init(&vsock->send_pkt_list_lock); | 537 | spin_lock_init(&vsock->send_pkt_list_lock); |
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index cd7e755484e3..a0b07c331255 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c | |||
@@ -152,7 +152,12 @@ struct vring_virtqueue { | |||
152 | /* Available for packed ring */ | 152 | /* Available for packed ring */ |
153 | struct { | 153 | struct { |
154 | /* Actual memory layout for this queue. */ | 154 | /* Actual memory layout for this queue. */ |
155 | struct vring_packed vring; | 155 | struct { |
156 | unsigned int num; | ||
157 | struct vring_packed_desc *desc; | ||
158 | struct vring_packed_desc_event *driver; | ||
159 | struct vring_packed_desc_event *device; | ||
160 | } vring; | ||
156 | 161 | ||
157 | /* Driver ring wrap counter. */ | 162 | /* Driver ring wrap counter. */ |
158 | bool avail_wrap_counter; | 163 | bool avail_wrap_counter; |
@@ -1609,6 +1614,9 @@ static struct virtqueue *vring_create_virtqueue_packed( | |||
1609 | !context; | 1614 | !context; |
1610 | vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); | 1615 | vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); |
1611 | 1616 | ||
1617 | if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) | ||
1618 | vq->weak_barriers = false; | ||
1619 | |||
1612 | vq->packed.ring_dma_addr = ring_dma_addr; | 1620 | vq->packed.ring_dma_addr = ring_dma_addr; |
1613 | vq->packed.driver_event_dma_addr = driver_event_dma_addr; | 1621 | vq->packed.driver_event_dma_addr = driver_event_dma_addr; |
1614 | vq->packed.device_event_dma_addr = device_event_dma_addr; | 1622 | vq->packed.device_event_dma_addr = device_event_dma_addr; |
@@ -2079,6 +2087,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index, | |||
2079 | !context; | 2087 | !context; |
2080 | vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); | 2088 | vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX); |
2081 | 2089 | ||
2090 | if (virtio_has_feature(vdev, VIRTIO_F_ORDER_PLATFORM)) | ||
2091 | vq->weak_barriers = false; | ||
2092 | |||
2082 | vq->split.queue_dma_addr = 0; | 2093 | vq->split.queue_dma_addr = 0; |
2083 | vq->split.queue_size_in_bytes = 0; | 2094 | vq->split.queue_size_in_bytes = 0; |
2084 | 2095 | ||
@@ -2213,6 +2224,8 @@ void vring_transport_features(struct virtio_device *vdev) | |||
2213 | break; | 2224 | break; |
2214 | case VIRTIO_F_RING_PACKED: | 2225 | case VIRTIO_F_RING_PACKED: |
2215 | break; | 2226 | break; |
2227 | case VIRTIO_F_ORDER_PLATFORM: | ||
2228 | break; | ||
2216 | default: | 2229 | default: |
2217 | /* We don't understand this bit. */ | 2230 | /* We don't understand this bit. */ |
2218 | __virtio_clear_bit(vdev, i); | 2231 | __virtio_clear_bit(vdev, i); |
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c index d441244b79df..28d9c2b1b3bb 100644 --- a/fs/autofs/expire.c +++ b/fs/autofs/expire.c | |||
@@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb, | |||
596 | pkt.len = dentry->d_name.len; | 596 | pkt.len = dentry->d_name.len; |
597 | memcpy(pkt.name, dentry->d_name.name, pkt.len); | 597 | memcpy(pkt.name, dentry->d_name.name, pkt.len); |
598 | pkt.name[pkt.len] = '\0'; | 598 | pkt.name[pkt.len] = '\0'; |
599 | dput(dentry); | ||
600 | 599 | ||
601 | if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire))) | 600 | if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire))) |
602 | ret = -EFAULT; | 601 | ret = -EFAULT; |
@@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb, | |||
609 | complete_all(&ino->expire_complete); | 608 | complete_all(&ino->expire_complete); |
610 | spin_unlock(&sbi->fs_lock); | 609 | spin_unlock(&sbi->fs_lock); |
611 | 610 | ||
611 | dput(dentry); | ||
612 | |||
612 | return ret; | 613 | return ret; |
613 | } | 614 | } |
614 | 615 | ||
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c index 0e8ea2d9a2bb..078992eee299 100644 --- a/fs/autofs/inode.c +++ b/fs/autofs/inode.c | |||
@@ -266,8 +266,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent) | |||
266 | } | 266 | } |
267 | root_inode = autofs_get_inode(s, S_IFDIR | 0755); | 267 | root_inode = autofs_get_inode(s, S_IFDIR | 0755); |
268 | root = d_make_root(root_inode); | 268 | root = d_make_root(root_inode); |
269 | if (!root) | 269 | if (!root) { |
270 | ret = -ENOMEM; | ||
270 | goto fail_ino; | 271 | goto fail_ino; |
272 | } | ||
271 | pipe = NULL; | 273 | pipe = NULL; |
272 | 274 | ||
273 | root->d_fsdata = ino; | 275 | root->d_fsdata = ino; |
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index d0078cbb718b..7cde3f46ad26 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c | |||
@@ -42,14 +42,10 @@ static int load_script(struct linux_binprm *bprm) | |||
42 | fput(bprm->file); | 42 | fput(bprm->file); |
43 | bprm->file = NULL; | 43 | bprm->file = NULL; |
44 | 44 | ||
45 | for (cp = bprm->buf+2;; cp++) { | 45 | bprm->buf[BINPRM_BUF_SIZE - 1] = '\0'; |
46 | if (cp >= bprm->buf + BINPRM_BUF_SIZE) | 46 | if ((cp = strchr(bprm->buf, '\n')) == NULL) |
47 | return -ENOEXEC; | 47 | cp = bprm->buf+BINPRM_BUF_SIZE-1; |
48 | if (!*cp || (*cp == '\n')) | ||
49 | break; | ||
50 | } | ||
51 | *cp = '\0'; | 48 | *cp = '\0'; |
52 | |||
53 | while (cp > bprm->buf) { | 49 | while (cp > bprm->buf) { |
54 | cp--; | 50 | cp--; |
55 | if ((*cp == ' ') || (*cp == '\t')) | 51 | if ((*cp == ' ') || (*cp == '\t')) |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index f64aad613727..5a6c39b44c84 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -968,6 +968,48 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, | |||
968 | return 0; | 968 | return 0; |
969 | } | 969 | } |
970 | 970 | ||
971 | static struct extent_buffer *alloc_tree_block_no_bg_flush( | ||
972 | struct btrfs_trans_handle *trans, | ||
973 | struct btrfs_root *root, | ||
974 | u64 parent_start, | ||
975 | const struct btrfs_disk_key *disk_key, | ||
976 | int level, | ||
977 | u64 hint, | ||
978 | u64 empty_size) | ||
979 | { | ||
980 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
981 | struct extent_buffer *ret; | ||
982 | |||
983 | /* | ||
984 | * If we are COWing a node/leaf from the extent, chunk, device or free | ||
985 | * space trees, make sure that we do not finish block group creation of | ||
986 | * pending block groups. We do this to avoid a deadlock. | ||
987 | * COWing can result in allocation of a new chunk, and flushing pending | ||
988 | * block groups (btrfs_create_pending_block_groups()) can be triggered | ||
989 | * when finishing allocation of a new chunk. Creation of a pending block | ||
990 | * group modifies the extent, chunk, device and free space trees, | ||
991 | * therefore we could deadlock with ourselves since we are holding a | ||
992 | * lock on an extent buffer that btrfs_create_pending_block_groups() may | ||
993 | * try to COW later. | ||
994 | * For similar reasons, we also need to delay flushing pending block | ||
995 | * groups when splitting a leaf or node, from one of those trees, since | ||
996 | * we are holding a write lock on it and its parent or when inserting a | ||
997 | * new root node for one of those trees. | ||
998 | */ | ||
999 | if (root == fs_info->extent_root || | ||
1000 | root == fs_info->chunk_root || | ||
1001 | root == fs_info->dev_root || | ||
1002 | root == fs_info->free_space_root) | ||
1003 | trans->can_flush_pending_bgs = false; | ||
1004 | |||
1005 | ret = btrfs_alloc_tree_block(trans, root, parent_start, | ||
1006 | root->root_key.objectid, disk_key, level, | ||
1007 | hint, empty_size); | ||
1008 | trans->can_flush_pending_bgs = true; | ||
1009 | |||
1010 | return ret; | ||
1011 | } | ||
1012 | |||
971 | /* | 1013 | /* |
972 | * does the dirty work in cow of a single block. The parent block (if | 1014 | * does the dirty work in cow of a single block. The parent block (if |
973 | * supplied) is updated to point to the new cow copy. The new buffer is marked | 1015 | * supplied) is updated to point to the new cow copy. The new buffer is marked |
@@ -1015,28 +1057,8 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans, | |||
1015 | if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) | 1057 | if ((root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) && parent) |
1016 | parent_start = parent->start; | 1058 | parent_start = parent->start; |
1017 | 1059 | ||
1018 | /* | 1060 | cow = alloc_tree_block_no_bg_flush(trans, root, parent_start, &disk_key, |
1019 | * If we are COWing a node/leaf from the extent, chunk, device or free | 1061 | level, search_start, empty_size); |
1020 | * space trees, make sure that we do not finish block group creation of | ||
1021 | * pending block groups. We do this to avoid a deadlock. | ||
1022 | * COWing can result in allocation of a new chunk, and flushing pending | ||
1023 | * block groups (btrfs_create_pending_block_groups()) can be triggered | ||
1024 | * when finishing allocation of a new chunk. Creation of a pending block | ||
1025 | * group modifies the extent, chunk, device and free space trees, | ||
1026 | * therefore we could deadlock with ourselves since we are holding a | ||
1027 | * lock on an extent buffer that btrfs_create_pending_block_groups() may | ||
1028 | * try to COW later. | ||
1029 | */ | ||
1030 | if (root == fs_info->extent_root || | ||
1031 | root == fs_info->chunk_root || | ||
1032 | root == fs_info->dev_root || | ||
1033 | root == fs_info->free_space_root) | ||
1034 | trans->can_flush_pending_bgs = false; | ||
1035 | |||
1036 | cow = btrfs_alloc_tree_block(trans, root, parent_start, | ||
1037 | root->root_key.objectid, &disk_key, level, | ||
1038 | search_start, empty_size); | ||
1039 | trans->can_flush_pending_bgs = true; | ||
1040 | if (IS_ERR(cow)) | 1062 | if (IS_ERR(cow)) |
1041 | return PTR_ERR(cow); | 1063 | return PTR_ERR(cow); |
1042 | 1064 | ||
@@ -3345,8 +3367,8 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, | |||
3345 | else | 3367 | else |
3346 | btrfs_node_key(lower, &lower_key, 0); | 3368 | btrfs_node_key(lower, &lower_key, 0); |
3347 | 3369 | ||
3348 | c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, | 3370 | c = alloc_tree_block_no_bg_flush(trans, root, 0, &lower_key, level, |
3349 | &lower_key, level, root->node->start, 0); | 3371 | root->node->start, 0); |
3350 | if (IS_ERR(c)) | 3372 | if (IS_ERR(c)) |
3351 | return PTR_ERR(c); | 3373 | return PTR_ERR(c); |
3352 | 3374 | ||
@@ -3475,8 +3497,8 @@ static noinline int split_node(struct btrfs_trans_handle *trans, | |||
3475 | mid = (c_nritems + 1) / 2; | 3497 | mid = (c_nritems + 1) / 2; |
3476 | btrfs_node_key(c, &disk_key, mid); | 3498 | btrfs_node_key(c, &disk_key, mid); |
3477 | 3499 | ||
3478 | split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, | 3500 | split = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, level, |
3479 | &disk_key, level, c->start, 0); | 3501 | c->start, 0); |
3480 | if (IS_ERR(split)) | 3502 | if (IS_ERR(split)) |
3481 | return PTR_ERR(split); | 3503 | return PTR_ERR(split); |
3482 | 3504 | ||
@@ -4260,8 +4282,8 @@ again: | |||
4260 | else | 4282 | else |
4261 | btrfs_item_key(l, &disk_key, mid); | 4283 | btrfs_item_key(l, &disk_key, mid); |
4262 | 4284 | ||
4263 | right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid, | 4285 | right = alloc_tree_block_no_bg_flush(trans, root, 0, &disk_key, 0, |
4264 | &disk_key, 0, l->start, 0); | 4286 | l->start, 0); |
4265 | if (IS_ERR(right)) | 4287 | if (IS_ERR(right)) |
4266 | return PTR_ERR(right); | 4288 | return PTR_ERR(right); |
4267 | 4289 | ||
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index c5586ffd1426..0a3f122dd61f 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -1621,6 +1621,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, | |||
1621 | flags | SB_RDONLY, device_name, data); | 1621 | flags | SB_RDONLY, device_name, data); |
1622 | if (IS_ERR(mnt_root)) { | 1622 | if (IS_ERR(mnt_root)) { |
1623 | root = ERR_CAST(mnt_root); | 1623 | root = ERR_CAST(mnt_root); |
1624 | kfree(subvol_name); | ||
1624 | goto out; | 1625 | goto out; |
1625 | } | 1626 | } |
1626 | 1627 | ||
@@ -1630,12 +1631,14 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, | |||
1630 | if (error < 0) { | 1631 | if (error < 0) { |
1631 | root = ERR_PTR(error); | 1632 | root = ERR_PTR(error); |
1632 | mntput(mnt_root); | 1633 | mntput(mnt_root); |
1634 | kfree(subvol_name); | ||
1633 | goto out; | 1635 | goto out; |
1634 | } | 1636 | } |
1635 | } | 1637 | } |
1636 | } | 1638 | } |
1637 | if (IS_ERR(mnt_root)) { | 1639 | if (IS_ERR(mnt_root)) { |
1638 | root = ERR_CAST(mnt_root); | 1640 | root = ERR_CAST(mnt_root); |
1641 | kfree(subvol_name); | ||
1639 | goto out; | 1642 | goto out; |
1640 | } | 1643 | } |
1641 | 1644 | ||
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 127fa1535f58..4ec2b660d014 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -850,14 +850,6 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
850 | 850 | ||
851 | btrfs_trans_release_chunk_metadata(trans); | 851 | btrfs_trans_release_chunk_metadata(trans); |
852 | 852 | ||
853 | if (lock && should_end_transaction(trans) && | ||
854 | READ_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) { | ||
855 | spin_lock(&info->trans_lock); | ||
856 | if (cur_trans->state == TRANS_STATE_RUNNING) | ||
857 | cur_trans->state = TRANS_STATE_BLOCKED; | ||
858 | spin_unlock(&info->trans_lock); | ||
859 | } | ||
860 | |||
861 | if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { | 853 | if (lock && READ_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) { |
862 | if (throttle) | 854 | if (throttle) |
863 | return btrfs_commit_transaction(trans); | 855 | return btrfs_commit_transaction(trans); |
@@ -1879,6 +1871,21 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans, int err) | |||
1879 | kmem_cache_free(btrfs_trans_handle_cachep, trans); | 1871 | kmem_cache_free(btrfs_trans_handle_cachep, trans); |
1880 | } | 1872 | } |
1881 | 1873 | ||
1874 | /* | ||
1875 | * Release reserved delayed ref space of all pending block groups of the | ||
1876 | * transaction and remove them from the list | ||
1877 | */ | ||
1878 | static void btrfs_cleanup_pending_block_groups(struct btrfs_trans_handle *trans) | ||
1879 | { | ||
1880 | struct btrfs_fs_info *fs_info = trans->fs_info; | ||
1881 | struct btrfs_block_group_cache *block_group, *tmp; | ||
1882 | |||
1883 | list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { | ||
1884 | btrfs_delayed_refs_rsv_release(fs_info, 1); | ||
1885 | list_del_init(&block_group->bg_list); | ||
1886 | } | ||
1887 | } | ||
1888 | |||
1882 | static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) | 1889 | static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) |
1883 | { | 1890 | { |
1884 | /* | 1891 | /* |
@@ -2270,6 +2277,7 @@ scrub_continue: | |||
2270 | btrfs_scrub_continue(fs_info); | 2277 | btrfs_scrub_continue(fs_info); |
2271 | cleanup_transaction: | 2278 | cleanup_transaction: |
2272 | btrfs_trans_release_metadata(trans); | 2279 | btrfs_trans_release_metadata(trans); |
2280 | btrfs_cleanup_pending_block_groups(trans); | ||
2273 | btrfs_trans_release_chunk_metadata(trans); | 2281 | btrfs_trans_release_chunk_metadata(trans); |
2274 | trans->block_rsv = NULL; | 2282 | trans->block_rsv = NULL; |
2275 | btrfs_warn(fs_info, "Skipping commit of aborted transaction."); | 2283 | btrfs_warn(fs_info, "Skipping commit of aborted transaction."); |
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 3e4f8f88353e..15561926ab32 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -957,11 +957,11 @@ static noinline struct btrfs_device *device_list_add(const char *path, | |||
957 | else | 957 | else |
958 | fs_devices = alloc_fs_devices(disk_super->fsid, NULL); | 958 | fs_devices = alloc_fs_devices(disk_super->fsid, NULL); |
959 | 959 | ||
960 | fs_devices->fsid_change = fsid_change_in_progress; | ||
961 | |||
962 | if (IS_ERR(fs_devices)) | 960 | if (IS_ERR(fs_devices)) |
963 | return ERR_CAST(fs_devices); | 961 | return ERR_CAST(fs_devices); |
964 | 962 | ||
963 | fs_devices->fsid_change = fsid_change_in_progress; | ||
964 | |||
965 | mutex_lock(&fs_devices->device_list_mutex); | 965 | mutex_lock(&fs_devices->device_list_mutex); |
966 | list_add(&fs_devices->fs_list, &fs_uuids); | 966 | list_add(&fs_devices->fs_list, &fs_uuids); |
967 | 967 | ||
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index d1f9c2f3f575..7652551a1fc4 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -150,5 +150,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
150 | extern const struct export_operations cifs_export_ops; | 150 | extern const struct export_operations cifs_export_ops; |
151 | #endif /* CONFIG_CIFS_NFSD_EXPORT */ | 151 | #endif /* CONFIG_CIFS_NFSD_EXPORT */ |
152 | 152 | ||
153 | #define CIFS_VERSION "2.16" | 153 | #define CIFS_VERSION "2.17" |
154 | #endif /* _CIFSFS_H */ | 154 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 2c7689f3998d..659ce1b92c44 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -2696,6 +2696,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, | |||
2696 | 2696 | ||
2697 | rc = cifs_write_allocate_pages(wdata->pages, nr_pages); | 2697 | rc = cifs_write_allocate_pages(wdata->pages, nr_pages); |
2698 | if (rc) { | 2698 | if (rc) { |
2699 | kvfree(wdata->pages); | ||
2699 | kfree(wdata); | 2700 | kfree(wdata); |
2700 | add_credits_and_wake_if(server, credits, 0); | 2701 | add_credits_and_wake_if(server, credits, 0); |
2701 | break; | 2702 | break; |
@@ -2707,6 +2708,7 @@ cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from, | |||
2707 | if (rc) { | 2708 | if (rc) { |
2708 | for (i = 0; i < nr_pages; i++) | 2709 | for (i = 0; i < nr_pages; i++) |
2709 | put_page(wdata->pages[i]); | 2710 | put_page(wdata->pages[i]); |
2711 | kvfree(wdata->pages); | ||
2710 | kfree(wdata); | 2712 | kfree(wdata); |
2711 | add_credits_and_wake_if(server, credits, 0); | 2713 | add_credits_and_wake_if(server, credits, 0); |
2712 | break; | 2714 | break; |
@@ -3386,8 +3388,12 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, | |||
3386 | } | 3388 | } |
3387 | 3389 | ||
3388 | rc = cifs_read_allocate_pages(rdata, npages); | 3390 | rc = cifs_read_allocate_pages(rdata, npages); |
3389 | if (rc) | 3391 | if (rc) { |
3390 | goto error; | 3392 | kvfree(rdata->pages); |
3393 | kfree(rdata); | ||
3394 | add_credits_and_wake_if(server, credits, 0); | ||
3395 | break; | ||
3396 | } | ||
3391 | 3397 | ||
3392 | rdata->tailsz = PAGE_SIZE; | 3398 | rdata->tailsz = PAGE_SIZE; |
3393 | } | 3399 | } |
@@ -3407,7 +3413,6 @@ cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file, | |||
3407 | if (!rdata->cfile->invalidHandle || | 3413 | if (!rdata->cfile->invalidHandle || |
3408 | !(rc = cifs_reopen_file(rdata->cfile, true))) | 3414 | !(rc = cifs_reopen_file(rdata->cfile, true))) |
3409 | rc = server->ops->async_readv(rdata); | 3415 | rc = server->ops->async_readv(rdata); |
3410 | error: | ||
3411 | if (rc) { | 3416 | if (rc) { |
3412 | add_credits_and_wake_if(server, rdata->credits, 0); | 3417 | add_credits_and_wake_if(server, rdata->credits, 0); |
3413 | kref_put(&rdata->refcount, | 3418 | kref_put(&rdata->refcount, |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 153238fc4fa9..6f96e2292856 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -866,7 +866,9 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, | |||
866 | FILE_READ_EA, | 866 | FILE_READ_EA, |
867 | FILE_FULL_EA_INFORMATION, | 867 | FILE_FULL_EA_INFORMATION, |
868 | SMB2_O_INFO_FILE, | 868 | SMB2_O_INFO_FILE, |
869 | SMB2_MAX_EA_BUF, | 869 | CIFSMaxBufSize - |
870 | MAX_SMB2_CREATE_RESPONSE_SIZE - | ||
871 | MAX_SMB2_CLOSE_RESPONSE_SIZE, | ||
870 | &rsp_iov, &buftype, cifs_sb); | 872 | &rsp_iov, &buftype, cifs_sb); |
871 | if (rc) { | 873 | if (rc) { |
872 | /* | 874 | /* |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 2ff209ec4fab..77b3aaa39b35 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -3241,8 +3241,17 @@ smb2_readv_callback(struct mid_q_entry *mid) | |||
3241 | rdata->mr = NULL; | 3241 | rdata->mr = NULL; |
3242 | } | 3242 | } |
3243 | #endif | 3243 | #endif |
3244 | if (rdata->result) | 3244 | if (rdata->result && rdata->result != -ENODATA) { |
3245 | cifs_stats_fail_inc(tcon, SMB2_READ_HE); | 3245 | cifs_stats_fail_inc(tcon, SMB2_READ_HE); |
3246 | trace_smb3_read_err(0 /* xid */, | ||
3247 | rdata->cfile->fid.persistent_fid, | ||
3248 | tcon->tid, tcon->ses->Suid, rdata->offset, | ||
3249 | rdata->bytes, rdata->result); | ||
3250 | } else | ||
3251 | trace_smb3_read_done(0 /* xid */, | ||
3252 | rdata->cfile->fid.persistent_fid, | ||
3253 | tcon->tid, tcon->ses->Suid, | ||
3254 | rdata->offset, rdata->got_bytes); | ||
3246 | 3255 | ||
3247 | queue_work(cifsiod_wq, &rdata->work); | 3256 | queue_work(cifsiod_wq, &rdata->work); |
3248 | DeleteMidQEntry(mid); | 3257 | DeleteMidQEntry(mid); |
@@ -3317,13 +3326,11 @@ smb2_async_readv(struct cifs_readdata *rdata) | |||
3317 | if (rc) { | 3326 | if (rc) { |
3318 | kref_put(&rdata->refcount, cifs_readdata_release); | 3327 | kref_put(&rdata->refcount, cifs_readdata_release); |
3319 | cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); | 3328 | cifs_stats_fail_inc(io_parms.tcon, SMB2_READ_HE); |
3320 | trace_smb3_read_err(rc, 0 /* xid */, io_parms.persistent_fid, | 3329 | trace_smb3_read_err(0 /* xid */, io_parms.persistent_fid, |
3321 | io_parms.tcon->tid, io_parms.tcon->ses->Suid, | 3330 | io_parms.tcon->tid, |
3322 | io_parms.offset, io_parms.length); | 3331 | io_parms.tcon->ses->Suid, |
3323 | } else | 3332 | io_parms.offset, io_parms.length, rc); |
3324 | trace_smb3_read_done(0 /* xid */, io_parms.persistent_fid, | 3333 | } |
3325 | io_parms.tcon->tid, io_parms.tcon->ses->Suid, | ||
3326 | io_parms.offset, io_parms.length); | ||
3327 | 3334 | ||
3328 | cifs_small_buf_release(buf); | 3335 | cifs_small_buf_release(buf); |
3329 | return rc; | 3336 | return rc; |
@@ -3367,10 +3374,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
3367 | if (rc != -ENODATA) { | 3374 | if (rc != -ENODATA) { |
3368 | cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); | 3375 | cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); |
3369 | cifs_dbg(VFS, "Send error in read = %d\n", rc); | 3376 | cifs_dbg(VFS, "Send error in read = %d\n", rc); |
3377 | trace_smb3_read_err(xid, req->PersistentFileId, | ||
3378 | io_parms->tcon->tid, ses->Suid, | ||
3379 | io_parms->offset, io_parms->length, | ||
3380 | rc); | ||
3370 | } | 3381 | } |
3371 | trace_smb3_read_err(rc, xid, req->PersistentFileId, | ||
3372 | io_parms->tcon->tid, ses->Suid, | ||
3373 | io_parms->offset, io_parms->length); | ||
3374 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); | 3382 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); |
3375 | return rc == -ENODATA ? 0 : rc; | 3383 | return rc == -ENODATA ? 0 : rc; |
3376 | } else | 3384 | } else |
@@ -3459,8 +3467,17 @@ smb2_writev_callback(struct mid_q_entry *mid) | |||
3459 | wdata->mr = NULL; | 3467 | wdata->mr = NULL; |
3460 | } | 3468 | } |
3461 | #endif | 3469 | #endif |
3462 | if (wdata->result) | 3470 | if (wdata->result) { |
3463 | cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); | 3471 | cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); |
3472 | trace_smb3_write_err(0 /* no xid */, | ||
3473 | wdata->cfile->fid.persistent_fid, | ||
3474 | tcon->tid, tcon->ses->Suid, wdata->offset, | ||
3475 | wdata->bytes, wdata->result); | ||
3476 | } else | ||
3477 | trace_smb3_write_done(0 /* no xid */, | ||
3478 | wdata->cfile->fid.persistent_fid, | ||
3479 | tcon->tid, tcon->ses->Suid, | ||
3480 | wdata->offset, wdata->bytes); | ||
3464 | 3481 | ||
3465 | queue_work(cifsiod_wq, &wdata->work); | 3482 | queue_work(cifsiod_wq, &wdata->work); |
3466 | DeleteMidQEntry(mid); | 3483 | DeleteMidQEntry(mid); |
@@ -3602,10 +3619,7 @@ smb2_async_writev(struct cifs_writedata *wdata, | |||
3602 | wdata->bytes, rc); | 3619 | wdata->bytes, rc); |
3603 | kref_put(&wdata->refcount, release); | 3620 | kref_put(&wdata->refcount, release); |
3604 | cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); | 3621 | cifs_stats_fail_inc(tcon, SMB2_WRITE_HE); |
3605 | } else | 3622 | } |
3606 | trace_smb3_write_done(0 /* no xid */, req->PersistentFileId, | ||
3607 | tcon->tid, tcon->ses->Suid, wdata->offset, | ||
3608 | wdata->bytes); | ||
3609 | 3623 | ||
3610 | async_writev_out: | 3624 | async_writev_out: |
3611 | cifs_small_buf_release(req); | 3625 | cifs_small_buf_release(req); |
@@ -3831,8 +3845,8 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, | |||
3831 | rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) { | 3845 | rsp->sync_hdr.Status == STATUS_NO_MORE_FILES) { |
3832 | srch_inf->endOfSearch = true; | 3846 | srch_inf->endOfSearch = true; |
3833 | rc = 0; | 3847 | rc = 0; |
3834 | } | 3848 | } else |
3835 | cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); | 3849 | cifs_stats_fail_inc(tcon, SMB2_QUERY_DIRECTORY_HE); |
3836 | goto qdir_exit; | 3850 | goto qdir_exit; |
3837 | } | 3851 | } |
3838 | 3852 | ||
@@ -4427,8 +4441,8 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon, | |||
4427 | rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov); | 4441 | rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov); |
4428 | cifs_small_buf_release(req); | 4442 | cifs_small_buf_release(req); |
4429 | 4443 | ||
4430 | please_key_low = (__u64 *)req->LeaseKey; | 4444 | please_key_low = (__u64 *)lease_key; |
4431 | please_key_high = (__u64 *)(req->LeaseKey+8); | 4445 | please_key_high = (__u64 *)(lease_key+8); |
4432 | if (rc) { | 4446 | if (rc) { |
4433 | cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); | 4447 | cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE); |
4434 | trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid, | 4448 | trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid, |
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 7a2d0a2255e6..538e2299805f 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h | |||
@@ -84,8 +84,9 @@ | |||
84 | 84 | ||
85 | #define NUMBER_OF_SMB2_COMMANDS 0x0013 | 85 | #define NUMBER_OF_SMB2_COMMANDS 0x0013 |
86 | 86 | ||
87 | /* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */ | 87 | /* 52 transform hdr + 64 hdr + 88 create rsp */ |
88 | #define MAX_SMB2_HDR_SIZE 0x00b0 | 88 | #define SMB2_TRANSFORM_HEADER_SIZE 52 |
89 | #define MAX_SMB2_HDR_SIZE 204 | ||
89 | 90 | ||
90 | #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) | 91 | #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) |
91 | #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd) | 92 | #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd) |
@@ -648,6 +649,13 @@ struct smb2_create_req { | |||
648 | __u8 Buffer[0]; | 649 | __u8 Buffer[0]; |
649 | } __packed; | 650 | } __packed; |
650 | 651 | ||
652 | /* | ||
653 | * Maximum size of a SMB2_CREATE response is 64 (smb2 header) + | ||
654 | * 88 (fixed part of create response) + 520 (path) + 150 (contexts) + | ||
655 | * 2 bytes of padding. | ||
656 | */ | ||
657 | #define MAX_SMB2_CREATE_RESPONSE_SIZE 824 | ||
658 | |||
651 | struct smb2_create_rsp { | 659 | struct smb2_create_rsp { |
652 | struct smb2_sync_hdr sync_hdr; | 660 | struct smb2_sync_hdr sync_hdr; |
653 | __le16 StructureSize; /* Must be 89 */ | 661 | __le16 StructureSize; /* Must be 89 */ |
@@ -996,6 +1004,11 @@ struct smb2_close_req { | |||
996 | __u64 VolatileFileId; /* opaque endianness */ | 1004 | __u64 VolatileFileId; /* opaque endianness */ |
997 | } __packed; | 1005 | } __packed; |
998 | 1006 | ||
1007 | /* | ||
1008 | * Maximum size of a SMB2_CLOSE response is 64 (smb2 header) + 60 (data) | ||
1009 | */ | ||
1010 | #define MAX_SMB2_CLOSE_RESPONSE_SIZE 124 | ||
1011 | |||
999 | struct smb2_close_rsp { | 1012 | struct smb2_close_rsp { |
1000 | struct smb2_sync_hdr sync_hdr; | 1013 | struct smb2_sync_hdr sync_hdr; |
1001 | __le16 StructureSize; /* 60 */ | 1014 | __le16 StructureSize; /* 60 */ |
@@ -1398,8 +1411,6 @@ struct smb2_file_link_info { /* encoding of request for level 11 */ | |||
1398 | char FileName[0]; /* Name to be assigned to new link */ | 1411 | char FileName[0]; /* Name to be assigned to new link */ |
1399 | } __packed; /* level 11 Set */ | 1412 | } __packed; /* level 11 Set */ |
1400 | 1413 | ||
1401 | #define SMB2_MAX_EA_BUF 65536 | ||
1402 | |||
1403 | struct smb2_file_full_ea_info { /* encoding of response for level 15 */ | 1414 | struct smb2_file_full_ea_info { /* encoding of response for level 15 */ |
1404 | __le32 next_entry_offset; | 1415 | __le32 next_entry_offset; |
1405 | __u8 flags; | 1416 | __u8 flags; |
diff --git a/fs/dcache.c b/fs/dcache.c index 2593153471cf..aac41adf4743 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -119,6 +119,7 @@ struct dentry_stat_t dentry_stat = { | |||
119 | 119 | ||
120 | static DEFINE_PER_CPU(long, nr_dentry); | 120 | static DEFINE_PER_CPU(long, nr_dentry); |
121 | static DEFINE_PER_CPU(long, nr_dentry_unused); | 121 | static DEFINE_PER_CPU(long, nr_dentry_unused); |
122 | static DEFINE_PER_CPU(long, nr_dentry_negative); | ||
122 | 123 | ||
123 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) | 124 | #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS) |
124 | 125 | ||
@@ -152,11 +153,22 @@ static long get_nr_dentry_unused(void) | |||
152 | return sum < 0 ? 0 : sum; | 153 | return sum < 0 ? 0 : sum; |
153 | } | 154 | } |
154 | 155 | ||
156 | static long get_nr_dentry_negative(void) | ||
157 | { | ||
158 | int i; | ||
159 | long sum = 0; | ||
160 | |||
161 | for_each_possible_cpu(i) | ||
162 | sum += per_cpu(nr_dentry_negative, i); | ||
163 | return sum < 0 ? 0 : sum; | ||
164 | } | ||
165 | |||
155 | int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, | 166 | int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer, |
156 | size_t *lenp, loff_t *ppos) | 167 | size_t *lenp, loff_t *ppos) |
157 | { | 168 | { |
158 | dentry_stat.nr_dentry = get_nr_dentry(); | 169 | dentry_stat.nr_dentry = get_nr_dentry(); |
159 | dentry_stat.nr_unused = get_nr_dentry_unused(); | 170 | dentry_stat.nr_unused = get_nr_dentry_unused(); |
171 | dentry_stat.nr_negative = get_nr_dentry_negative(); | ||
160 | return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); | 172 | return proc_doulongvec_minmax(table, write, buffer, lenp, ppos); |
161 | } | 173 | } |
162 | #endif | 174 | #endif |
@@ -317,6 +329,8 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry) | |||
317 | flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); | 329 | flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); |
318 | WRITE_ONCE(dentry->d_flags, flags); | 330 | WRITE_ONCE(dentry->d_flags, flags); |
319 | dentry->d_inode = NULL; | 331 | dentry->d_inode = NULL; |
332 | if (dentry->d_flags & DCACHE_LRU_LIST) | ||
333 | this_cpu_inc(nr_dentry_negative); | ||
320 | } | 334 | } |
321 | 335 | ||
322 | static void dentry_free(struct dentry *dentry) | 336 | static void dentry_free(struct dentry *dentry) |
@@ -371,6 +385,11 @@ static void dentry_unlink_inode(struct dentry * dentry) | |||
371 | * The per-cpu "nr_dentry_unused" counters are updated with | 385 | * The per-cpu "nr_dentry_unused" counters are updated with |
372 | * the DCACHE_LRU_LIST bit. | 386 | * the DCACHE_LRU_LIST bit. |
373 | * | 387 | * |
388 | * The per-cpu "nr_dentry_negative" counters are only updated | ||
389 | * when deleted from or added to the per-superblock LRU list, not | ||
390 | * from/to the shrink list. That is to avoid an unneeded dec/inc | ||
391 | * pair when moving from LRU to shrink list in select_collect(). | ||
392 | * | ||
374 | * These helper functions make sure we always follow the | 393 | * These helper functions make sure we always follow the |
375 | * rules. d_lock must be held by the caller. | 394 | * rules. d_lock must be held by the caller. |
376 | */ | 395 | */ |
@@ -380,6 +399,8 @@ static void d_lru_add(struct dentry *dentry) | |||
380 | D_FLAG_VERIFY(dentry, 0); | 399 | D_FLAG_VERIFY(dentry, 0); |
381 | dentry->d_flags |= DCACHE_LRU_LIST; | 400 | dentry->d_flags |= DCACHE_LRU_LIST; |
382 | this_cpu_inc(nr_dentry_unused); | 401 | this_cpu_inc(nr_dentry_unused); |
402 | if (d_is_negative(dentry)) | ||
403 | this_cpu_inc(nr_dentry_negative); | ||
383 | WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); | 404 | WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); |
384 | } | 405 | } |
385 | 406 | ||
@@ -388,6 +409,8 @@ static void d_lru_del(struct dentry *dentry) | |||
388 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); | 409 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); |
389 | dentry->d_flags &= ~DCACHE_LRU_LIST; | 410 | dentry->d_flags &= ~DCACHE_LRU_LIST; |
390 | this_cpu_dec(nr_dentry_unused); | 411 | this_cpu_dec(nr_dentry_unused); |
412 | if (d_is_negative(dentry)) | ||
413 | this_cpu_dec(nr_dentry_negative); | ||
391 | WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); | 414 | WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); |
392 | } | 415 | } |
393 | 416 | ||
@@ -418,6 +441,8 @@ static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry) | |||
418 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); | 441 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); |
419 | dentry->d_flags &= ~DCACHE_LRU_LIST; | 442 | dentry->d_flags &= ~DCACHE_LRU_LIST; |
420 | this_cpu_dec(nr_dentry_unused); | 443 | this_cpu_dec(nr_dentry_unused); |
444 | if (d_is_negative(dentry)) | ||
445 | this_cpu_dec(nr_dentry_negative); | ||
421 | list_lru_isolate(lru, &dentry->d_lru); | 446 | list_lru_isolate(lru, &dentry->d_lru); |
422 | } | 447 | } |
423 | 448 | ||
@@ -426,6 +451,8 @@ static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry, | |||
426 | { | 451 | { |
427 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); | 452 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); |
428 | dentry->d_flags |= DCACHE_SHRINK_LIST; | 453 | dentry->d_flags |= DCACHE_SHRINK_LIST; |
454 | if (d_is_negative(dentry)) | ||
455 | this_cpu_dec(nr_dentry_negative); | ||
429 | list_lru_isolate_move(lru, &dentry->d_lru, list); | 456 | list_lru_isolate_move(lru, &dentry->d_lru, list); |
430 | } | 457 | } |
431 | 458 | ||
@@ -1188,15 +1215,11 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item, | |||
1188 | */ | 1215 | */ |
1189 | void shrink_dcache_sb(struct super_block *sb) | 1216 | void shrink_dcache_sb(struct super_block *sb) |
1190 | { | 1217 | { |
1191 | long freed; | ||
1192 | |||
1193 | do { | 1218 | do { |
1194 | LIST_HEAD(dispose); | 1219 | LIST_HEAD(dispose); |
1195 | 1220 | ||
1196 | freed = list_lru_walk(&sb->s_dentry_lru, | 1221 | list_lru_walk(&sb->s_dentry_lru, |
1197 | dentry_lru_isolate_shrink, &dispose, 1024); | 1222 | dentry_lru_isolate_shrink, &dispose, 1024); |
1198 | |||
1199 | this_cpu_sub(nr_dentry_unused, freed); | ||
1200 | shrink_dentry_list(&dispose); | 1223 | shrink_dentry_list(&dispose); |
1201 | } while (list_lru_count(&sb->s_dentry_lru) > 0); | 1224 | } while (list_lru_count(&sb->s_dentry_lru) > 0); |
1202 | } | 1225 | } |
@@ -1820,6 +1843,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) | |||
1820 | WARN_ON(d_in_lookup(dentry)); | 1843 | WARN_ON(d_in_lookup(dentry)); |
1821 | 1844 | ||
1822 | spin_lock(&dentry->d_lock); | 1845 | spin_lock(&dentry->d_lock); |
1846 | /* | ||
1847 | * Decrement negative dentry count if it was in the LRU list. | ||
1848 | */ | ||
1849 | if (dentry->d_flags & DCACHE_LRU_LIST) | ||
1850 | this_cpu_dec(nr_dentry_negative); | ||
1823 | hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); | 1851 | hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); |
1824 | raw_write_seqcount_begin(&dentry->d_seq); | 1852 | raw_write_seqcount_begin(&dentry->d_seq); |
1825 | __d_set_inode_and_type(dentry, inode, add_flags); | 1853 | __d_set_inode_and_type(dentry, inode, add_flags); |
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 13b01351dd1c..29c68c5d44d5 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c | |||
@@ -324,7 +324,7 @@ static struct dentry *failed_creating(struct dentry *dentry) | |||
324 | inode_unlock(d_inode(dentry->d_parent)); | 324 | inode_unlock(d_inode(dentry->d_parent)); |
325 | dput(dentry); | 325 | dput(dentry); |
326 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | 326 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); |
327 | return NULL; | 327 | return ERR_PTR(-ENOMEM); |
328 | } | 328 | } |
329 | 329 | ||
330 | static struct dentry *end_creating(struct dentry *dentry) | 330 | static struct dentry *end_creating(struct dentry *dentry) |
@@ -347,7 +347,7 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, | |||
347 | dentry = start_creating(name, parent); | 347 | dentry = start_creating(name, parent); |
348 | 348 | ||
349 | if (IS_ERR(dentry)) | 349 | if (IS_ERR(dentry)) |
350 | return NULL; | 350 | return dentry; |
351 | 351 | ||
352 | inode = debugfs_get_inode(dentry->d_sb); | 352 | inode = debugfs_get_inode(dentry->d_sb); |
353 | if (unlikely(!inode)) | 353 | if (unlikely(!inode)) |
@@ -386,7 +386,8 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, | |||
386 | * This function will return a pointer to a dentry if it succeeds. This | 386 | * This function will return a pointer to a dentry if it succeeds. This |
387 | * pointer must be passed to the debugfs_remove() function when the file is | 387 | * pointer must be passed to the debugfs_remove() function when the file is |
388 | * to be removed (no automatic cleanup happens if your module is unloaded, | 388 | * to be removed (no automatic cleanup happens if your module is unloaded, |
389 | * you are responsible here.) If an error occurs, %NULL will be returned. | 389 | * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be |
390 | * returned. | ||
390 | * | 391 | * |
391 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 392 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
392 | * returned. | 393 | * returned. |
@@ -464,7 +465,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe); | |||
464 | * This function will return a pointer to a dentry if it succeeds. This | 465 | * This function will return a pointer to a dentry if it succeeds. This |
465 | * pointer must be passed to the debugfs_remove() function when the file is | 466 | * pointer must be passed to the debugfs_remove() function when the file is |
466 | * to be removed (no automatic cleanup happens if your module is unloaded, | 467 | * to be removed (no automatic cleanup happens if your module is unloaded, |
467 | * you are responsible here.) If an error occurs, %NULL will be returned. | 468 | * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be |
469 | * returned. | ||
468 | * | 470 | * |
469 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 471 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
470 | * returned. | 472 | * returned. |
@@ -495,7 +497,8 @@ EXPORT_SYMBOL_GPL(debugfs_create_file_size); | |||
495 | * This function will return a pointer to a dentry if it succeeds. This | 497 | * This function will return a pointer to a dentry if it succeeds. This |
496 | * pointer must be passed to the debugfs_remove() function when the file is | 498 | * pointer must be passed to the debugfs_remove() function when the file is |
497 | * to be removed (no automatic cleanup happens if your module is unloaded, | 499 | * to be removed (no automatic cleanup happens if your module is unloaded, |
498 | * you are responsible here.) If an error occurs, %NULL will be returned. | 500 | * you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) will be |
501 | * returned. | ||
499 | * | 502 | * |
500 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 503 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
501 | * returned. | 504 | * returned. |
@@ -506,7 +509,7 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) | |||
506 | struct inode *inode; | 509 | struct inode *inode; |
507 | 510 | ||
508 | if (IS_ERR(dentry)) | 511 | if (IS_ERR(dentry)) |
509 | return NULL; | 512 | return dentry; |
510 | 513 | ||
511 | inode = debugfs_get_inode(dentry->d_sb); | 514 | inode = debugfs_get_inode(dentry->d_sb); |
512 | if (unlikely(!inode)) | 515 | if (unlikely(!inode)) |
@@ -545,7 +548,7 @@ struct dentry *debugfs_create_automount(const char *name, | |||
545 | struct inode *inode; | 548 | struct inode *inode; |
546 | 549 | ||
547 | if (IS_ERR(dentry)) | 550 | if (IS_ERR(dentry)) |
548 | return NULL; | 551 | return dentry; |
549 | 552 | ||
550 | inode = debugfs_get_inode(dentry->d_sb); | 553 | inode = debugfs_get_inode(dentry->d_sb); |
551 | if (unlikely(!inode)) | 554 | if (unlikely(!inode)) |
@@ -581,8 +584,8 @@ EXPORT_SYMBOL(debugfs_create_automount); | |||
581 | * This function will return a pointer to a dentry if it succeeds. This | 584 | * This function will return a pointer to a dentry if it succeeds. This |
582 | * pointer must be passed to the debugfs_remove() function when the symbolic | 585 | * pointer must be passed to the debugfs_remove() function when the symbolic |
583 | * link is to be removed (no automatic cleanup happens if your module is | 586 | * link is to be removed (no automatic cleanup happens if your module is |
584 | * unloaded, you are responsible here.) If an error occurs, %NULL will be | 587 | * unloaded, you are responsible here.) If an error occurs, %ERR_PTR(-ERROR) |
585 | * returned. | 588 | * will be returned. |
586 | * | 589 | * |
587 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be | 590 | * If debugfs is not enabled in the kernel, the value -%ENODEV will be |
588 | * returned. | 591 | * returned. |
@@ -594,12 +597,12 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent, | |||
594 | struct inode *inode; | 597 | struct inode *inode; |
595 | char *link = kstrdup(target, GFP_KERNEL); | 598 | char *link = kstrdup(target, GFP_KERNEL); |
596 | if (!link) | 599 | if (!link) |
597 | return NULL; | 600 | return ERR_PTR(-ENOMEM); |
598 | 601 | ||
599 | dentry = start_creating(name, parent); | 602 | dentry = start_creating(name, parent); |
600 | if (IS_ERR(dentry)) { | 603 | if (IS_ERR(dentry)) { |
601 | kfree(link); | 604 | kfree(link); |
602 | return NULL; | 605 | return dentry; |
603 | } | 606 | } |
604 | 607 | ||
605 | inode = debugfs_get_inode(dentry->d_sb); | 608 | inode = debugfs_get_inode(dentry->d_sb); |
@@ -787,6 +790,13 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, | |||
787 | struct dentry *dentry = NULL, *trap; | 790 | struct dentry *dentry = NULL, *trap; |
788 | struct name_snapshot old_name; | 791 | struct name_snapshot old_name; |
789 | 792 | ||
793 | if (IS_ERR(old_dir)) | ||
794 | return old_dir; | ||
795 | if (IS_ERR(new_dir)) | ||
796 | return new_dir; | ||
797 | if (IS_ERR_OR_NULL(old_dentry)) | ||
798 | return old_dentry; | ||
799 | |||
790 | trap = lock_rename(new_dir, old_dir); | 800 | trap = lock_rename(new_dir, old_dir); |
791 | /* Source or destination directories don't exist? */ | 801 | /* Source or destination directories don't exist? */ |
792 | if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) | 802 | if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir)) |
@@ -820,7 +830,9 @@ exit: | |||
820 | if (dentry && !IS_ERR(dentry)) | 830 | if (dentry && !IS_ERR(dentry)) |
821 | dput(dentry); | 831 | dput(dentry); |
822 | unlock_rename(new_dir, old_dir); | 832 | unlock_rename(new_dir, old_dir); |
823 | return NULL; | 833 | if (IS_ERR(dentry)) |
834 | return dentry; | ||
835 | return ERR_PTR(-EINVAL); | ||
824 | } | 836 | } |
825 | EXPORT_SYMBOL_GPL(debugfs_rename); | 837 | EXPORT_SYMBOL_GPL(debugfs_rename); |
826 | 838 | ||
diff --git a/fs/drop_caches.c b/fs/drop_caches.c index 82377017130f..d31b6c72b476 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c | |||
@@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) | |||
21 | spin_lock(&sb->s_inode_list_lock); | 21 | spin_lock(&sb->s_inode_list_lock); |
22 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | 22 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { |
23 | spin_lock(&inode->i_lock); | 23 | spin_lock(&inode->i_lock); |
24 | /* | ||
25 | * We must skip inodes in unusual state. We may also skip | ||
26 | * inodes without pages but we deliberately won't in case | ||
27 | * we need to reschedule to avoid softlockups. | ||
28 | */ | ||
24 | if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || | 29 | if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) || |
25 | (inode->i_mapping->nrpages == 0)) { | 30 | (inode->i_mapping->nrpages == 0 && !need_resched())) { |
26 | spin_unlock(&inode->i_lock); | 31 | spin_unlock(&inode->i_lock); |
27 | continue; | 32 | continue; |
28 | } | 33 | } |
@@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused) | |||
30 | spin_unlock(&inode->i_lock); | 35 | spin_unlock(&inode->i_lock); |
31 | spin_unlock(&sb->s_inode_list_lock); | 36 | spin_unlock(&sb->s_inode_list_lock); |
32 | 37 | ||
38 | cond_resched(); | ||
33 | invalidate_mapping_pages(inode->i_mapping, 0, -1); | 39 | invalidate_mapping_pages(inode->i_mapping, 0, -1); |
34 | iput(toput_inode); | 40 | iput(toput_inode); |
35 | toput_inode = inode; | 41 | toput_inode = inode; |
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index 712f00995390..5508baa11bb6 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c | |||
@@ -116,16 +116,8 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
116 | goto out; | 116 | goto out; |
117 | } | 117 | } |
118 | 118 | ||
119 | ret = file_write_and_wait_range(file, start, end); | ||
120 | if (ret) | ||
121 | return ret; | ||
122 | |||
123 | if (!journal) { | 119 | if (!journal) { |
124 | struct writeback_control wbc = { | 120 | ret = __generic_file_fsync(file, start, end, datasync); |
125 | .sync_mode = WB_SYNC_ALL | ||
126 | }; | ||
127 | |||
128 | ret = ext4_write_inode(inode, &wbc); | ||
129 | if (!ret) | 121 | if (!ret) |
130 | ret = ext4_sync_parent(inode); | 122 | ret = ext4_sync_parent(inode); |
131 | if (test_opt(inode->i_sb, BARRIER)) | 123 | if (test_opt(inode->i_sb, BARRIER)) |
@@ -133,6 +125,9 @@ int ext4_sync_file(struct file *file, loff_t start, loff_t end, int datasync) | |||
133 | goto out; | 125 | goto out; |
134 | } | 126 | } |
135 | 127 | ||
128 | ret = file_write_and_wait_range(file, start, end); | ||
129 | if (ret) | ||
130 | return ret; | ||
136 | /* | 131 | /* |
137 | * data=writeback,ordered: | 132 | * data=writeback,ordered: |
138 | * The caller's filemap_fdatawrite()/wait will sync the data. | 133 | * The caller's filemap_fdatawrite()/wait will sync the data. |
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index a5e516a40e7a..809c0f2f9942 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c | |||
@@ -1742,7 +1742,6 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, | |||
1742 | req->in.h.nodeid = outarg->nodeid; | 1742 | req->in.h.nodeid = outarg->nodeid; |
1743 | req->in.numargs = 2; | 1743 | req->in.numargs = 2; |
1744 | req->in.argpages = 1; | 1744 | req->in.argpages = 1; |
1745 | req->page_descs[0].offset = offset; | ||
1746 | req->end = fuse_retrieve_end; | 1745 | req->end = fuse_retrieve_end; |
1747 | 1746 | ||
1748 | index = outarg->offset >> PAGE_SHIFT; | 1747 | index = outarg->offset >> PAGE_SHIFT; |
@@ -1757,6 +1756,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, | |||
1757 | 1756 | ||
1758 | this_num = min_t(unsigned, num, PAGE_SIZE - offset); | 1757 | this_num = min_t(unsigned, num, PAGE_SIZE - offset); |
1759 | req->pages[req->num_pages] = page; | 1758 | req->pages[req->num_pages] = page; |
1759 | req->page_descs[req->num_pages].offset = offset; | ||
1760 | req->page_descs[req->num_pages].length = this_num; | 1760 | req->page_descs[req->num_pages].length = this_num; |
1761 | req->num_pages++; | 1761 | req->num_pages++; |
1762 | 1762 | ||
@@ -2077,8 +2077,10 @@ static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, | |||
2077 | 2077 | ||
2078 | ret = fuse_dev_do_write(fud, &cs, len); | 2078 | ret = fuse_dev_do_write(fud, &cs, len); |
2079 | 2079 | ||
2080 | pipe_lock(pipe); | ||
2080 | for (idx = 0; idx < nbuf; idx++) | 2081 | for (idx = 0; idx < nbuf; idx++) |
2081 | pipe_buf_release(pipe, &bufs[idx]); | 2082 | pipe_buf_release(pipe, &bufs[idx]); |
2083 | pipe_unlock(pipe); | ||
2082 | 2084 | ||
2083 | out: | 2085 | out: |
2084 | kvfree(bufs); | 2086 | kvfree(bufs); |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index ffaffe18352a..a59c16bd90ac 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1782,7 +1782,7 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req, | |||
1782 | spin_unlock(&fc->lock); | 1782 | spin_unlock(&fc->lock); |
1783 | 1783 | ||
1784 | dec_wb_stat(&bdi->wb, WB_WRITEBACK); | 1784 | dec_wb_stat(&bdi->wb, WB_WRITEBACK); |
1785 | dec_node_page_state(page, NR_WRITEBACK_TEMP); | 1785 | dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP); |
1786 | wb_writeout_inc(&bdi->wb); | 1786 | wb_writeout_inc(&bdi->wb); |
1787 | fuse_writepage_free(fc, new_req); | 1787 | fuse_writepage_free(fc, new_req); |
1788 | fuse_request_free(new_req); | 1788 | fuse_request_free(new_req); |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 76baaa6be393..c2d4099429be 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -628,6 +628,7 @@ void fuse_conn_init(struct fuse_conn *fc, struct user_namespace *user_ns) | |||
628 | get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); | 628 | get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key)); |
629 | fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); | 629 | fc->pid_ns = get_pid_ns(task_active_pid_ns(current)); |
630 | fc->user_ns = get_user_ns(user_ns); | 630 | fc->user_ns = get_user_ns(user_ns); |
631 | fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; | ||
631 | } | 632 | } |
632 | EXPORT_SYMBOL_GPL(fuse_conn_init); | 633 | EXPORT_SYMBOL_GPL(fuse_conn_init); |
633 | 634 | ||
@@ -1162,7 +1163,6 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
1162 | fc->user_id = d.user_id; | 1163 | fc->user_id = d.user_id; |
1163 | fc->group_id = d.group_id; | 1164 | fc->group_id = d.group_id; |
1164 | fc->max_read = max_t(unsigned, 4096, d.max_read); | 1165 | fc->max_read = max_t(unsigned, 4096, d.max_read); |
1165 | fc->max_pages = FUSE_DEFAULT_MAX_PAGES_PER_REQ; | ||
1166 | 1166 | ||
1167 | /* Used by get_root_inode() */ | 1167 | /* Used by get_root_inode() */ |
1168 | sb->s_fs_info = fc; | 1168 | sb->s_fs_info = fc; |
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index f15b4c57c4bd..78510ab91835 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include "util.h" | 28 | #include "util.h" |
29 | #include "trans.h" | 29 | #include "trans.h" |
30 | #include "dir.h" | 30 | #include "dir.h" |
31 | #include "lops.h" | ||
32 | 31 | ||
33 | struct workqueue_struct *gfs2_freeze_wq; | 32 | struct workqueue_struct *gfs2_freeze_wq; |
34 | 33 | ||
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index 5bfaf381921a..b8830fda51e8 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c | |||
@@ -733,7 +733,7 @@ void gfs2_write_log_header(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd, | |||
733 | lh->lh_crc = cpu_to_be32(crc); | 733 | lh->lh_crc = cpu_to_be32(crc); |
734 | 734 | ||
735 | gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr); | 735 | gfs2_log_write(sdp, page, sb->s_blocksize, 0, addr); |
736 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE | op_flags); | 736 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, op_flags); |
737 | log_flush_wait(sdp); | 737 | log_flush_wait(sdp); |
738 | } | 738 | } |
739 | 739 | ||
@@ -810,7 +810,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) | |||
810 | 810 | ||
811 | gfs2_ordered_write(sdp); | 811 | gfs2_ordered_write(sdp); |
812 | lops_before_commit(sdp, tr); | 812 | lops_before_commit(sdp, tr); |
813 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE); | 813 | gfs2_log_submit_bio(&sdp->sd_log_bio, REQ_OP_WRITE, 0); |
814 | 814 | ||
815 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { | 815 | if (sdp->sd_log_head != sdp->sd_log_flush_head) { |
816 | log_flush_wait(sdp); | 816 | log_flush_wait(sdp); |
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index 94dcab655bc0..2295042bc625 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c | |||
@@ -17,9 +17,7 @@ | |||
17 | #include <linux/bio.h> | 17 | #include <linux/bio.h> |
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/list_sort.h> | 19 | #include <linux/list_sort.h> |
20 | #include <linux/blkdev.h> | ||
21 | 20 | ||
22 | #include "bmap.h" | ||
23 | #include "dir.h" | 21 | #include "dir.h" |
24 | #include "gfs2.h" | 22 | #include "gfs2.h" |
25 | #include "incore.h" | 23 | #include "incore.h" |
@@ -195,6 +193,7 @@ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, | |||
195 | /** | 193 | /** |
196 | * gfs2_end_log_write - end of i/o to the log | 194 | * gfs2_end_log_write - end of i/o to the log |
197 | * @bio: The bio | 195 | * @bio: The bio |
196 | * @error: Status of i/o request | ||
198 | * | 197 | * |
199 | * Each bio_vec contains either data from the pagecache or data | 198 | * Each bio_vec contains either data from the pagecache or data |
200 | * relating to the log itself. Here we iterate over the bio_vec | 199 | * relating to the log itself. Here we iterate over the bio_vec |
@@ -231,19 +230,20 @@ static void gfs2_end_log_write(struct bio *bio) | |||
231 | /** | 230 | /** |
232 | * gfs2_log_submit_bio - Submit any pending log bio | 231 | * gfs2_log_submit_bio - Submit any pending log bio |
233 | * @biop: Address of the bio pointer | 232 | * @biop: Address of the bio pointer |
234 | * @opf: REQ_OP | op_flags | 233 | * @op: REQ_OP |
234 | * @op_flags: req_flag_bits | ||
235 | * | 235 | * |
236 | * Submit any pending part-built or full bio to the block device. If | 236 | * Submit any pending part-built or full bio to the block device. If |
237 | * there is no pending bio, then this is a no-op. | 237 | * there is no pending bio, then this is a no-op. |
238 | */ | 238 | */ |
239 | 239 | ||
240 | void gfs2_log_submit_bio(struct bio **biop, int opf) | 240 | void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags) |
241 | { | 241 | { |
242 | struct bio *bio = *biop; | 242 | struct bio *bio = *biop; |
243 | if (bio) { | 243 | if (bio) { |
244 | struct gfs2_sbd *sdp = bio->bi_private; | 244 | struct gfs2_sbd *sdp = bio->bi_private; |
245 | atomic_inc(&sdp->sd_log_in_flight); | 245 | atomic_inc(&sdp->sd_log_in_flight); |
246 | bio->bi_opf = opf; | 246 | bio_set_op_attrs(bio, op, op_flags); |
247 | submit_bio(bio); | 247 | submit_bio(bio); |
248 | *biop = NULL; | 248 | *biop = NULL; |
249 | } | 249 | } |
@@ -304,7 +304,7 @@ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno, | |||
304 | nblk >>= sdp->sd_fsb2bb_shift; | 304 | nblk >>= sdp->sd_fsb2bb_shift; |
305 | if (blkno == nblk && !flush) | 305 | if (blkno == nblk && !flush) |
306 | return bio; | 306 | return bio; |
307 | gfs2_log_submit_bio(biop, op); | 307 | gfs2_log_submit_bio(biop, op, 0); |
308 | } | 308 | } |
309 | 309 | ||
310 | *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); | 310 | *biop = gfs2_log_alloc_bio(sdp, blkno, end_io); |
@@ -375,184 +375,6 @@ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page) | |||
375 | gfs2_log_bmap(sdp)); | 375 | gfs2_log_bmap(sdp)); |
376 | } | 376 | } |
377 | 377 | ||
378 | /** | ||
379 | * gfs2_end_log_read - end I/O callback for reads from the log | ||
380 | * @bio: The bio | ||
381 | * | ||
382 | * Simply unlock the pages in the bio. The main thread will wait on them and | ||
383 | * process them in order as necessary. | ||
384 | */ | ||
385 | |||
386 | static void gfs2_end_log_read(struct bio *bio) | ||
387 | { | ||
388 | struct page *page; | ||
389 | struct bio_vec *bvec; | ||
390 | int i; | ||
391 | |||
392 | bio_for_each_segment_all(bvec, bio, i) { | ||
393 | page = bvec->bv_page; | ||
394 | if (bio->bi_status) { | ||
395 | int err = blk_status_to_errno(bio->bi_status); | ||
396 | |||
397 | SetPageError(page); | ||
398 | mapping_set_error(page->mapping, err); | ||
399 | } | ||
400 | unlock_page(page); | ||
401 | } | ||
402 | |||
403 | bio_put(bio); | ||
404 | } | ||
405 | |||
406 | /** | ||
407 | * gfs2_jhead_pg_srch - Look for the journal head in a given page. | ||
408 | * @jd: The journal descriptor | ||
409 | * @page: The page to look in | ||
410 | * | ||
411 | * Returns: 1 if found, 0 otherwise. | ||
412 | */ | ||
413 | |||
414 | static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, | ||
415 | struct gfs2_log_header_host *head, | ||
416 | struct page *page) | ||
417 | { | ||
418 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); | ||
419 | struct gfs2_log_header_host uninitialized_var(lh); | ||
420 | void *kaddr = kmap_atomic(page); | ||
421 | unsigned int offset; | ||
422 | bool ret = false; | ||
423 | |||
424 | for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) { | ||
425 | if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) { | ||
426 | if (lh.lh_sequence > head->lh_sequence) | ||
427 | *head = lh; | ||
428 | else { | ||
429 | ret = true; | ||
430 | break; | ||
431 | } | ||
432 | } | ||
433 | } | ||
434 | kunmap_atomic(kaddr); | ||
435 | return ret; | ||
436 | } | ||
437 | |||
438 | /** | ||
439 | * gfs2_jhead_process_page - Search/cleanup a page | ||
440 | * @jd: The journal descriptor | ||
441 | * @index: Index of the page to look into | ||
442 | * @done: If set, perform only cleanup, else search and set if found. | ||
443 | * | ||
444 | * Find the page with 'index' in the journal's mapping. Search the page for | ||
445 | * the journal head if requested (cleanup == false). Release refs on the | ||
446 | * page so the page cache can reclaim it (put_page() twice). We grabbed a | ||
447 | * reference on this page two times, first when we did a find_or_create_page() | ||
448 | * to obtain the page to add it to the bio and second when we do a | ||
449 | * find_get_page() here to get the page to wait on while I/O on it is being | ||
450 | * completed. | ||
451 | * This function is also used to free up a page we might've grabbed but not | ||
452 | * used. Maybe we added it to a bio, but not submitted it for I/O. Or we | ||
453 | * submitted the I/O, but we already found the jhead so we only need to drop | ||
454 | * our references to the page. | ||
455 | */ | ||
456 | |||
457 | static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index, | ||
458 | struct gfs2_log_header_host *head, | ||
459 | bool *done) | ||
460 | { | ||
461 | struct page *page; | ||
462 | |||
463 | page = find_get_page(jd->jd_inode->i_mapping, index); | ||
464 | wait_on_page_locked(page); | ||
465 | |||
466 | if (PageError(page)) | ||
467 | *done = true; | ||
468 | |||
469 | if (!*done) | ||
470 | *done = gfs2_jhead_pg_srch(jd, head, page); | ||
471 | |||
472 | put_page(page); /* Once for find_get_page */ | ||
473 | put_page(page); /* Once more for find_or_create_page */ | ||
474 | } | ||
475 | |||
476 | /** | ||
477 | * gfs2_find_jhead - find the head of a log | ||
478 | * @jd: The journal descriptor | ||
479 | * @head: The log descriptor for the head of the log is returned here | ||
480 | * | ||
481 | * Do a search of a journal by reading it in large chunks using bios and find | ||
482 | * the valid log entry with the highest sequence number. (i.e. the log head) | ||
483 | * | ||
484 | * Returns: 0 on success, errno otherwise | ||
485 | */ | ||
486 | |||
487 | int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) | ||
488 | { | ||
489 | struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); | ||
490 | struct address_space *mapping = jd->jd_inode->i_mapping; | ||
491 | struct gfs2_journal_extent *je; | ||
492 | u32 block, read_idx = 0, submit_idx = 0, index = 0; | ||
493 | int shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift; | ||
494 | int blocks_per_page = 1 << shift, sz, ret = 0; | ||
495 | struct bio *bio = NULL; | ||
496 | struct page *page; | ||
497 | bool done = false; | ||
498 | errseq_t since; | ||
499 | |||
500 | memset(head, 0, sizeof(*head)); | ||
501 | if (list_empty(&jd->extent_list)) | ||
502 | gfs2_map_journal_extents(sdp, jd); | ||
503 | |||
504 | since = filemap_sample_wb_err(mapping); | ||
505 | list_for_each_entry(je, &jd->extent_list, list) { | ||
506 | for (block = 0; block < je->blocks; block += blocks_per_page) { | ||
507 | index = (je->lblock + block) >> shift; | ||
508 | |||
509 | page = find_or_create_page(mapping, index, GFP_NOFS); | ||
510 | if (!page) { | ||
511 | ret = -ENOMEM; | ||
512 | done = true; | ||
513 | goto out; | ||
514 | } | ||
515 | |||
516 | if (bio) { | ||
517 | sz = bio_add_page(bio, page, PAGE_SIZE, 0); | ||
518 | if (sz == PAGE_SIZE) | ||
519 | goto page_added; | ||
520 | submit_idx = index; | ||
521 | submit_bio(bio); | ||
522 | bio = NULL; | ||
523 | } | ||
524 | |||
525 | bio = gfs2_log_alloc_bio(sdp, | ||
526 | je->dblock + (index << shift), | ||
527 | gfs2_end_log_read); | ||
528 | bio->bi_opf = REQ_OP_READ; | ||
529 | sz = bio_add_page(bio, page, PAGE_SIZE, 0); | ||
530 | gfs2_assert_warn(sdp, sz == PAGE_SIZE); | ||
531 | |||
532 | page_added: | ||
533 | if (submit_idx <= read_idx + BIO_MAX_PAGES) { | ||
534 | /* Keep at least one bio in flight */ | ||
535 | continue; | ||
536 | } | ||
537 | |||
538 | gfs2_jhead_process_page(jd, read_idx++, head, &done); | ||
539 | if (done) | ||
540 | goto out; /* found */ | ||
541 | } | ||
542 | } | ||
543 | |||
544 | out: | ||
545 | if (bio) | ||
546 | submit_bio(bio); | ||
547 | while (read_idx <= index) | ||
548 | gfs2_jhead_process_page(jd, read_idx++, head, &done); | ||
549 | |||
550 | if (!ret) | ||
551 | ret = filemap_check_wb_err(mapping, since); | ||
552 | |||
553 | return ret; | ||
554 | } | ||
555 | |||
556 | static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, | 378 | static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, |
557 | u32 ld_length, u32 ld_data1) | 379 | u32 ld_length, u32 ld_data1) |
558 | { | 380 | { |
diff --git a/fs/gfs2/lops.h b/fs/gfs2/lops.h index 331160fc568b..711c4d89c063 100644 --- a/fs/gfs2/lops.h +++ b/fs/gfs2/lops.h | |||
@@ -30,10 +30,8 @@ extern u64 gfs2_log_bmap(struct gfs2_sbd *sdp); | |||
30 | extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, | 30 | extern void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, |
31 | unsigned size, unsigned offset, u64 blkno); | 31 | unsigned size, unsigned offset, u64 blkno); |
32 | extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); | 32 | extern void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page); |
33 | extern void gfs2_log_submit_bio(struct bio **biop, int opf); | 33 | extern void gfs2_log_submit_bio(struct bio **biop, int op, int op_flags); |
34 | extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); | 34 | extern void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh); |
35 | extern int gfs2_find_jhead(struct gfs2_jdesc *jd, | ||
36 | struct gfs2_log_header_host *head); | ||
37 | 35 | ||
38 | static inline unsigned int buf_limit(struct gfs2_sbd *sdp) | 36 | static inline unsigned int buf_limit(struct gfs2_sbd *sdp) |
39 | { | 37 | { |
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 1179763f6370..b041cb8ae383 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include "dir.h" | 41 | #include "dir.h" |
42 | #include "meta_io.h" | 42 | #include "meta_io.h" |
43 | #include "trace_gfs2.h" | 43 | #include "trace_gfs2.h" |
44 | #include "lops.h" | ||
45 | 44 | ||
46 | #define DO 0 | 45 | #define DO 0 |
47 | #define UNDO 1 | 46 | #define UNDO 1 |
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c index 7389e445a7a7..2dac43065382 100644 --- a/fs/gfs2/recovery.c +++ b/fs/gfs2/recovery.c | |||
@@ -182,6 +182,129 @@ static int get_log_header(struct gfs2_jdesc *jd, unsigned int blk, | |||
182 | } | 182 | } |
183 | 183 | ||
184 | /** | 184 | /** |
185 | * find_good_lh - find a good log header | ||
186 | * @jd: the journal | ||
187 | * @blk: the segment to start searching from | ||
188 | * @lh: the log header to fill in | ||
189 | * @forward: if true search forward in the log, else search backward | ||
190 | * | ||
191 | * Call get_log_header() to get a log header for a segment, but if the | ||
192 | * segment is bad, either scan forward or backward until we find a good one. | ||
193 | * | ||
194 | * Returns: errno | ||
195 | */ | ||
196 | |||
197 | static int find_good_lh(struct gfs2_jdesc *jd, unsigned int *blk, | ||
198 | struct gfs2_log_header_host *head) | ||
199 | { | ||
200 | unsigned int orig_blk = *blk; | ||
201 | int error; | ||
202 | |||
203 | for (;;) { | ||
204 | error = get_log_header(jd, *blk, head); | ||
205 | if (error <= 0) | ||
206 | return error; | ||
207 | |||
208 | if (++*blk == jd->jd_blocks) | ||
209 | *blk = 0; | ||
210 | |||
211 | if (*blk == orig_blk) { | ||
212 | gfs2_consist_inode(GFS2_I(jd->jd_inode)); | ||
213 | return -EIO; | ||
214 | } | ||
215 | } | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * jhead_scan - make sure we've found the head of the log | ||
220 | * @jd: the journal | ||
221 | * @head: this is filled in with the log descriptor of the head | ||
222 | * | ||
223 | * At this point, seg and lh should be either the head of the log or just | ||
224 | * before. Scan forward until we find the head. | ||
225 | * | ||
226 | * Returns: errno | ||
227 | */ | ||
228 | |||
229 | static int jhead_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) | ||
230 | { | ||
231 | unsigned int blk = head->lh_blkno; | ||
232 | struct gfs2_log_header_host lh; | ||
233 | int error; | ||
234 | |||
235 | for (;;) { | ||
236 | if (++blk == jd->jd_blocks) | ||
237 | blk = 0; | ||
238 | |||
239 | error = get_log_header(jd, blk, &lh); | ||
240 | if (error < 0) | ||
241 | return error; | ||
242 | if (error == 1) | ||
243 | continue; | ||
244 | |||
245 | if (lh.lh_sequence == head->lh_sequence) { | ||
246 | gfs2_consist_inode(GFS2_I(jd->jd_inode)); | ||
247 | return -EIO; | ||
248 | } | ||
249 | if (lh.lh_sequence < head->lh_sequence) | ||
250 | break; | ||
251 | |||
252 | *head = lh; | ||
253 | } | ||
254 | |||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | /** | ||
259 | * gfs2_find_jhead - find the head of a log | ||
260 | * @jd: the journal | ||
261 | * @head: the log descriptor for the head of the log is returned here | ||
262 | * | ||
263 | * Do a binary search of a journal and find the valid log entry with the | ||
264 | * highest sequence number. (i.e. the log head) | ||
265 | * | ||
266 | * Returns: errno | ||
267 | */ | ||
268 | |||
269 | int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head) | ||
270 | { | ||
271 | struct gfs2_log_header_host lh_1, lh_m; | ||
272 | u32 blk_1, blk_2, blk_m; | ||
273 | int error; | ||
274 | |||
275 | blk_1 = 0; | ||
276 | blk_2 = jd->jd_blocks - 1; | ||
277 | |||
278 | for (;;) { | ||
279 | blk_m = (blk_1 + blk_2) / 2; | ||
280 | |||
281 | error = find_good_lh(jd, &blk_1, &lh_1); | ||
282 | if (error) | ||
283 | return error; | ||
284 | |||
285 | error = find_good_lh(jd, &blk_m, &lh_m); | ||
286 | if (error) | ||
287 | return error; | ||
288 | |||
289 | if (blk_1 == blk_m || blk_m == blk_2) | ||
290 | break; | ||
291 | |||
292 | if (lh_1.lh_sequence <= lh_m.lh_sequence) | ||
293 | blk_1 = blk_m; | ||
294 | else | ||
295 | blk_2 = blk_m; | ||
296 | } | ||
297 | |||
298 | error = jhead_scan(jd, &lh_1); | ||
299 | if (error) | ||
300 | return error; | ||
301 | |||
302 | *head = lh_1; | ||
303 | |||
304 | return error; | ||
305 | } | ||
306 | |||
307 | /** | ||
185 | * foreach_descriptor - go through the active part of the log | 308 | * foreach_descriptor - go through the active part of the log |
186 | * @jd: the journal | 309 | * @jd: the journal |
187 | * @start: the first log header in the active region | 310 | * @start: the first log header in the active region |
diff --git a/fs/gfs2/recovery.h b/fs/gfs2/recovery.h index 99575ab81202..11d81248be85 100644 --- a/fs/gfs2/recovery.h +++ b/fs/gfs2/recovery.h | |||
@@ -27,6 +27,8 @@ extern int gfs2_revoke_add(struct gfs2_jdesc *jd, u64 blkno, unsigned int where) | |||
27 | extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where); | 27 | extern int gfs2_revoke_check(struct gfs2_jdesc *jd, u64 blkno, unsigned int where); |
28 | extern void gfs2_revoke_clean(struct gfs2_jdesc *jd); | 28 | extern void gfs2_revoke_clean(struct gfs2_jdesc *jd); |
29 | 29 | ||
30 | extern int gfs2_find_jhead(struct gfs2_jdesc *jd, | ||
31 | struct gfs2_log_header_host *head); | ||
30 | extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); | 32 | extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd, bool wait); |
31 | extern void gfs2_recover_func(struct work_struct *work); | 33 | extern void gfs2_recover_func(struct work_struct *work); |
32 | extern int __get_log_header(struct gfs2_sbd *sdp, | 34 | extern int __get_log_header(struct gfs2_sbd *sdp, |
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 831d7cb5a49c..17a8d3b43990 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c | |||
@@ -1780,9 +1780,9 @@ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, | |||
1780 | goto next_iter; | 1780 | goto next_iter; |
1781 | } | 1781 | } |
1782 | if (ret == -E2BIG) { | 1782 | if (ret == -E2BIG) { |
1783 | n += rbm->bii - initial_bii; | ||
1784 | rbm->bii = 0; | 1783 | rbm->bii = 0; |
1785 | rbm->offset = 0; | 1784 | rbm->offset = 0; |
1785 | n += (rbm->bii - initial_bii); | ||
1786 | goto res_covered_end_of_rgrp; | 1786 | goto res_covered_end_of_rgrp; |
1787 | } | 1787 | } |
1788 | return ret; | 1788 | return ret; |
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index d4b11c903971..ca71163ff7cf 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include "util.h" | 45 | #include "util.h" |
46 | #include "sys.h" | 46 | #include "sys.h" |
47 | #include "xattr.h" | 47 | #include "xattr.h" |
48 | #include "lops.h" | ||
49 | 48 | ||
50 | #define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x) | 49 | #define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x) |
51 | 50 | ||
diff --git a/fs/inode.c b/fs/inode.c index 0cd47fe0dbe5..73432e64f874 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -730,11 +730,8 @@ static enum lru_status inode_lru_isolate(struct list_head *item, | |||
730 | return LRU_REMOVED; | 730 | return LRU_REMOVED; |
731 | } | 731 | } |
732 | 732 | ||
733 | /* | 733 | /* recently referenced inodes get one more pass */ |
734 | * Recently referenced inodes and inodes with many attached pages | 734 | if (inode->i_state & I_REFERENCED) { |
735 | * get one more pass. | ||
736 | */ | ||
737 | if (inode->i_state & I_REFERENCED || inode->i_data.nrpages > 1) { | ||
738 | inode->i_state &= ~I_REFERENCED; | 735 | inode->i_state &= ~I_REFERENCED; |
739 | spin_unlock(&inode->i_lock); | 736 | spin_unlock(&inode->i_lock); |
740 | return LRU_ROTATE; | 737 | return LRU_ROTATE; |
diff --git a/fs/iomap.c b/fs/iomap.c index a3088fae567b..897c60215dd1 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
@@ -116,6 +116,12 @@ iomap_page_create(struct inode *inode, struct page *page) | |||
116 | atomic_set(&iop->read_count, 0); | 116 | atomic_set(&iop->read_count, 0); |
117 | atomic_set(&iop->write_count, 0); | 117 | atomic_set(&iop->write_count, 0); |
118 | bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); | 118 | bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE); |
119 | |||
120 | /* | ||
121 | * migrate_page_move_mapping() assumes that pages with private data have | ||
122 | * their count elevated by 1. | ||
123 | */ | ||
124 | get_page(page); | ||
119 | set_page_private(page, (unsigned long)iop); | 125 | set_page_private(page, (unsigned long)iop); |
120 | SetPagePrivate(page); | 126 | SetPagePrivate(page); |
121 | return iop; | 127 | return iop; |
@@ -132,6 +138,7 @@ iomap_page_release(struct page *page) | |||
132 | WARN_ON_ONCE(atomic_read(&iop->write_count)); | 138 | WARN_ON_ONCE(atomic_read(&iop->write_count)); |
133 | ClearPagePrivate(page); | 139 | ClearPagePrivate(page); |
134 | set_page_private(page, 0); | 140 | set_page_private(page, 0); |
141 | put_page(page); | ||
135 | kfree(iop); | 142 | kfree(iop); |
136 | } | 143 | } |
137 | 144 | ||
@@ -569,8 +576,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage, | |||
569 | 576 | ||
570 | if (page_has_private(page)) { | 577 | if (page_has_private(page)) { |
571 | ClearPagePrivate(page); | 578 | ClearPagePrivate(page); |
579 | get_page(newpage); | ||
572 | set_page_private(newpage, page_private(page)); | 580 | set_page_private(newpage, page_private(page)); |
573 | set_page_private(page, 0); | 581 | set_page_private(page, 0); |
582 | put_page(page); | ||
574 | SetPagePrivate(newpage); | 583 | SetPagePrivate(newpage); |
575 | } | 584 | } |
576 | 585 | ||
@@ -1804,6 +1813,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
1804 | loff_t pos = iocb->ki_pos, start = pos; | 1813 | loff_t pos = iocb->ki_pos, start = pos; |
1805 | loff_t end = iocb->ki_pos + count - 1, ret = 0; | 1814 | loff_t end = iocb->ki_pos + count - 1, ret = 0; |
1806 | unsigned int flags = IOMAP_DIRECT; | 1815 | unsigned int flags = IOMAP_DIRECT; |
1816 | bool wait_for_completion = is_sync_kiocb(iocb); | ||
1807 | struct blk_plug plug; | 1817 | struct blk_plug plug; |
1808 | struct iomap_dio *dio; | 1818 | struct iomap_dio *dio; |
1809 | 1819 | ||
@@ -1823,7 +1833,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
1823 | dio->end_io = end_io; | 1833 | dio->end_io = end_io; |
1824 | dio->error = 0; | 1834 | dio->error = 0; |
1825 | dio->flags = 0; | 1835 | dio->flags = 0; |
1826 | dio->wait_for_completion = is_sync_kiocb(iocb); | ||
1827 | 1836 | ||
1828 | dio->submit.iter = iter; | 1837 | dio->submit.iter = iter; |
1829 | dio->submit.waiter = current; | 1838 | dio->submit.waiter = current; |
@@ -1878,7 +1887,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
1878 | dio_warn_stale_pagecache(iocb->ki_filp); | 1887 | dio_warn_stale_pagecache(iocb->ki_filp); |
1879 | ret = 0; | 1888 | ret = 0; |
1880 | 1889 | ||
1881 | if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion && | 1890 | if (iov_iter_rw(iter) == WRITE && !wait_for_completion && |
1882 | !inode->i_sb->s_dio_done_wq) { | 1891 | !inode->i_sb->s_dio_done_wq) { |
1883 | ret = sb_init_dio_done_wq(inode->i_sb); | 1892 | ret = sb_init_dio_done_wq(inode->i_sb); |
1884 | if (ret < 0) | 1893 | if (ret < 0) |
@@ -1894,7 +1903,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
1894 | if (ret <= 0) { | 1903 | if (ret <= 0) { |
1895 | /* magic error code to fall back to buffered I/O */ | 1904 | /* magic error code to fall back to buffered I/O */ |
1896 | if (ret == -ENOTBLK) { | 1905 | if (ret == -ENOTBLK) { |
1897 | dio->wait_for_completion = true; | 1906 | wait_for_completion = true; |
1898 | ret = 0; | 1907 | ret = 0; |
1899 | } | 1908 | } |
1900 | break; | 1909 | break; |
@@ -1916,8 +1925,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
1916 | if (dio->flags & IOMAP_DIO_WRITE_FUA) | 1925 | if (dio->flags & IOMAP_DIO_WRITE_FUA) |
1917 | dio->flags &= ~IOMAP_DIO_NEED_SYNC; | 1926 | dio->flags &= ~IOMAP_DIO_NEED_SYNC; |
1918 | 1927 | ||
1928 | /* | ||
1929 | * We are about to drop our additional submission reference, which | ||
1930 | * might be the last reference to the dio. There are three three | ||
1931 | * different ways we can progress here: | ||
1932 | * | ||
1933 | * (a) If this is the last reference we will always complete and free | ||
1934 | * the dio ourselves. | ||
1935 | * (b) If this is not the last reference, and we serve an asynchronous | ||
1936 | * iocb, we must never touch the dio after the decrement, the | ||
1937 | * I/O completion handler will complete and free it. | ||
1938 | * (c) If this is not the last reference, but we serve a synchronous | ||
1939 | * iocb, the I/O completion handler will wake us up on the drop | ||
1940 | * of the final reference, and we will complete and free it here | ||
1941 | * after we got woken by the I/O completion handler. | ||
1942 | */ | ||
1943 | dio->wait_for_completion = wait_for_completion; | ||
1919 | if (!atomic_dec_and_test(&dio->ref)) { | 1944 | if (!atomic_dec_and_test(&dio->ref)) { |
1920 | if (!dio->wait_for_completion) | 1945 | if (!wait_for_completion) |
1921 | return -EIOCBQUEUED; | 1946 | return -EIOCBQUEUED; |
1922 | 1947 | ||
1923 | for (;;) { | 1948 | for (;;) { |
@@ -1934,9 +1959,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | |||
1934 | __set_current_state(TASK_RUNNING); | 1959 | __set_current_state(TASK_RUNNING); |
1935 | } | 1960 | } |
1936 | 1961 | ||
1937 | ret = iomap_dio_complete(dio); | 1962 | return iomap_dio_complete(dio); |
1938 | |||
1939 | return ret; | ||
1940 | 1963 | ||
1941 | out_free_dio: | 1964 | out_free_dio: |
1942 | kfree(dio); | 1965 | kfree(dio); |
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 22ce3c8a2f46..0570391eaa16 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -1895,6 +1895,11 @@ static int nfs_parse_devname(const char *dev_name, | |||
1895 | size_t len; | 1895 | size_t len; |
1896 | char *end; | 1896 | char *end; |
1897 | 1897 | ||
1898 | if (unlikely(!dev_name || !*dev_name)) { | ||
1899 | dfprintk(MOUNT, "NFS: device name not specified\n"); | ||
1900 | return -EINVAL; | ||
1901 | } | ||
1902 | |||
1898 | /* Is the host name protected with square brakcets? */ | 1903 | /* Is the host name protected with square brakcets? */ |
1899 | if (*dev_name == '[') { | 1904 | if (*dev_name == '[') { |
1900 | end = strchr(++dev_name, ']'); | 1905 | end = strchr(++dev_name, ']'); |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 5a0bbf917a32..f12cb31a41e5 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -621,11 +621,12 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, | |||
621 | nfs_set_page_writeback(page); | 621 | nfs_set_page_writeback(page); |
622 | WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); | 622 | WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); |
623 | 623 | ||
624 | ret = 0; | 624 | ret = req->wb_context->error; |
625 | /* If there is a fatal error that covers this write, just exit */ | 625 | /* If there is a fatal error that covers this write, just exit */ |
626 | if (nfs_error_is_fatal_on_server(req->wb_context->error)) | 626 | if (nfs_error_is_fatal_on_server(ret)) |
627 | goto out_launder; | 627 | goto out_launder; |
628 | 628 | ||
629 | ret = 0; | ||
629 | if (!nfs_pageio_add_request(pgio, req)) { | 630 | if (!nfs_pageio_add_request(pgio, req)) { |
630 | ret = pgio->pg_error; | 631 | ret = pgio->pg_error; |
631 | /* | 632 | /* |
@@ -635,9 +636,9 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, | |||
635 | nfs_context_set_write_error(req->wb_context, ret); | 636 | nfs_context_set_write_error(req->wb_context, ret); |
636 | if (nfs_error_is_fatal_on_server(ret)) | 637 | if (nfs_error_is_fatal_on_server(ret)) |
637 | goto out_launder; | 638 | goto out_launder; |
638 | } | 639 | } else |
640 | ret = -EAGAIN; | ||
639 | nfs_redirty_request(req); | 641 | nfs_redirty_request(req); |
640 | ret = -EAGAIN; | ||
641 | } else | 642 | } else |
642 | nfs_add_stats(page_file_mapping(page)->host, | 643 | nfs_add_stats(page_file_mapping(page)->host, |
643 | NFSIOS_WRITEPAGES, 1); | 644 | NFSIOS_WRITEPAGES, 1); |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 9824e32b2f23..7dc98e14655d 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -557,9 +557,11 @@ __be32 nfsd4_clone_file_range(struct file *src, u64 src_pos, struct file *dst, | |||
557 | loff_t cloned; | 557 | loff_t cloned; |
558 | 558 | ||
559 | cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); | 559 | cloned = vfs_clone_file_range(src, src_pos, dst, dst_pos, count, 0); |
560 | if (cloned < 0) | ||
561 | return nfserrno(cloned); | ||
560 | if (count && cloned != count) | 562 | if (count && cloned != count) |
561 | cloned = -EINVAL; | 563 | return nfserrno(-EINVAL); |
562 | return nfserrno(cloned < 0 ? cloned : 0); | 564 | return 0; |
563 | } | 565 | } |
564 | 566 | ||
565 | ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, | 567 | ssize_t nfsd_copy_file_range(struct file *src, u64 src_pos, struct file *dst, |
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 8ae109429a88..e39bac94dead 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
@@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry, | |||
256 | inode = proc_get_inode(dir->i_sb, de); | 256 | inode = proc_get_inode(dir->i_sb, de); |
257 | if (!inode) | 257 | if (!inode) |
258 | return ERR_PTR(-ENOMEM); | 258 | return ERR_PTR(-ENOMEM); |
259 | d_set_d_op(dentry, &proc_misc_dentry_ops); | 259 | d_set_d_op(dentry, de->proc_dops); |
260 | return d_splice_alias(inode, dentry); | 260 | return d_splice_alias(inode, dentry); |
261 | } | 261 | } |
262 | read_unlock(&proc_subdir_lock); | 262 | read_unlock(&proc_subdir_lock); |
@@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, | |||
429 | INIT_LIST_HEAD(&ent->pde_openers); | 429 | INIT_LIST_HEAD(&ent->pde_openers); |
430 | proc_set_user(ent, (*parent)->uid, (*parent)->gid); | 430 | proc_set_user(ent, (*parent)->uid, (*parent)->gid); |
431 | 431 | ||
432 | ent->proc_dops = &proc_misc_dentry_ops; | ||
433 | |||
432 | out: | 434 | out: |
433 | return ent; | 435 | return ent; |
434 | } | 436 | } |
diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 5185d7f6a51e..95b14196f284 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h | |||
@@ -44,6 +44,7 @@ struct proc_dir_entry { | |||
44 | struct completion *pde_unload_completion; | 44 | struct completion *pde_unload_completion; |
45 | const struct inode_operations *proc_iops; | 45 | const struct inode_operations *proc_iops; |
46 | const struct file_operations *proc_fops; | 46 | const struct file_operations *proc_fops; |
47 | const struct dentry_operations *proc_dops; | ||
47 | union { | 48 | union { |
48 | const struct seq_operations *seq_ops; | 49 | const struct seq_operations *seq_ops; |
49 | int (*single_show)(struct seq_file *, void *); | 50 | int (*single_show)(struct seq_file *, void *); |
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index d5e0fcb3439e..a7b12435519e 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c | |||
@@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode) | |||
38 | return maybe_get_net(PDE_NET(PDE(inode))); | 38 | return maybe_get_net(PDE_NET(PDE(inode))); |
39 | } | 39 | } |
40 | 40 | ||
41 | static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags) | ||
42 | { | ||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | static const struct dentry_operations proc_net_dentry_ops = { | ||
47 | .d_revalidate = proc_net_d_revalidate, | ||
48 | .d_delete = always_delete_dentry, | ||
49 | }; | ||
50 | |||
51 | static void pde_force_lookup(struct proc_dir_entry *pde) | ||
52 | { | ||
53 | /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */ | ||
54 | pde->proc_dops = &proc_net_dentry_ops; | ||
55 | } | ||
56 | |||
41 | static int seq_open_net(struct inode *inode, struct file *file) | 57 | static int seq_open_net(struct inode *inode, struct file *file) |
42 | { | 58 | { |
43 | unsigned int state_size = PDE(inode)->state_size; | 59 | unsigned int state_size = PDE(inode)->state_size; |
@@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode, | |||
90 | p = proc_create_reg(name, mode, &parent, data); | 106 | p = proc_create_reg(name, mode, &parent, data); |
91 | if (!p) | 107 | if (!p) |
92 | return NULL; | 108 | return NULL; |
109 | pde_force_lookup(p); | ||
93 | p->proc_fops = &proc_net_seq_fops; | 110 | p->proc_fops = &proc_net_seq_fops; |
94 | p->seq_ops = ops; | 111 | p->seq_ops = ops; |
95 | p->state_size = state_size; | 112 | p->state_size = state_size; |
@@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode | |||
133 | p = proc_create_reg(name, mode, &parent, data); | 150 | p = proc_create_reg(name, mode, &parent, data); |
134 | if (!p) | 151 | if (!p) |
135 | return NULL; | 152 | return NULL; |
153 | pde_force_lookup(p); | ||
136 | p->proc_fops = &proc_net_seq_fops; | 154 | p->proc_fops = &proc_net_seq_fops; |
137 | p->seq_ops = ops; | 155 | p->seq_ops = ops; |
138 | p->state_size = state_size; | 156 | p->state_size = state_size; |
@@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode, | |||
181 | p = proc_create_reg(name, mode, &parent, data); | 199 | p = proc_create_reg(name, mode, &parent, data); |
182 | if (!p) | 200 | if (!p) |
183 | return NULL; | 201 | return NULL; |
202 | pde_force_lookup(p); | ||
184 | p->proc_fops = &proc_net_single_fops; | 203 | p->proc_fops = &proc_net_single_fops; |
185 | p->single_show = show; | 204 | p->single_show = show; |
186 | return proc_register(parent, p); | 205 | return proc_register(parent, p); |
@@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo | |||
223 | p = proc_create_reg(name, mode, &parent, data); | 242 | p = proc_create_reg(name, mode, &parent, data); |
224 | if (!p) | 243 | if (!p) |
225 | return NULL; | 244 | return NULL; |
245 | pde_force_lookup(p); | ||
226 | p->proc_fops = &proc_net_single_fops; | 246 | p->proc_fops = &proc_net_single_fops; |
227 | p->single_show = show; | 247 | p->single_show = show; |
228 | p->write = write; | 248 | p->write = write; |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index f0ec9edab2f3..85b0ef890b28 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -423,7 +423,7 @@ struct mem_size_stats { | |||
423 | }; | 423 | }; |
424 | 424 | ||
425 | static void smaps_account(struct mem_size_stats *mss, struct page *page, | 425 | static void smaps_account(struct mem_size_stats *mss, struct page *page, |
426 | bool compound, bool young, bool dirty) | 426 | bool compound, bool young, bool dirty, bool locked) |
427 | { | 427 | { |
428 | int i, nr = compound ? 1 << compound_order(page) : 1; | 428 | int i, nr = compound ? 1 << compound_order(page) : 1; |
429 | unsigned long size = nr * PAGE_SIZE; | 429 | unsigned long size = nr * PAGE_SIZE; |
@@ -450,24 +450,31 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page, | |||
450 | else | 450 | else |
451 | mss->private_clean += size; | 451 | mss->private_clean += size; |
452 | mss->pss += (u64)size << PSS_SHIFT; | 452 | mss->pss += (u64)size << PSS_SHIFT; |
453 | if (locked) | ||
454 | mss->pss_locked += (u64)size << PSS_SHIFT; | ||
453 | return; | 455 | return; |
454 | } | 456 | } |
455 | 457 | ||
456 | for (i = 0; i < nr; i++, page++) { | 458 | for (i = 0; i < nr; i++, page++) { |
457 | int mapcount = page_mapcount(page); | 459 | int mapcount = page_mapcount(page); |
460 | unsigned long pss = (PAGE_SIZE << PSS_SHIFT); | ||
458 | 461 | ||
459 | if (mapcount >= 2) { | 462 | if (mapcount >= 2) { |
460 | if (dirty || PageDirty(page)) | 463 | if (dirty || PageDirty(page)) |
461 | mss->shared_dirty += PAGE_SIZE; | 464 | mss->shared_dirty += PAGE_SIZE; |
462 | else | 465 | else |
463 | mss->shared_clean += PAGE_SIZE; | 466 | mss->shared_clean += PAGE_SIZE; |
464 | mss->pss += (PAGE_SIZE << PSS_SHIFT) / mapcount; | 467 | mss->pss += pss / mapcount; |
468 | if (locked) | ||
469 | mss->pss_locked += pss / mapcount; | ||
465 | } else { | 470 | } else { |
466 | if (dirty || PageDirty(page)) | 471 | if (dirty || PageDirty(page)) |
467 | mss->private_dirty += PAGE_SIZE; | 472 | mss->private_dirty += PAGE_SIZE; |
468 | else | 473 | else |
469 | mss->private_clean += PAGE_SIZE; | 474 | mss->private_clean += PAGE_SIZE; |
470 | mss->pss += PAGE_SIZE << PSS_SHIFT; | 475 | mss->pss += pss; |
476 | if (locked) | ||
477 | mss->pss_locked += pss; | ||
471 | } | 478 | } |
472 | } | 479 | } |
473 | } | 480 | } |
@@ -490,6 +497,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, | |||
490 | { | 497 | { |
491 | struct mem_size_stats *mss = walk->private; | 498 | struct mem_size_stats *mss = walk->private; |
492 | struct vm_area_struct *vma = walk->vma; | 499 | struct vm_area_struct *vma = walk->vma; |
500 | bool locked = !!(vma->vm_flags & VM_LOCKED); | ||
493 | struct page *page = NULL; | 501 | struct page *page = NULL; |
494 | 502 | ||
495 | if (pte_present(*pte)) { | 503 | if (pte_present(*pte)) { |
@@ -532,7 +540,7 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, | |||
532 | if (!page) | 540 | if (!page) |
533 | return; | 541 | return; |
534 | 542 | ||
535 | smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte)); | 543 | smaps_account(mss, page, false, pte_young(*pte), pte_dirty(*pte), locked); |
536 | } | 544 | } |
537 | 545 | ||
538 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 546 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
@@ -541,6 +549,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, | |||
541 | { | 549 | { |
542 | struct mem_size_stats *mss = walk->private; | 550 | struct mem_size_stats *mss = walk->private; |
543 | struct vm_area_struct *vma = walk->vma; | 551 | struct vm_area_struct *vma = walk->vma; |
552 | bool locked = !!(vma->vm_flags & VM_LOCKED); | ||
544 | struct page *page; | 553 | struct page *page; |
545 | 554 | ||
546 | /* FOLL_DUMP will return -EFAULT on huge zero page */ | 555 | /* FOLL_DUMP will return -EFAULT on huge zero page */ |
@@ -555,7 +564,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, | |||
555 | /* pass */; | 564 | /* pass */; |
556 | else | 565 | else |
557 | VM_BUG_ON_PAGE(1, page); | 566 | VM_BUG_ON_PAGE(1, page); |
558 | smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd)); | 567 | smaps_account(mss, page, true, pmd_young(*pmd), pmd_dirty(*pmd), locked); |
559 | } | 568 | } |
560 | #else | 569 | #else |
561 | static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, | 570 | static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr, |
@@ -737,11 +746,8 @@ static void smap_gather_stats(struct vm_area_struct *vma, | |||
737 | } | 746 | } |
738 | } | 747 | } |
739 | #endif | 748 | #endif |
740 | |||
741 | /* mmap_sem is held in m_start */ | 749 | /* mmap_sem is held in m_start */ |
742 | walk_page_vma(vma, &smaps_walk); | 750 | walk_page_vma(vma, &smaps_walk); |
743 | if (vma->vm_flags & VM_LOCKED) | ||
744 | mss->pss_locked += mss->pss; | ||
745 | } | 751 | } |
746 | 752 | ||
747 | #define SEQ_PUT_DEC(str, val) \ | 753 | #define SEQ_PUT_DEC(str, val) \ |
diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index 1c8eecfe52b8..6acf1bfa0bfe 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c | |||
@@ -768,18 +768,23 @@ xrep_findroot_block( | |||
768 | if (!uuid_equal(&btblock->bb_u.s.bb_uuid, | 768 | if (!uuid_equal(&btblock->bb_u.s.bb_uuid, |
769 | &mp->m_sb.sb_meta_uuid)) | 769 | &mp->m_sb.sb_meta_uuid)) |
770 | goto out; | 770 | goto out; |
771 | /* | ||
772 | * Read verifiers can reference b_ops, so we set the pointer | ||
773 | * here. If the verifier fails we'll reset the buffer state | ||
774 | * to what it was before we touched the buffer. | ||
775 | */ | ||
776 | bp->b_ops = fab->buf_ops; | ||
771 | fab->buf_ops->verify_read(bp); | 777 | fab->buf_ops->verify_read(bp); |
772 | if (bp->b_error) { | 778 | if (bp->b_error) { |
779 | bp->b_ops = NULL; | ||
773 | bp->b_error = 0; | 780 | bp->b_error = 0; |
774 | goto out; | 781 | goto out; |
775 | } | 782 | } |
776 | 783 | ||
777 | /* | 784 | /* |
778 | * Some read verifiers will (re)set b_ops, so we must be | 785 | * Some read verifiers will (re)set b_ops, so we must be |
779 | * careful not to blow away any such assignment. | 786 | * careful not to change b_ops after running the verifier. |
780 | */ | 787 | */ |
781 | if (!bp->b_ops) | ||
782 | bp->b_ops = fab->buf_ops; | ||
783 | } | 788 | } |
784 | 789 | ||
785 | /* | 790 | /* |
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 338b9d9984e0..d9048bcea49c 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -449,6 +449,7 @@ xfs_map_blocks( | |||
449 | } | 449 | } |
450 | 450 | ||
451 | wpc->imap = imap; | 451 | wpc->imap = imap; |
452 | xfs_trim_extent_eof(&wpc->imap, ip); | ||
452 | trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap); | 453 | trace_xfs_map_blocks_found(ip, offset, count, wpc->io_type, &imap); |
453 | return 0; | 454 | return 0; |
454 | allocate_blocks: | 455 | allocate_blocks: |
@@ -459,6 +460,7 @@ allocate_blocks: | |||
459 | ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF || | 460 | ASSERT(whichfork == XFS_COW_FORK || cow_fsb == NULLFILEOFF || |
460 | imap.br_startoff + imap.br_blockcount <= cow_fsb); | 461 | imap.br_startoff + imap.br_blockcount <= cow_fsb); |
461 | wpc->imap = imap; | 462 | wpc->imap = imap; |
463 | xfs_trim_extent_eof(&wpc->imap, ip); | ||
462 | trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap); | 464 | trace_xfs_map_blocks_alloc(ip, offset, count, wpc->io_type, &imap); |
463 | return 0; | 465 | return 0; |
464 | } | 466 | } |
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index eedc5e0156ff..4f5f2ff3f70f 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c | |||
@@ -776,10 +776,26 @@ _xfs_buf_read( | |||
776 | } | 776 | } |
777 | 777 | ||
778 | /* | 778 | /* |
779 | * Set buffer ops on an unchecked buffer and validate it, if possible. | ||
780 | * | ||
779 | * If the caller passed in an ops structure and the buffer doesn't have ops | 781 | * If the caller passed in an ops structure and the buffer doesn't have ops |
780 | * assigned, set the ops and use them to verify the contents. If the contents | 782 | * assigned, set the ops and use them to verify the contents. If the contents |
781 | * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no | 783 | * cannot be verified, we'll clear XBF_DONE. We assume the buffer has no |
782 | * recorded errors and is already in XBF_DONE state. | 784 | * recorded errors and is already in XBF_DONE state. |
785 | * | ||
786 | * Under normal operations, every in-core buffer must have buffer ops assigned | ||
787 | * to them when the buffer is read in from disk so that we can validate the | ||
788 | * metadata. | ||
789 | * | ||
790 | * However, there are two scenarios where one can encounter in-core buffers | ||
791 | * that don't have buffer ops. The first is during log recovery of buffers on | ||
792 | * a V4 filesystem, though these buffers are purged at the end of recovery. | ||
793 | * | ||
794 | * The other is online repair, which tries to match arbitrary metadata blocks | ||
795 | * with btree types in order to find the root. If online repair doesn't match | ||
796 | * the buffer with /any/ btree type, the buffer remains in memory in DONE state | ||
797 | * with no ops, and a subsequent read_buf call from elsewhere will not set the | ||
798 | * ops. This function helps us fix this situation. | ||
783 | */ | 799 | */ |
784 | int | 800 | int |
785 | xfs_buf_ensure_ops( | 801 | xfs_buf_ensure_ops( |
@@ -1536,8 +1552,7 @@ __xfs_buf_submit( | |||
1536 | xfs_buf_ioerror(bp, -EIO); | 1552 | xfs_buf_ioerror(bp, -EIO); |
1537 | bp->b_flags &= ~XBF_DONE; | 1553 | bp->b_flags &= ~XBF_DONE; |
1538 | xfs_buf_stale(bp); | 1554 | xfs_buf_stale(bp); |
1539 | if (bp->b_flags & XBF_ASYNC) | 1555 | xfs_buf_ioend(bp); |
1540 | xfs_buf_ioend(bp); | ||
1541 | return -EIO; | 1556 | return -EIO; |
1542 | } | 1557 | } |
1543 | 1558 | ||
diff --git a/include/uapi/asm-generic/shmparam.h b/include/asm-generic/shmparam.h index 8b78c0ba08b1..8b78c0ba08b1 100644 --- a/include/uapi/asm-generic/shmparam.h +++ b/include/asm-generic/shmparam.h | |||
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h index b53be41929be..04f7ac345984 100644 --- a/include/dt-bindings/clock/imx8mq-clock.h +++ b/include/dt-bindings/clock/imx8mq-clock.h | |||
@@ -350,7 +350,7 @@ | |||
350 | #define IMX8MQ_CLK_VPU_G2_ROOT 241 | 350 | #define IMX8MQ_CLK_VPU_G2_ROOT 241 |
351 | 351 | ||
352 | /* SCCG PLL GATE */ | 352 | /* SCCG PLL GATE */ |
353 | #define IMX8MQ_SYS1_PLL_OUT 232 | 353 | #define IMX8MQ_SYS1_PLL_OUT 242 |
354 | #define IMX8MQ_SYS2_PLL_OUT 243 | 354 | #define IMX8MQ_SYS2_PLL_OUT 243 |
355 | #define IMX8MQ_SYS3_PLL_OUT 244 | 355 | #define IMX8MQ_SYS3_PLL_OUT 244 |
356 | #define IMX8MQ_DRAM_PLL_OUT 245 | 356 | #define IMX8MQ_DRAM_PLL_OUT 245 |
@@ -372,24 +372,24 @@ | |||
372 | /* txesc clock */ | 372 | /* txesc clock */ |
373 | #define IMX8MQ_CLK_DSI_IPG_DIV 256 | 373 | #define IMX8MQ_CLK_DSI_IPG_DIV 256 |
374 | 374 | ||
375 | #define IMX8MQ_CLK_TMU_ROOT 265 | 375 | #define IMX8MQ_CLK_TMU_ROOT 257 |
376 | 376 | ||
377 | /* Display root clocks */ | 377 | /* Display root clocks */ |
378 | #define IMX8MQ_CLK_DISP_AXI_ROOT 266 | 378 | #define IMX8MQ_CLK_DISP_AXI_ROOT 258 |
379 | #define IMX8MQ_CLK_DISP_APB_ROOT 267 | 379 | #define IMX8MQ_CLK_DISP_APB_ROOT 259 |
380 | #define IMX8MQ_CLK_DISP_RTRM_ROOT 268 | 380 | #define IMX8MQ_CLK_DISP_RTRM_ROOT 260 |
381 | 381 | ||
382 | #define IMX8MQ_CLK_OCOTP_ROOT 269 | 382 | #define IMX8MQ_CLK_OCOTP_ROOT 261 |
383 | 383 | ||
384 | #define IMX8MQ_CLK_DRAM_ALT_ROOT 270 | 384 | #define IMX8MQ_CLK_DRAM_ALT_ROOT 262 |
385 | #define IMX8MQ_CLK_DRAM_CORE 271 | 385 | #define IMX8MQ_CLK_DRAM_CORE 263 |
386 | 386 | ||
387 | #define IMX8MQ_CLK_MU_ROOT 272 | 387 | #define IMX8MQ_CLK_MU_ROOT 264 |
388 | #define IMX8MQ_VIDEO2_PLL_OUT 273 | 388 | #define IMX8MQ_VIDEO2_PLL_OUT 265 |
389 | 389 | ||
390 | #define IMX8MQ_CLK_CLKO2 274 | 390 | #define IMX8MQ_CLK_CLKO2 266 |
391 | 391 | ||
392 | #define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 275 | 392 | #define IMX8MQ_CLK_NAND_USDHC_BUS_RAWNAND_CLK 267 |
393 | 393 | ||
394 | #define IMX8MQ_CLK_END 276 | 394 | #define IMX8MQ_CLK_END 268 |
395 | #endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ | 395 | #endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */ |
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h index 7b24fc791146..228a5e234af0 100644 --- a/include/dt-bindings/clock/marvell,mmp2.h +++ b/include/dt-bindings/clock/marvell,mmp2.h | |||
@@ -71,7 +71,6 @@ | |||
71 | #define MMP2_CLK_CCIC1_MIX 117 | 71 | #define MMP2_CLK_CCIC1_MIX 117 |
72 | #define MMP2_CLK_CCIC1_PHY 118 | 72 | #define MMP2_CLK_CCIC1_PHY 118 |
73 | #define MMP2_CLK_CCIC1_SPHY 119 | 73 | #define MMP2_CLK_CCIC1_SPHY 119 |
74 | #define MMP2_CLK_SP 120 | ||
75 | 74 | ||
76 | #define MMP2_NR_CLKS 200 | 75 | #define MMP2_NR_CLKS 200 |
77 | #endif | 76 | #endif |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 218df7f4d3e1..5041357d0297 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -180,12 +180,10 @@ enum cpuhp_smt_control { | |||
180 | #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) | 180 | #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) |
181 | extern enum cpuhp_smt_control cpu_smt_control; | 181 | extern enum cpuhp_smt_control cpu_smt_control; |
182 | extern void cpu_smt_disable(bool force); | 182 | extern void cpu_smt_disable(bool force); |
183 | extern void cpu_smt_check_topology_early(void); | ||
184 | extern void cpu_smt_check_topology(void); | 183 | extern void cpu_smt_check_topology(void); |
185 | #else | 184 | #else |
186 | # define cpu_smt_control (CPU_SMT_ENABLED) | 185 | # define cpu_smt_control (CPU_SMT_ENABLED) |
187 | static inline void cpu_smt_disable(bool force) { } | 186 | static inline void cpu_smt_disable(bool force) { } |
188 | static inline void cpu_smt_check_topology_early(void) { } | ||
189 | static inline void cpu_smt_check_topology(void) { } | 187 | static inline void cpu_smt_check_topology(void) { } |
190 | #endif | 188 | #endif |
191 | 189 | ||
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index ef4b70f64f33..60996e64c579 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -62,9 +62,10 @@ extern const struct qstr slash_name; | |||
62 | struct dentry_stat_t { | 62 | struct dentry_stat_t { |
63 | long nr_dentry; | 63 | long nr_dentry; |
64 | long nr_unused; | 64 | long nr_unused; |
65 | long age_limit; /* age in seconds */ | 65 | long age_limit; /* age in seconds */ |
66 | long want_pages; /* pages requested by system */ | 66 | long want_pages; /* pages requested by system */ |
67 | long dummy[2]; | 67 | long nr_negative; /* # of unused negative dentries */ |
68 | long dummy; /* Reserved for future use */ | ||
68 | }; | 69 | }; |
69 | extern struct dentry_stat_t dentry_stat; | 70 | extern struct dentry_stat_t dentry_stat; |
70 | 71 | ||
diff --git a/include/linux/filter.h b/include/linux/filter.h index ad106d845b22..e532fcc6e4b5 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -591,8 +591,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb) | |||
591 | return qdisc_skb_cb(skb)->data; | 591 | return qdisc_skb_cb(skb)->data; |
592 | } | 592 | } |
593 | 593 | ||
594 | static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | 594 | static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, |
595 | struct sk_buff *skb) | 595 | struct sk_buff *skb) |
596 | { | 596 | { |
597 | u8 *cb_data = bpf_skb_cb(skb); | 597 | u8 *cb_data = bpf_skb_cb(skb); |
598 | u8 cb_saved[BPF_SKB_CB_LEN]; | 598 | u8 cb_saved[BPF_SKB_CB_LEN]; |
@@ -611,15 +611,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | |||
611 | return res; | 611 | return res; |
612 | } | 612 | } |
613 | 613 | ||
614 | static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | ||
615 | struct sk_buff *skb) | ||
616 | { | ||
617 | u32 res; | ||
618 | |||
619 | preempt_disable(); | ||
620 | res = __bpf_prog_run_save_cb(prog, skb); | ||
621 | preempt_enable(); | ||
622 | return res; | ||
623 | } | ||
624 | |||
614 | static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, | 625 | static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, |
615 | struct sk_buff *skb) | 626 | struct sk_buff *skb) |
616 | { | 627 | { |
617 | u8 *cb_data = bpf_skb_cb(skb); | 628 | u8 *cb_data = bpf_skb_cb(skb); |
629 | u32 res; | ||
618 | 630 | ||
619 | if (unlikely(prog->cb_access)) | 631 | if (unlikely(prog->cb_access)) |
620 | memset(cb_data, 0, BPF_SKB_CB_LEN); | 632 | memset(cb_data, 0, BPF_SKB_CB_LEN); |
621 | 633 | ||
622 | return BPF_PROG_RUN(prog, skb); | 634 | preempt_disable(); |
635 | res = BPF_PROG_RUN(prog, skb); | ||
636 | preempt_enable(); | ||
637 | return res; | ||
623 | } | 638 | } |
624 | 639 | ||
625 | static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, | 640 | static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 811c77743dad..29d8e2cfed0e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -1479,11 +1479,12 @@ struct super_block { | |||
1479 | struct user_namespace *s_user_ns; | 1479 | struct user_namespace *s_user_ns; |
1480 | 1480 | ||
1481 | /* | 1481 | /* |
1482 | * Keep the lru lists last in the structure so they always sit on their | 1482 | * The list_lru structure is essentially just a pointer to a table |
1483 | * own individual cachelines. | 1483 | * of per-node lru lists, each of which has its own spinlock. |
1484 | * There is no need to put them into separate cachelines. | ||
1484 | */ | 1485 | */ |
1485 | struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; | 1486 | struct list_lru s_dentry_lru; |
1486 | struct list_lru s_inode_lru ____cacheline_aligned_in_smp; | 1487 | struct list_lru s_inode_lru; |
1487 | struct rcu_head rcu; | 1488 | struct rcu_head rcu; |
1488 | struct work_struct destroy_work; | 1489 | struct work_struct destroy_work; |
1489 | 1490 | ||
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h index 8663f216c563..2d6100edf204 100644 --- a/include/linux/hid-debug.h +++ b/include/linux/hid-debug.h | |||
@@ -24,7 +24,10 @@ | |||
24 | 24 | ||
25 | #ifdef CONFIG_DEBUG_FS | 25 | #ifdef CONFIG_DEBUG_FS |
26 | 26 | ||
27 | #include <linux/kfifo.h> | ||
28 | |||
27 | #define HID_DEBUG_BUFSIZE 512 | 29 | #define HID_DEBUG_BUFSIZE 512 |
30 | #define HID_DEBUG_FIFOSIZE 512 | ||
28 | 31 | ||
29 | void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); | 32 | void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); |
30 | void hid_dump_report(struct hid_device *, int , u8 *, int); | 33 | void hid_dump_report(struct hid_device *, int , u8 *, int); |
@@ -37,11 +40,8 @@ void hid_debug_init(void); | |||
37 | void hid_debug_exit(void); | 40 | void hid_debug_exit(void); |
38 | void hid_debug_event(struct hid_device *, char *); | 41 | void hid_debug_event(struct hid_device *, char *); |
39 | 42 | ||
40 | |||
41 | struct hid_debug_list { | 43 | struct hid_debug_list { |
42 | char *hid_debug_buf; | 44 | DECLARE_KFIFO_PTR(hid_debug_fifo, char); |
43 | int head; | ||
44 | int tail; | ||
45 | struct fasync_struct *fasync; | 45 | struct fasync_struct *fasync; |
46 | struct hid_device *hdev; | 46 | struct hid_device *hdev; |
47 | struct list_head node; | 47 | struct list_head node; |
@@ -64,4 +64,3 @@ struct hid_debug_list { | |||
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | #endif | 66 | #endif |
67 | |||
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 071b4cbdf010..c848a7cc502e 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -319,7 +319,7 @@ | |||
319 | #define GITS_TYPER_PLPIS (1UL << 0) | 319 | #define GITS_TYPER_PLPIS (1UL << 0) |
320 | #define GITS_TYPER_VLPIS (1UL << 1) | 320 | #define GITS_TYPER_VLPIS (1UL << 1) |
321 | #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 | 321 | #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 |
322 | #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1) | 322 | #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1) |
323 | #define GITS_TYPER_IDBITS_SHIFT 8 | 323 | #define GITS_TYPER_IDBITS_SHIFT 8 |
324 | #define GITS_TYPER_DEVBITS_SHIFT 13 | 324 | #define GITS_TYPER_DEVBITS_SHIFT 13 |
325 | #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) | 325 | #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) |
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 07da5c6c5ba0..368267c1b71b 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -21,14 +21,16 @@ struct vmem_altmap; | |||
21 | * walkers which rely on the fully initialized page->flags and others | 21 | * walkers which rely on the fully initialized page->flags and others |
22 | * should use this rather than pfn_valid && pfn_to_page | 22 | * should use this rather than pfn_valid && pfn_to_page |
23 | */ | 23 | */ |
24 | #define pfn_to_online_page(pfn) \ | 24 | #define pfn_to_online_page(pfn) \ |
25 | ({ \ | 25 | ({ \ |
26 | struct page *___page = NULL; \ | 26 | struct page *___page = NULL; \ |
27 | unsigned long ___nr = pfn_to_section_nr(pfn); \ | 27 | unsigned long ___pfn = pfn; \ |
28 | \ | 28 | unsigned long ___nr = pfn_to_section_nr(___pfn); \ |
29 | if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\ | 29 | \ |
30 | ___page = pfn_to_page(pfn); \ | 30 | if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \ |
31 | ___page; \ | 31 | pfn_valid_within(___pfn)) \ |
32 | ___page = pfn_to_page(___pfn); \ | ||
33 | ___page; \ | ||
32 | }) | 34 | }) |
33 | 35 | ||
34 | /* | 36 | /* |
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index de7377815b6b..8ef330027b13 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
@@ -308,6 +308,7 @@ struct mmc_card { | |||
308 | unsigned int nr_parts; | 308 | unsigned int nr_parts; |
309 | 309 | ||
310 | unsigned int bouncesz; /* Bounce buffer size */ | 310 | unsigned int bouncesz; /* Bounce buffer size */ |
311 | struct workqueue_struct *complete_wq; /* Private workqueue */ | ||
311 | }; | 312 | }; |
312 | 313 | ||
313 | static inline bool mmc_large_sector(struct mmc_card *card) | 314 | static inline bool mmc_large_sector(struct mmc_card *card) |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1377d085ef99..86dbb3e29139 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1483,6 +1483,7 @@ struct net_device_ops { | |||
1483 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook | 1483 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook |
1484 | * @IFF_FAILOVER: device is a failover master device | 1484 | * @IFF_FAILOVER: device is a failover master device |
1485 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device | 1485 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device |
1486 | * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device | ||
1486 | */ | 1487 | */ |
1487 | enum netdev_priv_flags { | 1488 | enum netdev_priv_flags { |
1488 | IFF_802_1Q_VLAN = 1<<0, | 1489 | IFF_802_1Q_VLAN = 1<<0, |
@@ -1514,6 +1515,7 @@ enum netdev_priv_flags { | |||
1514 | IFF_NO_RX_HANDLER = 1<<26, | 1515 | IFF_NO_RX_HANDLER = 1<<26, |
1515 | IFF_FAILOVER = 1<<27, | 1516 | IFF_FAILOVER = 1<<27, |
1516 | IFF_FAILOVER_SLAVE = 1<<28, | 1517 | IFF_FAILOVER_SLAVE = 1<<28, |
1518 | IFF_L3MDEV_RX_HANDLER = 1<<29, | ||
1517 | }; | 1519 | }; |
1518 | 1520 | ||
1519 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | 1521 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN |
@@ -1544,6 +1546,7 @@ enum netdev_priv_flags { | |||
1544 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER | 1546 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER |
1545 | #define IFF_FAILOVER IFF_FAILOVER | 1547 | #define IFF_FAILOVER IFF_FAILOVER |
1546 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE | 1548 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE |
1549 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER | ||
1547 | 1550 | ||
1548 | /** | 1551 | /** |
1549 | * struct net_device - The DEVICE structure. | 1552 | * struct net_device - The DEVICE structure. |
@@ -4549,6 +4552,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev) | |||
4549 | return dev->priv_flags & IFF_SUPP_NOFCS; | 4552 | return dev->priv_flags & IFF_SUPP_NOFCS; |
4550 | } | 4553 | } |
4551 | 4554 | ||
4555 | static inline bool netif_has_l3_rx_handler(const struct net_device *dev) | ||
4556 | { | ||
4557 | return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; | ||
4558 | } | ||
4559 | |||
4552 | static inline bool netif_is_l3_master(const struct net_device *dev) | 4560 | static inline bool netif_is_l3_master(const struct net_device *dev) |
4553 | { | 4561 | { |
4554 | return dev->priv_flags & IFF_L3MDEV_MASTER; | 4562 | return dev->priv_flags & IFF_L3MDEV_MASTER; |
diff --git a/include/linux/phy.h b/include/linux/phy.h index ef20aeea10cc..127fcc9c3778 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -674,26 +674,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, | |||
674 | size_t phy_speeds(unsigned int *speeds, size_t size, | 674 | size_t phy_speeds(unsigned int *speeds, size_t size, |
675 | unsigned long *mask); | 675 | unsigned long *mask); |
676 | 676 | ||
677 | static inline bool __phy_is_started(struct phy_device *phydev) | ||
678 | { | ||
679 | WARN_ON(!mutex_is_locked(&phydev->lock)); | ||
680 | |||
681 | return phydev->state >= PHY_UP; | ||
682 | } | ||
683 | |||
684 | /** | 677 | /** |
685 | * phy_is_started - Convenience function to check whether PHY is started | 678 | * phy_is_started - Convenience function to check whether PHY is started |
686 | * @phydev: The phy_device struct | 679 | * @phydev: The phy_device struct |
687 | */ | 680 | */ |
688 | static inline bool phy_is_started(struct phy_device *phydev) | 681 | static inline bool phy_is_started(struct phy_device *phydev) |
689 | { | 682 | { |
690 | bool started; | 683 | return phydev->state >= PHY_UP; |
691 | |||
692 | mutex_lock(&phydev->lock); | ||
693 | started = __phy_is_started(phydev); | ||
694 | mutex_unlock(&phydev->lock); | ||
695 | |||
696 | return started; | ||
697 | } | 684 | } |
698 | 685 | ||
699 | void phy_resolve_aneg_linkmode(struct phy_device *phydev); | 686 | void phy_resolve_aneg_linkmode(struct phy_device *phydev); |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 54af4eef169f..fed5be706bc9 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) | |||
105 | 105 | ||
106 | static inline void pm_runtime_mark_last_busy(struct device *dev) | 106 | static inline void pm_runtime_mark_last_busy(struct device *dev) |
107 | { | 107 | { |
108 | WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get())); | 108 | WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns()); |
109 | } | 109 | } |
110 | 110 | ||
111 | static inline bool pm_runtime_is_irq_safe(struct device *dev) | 111 | static inline bool pm_runtime_is_irq_safe(struct device *dev) |
diff --git a/include/linux/sched.h b/include/linux/sched.h index d2f90fa92468..bba3afb4e9bf 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -995,7 +995,7 @@ struct task_struct { | |||
995 | /* cg_list protected by css_set_lock and tsk->alloc_lock: */ | 995 | /* cg_list protected by css_set_lock and tsk->alloc_lock: */ |
996 | struct list_head cg_list; | 996 | struct list_head cg_list; |
997 | #endif | 997 | #endif |
998 | #ifdef CONFIG_X86_RESCTRL | 998 | #ifdef CONFIG_X86_CPU_RESCTRL |
999 | u32 closid; | 999 | u32 closid; |
1000 | u32 rmid; | 1000 | u32 rmid; |
1001 | #endif | 1001 | #endif |
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index ec912d01126f..ecdc6542070f 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h | |||
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm) | |||
71 | #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ | 71 | #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ |
72 | #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ | 72 | #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ |
73 | #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ | 73 | #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ |
74 | #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ | ||
74 | #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) | 75 | #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) |
75 | 76 | ||
76 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ | 77 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ |
diff --git a/include/linux/signal.h b/include/linux/signal.h index cc7e2c1cd444..9702016734b1 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -392,7 +392,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig); | |||
392 | #endif | 392 | #endif |
393 | 393 | ||
394 | #define siginmask(sig, mask) \ | 394 | #define siginmask(sig, mask) \ |
395 | ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) | 395 | ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) |
396 | 396 | ||
397 | #define SIG_KERNEL_ONLY_MASK (\ | 397 | #define SIG_KERNEL_ONLY_MASK (\ |
398 | rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) | 398 | rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) |
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 7ddfc65586b0..4335bd771ce5 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
@@ -184,6 +184,7 @@ struct plat_stmmacenet_data { | |||
184 | struct clk *pclk; | 184 | struct clk *pclk; |
185 | struct clk *clk_ptp_ref; | 185 | struct clk *clk_ptp_ref; |
186 | unsigned int clk_ptp_rate; | 186 | unsigned int clk_ptp_rate; |
187 | unsigned int clk_ref_rate; | ||
187 | struct reset_control *stmmac_rst; | 188 | struct reset_control *stmmac_rst; |
188 | struct stmmac_axi *axi; | 189 | struct stmmac_axi *axi; |
189 | int has_gmac4; | 190 | int has_gmac4; |
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h index 00b5e7825508..74ff688568a0 100644 --- a/include/net/inetpeer.h +++ b/include/net/inetpeer.h | |||
@@ -39,6 +39,7 @@ struct inet_peer { | |||
39 | 39 | ||
40 | u32 metrics[RTAX_MAX]; | 40 | u32 metrics[RTAX_MAX]; |
41 | u32 rate_tokens; /* rate limiting for ICMP */ | 41 | u32 rate_tokens; /* rate limiting for ICMP */ |
42 | u32 n_redirects; | ||
42 | unsigned long rate_last; | 43 | unsigned long rate_last; |
43 | /* | 44 | /* |
44 | * Once inet_peer is queued for deletion (refcnt == 0), following field | 45 | * Once inet_peer is queued for deletion (refcnt == 0), following field |
diff --git a/include/net/l3mdev.h b/include/net/l3mdev.h index 78fa0ac4613c..5175fd63cd82 100644 --- a/include/net/l3mdev.h +++ b/include/net/l3mdev.h | |||
@@ -153,7 +153,8 @@ struct sk_buff *l3mdev_l3_rcv(struct sk_buff *skb, u16 proto) | |||
153 | 153 | ||
154 | if (netif_is_l3_slave(skb->dev)) | 154 | if (netif_is_l3_slave(skb->dev)) |
155 | master = netdev_master_upper_dev_get_rcu(skb->dev); | 155 | master = netdev_master_upper_dev_get_rcu(skb->dev); |
156 | else if (netif_is_l3_master(skb->dev)) | 156 | else if (netif_is_l3_master(skb->dev) || |
157 | netif_has_l3_rx_handler(skb->dev)) | ||
157 | master = skb->dev; | 158 | master = skb->dev; |
158 | 159 | ||
159 | if (master && master->l3mdev_ops->l3mdev_l3_rcv) | 160 | if (master && master->l3mdev_ops->l3mdev_l3_rcv) |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 841835a387e1..b4984bbbe157 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -469,9 +469,7 @@ struct nft_set_binding { | |||
469 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | 469 | int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, |
470 | struct nft_set_binding *binding); | 470 | struct nft_set_binding *binding); |
471 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, | 471 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, |
472 | struct nft_set_binding *binding); | 472 | struct nft_set_binding *binding, bool commit); |
473 | void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
474 | struct nft_set_binding *binding); | ||
475 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); | 473 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set); |
476 | 474 | ||
477 | /** | 475 | /** |
@@ -721,6 +719,13 @@ struct nft_expr_type { | |||
721 | #define NFT_EXPR_STATEFUL 0x1 | 719 | #define NFT_EXPR_STATEFUL 0x1 |
722 | #define NFT_EXPR_GC 0x2 | 720 | #define NFT_EXPR_GC 0x2 |
723 | 721 | ||
722 | enum nft_trans_phase { | ||
723 | NFT_TRANS_PREPARE, | ||
724 | NFT_TRANS_ABORT, | ||
725 | NFT_TRANS_COMMIT, | ||
726 | NFT_TRANS_RELEASE | ||
727 | }; | ||
728 | |||
724 | /** | 729 | /** |
725 | * struct nft_expr_ops - nf_tables expression operations | 730 | * struct nft_expr_ops - nf_tables expression operations |
726 | * | 731 | * |
@@ -750,7 +755,8 @@ struct nft_expr_ops { | |||
750 | void (*activate)(const struct nft_ctx *ctx, | 755 | void (*activate)(const struct nft_ctx *ctx, |
751 | const struct nft_expr *expr); | 756 | const struct nft_expr *expr); |
752 | void (*deactivate)(const struct nft_ctx *ctx, | 757 | void (*deactivate)(const struct nft_ctx *ctx, |
753 | const struct nft_expr *expr); | 758 | const struct nft_expr *expr, |
759 | enum nft_trans_phase phase); | ||
754 | void (*destroy)(const struct nft_ctx *ctx, | 760 | void (*destroy)(const struct nft_ctx *ctx, |
755 | const struct nft_expr *expr); | 761 | const struct nft_expr *expr); |
756 | void (*destroy_clone)(const struct nft_ctx *ctx, | 762 | void (*destroy_clone)(const struct nft_ctx *ctx, |
@@ -1323,12 +1329,15 @@ struct nft_trans_rule { | |||
1323 | struct nft_trans_set { | 1329 | struct nft_trans_set { |
1324 | struct nft_set *set; | 1330 | struct nft_set *set; |
1325 | u32 set_id; | 1331 | u32 set_id; |
1332 | bool bound; | ||
1326 | }; | 1333 | }; |
1327 | 1334 | ||
1328 | #define nft_trans_set(trans) \ | 1335 | #define nft_trans_set(trans) \ |
1329 | (((struct nft_trans_set *)trans->data)->set) | 1336 | (((struct nft_trans_set *)trans->data)->set) |
1330 | #define nft_trans_set_id(trans) \ | 1337 | #define nft_trans_set_id(trans) \ |
1331 | (((struct nft_trans_set *)trans->data)->set_id) | 1338 | (((struct nft_trans_set *)trans->data)->set_id) |
1339 | #define nft_trans_set_bound(trans) \ | ||
1340 | (((struct nft_trans_set *)trans->data)->bound) | ||
1332 | 1341 | ||
1333 | struct nft_trans_chain { | 1342 | struct nft_trans_chain { |
1334 | bool update; | 1343 | bool update; |
diff --git a/include/net/sock.h b/include/net/sock.h index 2b229f7be8eb..f43f935cb113 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1277,7 +1277,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk) | |||
1277 | percpu_counter_inc(sk->sk_prot->sockets_allocated); | 1277 | percpu_counter_inc(sk->sk_prot->sockets_allocated); |
1278 | } | 1278 | } |
1279 | 1279 | ||
1280 | static inline int | 1280 | static inline u64 |
1281 | sk_sockets_allocated_read_positive(struct sock *sk) | 1281 | sk_sockets_allocated_read_positive(struct sock *sk) |
1282 | { | 1282 | { |
1283 | return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); | 1283 | return percpu_counter_read_positive(sk->sk_prot->sockets_allocated); |
diff --git a/include/net/tls.h b/include/net/tls.h index 2a6ac8d642af..1486b60c4de8 100644 --- a/include/net/tls.h +++ b/include/net/tls.h | |||
@@ -120,6 +120,8 @@ struct tls_rec { | |||
120 | struct scatterlist sg_aead_out[2]; | 120 | struct scatterlist sg_aead_out[2]; |
121 | 121 | ||
122 | char aad_space[TLS_AAD_SPACE_SIZE]; | 122 | char aad_space[TLS_AAD_SPACE_SIZE]; |
123 | u8 iv_data[TLS_CIPHER_AES_GCM_128_IV_SIZE + | ||
124 | TLS_CIPHER_AES_GCM_128_SALT_SIZE]; | ||
123 | struct aead_request aead_req; | 125 | struct aead_request aead_req; |
124 | u8 aead_req_ctx[]; | 126 | u8 aead_req_ctx[]; |
125 | }; | 127 | }; |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index a3ceed3a040a..80debf5982ac 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -2579,9 +2579,10 @@ struct ib_device { | |||
2579 | 2579 | ||
2580 | const struct uapi_definition *driver_def; | 2580 | const struct uapi_definition *driver_def; |
2581 | enum rdma_driver_id driver_id; | 2581 | enum rdma_driver_id driver_id; |
2582 | |||
2582 | /* | 2583 | /* |
2583 | * Provides synchronization between device unregistration and netlink | 2584 | * Positive refcount indicates that the device is currently |
2584 | * commands on a device. To be used only by core. | 2585 | * registered and cannot be unregistered. |
2585 | */ | 2586 | */ |
2586 | refcount_t refcount; | 2587 | refcount_t refcount; |
2587 | struct completion unreg_completion; | 2588 | struct completion unreg_completion; |
@@ -3926,6 +3927,25 @@ static inline bool ib_access_writable(int access_flags) | |||
3926 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, | 3927 | int ib_check_mr_status(struct ib_mr *mr, u32 check_mask, |
3927 | struct ib_mr_status *mr_status); | 3928 | struct ib_mr_status *mr_status); |
3928 | 3929 | ||
3930 | /** | ||
3931 | * ib_device_try_get: Hold a registration lock | ||
3932 | * device: The device to lock | ||
3933 | * | ||
3934 | * A device under an active registration lock cannot become unregistered. It | ||
3935 | * is only possible to obtain a registration lock on a device that is fully | ||
3936 | * registered, otherwise this function returns false. | ||
3937 | * | ||
3938 | * The registration lock is only necessary for actions which require the | ||
3939 | * device to still be registered. Uses that only require the device pointer to | ||
3940 | * be valid should use get_device(&ibdev->dev) to hold the memory. | ||
3941 | * | ||
3942 | */ | ||
3943 | static inline bool ib_device_try_get(struct ib_device *dev) | ||
3944 | { | ||
3945 | return refcount_inc_not_zero(&dev->refcount); | ||
3946 | } | ||
3947 | |||
3948 | void ib_device_put(struct ib_device *device); | ||
3929 | struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, | 3949 | struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u8 port, |
3930 | u16 pkey, const union ib_gid *gid, | 3950 | u16 pkey, const union ib_gid *gid, |
3931 | const struct sockaddr *addr); | 3951 | const struct sockaddr *addr); |
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h index 0cdc3999ecfa..c5188ff724d1 100644 --- a/include/sound/compress_driver.h +++ b/include/sound/compress_driver.h | |||
@@ -173,7 +173,11 @@ static inline void snd_compr_drain_notify(struct snd_compr_stream *stream) | |||
173 | if (snd_BUG_ON(!stream)) | 173 | if (snd_BUG_ON(!stream)) |
174 | return; | 174 | return; |
175 | 175 | ||
176 | stream->runtime->state = SNDRV_PCM_STATE_SETUP; | 176 | if (stream->direction == SND_COMPRESS_PLAYBACK) |
177 | stream->runtime->state = SNDRV_PCM_STATE_SETUP; | ||
178 | else | ||
179 | stream->runtime->state = SNDRV_PCM_STATE_PREPARED; | ||
180 | |||
177 | wake_up(&stream->runtime->sleep); | 181 | wake_up(&stream->runtime->sleep); |
178 | } | 182 | } |
179 | 183 | ||
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h index 7fa48b100936..cc7c8d42d4fd 100644 --- a/include/sound/hda_codec.h +++ b/include/sound/hda_codec.h | |||
@@ -68,6 +68,7 @@ struct hda_bus { | |||
68 | unsigned int response_reset:1; /* controller was reset */ | 68 | unsigned int response_reset:1; /* controller was reset */ |
69 | unsigned int in_reset:1; /* during reset operation */ | 69 | unsigned int in_reset:1; /* during reset operation */ |
70 | unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ | 70 | unsigned int no_response_fallback:1; /* don't fallback at RIRB error */ |
71 | unsigned int bus_probing :1; /* during probing process */ | ||
71 | 72 | ||
72 | int primary_dig_out_type; /* primary digital out PCM type */ | 73 | int primary_dig_out_type; /* primary digital out PCM type */ |
73 | unsigned int mixer_assigned; /* codec addr for mixer name */ | 74 | unsigned int mixer_assigned; /* codec addr for mixer name */ |
diff --git a/include/uapi/linux/inet_diag.h b/include/uapi/linux/inet_diag.h index 14565d703291..e8baca85bac6 100644 --- a/include/uapi/linux/inet_diag.h +++ b/include/uapi/linux/inet_diag.h | |||
@@ -137,15 +137,21 @@ enum { | |||
137 | INET_DIAG_TCLASS, | 137 | INET_DIAG_TCLASS, |
138 | INET_DIAG_SKMEMINFO, | 138 | INET_DIAG_SKMEMINFO, |
139 | INET_DIAG_SHUTDOWN, | 139 | INET_DIAG_SHUTDOWN, |
140 | INET_DIAG_DCTCPINFO, | 140 | |
141 | INET_DIAG_PROTOCOL, /* response attribute only */ | 141 | /* |
142 | * Next extensions cannot be requested in struct inet_diag_req_v2: | ||
143 | * its field idiag_ext has only 8 bits. | ||
144 | */ | ||
145 | |||
146 | INET_DIAG_DCTCPINFO, /* request as INET_DIAG_VEGASINFO */ | ||
147 | INET_DIAG_PROTOCOL, /* response attribute only */ | ||
142 | INET_DIAG_SKV6ONLY, | 148 | INET_DIAG_SKV6ONLY, |
143 | INET_DIAG_LOCALS, | 149 | INET_DIAG_LOCALS, |
144 | INET_DIAG_PEERS, | 150 | INET_DIAG_PEERS, |
145 | INET_DIAG_PAD, | 151 | INET_DIAG_PAD, |
146 | INET_DIAG_MARK, | 152 | INET_DIAG_MARK, /* only with CAP_NET_ADMIN */ |
147 | INET_DIAG_BBRINFO, | 153 | INET_DIAG_BBRINFO, /* request as INET_DIAG_VEGASINFO */ |
148 | INET_DIAG_CLASS_ID, | 154 | INET_DIAG_CLASS_ID, /* request as INET_DIAG_TCLASS */ |
149 | INET_DIAG_MD5SIG, | 155 | INET_DIAG_MD5SIG, |
150 | __INET_DIAG_MAX, | 156 | __INET_DIAG_MAX, |
151 | }; | 157 | }; |
diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index 1196e1c1d4f6..ff8e7dc9d4dd 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h | |||
@@ -79,6 +79,12 @@ | |||
79 | #define VIRTIO_F_RING_PACKED 34 | 79 | #define VIRTIO_F_RING_PACKED 34 |
80 | 80 | ||
81 | /* | 81 | /* |
82 | * This feature indicates that memory accesses by the driver and the | ||
83 | * device are ordered in a way described by the platform. | ||
84 | */ | ||
85 | #define VIRTIO_F_ORDER_PLATFORM 36 | ||
86 | |||
87 | /* | ||
82 | * Does the device support Single Root I/O Virtualization? | 88 | * Does the device support Single Root I/O Virtualization? |
83 | */ | 89 | */ |
84 | #define VIRTIO_F_SR_IOV 37 | 90 | #define VIRTIO_F_SR_IOV 37 |
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 2414f8af26b3..4c4e24c291a5 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h | |||
@@ -213,14 +213,4 @@ struct vring_packed_desc { | |||
213 | __le16 flags; | 213 | __le16 flags; |
214 | }; | 214 | }; |
215 | 215 | ||
216 | struct vring_packed { | ||
217 | unsigned int num; | ||
218 | |||
219 | struct vring_packed_desc *desc; | ||
220 | |||
221 | struct vring_packed_desc_event *driver; | ||
222 | |||
223 | struct vring_packed_desc_event *device; | ||
224 | }; | ||
225 | |||
226 | #endif /* _UAPI_LINUX_VIRTIO_RING_H */ | 216 | #endif /* _UAPI_LINUX_VIRTIO_RING_H */ |
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h index ef3c7ec793a7..eb76b38a00d4 100644 --- a/include/uapi/rdma/hns-abi.h +++ b/include/uapi/rdma/hns-abi.h | |||
@@ -52,6 +52,11 @@ struct hns_roce_ib_create_srq { | |||
52 | __aligned_u64 que_addr; | 52 | __aligned_u64 que_addr; |
53 | }; | 53 | }; |
54 | 54 | ||
55 | struct hns_roce_ib_create_srq_resp { | ||
56 | __u32 srqn; | ||
57 | __u32 reserved; | ||
58 | }; | ||
59 | |||
55 | struct hns_roce_ib_create_qp { | 60 | struct hns_roce_ib_create_qp { |
56 | __aligned_u64 buf_addr; | 61 | __aligned_u64 buf_addr; |
57 | __aligned_u64 db_addr; | 62 | __aligned_u64 db_addr; |
diff --git a/init/Kconfig b/init/Kconfig index 513fa544a134..c9386a365eea 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -512,6 +512,17 @@ config PSI_DEFAULT_DISABLED | |||
512 | per default but can be enabled through passing psi=1 on the | 512 | per default but can be enabled through passing psi=1 on the |
513 | kernel commandline during boot. | 513 | kernel commandline during boot. |
514 | 514 | ||
515 | This feature adds some code to the task wakeup and sleep | ||
516 | paths of the scheduler. The overhead is too low to affect | ||
517 | common scheduling-intense workloads in practice (such as | ||
518 | webservers, memcache), but it does show up in artificial | ||
519 | scheduler stress tests, such as hackbench. | ||
520 | |||
521 | If you are paranoid and not sure what the kernel will be | ||
522 | used for, say Y. | ||
523 | |||
524 | Say N if unsure. | ||
525 | |||
515 | endmenu # "CPU/Task time and stats accounting" | 526 | endmenu # "CPU/Task time and stats accounting" |
516 | 527 | ||
517 | config CPU_ISOLATION | 528 | config CPU_ISOLATION |
@@ -825,7 +836,7 @@ config CGROUP_PIDS | |||
825 | PIDs controller is designed to stop this from happening. | 836 | PIDs controller is designed to stop this from happening. |
826 | 837 | ||
827 | It should be noted that organisational operations (such as attaching | 838 | It should be noted that organisational operations (such as attaching |
828 | to a cgroup hierarchy will *not* be blocked by the PIDs controller), | 839 | to a cgroup hierarchy) will *not* be blocked by the PIDs controller, |
829 | since the PIDs limit only affects a process's ability to fork, not to | 840 | since the PIDs limit only affects a process's ability to fork, not to |
830 | attach to a cgroup. | 841 | attach to a cgroup. |
831 | 842 | ||
diff --git a/init/main.c b/init/main.c index e2e80ca3165a..c86a1c8f19f4 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -695,7 +695,6 @@ asmlinkage __visible void __init start_kernel(void) | |||
695 | initrd_start = 0; | 695 | initrd_start = 0; |
696 | } | 696 | } |
697 | #endif | 697 | #endif |
698 | page_ext_init(); | ||
699 | kmemleak_init(); | 698 | kmemleak_init(); |
700 | setup_per_cpu_pageset(); | 699 | setup_per_cpu_pageset(); |
701 | numa_policy_init(); | 700 | numa_policy_init(); |
@@ -1131,6 +1130,8 @@ static noinline void __init kernel_init_freeable(void) | |||
1131 | sched_init_smp(); | 1130 | sched_init_smp(); |
1132 | 1131 | ||
1133 | page_alloc_init_late(); | 1132 | page_alloc_init_late(); |
1133 | /* Initialize page ext after all struct pages are initialized. */ | ||
1134 | page_ext_init(); | ||
1134 | 1135 | ||
1135 | do_basic_setup(); | 1136 | do_basic_setup(); |
1136 | 1137 | ||
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index befe570be5ba..c57bd10340ed 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c | |||
@@ -1459,7 +1459,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env, | |||
1459 | 1459 | ||
1460 | /* "typedef void new_void", "const void"...etc */ | 1460 | /* "typedef void new_void", "const void"...etc */ |
1461 | if (!btf_type_is_void(next_type) && | 1461 | if (!btf_type_is_void(next_type) && |
1462 | !btf_type_is_fwd(next_type)) { | 1462 | !btf_type_is_fwd(next_type) && |
1463 | !btf_type_is_func_proto(next_type)) { | ||
1463 | btf_verifier_log_type(env, v->t, "Invalid type_id"); | 1464 | btf_verifier_log_type(env, v->t, "Invalid type_id"); |
1464 | return -EINVAL; | 1465 | return -EINVAL; |
1465 | } | 1466 | } |
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c index ab612fe9862f..d17d05570a3f 100644 --- a/kernel/bpf/cgroup.c +++ b/kernel/bpf/cgroup.c | |||
@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk, | |||
572 | bpf_compute_and_save_data_end(skb, &saved_data_end); | 572 | bpf_compute_and_save_data_end(skb, &saved_data_end); |
573 | 573 | ||
574 | ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, | 574 | ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, |
575 | bpf_prog_run_save_cb); | 575 | __bpf_prog_run_save_cb); |
576 | bpf_restore_data_end(skb, saved_data_end); | 576 | bpf_restore_data_end(skb, saved_data_end); |
577 | __skb_pull(skb, offset); | 577 | __skb_pull(skb, offset); |
578 | skb->sk = save_sk; | 578 | skb->sk = save_sk; |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 4b7c76765d9d..f9274114c88d 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l) | |||
686 | } | 686 | } |
687 | 687 | ||
688 | if (htab_is_prealloc(htab)) { | 688 | if (htab_is_prealloc(htab)) { |
689 | pcpu_freelist_push(&htab->freelist, &l->fnode); | 689 | __pcpu_freelist_push(&htab->freelist, &l->fnode); |
690 | } else { | 690 | } else { |
691 | atomic_dec(&htab->count); | 691 | atomic_dec(&htab->count); |
692 | l->htab = htab; | 692 | l->htab = htab; |
@@ -748,7 +748,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key, | |||
748 | } else { | 748 | } else { |
749 | struct pcpu_freelist_node *l; | 749 | struct pcpu_freelist_node *l; |
750 | 750 | ||
751 | l = pcpu_freelist_pop(&htab->freelist); | 751 | l = __pcpu_freelist_pop(&htab->freelist); |
752 | if (!l) | 752 | if (!l) |
753 | return ERR_PTR(-E2BIG); | 753 | return ERR_PTR(-E2BIG); |
754 | l_new = container_of(l, struct htab_elem, fnode); | 754 | l_new = container_of(l, struct htab_elem, fnode); |
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c index 673fa6fe2d73..0c1b4ba9e90e 100644 --- a/kernel/bpf/percpu_freelist.c +++ b/kernel/bpf/percpu_freelist.c | |||
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s) | |||
28 | free_percpu(s->freelist); | 28 | free_percpu(s->freelist); |
29 | } | 29 | } |
30 | 30 | ||
31 | static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, | 31 | static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head, |
32 | struct pcpu_freelist_node *node) | 32 | struct pcpu_freelist_node *node) |
33 | { | 33 | { |
34 | raw_spin_lock(&head->lock); | 34 | raw_spin_lock(&head->lock); |
35 | node->next = head->first; | 35 | node->next = head->first; |
@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head, | |||
37 | raw_spin_unlock(&head->lock); | 37 | raw_spin_unlock(&head->lock); |
38 | } | 38 | } |
39 | 39 | ||
40 | void pcpu_freelist_push(struct pcpu_freelist *s, | 40 | void __pcpu_freelist_push(struct pcpu_freelist *s, |
41 | struct pcpu_freelist_node *node) | 41 | struct pcpu_freelist_node *node) |
42 | { | 42 | { |
43 | struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); | 43 | struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist); |
44 | 44 | ||
45 | __pcpu_freelist_push(head, node); | 45 | ___pcpu_freelist_push(head, node); |
46 | } | ||
47 | |||
48 | void pcpu_freelist_push(struct pcpu_freelist *s, | ||
49 | struct pcpu_freelist_node *node) | ||
50 | { | ||
51 | unsigned long flags; | ||
52 | |||
53 | local_irq_save(flags); | ||
54 | __pcpu_freelist_push(s, node); | ||
55 | local_irq_restore(flags); | ||
46 | } | 56 | } |
47 | 57 | ||
48 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, | 58 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, |
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, | |||
63 | for_each_possible_cpu(cpu) { | 73 | for_each_possible_cpu(cpu) { |
64 | again: | 74 | again: |
65 | head = per_cpu_ptr(s->freelist, cpu); | 75 | head = per_cpu_ptr(s->freelist, cpu); |
66 | __pcpu_freelist_push(head, buf); | 76 | ___pcpu_freelist_push(head, buf); |
67 | i++; | 77 | i++; |
68 | buf += elem_size; | 78 | buf += elem_size; |
69 | if (i == nr_elems) | 79 | if (i == nr_elems) |
@@ -74,14 +84,12 @@ again: | |||
74 | local_irq_restore(flags); | 84 | local_irq_restore(flags); |
75 | } | 85 | } |
76 | 86 | ||
77 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) | 87 | struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s) |
78 | { | 88 | { |
79 | struct pcpu_freelist_head *head; | 89 | struct pcpu_freelist_head *head; |
80 | struct pcpu_freelist_node *node; | 90 | struct pcpu_freelist_node *node; |
81 | unsigned long flags; | ||
82 | int orig_cpu, cpu; | 91 | int orig_cpu, cpu; |
83 | 92 | ||
84 | local_irq_save(flags); | ||
85 | orig_cpu = cpu = raw_smp_processor_id(); | 93 | orig_cpu = cpu = raw_smp_processor_id(); |
86 | while (1) { | 94 | while (1) { |
87 | head = per_cpu_ptr(s->freelist, cpu); | 95 | head = per_cpu_ptr(s->freelist, cpu); |
@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) | |||
89 | node = head->first; | 97 | node = head->first; |
90 | if (node) { | 98 | if (node) { |
91 | head->first = node->next; | 99 | head->first = node->next; |
92 | raw_spin_unlock_irqrestore(&head->lock, flags); | 100 | raw_spin_unlock(&head->lock); |
93 | return node; | 101 | return node; |
94 | } | 102 | } |
95 | raw_spin_unlock(&head->lock); | 103 | raw_spin_unlock(&head->lock); |
96 | cpu = cpumask_next(cpu, cpu_possible_mask); | 104 | cpu = cpumask_next(cpu, cpu_possible_mask); |
97 | if (cpu >= nr_cpu_ids) | 105 | if (cpu >= nr_cpu_ids) |
98 | cpu = 0; | 106 | cpu = 0; |
99 | if (cpu == orig_cpu) { | 107 | if (cpu == orig_cpu) |
100 | local_irq_restore(flags); | ||
101 | return NULL; | 108 | return NULL; |
102 | } | ||
103 | } | 109 | } |
104 | } | 110 | } |
111 | |||
112 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s) | ||
113 | { | ||
114 | struct pcpu_freelist_node *ret; | ||
115 | unsigned long flags; | ||
116 | |||
117 | local_irq_save(flags); | ||
118 | ret = __pcpu_freelist_pop(s); | ||
119 | local_irq_restore(flags); | ||
120 | return ret; | ||
121 | } | ||
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h index 3049aae8ea1e..c3960118e617 100644 --- a/kernel/bpf/percpu_freelist.h +++ b/kernel/bpf/percpu_freelist.h | |||
@@ -22,8 +22,12 @@ struct pcpu_freelist_node { | |||
22 | struct pcpu_freelist_node *next; | 22 | struct pcpu_freelist_node *next; |
23 | }; | 23 | }; |
24 | 24 | ||
25 | /* pcpu_freelist_* do spin_lock_irqsave. */ | ||
25 | void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); | 26 | void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); |
26 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); | 27 | struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *); |
28 | /* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */ | ||
29 | void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *); | ||
30 | struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *); | ||
27 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, | 31 | void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size, |
28 | u32 nr_elems); | 32 | u32 nr_elems); |
29 | int pcpu_freelist_init(struct pcpu_freelist *); | 33 | int pcpu_freelist_init(struct pcpu_freelist *); |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index b155cd17c1bd..8577bb7f8be6 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -713,8 +713,13 @@ static int map_lookup_elem(union bpf_attr *attr) | |||
713 | 713 | ||
714 | if (bpf_map_is_dev_bound(map)) { | 714 | if (bpf_map_is_dev_bound(map)) { |
715 | err = bpf_map_offload_lookup_elem(map, key, value); | 715 | err = bpf_map_offload_lookup_elem(map, key, value); |
716 | } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || | 716 | goto done; |
717 | map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { | 717 | } |
718 | |||
719 | preempt_disable(); | ||
720 | this_cpu_inc(bpf_prog_active); | ||
721 | if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH || | ||
722 | map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) { | ||
718 | err = bpf_percpu_hash_copy(map, key, value); | 723 | err = bpf_percpu_hash_copy(map, key, value); |
719 | } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { | 724 | } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) { |
720 | err = bpf_percpu_array_copy(map, key, value); | 725 | err = bpf_percpu_array_copy(map, key, value); |
@@ -744,7 +749,10 @@ static int map_lookup_elem(union bpf_attr *attr) | |||
744 | } | 749 | } |
745 | rcu_read_unlock(); | 750 | rcu_read_unlock(); |
746 | } | 751 | } |
752 | this_cpu_dec(bpf_prog_active); | ||
753 | preempt_enable(); | ||
747 | 754 | ||
755 | done: | ||
748 | if (err) | 756 | if (err) |
749 | goto free_value; | 757 | goto free_value; |
750 | 758 | ||
diff --git a/kernel/cpu.c b/kernel/cpu.c index 91d5c38eb7e5..d1c6d152da89 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -376,9 +376,6 @@ void __weak arch_smt_update(void) { } | |||
376 | 376 | ||
377 | #ifdef CONFIG_HOTPLUG_SMT | 377 | #ifdef CONFIG_HOTPLUG_SMT |
378 | enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; | 378 | enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED; |
379 | EXPORT_SYMBOL_GPL(cpu_smt_control); | ||
380 | |||
381 | static bool cpu_smt_available __read_mostly; | ||
382 | 379 | ||
383 | void __init cpu_smt_disable(bool force) | 380 | void __init cpu_smt_disable(bool force) |
384 | { | 381 | { |
@@ -397,25 +394,11 @@ void __init cpu_smt_disable(bool force) | |||
397 | 394 | ||
398 | /* | 395 | /* |
399 | * The decision whether SMT is supported can only be done after the full | 396 | * The decision whether SMT is supported can only be done after the full |
400 | * CPU identification. Called from architecture code before non boot CPUs | 397 | * CPU identification. Called from architecture code. |
401 | * are brought up. | ||
402 | */ | ||
403 | void __init cpu_smt_check_topology_early(void) | ||
404 | { | ||
405 | if (!topology_smt_supported()) | ||
406 | cpu_smt_control = CPU_SMT_NOT_SUPPORTED; | ||
407 | } | ||
408 | |||
409 | /* | ||
410 | * If SMT was disabled by BIOS, detect it here, after the CPUs have been | ||
411 | * brought online. This ensures the smt/l1tf sysfs entries are consistent | ||
412 | * with reality. cpu_smt_available is set to true during the bringup of non | ||
413 | * boot CPUs when a SMT sibling is detected. Note, this may overwrite | ||
414 | * cpu_smt_control's previous setting. | ||
415 | */ | 398 | */ |
416 | void __init cpu_smt_check_topology(void) | 399 | void __init cpu_smt_check_topology(void) |
417 | { | 400 | { |
418 | if (!cpu_smt_available) | 401 | if (!topology_smt_supported()) |
419 | cpu_smt_control = CPU_SMT_NOT_SUPPORTED; | 402 | cpu_smt_control = CPU_SMT_NOT_SUPPORTED; |
420 | } | 403 | } |
421 | 404 | ||
@@ -428,18 +411,10 @@ early_param("nosmt", smt_cmdline_disable); | |||
428 | 411 | ||
429 | static inline bool cpu_smt_allowed(unsigned int cpu) | 412 | static inline bool cpu_smt_allowed(unsigned int cpu) |
430 | { | 413 | { |
431 | if (topology_is_primary_thread(cpu)) | 414 | if (cpu_smt_control == CPU_SMT_ENABLED) |
432 | return true; | 415 | return true; |
433 | 416 | ||
434 | /* | 417 | if (topology_is_primary_thread(cpu)) |
435 | * If the CPU is not a 'primary' thread and the booted_once bit is | ||
436 | * set then the processor has SMT support. Store this information | ||
437 | * for the late check of SMT support in cpu_smt_check_topology(). | ||
438 | */ | ||
439 | if (per_cpu(cpuhp_state, cpu).booted_once) | ||
440 | cpu_smt_available = true; | ||
441 | |||
442 | if (cpu_smt_control == CPU_SMT_ENABLED) | ||
443 | return true; | 418 | return true; |
444 | 419 | ||
445 | /* | 420 | /* |
@@ -2090,10 +2065,8 @@ static int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval) | |||
2090 | */ | 2065 | */ |
2091 | cpuhp_offline_cpu_device(cpu); | 2066 | cpuhp_offline_cpu_device(cpu); |
2092 | } | 2067 | } |
2093 | if (!ret) { | 2068 | if (!ret) |
2094 | cpu_smt_control = ctrlval; | 2069 | cpu_smt_control = ctrlval; |
2095 | arch_smt_update(); | ||
2096 | } | ||
2097 | cpu_maps_update_done(); | 2070 | cpu_maps_update_done(); |
2098 | return ret; | 2071 | return ret; |
2099 | } | 2072 | } |
@@ -2104,7 +2077,6 @@ static int cpuhp_smt_enable(void) | |||
2104 | 2077 | ||
2105 | cpu_maps_update_begin(); | 2078 | cpu_maps_update_begin(); |
2106 | cpu_smt_control = CPU_SMT_ENABLED; | 2079 | cpu_smt_control = CPU_SMT_ENABLED; |
2107 | arch_smt_update(); | ||
2108 | for_each_present_cpu(cpu) { | 2080 | for_each_present_cpu(cpu) { |
2109 | /* Skip online CPUs and CPUs on offline nodes */ | 2081 | /* Skip online CPUs and CPUs on offline nodes */ |
2110 | if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) | 2082 | if (cpu_online(cpu) || !node_online(cpu_to_node(cpu))) |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 3cd13a30f732..e5ede6918050 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write, | |||
436 | void __user *buffer, size_t *lenp, | 436 | void __user *buffer, size_t *lenp, |
437 | loff_t *ppos) | 437 | loff_t *ppos) |
438 | { | 438 | { |
439 | int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | 439 | int ret; |
440 | 440 | int perf_cpu = sysctl_perf_cpu_time_max_percent; | |
441 | if (ret || !write) | ||
442 | return ret; | ||
443 | |||
444 | /* | 441 | /* |
445 | * If throttling is disabled don't allow the write: | 442 | * If throttling is disabled don't allow the write: |
446 | */ | 443 | */ |
447 | if (sysctl_perf_cpu_time_max_percent == 100 || | 444 | if (write && (perf_cpu == 100 || perf_cpu == 0)) |
448 | sysctl_perf_cpu_time_max_percent == 0) | ||
449 | return -EINVAL; | 445 | return -EINVAL; |
450 | 446 | ||
447 | ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); | ||
448 | if (ret || !write) | ||
449 | return ret; | ||
450 | |||
451 | max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); | 451 | max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ); |
452 | perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; | 452 | perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate; |
453 | update_perf_cpu_limits(); | 453 | update_perf_cpu_limits(); |
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 4a9937076331..309ef5a64af5 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c | |||
@@ -734,6 +734,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags) | |||
734 | size = sizeof(struct ring_buffer); | 734 | size = sizeof(struct ring_buffer); |
735 | size += nr_pages * sizeof(void *); | 735 | size += nr_pages * sizeof(void *); |
736 | 736 | ||
737 | if (order_base_2(size) >= MAX_ORDER) | ||
738 | goto fail; | ||
739 | |||
737 | rb = kzalloc(size, GFP_KERNEL); | 740 | rb = kzalloc(size, GFP_KERNEL); |
738 | if (!rb) | 741 | if (!rb) |
739 | goto fail; | 742 | goto fail; |
diff --git a/kernel/exit.c b/kernel/exit.c index 3fb7be001964..2639a30a8aa5 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -558,12 +558,14 @@ static struct task_struct *find_alive_thread(struct task_struct *p) | |||
558 | return NULL; | 558 | return NULL; |
559 | } | 559 | } |
560 | 560 | ||
561 | static struct task_struct *find_child_reaper(struct task_struct *father) | 561 | static struct task_struct *find_child_reaper(struct task_struct *father, |
562 | struct list_head *dead) | ||
562 | __releases(&tasklist_lock) | 563 | __releases(&tasklist_lock) |
563 | __acquires(&tasklist_lock) | 564 | __acquires(&tasklist_lock) |
564 | { | 565 | { |
565 | struct pid_namespace *pid_ns = task_active_pid_ns(father); | 566 | struct pid_namespace *pid_ns = task_active_pid_ns(father); |
566 | struct task_struct *reaper = pid_ns->child_reaper; | 567 | struct task_struct *reaper = pid_ns->child_reaper; |
568 | struct task_struct *p, *n; | ||
567 | 569 | ||
568 | if (likely(reaper != father)) | 570 | if (likely(reaper != father)) |
569 | return reaper; | 571 | return reaper; |
@@ -579,6 +581,12 @@ static struct task_struct *find_child_reaper(struct task_struct *father) | |||
579 | panic("Attempted to kill init! exitcode=0x%08x\n", | 581 | panic("Attempted to kill init! exitcode=0x%08x\n", |
580 | father->signal->group_exit_code ?: father->exit_code); | 582 | father->signal->group_exit_code ?: father->exit_code); |
581 | } | 583 | } |
584 | |||
585 | list_for_each_entry_safe(p, n, dead, ptrace_entry) { | ||
586 | list_del_init(&p->ptrace_entry); | ||
587 | release_task(p); | ||
588 | } | ||
589 | |||
582 | zap_pid_ns_processes(pid_ns); | 590 | zap_pid_ns_processes(pid_ns); |
583 | write_lock_irq(&tasklist_lock); | 591 | write_lock_irq(&tasklist_lock); |
584 | 592 | ||
@@ -668,7 +676,7 @@ static void forget_original_parent(struct task_struct *father, | |||
668 | exit_ptrace(father, dead); | 676 | exit_ptrace(father, dead); |
669 | 677 | ||
670 | /* Can drop and reacquire tasklist_lock */ | 678 | /* Can drop and reacquire tasklist_lock */ |
671 | reaper = find_child_reaper(father); | 679 | reaper = find_child_reaper(father, dead); |
672 | if (list_empty(&father->children)) | 680 | if (list_empty(&father->children)) |
673 | return; | 681 | return; |
674 | 682 | ||
diff --git a/kernel/futex.c b/kernel/futex.c index fdd312da0992..a0514e01c3eb 100644 --- a/kernel/futex.c +++ b/kernel/futex.c | |||
@@ -2221,11 +2221,11 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q) | |||
2221 | * decrement the counter at queue_unlock() when some error has | 2221 | * decrement the counter at queue_unlock() when some error has |
2222 | * occurred and we don't end up adding the task to the list. | 2222 | * occurred and we don't end up adding the task to the list. |
2223 | */ | 2223 | */ |
2224 | hb_waiters_inc(hb); | 2224 | hb_waiters_inc(hb); /* implies smp_mb(); (A) */ |
2225 | 2225 | ||
2226 | q->lock_ptr = &hb->lock; | 2226 | q->lock_ptr = &hb->lock; |
2227 | 2227 | ||
2228 | spin_lock(&hb->lock); /* implies smp_mb(); (A) */ | 2228 | spin_lock(&hb->lock); |
2229 | return hb; | 2229 | return hb; |
2230 | } | 2230 | } |
2231 | 2231 | ||
@@ -2861,35 +2861,39 @@ retry_private: | |||
2861 | * and BUG when futex_unlock_pi() interleaves with this. | 2861 | * and BUG when futex_unlock_pi() interleaves with this. |
2862 | * | 2862 | * |
2863 | * Therefore acquire wait_lock while holding hb->lock, but drop the | 2863 | * Therefore acquire wait_lock while holding hb->lock, but drop the |
2864 | * latter before calling rt_mutex_start_proxy_lock(). This still fully | 2864 | * latter before calling __rt_mutex_start_proxy_lock(). This |
2865 | * serializes against futex_unlock_pi() as that does the exact same | 2865 | * interleaves with futex_unlock_pi() -- which does a similar lock |
2866 | * lock handoff sequence. | 2866 | * handoff -- such that the latter can observe the futex_q::pi_state |
2867 | * before __rt_mutex_start_proxy_lock() is done. | ||
2867 | */ | 2868 | */ |
2868 | raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); | 2869 | raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); |
2869 | spin_unlock(q.lock_ptr); | 2870 | spin_unlock(q.lock_ptr); |
2871 | /* | ||
2872 | * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter | ||
2873 | * such that futex_unlock_pi() is guaranteed to observe the waiter when | ||
2874 | * it sees the futex_q::pi_state. | ||
2875 | */ | ||
2870 | ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); | 2876 | ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); |
2871 | raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); | 2877 | raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); |
2872 | 2878 | ||
2873 | if (ret) { | 2879 | if (ret) { |
2874 | if (ret == 1) | 2880 | if (ret == 1) |
2875 | ret = 0; | 2881 | ret = 0; |
2876 | 2882 | goto cleanup; | |
2877 | spin_lock(q.lock_ptr); | ||
2878 | goto no_block; | ||
2879 | } | 2883 | } |
2880 | 2884 | ||
2881 | |||
2882 | if (unlikely(to)) | 2885 | if (unlikely(to)) |
2883 | hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); | 2886 | hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); |
2884 | 2887 | ||
2885 | ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); | 2888 | ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); |
2886 | 2889 | ||
2890 | cleanup: | ||
2887 | spin_lock(q.lock_ptr); | 2891 | spin_lock(q.lock_ptr); |
2888 | /* | 2892 | /* |
2889 | * If we failed to acquire the lock (signal/timeout), we must | 2893 | * If we failed to acquire the lock (deadlock/signal/timeout), we must |
2890 | * first acquire the hb->lock before removing the lock from the | 2894 | * first acquire the hb->lock before removing the lock from the |
2891 | * rt_mutex waitqueue, such that we can keep the hb and rt_mutex | 2895 | * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait |
2892 | * wait lists consistent. | 2896 | * lists consistent. |
2893 | * | 2897 | * |
2894 | * In particular; it is important that futex_unlock_pi() can not | 2898 | * In particular; it is important that futex_unlock_pi() can not |
2895 | * observe this inconsistency. | 2899 | * observe this inconsistency. |
@@ -3013,6 +3017,10 @@ retry: | |||
3013 | * there is no point where we hold neither; and therefore | 3017 | * there is no point where we hold neither; and therefore |
3014 | * wake_futex_pi() must observe a state consistent with what we | 3018 | * wake_futex_pi() must observe a state consistent with what we |
3015 | * observed. | 3019 | * observed. |
3020 | * | ||
3021 | * In particular; this forces __rt_mutex_start_proxy_lock() to | ||
3022 | * complete such that we're guaranteed to observe the | ||
3023 | * rt_waiter. Also see the WARN in wake_futex_pi(). | ||
3016 | */ | 3024 | */ |
3017 | raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); | 3025 | raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); |
3018 | spin_unlock(&hb->lock); | 3026 | spin_unlock(&hb->lock); |
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 581edcc63c26..978d63a8261c 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c | |||
@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock, | |||
1726 | rt_mutex_set_owner(lock, NULL); | 1726 | rt_mutex_set_owner(lock, NULL); |
1727 | } | 1727 | } |
1728 | 1728 | ||
1729 | /** | ||
1730 | * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task | ||
1731 | * @lock: the rt_mutex to take | ||
1732 | * @waiter: the pre-initialized rt_mutex_waiter | ||
1733 | * @task: the task to prepare | ||
1734 | * | ||
1735 | * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock | ||
1736 | * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. | ||
1737 | * | ||
1738 | * NOTE: does _NOT_ remove the @waiter on failure; must either call | ||
1739 | * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this. | ||
1740 | * | ||
1741 | * Returns: | ||
1742 | * 0 - task blocked on lock | ||
1743 | * 1 - acquired the lock for task, caller should wake it up | ||
1744 | * <0 - error | ||
1745 | * | ||
1746 | * Special API call for PI-futex support. | ||
1747 | */ | ||
1729 | int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, | 1748 | int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, |
1730 | struct rt_mutex_waiter *waiter, | 1749 | struct rt_mutex_waiter *waiter, |
1731 | struct task_struct *task) | 1750 | struct task_struct *task) |
1732 | { | 1751 | { |
1733 | int ret; | 1752 | int ret; |
1734 | 1753 | ||
1754 | lockdep_assert_held(&lock->wait_lock); | ||
1755 | |||
1735 | if (try_to_take_rt_mutex(lock, task, NULL)) | 1756 | if (try_to_take_rt_mutex(lock, task, NULL)) |
1736 | return 1; | 1757 | return 1; |
1737 | 1758 | ||
@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, | |||
1749 | ret = 0; | 1770 | ret = 0; |
1750 | } | 1771 | } |
1751 | 1772 | ||
1752 | if (unlikely(ret)) | ||
1753 | remove_waiter(lock, waiter); | ||
1754 | |||
1755 | debug_rt_mutex_print_deadlock(waiter); | 1773 | debug_rt_mutex_print_deadlock(waiter); |
1756 | 1774 | ||
1757 | return ret; | 1775 | return ret; |
@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, | |||
1763 | * @waiter: the pre-initialized rt_mutex_waiter | 1781 | * @waiter: the pre-initialized rt_mutex_waiter |
1764 | * @task: the task to prepare | 1782 | * @task: the task to prepare |
1765 | * | 1783 | * |
1784 | * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock | ||
1785 | * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that. | ||
1786 | * | ||
1787 | * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter | ||
1788 | * on failure. | ||
1789 | * | ||
1766 | * Returns: | 1790 | * Returns: |
1767 | * 0 - task blocked on lock | 1791 | * 0 - task blocked on lock |
1768 | * 1 - acquired the lock for task, caller should wake it up | 1792 | * 1 - acquired the lock for task, caller should wake it up |
1769 | * <0 - error | 1793 | * <0 - error |
1770 | * | 1794 | * |
1771 | * Special API call for FUTEX_REQUEUE_PI support. | 1795 | * Special API call for PI-futex support. |
1772 | */ | 1796 | */ |
1773 | int rt_mutex_start_proxy_lock(struct rt_mutex *lock, | 1797 | int rt_mutex_start_proxy_lock(struct rt_mutex *lock, |
1774 | struct rt_mutex_waiter *waiter, | 1798 | struct rt_mutex_waiter *waiter, |
@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, | |||
1778 | 1802 | ||
1779 | raw_spin_lock_irq(&lock->wait_lock); | 1803 | raw_spin_lock_irq(&lock->wait_lock); |
1780 | ret = __rt_mutex_start_proxy_lock(lock, waiter, task); | 1804 | ret = __rt_mutex_start_proxy_lock(lock, waiter, task); |
1805 | if (unlikely(ret)) | ||
1806 | remove_waiter(lock, waiter); | ||
1781 | raw_spin_unlock_irq(&lock->wait_lock); | 1807 | raw_spin_unlock_irq(&lock->wait_lock); |
1782 | 1808 | ||
1783 | return ret; | 1809 | return ret; |
@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, | |||
1845 | * @lock: the rt_mutex we were woken on | 1871 | * @lock: the rt_mutex we were woken on |
1846 | * @waiter: the pre-initialized rt_mutex_waiter | 1872 | * @waiter: the pre-initialized rt_mutex_waiter |
1847 | * | 1873 | * |
1848 | * Attempt to clean up after a failed rt_mutex_wait_proxy_lock(). | 1874 | * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or |
1875 | * rt_mutex_wait_proxy_lock(). | ||
1849 | * | 1876 | * |
1850 | * Unless we acquired the lock; we're still enqueued on the wait-list and can | 1877 | * Unless we acquired the lock; we're still enqueued on the wait-list and can |
1851 | * in fact still be granted ownership until we're removed. Therefore we can | 1878 | * in fact still be granted ownership until we're removed. Therefore we can |
diff --git a/kernel/relay.c b/kernel/relay.c index 04f248644e06..9e0f52375487 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan, | |||
428 | dentry = chan->cb->create_buf_file(tmpname, chan->parent, | 428 | dentry = chan->cb->create_buf_file(tmpname, chan->parent, |
429 | S_IRUSR, buf, | 429 | S_IRUSR, buf, |
430 | &chan->is_global); | 430 | &chan->is_global); |
431 | if (IS_ERR(dentry)) | ||
432 | dentry = NULL; | ||
431 | 433 | ||
432 | kfree(tmpname); | 434 | kfree(tmpname); |
433 | 435 | ||
@@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu) | |||
461 | dentry = chan->cb->create_buf_file(NULL, NULL, | 463 | dentry = chan->cb->create_buf_file(NULL, NULL, |
462 | S_IRUSR, buf, | 464 | S_IRUSR, buf, |
463 | &chan->is_global); | 465 | &chan->is_global); |
464 | if (WARN_ON(dentry)) | 466 | if (IS_ERR_OR_NULL(dentry)) |
465 | goto free_buf; | 467 | goto free_buf; |
466 | } | 468 | } |
467 | 469 | ||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 50aa2aba69bd..310d0637fe4b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -5980,6 +5980,7 @@ static inline int find_idlest_cpu(struct sched_domain *sd, struct task_struct *p | |||
5980 | 5980 | ||
5981 | #ifdef CONFIG_SCHED_SMT | 5981 | #ifdef CONFIG_SCHED_SMT |
5982 | DEFINE_STATIC_KEY_FALSE(sched_smt_present); | 5982 | DEFINE_STATIC_KEY_FALSE(sched_smt_present); |
5983 | EXPORT_SYMBOL_GPL(sched_smt_present); | ||
5983 | 5984 | ||
5984 | static inline void set_idle_cores(int cpu, int val) | 5985 | static inline void set_idle_cores(int cpu, int val) |
5985 | { | 5986 | { |
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c index fe24de3fbc93..c3484785b179 100644 --- a/kernel/sched/psi.c +++ b/kernel/sched/psi.c | |||
@@ -124,6 +124,7 @@ | |||
124 | * sampling of the aggregate task states would be. | 124 | * sampling of the aggregate task states would be. |
125 | */ | 125 | */ |
126 | 126 | ||
127 | #include "../workqueue_internal.h" | ||
127 | #include <linux/sched/loadavg.h> | 128 | #include <linux/sched/loadavg.h> |
128 | #include <linux/seq_file.h> | 129 | #include <linux/seq_file.h> |
129 | #include <linux/proc_fs.h> | 130 | #include <linux/proc_fs.h> |
@@ -480,9 +481,6 @@ static void psi_group_change(struct psi_group *group, int cpu, | |||
480 | groupc->tasks[t]++; | 481 | groupc->tasks[t]++; |
481 | 482 | ||
482 | write_seqcount_end(&groupc->seq); | 483 | write_seqcount_end(&groupc->seq); |
483 | |||
484 | if (!delayed_work_pending(&group->clock_work)) | ||
485 | schedule_delayed_work(&group->clock_work, PSI_FREQ); | ||
486 | } | 484 | } |
487 | 485 | ||
488 | static struct psi_group *iterate_groups(struct task_struct *task, void **iter) | 486 | static struct psi_group *iterate_groups(struct task_struct *task, void **iter) |
@@ -513,6 +511,7 @@ void psi_task_change(struct task_struct *task, int clear, int set) | |||
513 | { | 511 | { |
514 | int cpu = task_cpu(task); | 512 | int cpu = task_cpu(task); |
515 | struct psi_group *group; | 513 | struct psi_group *group; |
514 | bool wake_clock = true; | ||
516 | void *iter = NULL; | 515 | void *iter = NULL; |
517 | 516 | ||
518 | if (!task->pid) | 517 | if (!task->pid) |
@@ -530,8 +529,22 @@ void psi_task_change(struct task_struct *task, int clear, int set) | |||
530 | task->psi_flags &= ~clear; | 529 | task->psi_flags &= ~clear; |
531 | task->psi_flags |= set; | 530 | task->psi_flags |= set; |
532 | 531 | ||
533 | while ((group = iterate_groups(task, &iter))) | 532 | /* |
533 | * Periodic aggregation shuts off if there is a period of no | ||
534 | * task changes, so we wake it back up if necessary. However, | ||
535 | * don't do this if the task change is the aggregation worker | ||
536 | * itself going to sleep, or we'll ping-pong forever. | ||
537 | */ | ||
538 | if (unlikely((clear & TSK_RUNNING) && | ||
539 | (task->flags & PF_WQ_WORKER) && | ||
540 | wq_worker_last_func(task) == psi_update_work)) | ||
541 | wake_clock = false; | ||
542 | |||
543 | while ((group = iterate_groups(task, &iter))) { | ||
534 | psi_group_change(group, cpu, clear, set); | 544 | psi_group_change(group, cpu, clear, set); |
545 | if (wake_clock && !delayed_work_pending(&group->clock_work)) | ||
546 | schedule_delayed_work(&group->clock_work, PSI_FREQ); | ||
547 | } | ||
535 | } | 548 | } |
536 | 549 | ||
537 | void psi_memstall_tick(struct task_struct *task, int cpu) | 550 | void psi_memstall_tick(struct task_struct *task, int cpu) |
diff --git a/kernel/signal.c b/kernel/signal.c index e1d7ad8e6ab1..57b7771e20d7 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -688,6 +688,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in | |||
688 | } | 688 | } |
689 | EXPORT_SYMBOL_GPL(dequeue_signal); | 689 | EXPORT_SYMBOL_GPL(dequeue_signal); |
690 | 690 | ||
691 | static int dequeue_synchronous_signal(kernel_siginfo_t *info) | ||
692 | { | ||
693 | struct task_struct *tsk = current; | ||
694 | struct sigpending *pending = &tsk->pending; | ||
695 | struct sigqueue *q, *sync = NULL; | ||
696 | |||
697 | /* | ||
698 | * Might a synchronous signal be in the queue? | ||
699 | */ | ||
700 | if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) | ||
701 | return 0; | ||
702 | |||
703 | /* | ||
704 | * Return the first synchronous signal in the queue. | ||
705 | */ | ||
706 | list_for_each_entry(q, &pending->list, list) { | ||
707 | /* Synchronous signals have a positive si_code */ | ||
708 | if ((q->info.si_code > SI_USER) && | ||
709 | (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { | ||
710 | sync = q; | ||
711 | goto next; | ||
712 | } | ||
713 | } | ||
714 | return 0; | ||
715 | next: | ||
716 | /* | ||
717 | * Check if there is another siginfo for the same signal. | ||
718 | */ | ||
719 | list_for_each_entry_continue(q, &pending->list, list) { | ||
720 | if (q->info.si_signo == sync->info.si_signo) | ||
721 | goto still_pending; | ||
722 | } | ||
723 | |||
724 | sigdelset(&pending->signal, sync->info.si_signo); | ||
725 | recalc_sigpending(); | ||
726 | still_pending: | ||
727 | list_del_init(&sync->list); | ||
728 | copy_siginfo(info, &sync->info); | ||
729 | __sigqueue_free(sync); | ||
730 | return info->si_signo; | ||
731 | } | ||
732 | |||
691 | /* | 733 | /* |
692 | * Tell a process that it has a new active signal.. | 734 | * Tell a process that it has a new active signal.. |
693 | * | 735 | * |
@@ -1057,10 +1099,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc | |||
1057 | 1099 | ||
1058 | result = TRACE_SIGNAL_DELIVERED; | 1100 | result = TRACE_SIGNAL_DELIVERED; |
1059 | /* | 1101 | /* |
1060 | * Skip useless siginfo allocation for SIGKILL SIGSTOP, | 1102 | * Skip useless siginfo allocation for SIGKILL and kernel threads. |
1061 | * and kernel threads. | ||
1062 | */ | 1103 | */ |
1063 | if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD)) | 1104 | if ((sig == SIGKILL) || (t->flags & PF_KTHREAD)) |
1064 | goto out_set; | 1105 | goto out_set; |
1065 | 1106 | ||
1066 | /* | 1107 | /* |
@@ -2394,6 +2435,14 @@ relock: | |||
2394 | goto relock; | 2435 | goto relock; |
2395 | } | 2436 | } |
2396 | 2437 | ||
2438 | /* Has this task already been marked for death? */ | ||
2439 | if (signal_group_exit(signal)) { | ||
2440 | ksig->info.si_signo = signr = SIGKILL; | ||
2441 | sigdelset(¤t->pending.signal, SIGKILL); | ||
2442 | recalc_sigpending(); | ||
2443 | goto fatal; | ||
2444 | } | ||
2445 | |||
2397 | for (;;) { | 2446 | for (;;) { |
2398 | struct k_sigaction *ka; | 2447 | struct k_sigaction *ka; |
2399 | 2448 | ||
@@ -2407,7 +2456,15 @@ relock: | |||
2407 | goto relock; | 2456 | goto relock; |
2408 | } | 2457 | } |
2409 | 2458 | ||
2410 | signr = dequeue_signal(current, ¤t->blocked, &ksig->info); | 2459 | /* |
2460 | * Signals generated by the execution of an instruction | ||
2461 | * need to be delivered before any other pending signals | ||
2462 | * so that the instruction pointer in the signal stack | ||
2463 | * frame points to the faulting instruction. | ||
2464 | */ | ||
2465 | signr = dequeue_synchronous_signal(&ksig->info); | ||
2466 | if (!signr) | ||
2467 | signr = dequeue_signal(current, ¤t->blocked, &ksig->info); | ||
2411 | 2468 | ||
2412 | if (!signr) | 2469 | if (!signr) |
2413 | break; /* will return 0 */ | 2470 | break; /* will return 0 */ |
@@ -2489,6 +2546,7 @@ relock: | |||
2489 | continue; | 2546 | continue; |
2490 | } | 2547 | } |
2491 | 2548 | ||
2549 | fatal: | ||
2492 | spin_unlock_irq(&sighand->siglock); | 2550 | spin_unlock_irq(&sighand->siglock); |
2493 | 2551 | ||
2494 | /* | 2552 | /* |
diff --git a/kernel/smp.c b/kernel/smp.c index 163c451af42e..f4cf1b0bb3b8 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -584,8 +584,6 @@ void __init smp_init(void) | |||
584 | num_nodes, (num_nodes > 1 ? "s" : ""), | 584 | num_nodes, (num_nodes > 1 ? "s" : ""), |
585 | num_cpus, (num_cpus > 1 ? "s" : "")); | 585 | num_cpus, (num_cpus > 1 ? "s" : "")); |
586 | 586 | ||
587 | /* Final decision about SMT support */ | ||
588 | cpu_smt_check_topology(); | ||
589 | /* Any cleanup work */ | 587 | /* Any cleanup work */ |
590 | smp_cpus_done(setup_max_cpus); | 588 | smp_cpus_done(setup_max_cpus); |
591 | } | 589 | } |
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 8b068adb9da1..f1a86a0d881d 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
@@ -1204,22 +1204,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog * | |||
1204 | 1204 | ||
1205 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | 1205 | int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog) |
1206 | { | 1206 | { |
1207 | int err; | 1207 | return __bpf_probe_register(btp, prog); |
1208 | |||
1209 | mutex_lock(&bpf_event_mutex); | ||
1210 | err = __bpf_probe_register(btp, prog); | ||
1211 | mutex_unlock(&bpf_event_mutex); | ||
1212 | return err; | ||
1213 | } | 1208 | } |
1214 | 1209 | ||
1215 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) | 1210 | int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog) |
1216 | { | 1211 | { |
1217 | int err; | 1212 | return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); |
1218 | |||
1219 | mutex_lock(&bpf_event_mutex); | ||
1220 | err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog); | ||
1221 | mutex_unlock(&bpf_event_mutex); | ||
1222 | return err; | ||
1223 | } | 1213 | } |
1224 | 1214 | ||
1225 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, | 1215 | int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id, |
diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index 5c56afc17cf8..4737bb8c07a3 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h | |||
@@ -180,10 +180,12 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs, | |||
180 | if (unlikely(arg->dynamic)) | 180 | if (unlikely(arg->dynamic)) |
181 | *dl = make_data_loc(maxlen, dyndata - base); | 181 | *dl = make_data_loc(maxlen, dyndata - base); |
182 | ret = process_fetch_insn(arg->code, regs, dl, base); | 182 | ret = process_fetch_insn(arg->code, regs, dl, base); |
183 | if (unlikely(ret < 0 && arg->dynamic)) | 183 | if (unlikely(ret < 0 && arg->dynamic)) { |
184 | *dl = make_data_loc(0, dyndata - base); | 184 | *dl = make_data_loc(0, dyndata - base); |
185 | else | 185 | } else { |
186 | dyndata += ret; | 186 | dyndata += ret; |
187 | maxlen -= ret; | ||
188 | } | ||
187 | } | 189 | } |
188 | } | 190 | } |
189 | 191 | ||
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index e335576b9411..9bde07c06362 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright (C) IBM Corporation, 2010-2012 | 5 | * Copyright (C) IBM Corporation, 2010-2012 |
6 | * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> | 6 | * Author: Srikar Dronamraju <srikar@linux.vnet.ibm.com> |
7 | */ | 7 | */ |
8 | #define pr_fmt(fmt) "trace_kprobe: " fmt | 8 | #define pr_fmt(fmt) "trace_uprobe: " fmt |
9 | 9 | ||
10 | #include <linux/ctype.h> | 10 | #include <linux/ctype.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
@@ -160,6 +160,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base) | |||
160 | if (ret >= 0) { | 160 | if (ret >= 0) { |
161 | if (ret == maxlen) | 161 | if (ret == maxlen) |
162 | dst[ret - 1] = '\0'; | 162 | dst[ret - 1] = '\0'; |
163 | else | ||
164 | /* | ||
165 | * Include the terminating null byte. In this case it | ||
166 | * was copied by strncpy_from_user but not accounted | ||
167 | * for in ret. | ||
168 | */ | ||
169 | ret++; | ||
163 | *(u32 *)dest = make_data_loc(ret, (void *)dst - base); | 170 | *(u32 *)dest = make_data_loc(ret, (void *)dst - base); |
164 | } | 171 | } |
165 | 172 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 392be4b252f6..fc5d23d752a5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -910,6 +910,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task) | |||
910 | } | 910 | } |
911 | 911 | ||
912 | /** | 912 | /** |
913 | * wq_worker_last_func - retrieve worker's last work function | ||
914 | * | ||
915 | * Determine the last function a worker executed. This is called from | ||
916 | * the scheduler to get a worker's last known identity. | ||
917 | * | ||
918 | * CONTEXT: | ||
919 | * spin_lock_irq(rq->lock) | ||
920 | * | ||
921 | * Return: | ||
922 | * The last work function %current executed as a worker, NULL if it | ||
923 | * hasn't executed any work yet. | ||
924 | */ | ||
925 | work_func_t wq_worker_last_func(struct task_struct *task) | ||
926 | { | ||
927 | struct worker *worker = kthread_data(task); | ||
928 | |||
929 | return worker->last_func; | ||
930 | } | ||
931 | |||
932 | /** | ||
913 | * worker_set_flags - set worker flags and adjust nr_running accordingly | 933 | * worker_set_flags - set worker flags and adjust nr_running accordingly |
914 | * @worker: self | 934 | * @worker: self |
915 | * @flags: flags to set | 935 | * @flags: flags to set |
@@ -2184,6 +2204,9 @@ __acquires(&pool->lock) | |||
2184 | if (unlikely(cpu_intensive)) | 2204 | if (unlikely(cpu_intensive)) |
2185 | worker_clr_flags(worker, WORKER_CPU_INTENSIVE); | 2205 | worker_clr_flags(worker, WORKER_CPU_INTENSIVE); |
2186 | 2206 | ||
2207 | /* tag the worker for identification in schedule() */ | ||
2208 | worker->last_func = worker->current_func; | ||
2209 | |||
2187 | /* we're done with it, release */ | 2210 | /* we're done with it, release */ |
2188 | hash_del(&worker->hentry); | 2211 | hash_del(&worker->hentry); |
2189 | worker->current_work = NULL; | 2212 | worker->current_work = NULL; |
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h index 66fbb5a9e633..cb68b03ca89a 100644 --- a/kernel/workqueue_internal.h +++ b/kernel/workqueue_internal.h | |||
@@ -53,6 +53,9 @@ struct worker { | |||
53 | 53 | ||
54 | /* used only by rescuers to point to the target workqueue */ | 54 | /* used only by rescuers to point to the target workqueue */ |
55 | struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ | 55 | struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ |
56 | |||
57 | /* used by the scheduler to determine a worker's last known identity */ | ||
58 | work_func_t last_func; | ||
56 | }; | 59 | }; |
57 | 60 | ||
58 | /** | 61 | /** |
@@ -67,9 +70,10 @@ static inline struct worker *current_wq_worker(void) | |||
67 | 70 | ||
68 | /* | 71 | /* |
69 | * Scheduler hooks for concurrency managed workqueue. Only to be used from | 72 | * Scheduler hooks for concurrency managed workqueue. Only to be used from |
70 | * sched/core.c and workqueue.c. | 73 | * sched/ and workqueue.c. |
71 | */ | 74 | */ |
72 | void wq_worker_waking_up(struct task_struct *task, int cpu); | 75 | void wq_worker_waking_up(struct task_struct *task, int cpu); |
73 | struct task_struct *wq_worker_sleeping(struct task_struct *task); | 76 | struct task_struct *wq_worker_sleeping(struct task_struct *task); |
77 | work_func_t wq_worker_last_func(struct task_struct *task); | ||
74 | 78 | ||
75 | #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ | 79 | #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ |
diff --git a/lib/test_kmod.c b/lib/test_kmod.c index d82d022111e0..9cf77628fc91 100644 --- a/lib/test_kmod.c +++ b/lib/test_kmod.c | |||
@@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config) | |||
632 | config->test_driver = NULL; | 632 | config->test_driver = NULL; |
633 | 633 | ||
634 | kfree_const(config->test_fs); | 634 | kfree_const(config->test_fs); |
635 | config->test_driver = NULL; | 635 | config->test_fs = NULL; |
636 | } | 636 | } |
637 | 637 | ||
638 | static void kmod_config_free(struct kmod_test_device *test_dev) | 638 | static void kmod_config_free(struct kmod_test_device *test_dev) |
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 6a8ac7626797..e52f8cafe227 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c | |||
@@ -541,38 +541,45 @@ static unsigned int __init print_ht(struct rhltable *rhlt) | |||
541 | static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, | 541 | static int __init test_insert_dup(struct test_obj_rhl *rhl_test_objects, |
542 | int cnt, bool slow) | 542 | int cnt, bool slow) |
543 | { | 543 | { |
544 | struct rhltable rhlt; | 544 | struct rhltable *rhlt; |
545 | unsigned int i, ret; | 545 | unsigned int i, ret; |
546 | const char *key; | 546 | const char *key; |
547 | int err = 0; | 547 | int err = 0; |
548 | 548 | ||
549 | err = rhltable_init(&rhlt, &test_rht_params_dup); | 549 | rhlt = kmalloc(sizeof(*rhlt), GFP_KERNEL); |
550 | if (WARN_ON(err)) | 550 | if (WARN_ON(!rhlt)) |
551 | return -EINVAL; | ||
552 | |||
553 | err = rhltable_init(rhlt, &test_rht_params_dup); | ||
554 | if (WARN_ON(err)) { | ||
555 | kfree(rhlt); | ||
551 | return err; | 556 | return err; |
557 | } | ||
552 | 558 | ||
553 | for (i = 0; i < cnt; i++) { | 559 | for (i = 0; i < cnt; i++) { |
554 | rhl_test_objects[i].value.tid = i; | 560 | rhl_test_objects[i].value.tid = i; |
555 | key = rht_obj(&rhlt.ht, &rhl_test_objects[i].list_node.rhead); | 561 | key = rht_obj(&rhlt->ht, &rhl_test_objects[i].list_node.rhead); |
556 | key += test_rht_params_dup.key_offset; | 562 | key += test_rht_params_dup.key_offset; |
557 | 563 | ||
558 | if (slow) { | 564 | if (slow) { |
559 | err = PTR_ERR(rhashtable_insert_slow(&rhlt.ht, key, | 565 | err = PTR_ERR(rhashtable_insert_slow(&rhlt->ht, key, |
560 | &rhl_test_objects[i].list_node.rhead)); | 566 | &rhl_test_objects[i].list_node.rhead)); |
561 | if (err == -EAGAIN) | 567 | if (err == -EAGAIN) |
562 | err = 0; | 568 | err = 0; |
563 | } else | 569 | } else |
564 | err = rhltable_insert(&rhlt, | 570 | err = rhltable_insert(rhlt, |
565 | &rhl_test_objects[i].list_node, | 571 | &rhl_test_objects[i].list_node, |
566 | test_rht_params_dup); | 572 | test_rht_params_dup); |
567 | if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) | 573 | if (WARN(err, "error %d on element %d/%d (%s)\n", err, i, cnt, slow? "slow" : "fast")) |
568 | goto skip_print; | 574 | goto skip_print; |
569 | } | 575 | } |
570 | 576 | ||
571 | ret = print_ht(&rhlt); | 577 | ret = print_ht(rhlt); |
572 | WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); | 578 | WARN(ret != cnt, "missing rhltable elements (%d != %d, %s)\n", ret, cnt, slow? "slow" : "fast"); |
573 | 579 | ||
574 | skip_print: | 580 | skip_print: |
575 | rhltable_destroy(&rhlt); | 581 | rhltable_destroy(rhlt); |
582 | kfree(rhlt); | ||
576 | 583 | ||
577 | return 0; | 584 | return 0; |
578 | } | 585 | } |
@@ -1674,7 +1674,8 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | |||
1674 | if (!pmd_present(pmd)) | 1674 | if (!pmd_present(pmd)) |
1675 | return 0; | 1675 | return 0; |
1676 | 1676 | ||
1677 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) { | 1677 | if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd) || |
1678 | pmd_devmap(pmd))) { | ||
1678 | /* | 1679 | /* |
1679 | * NUMA hinting faults need to be handled in the GUP | 1680 | * NUMA hinting faults need to be handled in the GUP |
1680 | * slowpath for accounting purposes and so that they | 1681 | * slowpath for accounting purposes and so that they |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index df2e7dd5ff17..afef61656c1e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -4268,7 +4268,8 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
4268 | break; | 4268 | break; |
4269 | } | 4269 | } |
4270 | if (ret & VM_FAULT_RETRY) { | 4270 | if (ret & VM_FAULT_RETRY) { |
4271 | if (nonblocking) | 4271 | if (nonblocking && |
4272 | !(fault_flags & FAULT_FLAG_RETRY_NOWAIT)) | ||
4272 | *nonblocking = 0; | 4273 | *nonblocking = 0; |
4273 | *nr_pages = 0; | 4274 | *nr_pages = 0; |
4274 | /* | 4275 | /* |
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 0a14fcff70ed..e2bb06c1b45e 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile | |||
@@ -5,6 +5,7 @@ UBSAN_SANITIZE_generic.o := n | |||
5 | UBSAN_SANITIZE_tags.o := n | 5 | UBSAN_SANITIZE_tags.o := n |
6 | KCOV_INSTRUMENT := n | 6 | KCOV_INSTRUMENT := n |
7 | 7 | ||
8 | CFLAGS_REMOVE_common.o = -pg | ||
8 | CFLAGS_REMOVE_generic.o = -pg | 9 | CFLAGS_REMOVE_generic.o = -pg |
9 | # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 | 10 | # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 |
10 | # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 | 11 | # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533 |
diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 7c72f2a95785..831be5ff5f4d 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c | |||
@@ -372,7 +372,8 @@ static void kill_procs(struct list_head *to_kill, int forcekill, bool fail, | |||
372 | if (fail || tk->addr_valid == 0) { | 372 | if (fail || tk->addr_valid == 0) { |
373 | pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", | 373 | pr_err("Memory failure: %#lx: forcibly killing %s:%d because of failure to unmap corrupted page\n", |
374 | pfn, tk->tsk->comm, tk->tsk->pid); | 374 | pfn, tk->tsk->comm, tk->tsk->pid); |
375 | force_sig(SIGKILL, tk->tsk); | 375 | do_send_sig_info(SIGKILL, SEND_SIG_PRIV, |
376 | tk->tsk, PIDTYPE_PID); | ||
376 | } | 377 | } |
377 | 378 | ||
378 | /* | 379 | /* |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index b9a667d36c55..124e794867c5 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c | |||
@@ -1233,7 +1233,8 @@ static bool is_pageblock_removable_nolock(struct page *page) | |||
1233 | bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) | 1233 | bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) |
1234 | { | 1234 | { |
1235 | struct page *page = pfn_to_page(start_pfn); | 1235 | struct page *page = pfn_to_page(start_pfn); |
1236 | struct page *end_page = page + nr_pages; | 1236 | unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page))); |
1237 | struct page *end_page = pfn_to_page(end_pfn); | ||
1237 | 1238 | ||
1238 | /* Check the starting page of each pageblock within the range */ | 1239 | /* Check the starting page of each pageblock within the range */ |
1239 | for (; page < end_page; page = next_active_pageblock(page)) { | 1240 | for (; page < end_page; page = next_active_pageblock(page)) { |
@@ -1273,6 +1274,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, | |||
1273 | i++; | 1274 | i++; |
1274 | if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) | 1275 | if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn) |
1275 | continue; | 1276 | continue; |
1277 | /* Check if we got outside of the zone */ | ||
1278 | if (zone && !zone_spans_pfn(zone, pfn + i)) | ||
1279 | return 0; | ||
1276 | page = pfn_to_page(pfn + i); | 1280 | page = pfn_to_page(pfn + i); |
1277 | if (zone && page_zone(page) != zone) | 1281 | if (zone && page_zone(page) != zone) |
1278 | return 0; | 1282 | return 0; |
@@ -1301,23 +1305,27 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, | |||
1301 | static unsigned long scan_movable_pages(unsigned long start, unsigned long end) | 1305 | static unsigned long scan_movable_pages(unsigned long start, unsigned long end) |
1302 | { | 1306 | { |
1303 | unsigned long pfn; | 1307 | unsigned long pfn; |
1304 | struct page *page; | 1308 | |
1305 | for (pfn = start; pfn < end; pfn++) { | 1309 | for (pfn = start; pfn < end; pfn++) { |
1306 | if (pfn_valid(pfn)) { | 1310 | struct page *page, *head; |
1307 | page = pfn_to_page(pfn); | 1311 | unsigned long skip; |
1308 | if (PageLRU(page)) | 1312 | |
1309 | return pfn; | 1313 | if (!pfn_valid(pfn)) |
1310 | if (__PageMovable(page)) | 1314 | continue; |
1311 | return pfn; | 1315 | page = pfn_to_page(pfn); |
1312 | if (PageHuge(page)) { | 1316 | if (PageLRU(page)) |
1313 | if (hugepage_migration_supported(page_hstate(page)) && | 1317 | return pfn; |
1314 | page_huge_active(page)) | 1318 | if (__PageMovable(page)) |
1315 | return pfn; | 1319 | return pfn; |
1316 | else | 1320 | |
1317 | pfn = round_up(pfn + 1, | 1321 | if (!PageHuge(page)) |
1318 | 1 << compound_order(page)) - 1; | 1322 | continue; |
1319 | } | 1323 | head = compound_head(page); |
1320 | } | 1324 | if (hugepage_migration_supported(page_hstate(head)) && |
1325 | page_huge_active(head)) | ||
1326 | return pfn; | ||
1327 | skip = (1 << compound_order(head)) - (page - head); | ||
1328 | pfn += skip - 1; | ||
1321 | } | 1329 | } |
1322 | return 0; | 1330 | return 0; |
1323 | } | 1331 | } |
@@ -1344,7 +1352,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |||
1344 | { | 1352 | { |
1345 | unsigned long pfn; | 1353 | unsigned long pfn; |
1346 | struct page *page; | 1354 | struct page *page; |
1347 | int not_managed = 0; | ||
1348 | int ret = 0; | 1355 | int ret = 0; |
1349 | LIST_HEAD(source); | 1356 | LIST_HEAD(source); |
1350 | 1357 | ||
@@ -1392,7 +1399,6 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |||
1392 | else | 1399 | else |
1393 | ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); | 1400 | ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE); |
1394 | if (!ret) { /* Success */ | 1401 | if (!ret) { /* Success */ |
1395 | put_page(page); | ||
1396 | list_add_tail(&page->lru, &source); | 1402 | list_add_tail(&page->lru, &source); |
1397 | if (!__PageMovable(page)) | 1403 | if (!__PageMovable(page)) |
1398 | inc_node_page_state(page, NR_ISOLATED_ANON + | 1404 | inc_node_page_state(page, NR_ISOLATED_ANON + |
@@ -1401,22 +1407,10 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |||
1401 | } else { | 1407 | } else { |
1402 | pr_warn("failed to isolate pfn %lx\n", pfn); | 1408 | pr_warn("failed to isolate pfn %lx\n", pfn); |
1403 | dump_page(page, "isolation failed"); | 1409 | dump_page(page, "isolation failed"); |
1404 | put_page(page); | ||
1405 | /* Because we don't have big zone->lock. we should | ||
1406 | check this again here. */ | ||
1407 | if (page_count(page)) { | ||
1408 | not_managed++; | ||
1409 | ret = -EBUSY; | ||
1410 | break; | ||
1411 | } | ||
1412 | } | 1410 | } |
1411 | put_page(page); | ||
1413 | } | 1412 | } |
1414 | if (!list_empty(&source)) { | 1413 | if (!list_empty(&source)) { |
1415 | if (not_managed) { | ||
1416 | putback_movable_pages(&source); | ||
1417 | goto out; | ||
1418 | } | ||
1419 | |||
1420 | /* Allocate a new page from the nearest neighbor node */ | 1414 | /* Allocate a new page from the nearest neighbor node */ |
1421 | ret = migrate_pages(&source, new_node_page, NULL, 0, | 1415 | ret = migrate_pages(&source, new_node_page, NULL, 0, |
1422 | MIGRATE_SYNC, MR_MEMORY_HOTPLUG); | 1416 | MIGRATE_SYNC, MR_MEMORY_HOTPLUG); |
@@ -1429,7 +1423,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn) | |||
1429 | putback_movable_pages(&source); | 1423 | putback_movable_pages(&source); |
1430 | } | 1424 | } |
1431 | } | 1425 | } |
1432 | out: | 1426 | |
1433 | return ret; | 1427 | return ret; |
1434 | } | 1428 | } |
1435 | 1429 | ||
@@ -1576,7 +1570,6 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
1576 | we assume this for now. .*/ | 1570 | we assume this for now. .*/ |
1577 | if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, | 1571 | if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, |
1578 | &valid_end)) { | 1572 | &valid_end)) { |
1579 | mem_hotplug_done(); | ||
1580 | ret = -EINVAL; | 1573 | ret = -EINVAL; |
1581 | reason = "multizone range"; | 1574 | reason = "multizone range"; |
1582 | goto failed_removal; | 1575 | goto failed_removal; |
@@ -1591,7 +1584,6 @@ static int __ref __offline_pages(unsigned long start_pfn, | |||
1591 | MIGRATE_MOVABLE, | 1584 | MIGRATE_MOVABLE, |
1592 | SKIP_HWPOISON | REPORT_FAILURE); | 1585 | SKIP_HWPOISON | REPORT_FAILURE); |
1593 | if (ret) { | 1586 | if (ret) { |
1594 | mem_hotplug_done(); | ||
1595 | reason = "failure to isolate range"; | 1587 | reason = "failure to isolate range"; |
1596 | goto failed_removal; | 1588 | goto failed_removal; |
1597 | } | 1589 | } |
diff --git a/mm/migrate.c b/mm/migrate.c index a16b15090df3..d4fd680be3b0 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -709,7 +709,6 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, | |||
709 | /* Simple case, sync compaction */ | 709 | /* Simple case, sync compaction */ |
710 | if (mode != MIGRATE_ASYNC) { | 710 | if (mode != MIGRATE_ASYNC) { |
711 | do { | 711 | do { |
712 | get_bh(bh); | ||
713 | lock_buffer(bh); | 712 | lock_buffer(bh); |
714 | bh = bh->b_this_page; | 713 | bh = bh->b_this_page; |
715 | 714 | ||
@@ -720,18 +719,15 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, | |||
720 | 719 | ||
721 | /* async case, we cannot block on lock_buffer so use trylock_buffer */ | 720 | /* async case, we cannot block on lock_buffer so use trylock_buffer */ |
722 | do { | 721 | do { |
723 | get_bh(bh); | ||
724 | if (!trylock_buffer(bh)) { | 722 | if (!trylock_buffer(bh)) { |
725 | /* | 723 | /* |
726 | * We failed to lock the buffer and cannot stall in | 724 | * We failed to lock the buffer and cannot stall in |
727 | * async migration. Release the taken locks | 725 | * async migration. Release the taken locks |
728 | */ | 726 | */ |
729 | struct buffer_head *failed_bh = bh; | 727 | struct buffer_head *failed_bh = bh; |
730 | put_bh(failed_bh); | ||
731 | bh = head; | 728 | bh = head; |
732 | while (bh != failed_bh) { | 729 | while (bh != failed_bh) { |
733 | unlock_buffer(bh); | 730 | unlock_buffer(bh); |
734 | put_bh(bh); | ||
735 | bh = bh->b_this_page; | 731 | bh = bh->b_this_page; |
736 | } | 732 | } |
737 | return false; | 733 | return false; |
@@ -818,7 +814,6 @@ unlock_buffers: | |||
818 | bh = head; | 814 | bh = head; |
819 | do { | 815 | do { |
820 | unlock_buffer(bh); | 816 | unlock_buffer(bh); |
821 | put_bh(bh); | ||
822 | bh = bh->b_this_page; | 817 | bh = bh->b_this_page; |
823 | 818 | ||
824 | } while (bh != head); | 819 | } while (bh != head); |
@@ -1135,10 +1130,13 @@ out: | |||
1135 | * If migration is successful, decrease refcount of the newpage | 1130 | * If migration is successful, decrease refcount of the newpage |
1136 | * which will not free the page because new page owner increased | 1131 | * which will not free the page because new page owner increased |
1137 | * refcounter. As well, if it is LRU page, add the page to LRU | 1132 | * refcounter. As well, if it is LRU page, add the page to LRU |
1138 | * list in here. | 1133 | * list in here. Use the old state of the isolated source page to |
1134 | * determine if we migrated a LRU page. newpage was already unlocked | ||
1135 | * and possibly modified by its owner - don't rely on the page | ||
1136 | * state. | ||
1139 | */ | 1137 | */ |
1140 | if (rc == MIGRATEPAGE_SUCCESS) { | 1138 | if (rc == MIGRATEPAGE_SUCCESS) { |
1141 | if (unlikely(__PageMovable(newpage))) | 1139 | if (unlikely(!is_lru)) |
1142 | put_page(newpage); | 1140 | put_page(newpage); |
1143 | else | 1141 | else |
1144 | putback_lru_page(newpage); | 1142 | putback_lru_page(newpage); |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index f0e8cd9edb1a..26ea8636758f 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -647,8 +647,8 @@ static int oom_reaper(void *unused) | |||
647 | 647 | ||
648 | static void wake_oom_reaper(struct task_struct *tsk) | 648 | static void wake_oom_reaper(struct task_struct *tsk) |
649 | { | 649 | { |
650 | /* tsk is already queued? */ | 650 | /* mm is already queued? */ |
651 | if (tsk == oom_reaper_list || tsk->oom_reaper_list) | 651 | if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags)) |
652 | return; | 652 | return; |
653 | 653 | ||
654 | get_task_struct(tsk); | 654 | get_task_struct(tsk); |
@@ -975,6 +975,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message) | |||
975 | * still freeing memory. | 975 | * still freeing memory. |
976 | */ | 976 | */ |
977 | read_lock(&tasklist_lock); | 977 | read_lock(&tasklist_lock); |
978 | |||
979 | /* | ||
980 | * The task 'p' might have already exited before reaching here. The | ||
981 | * put_task_struct() will free task_struct 'p' while the loop still try | ||
982 | * to access the field of 'p', so, get an extra reference. | ||
983 | */ | ||
984 | get_task_struct(p); | ||
978 | for_each_thread(p, t) { | 985 | for_each_thread(p, t) { |
979 | list_for_each_entry(child, &t->children, sibling) { | 986 | list_for_each_entry(child, &t->children, sibling) { |
980 | unsigned int child_points; | 987 | unsigned int child_points; |
@@ -994,6 +1001,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message) | |||
994 | } | 1001 | } |
995 | } | 1002 | } |
996 | } | 1003 | } |
1004 | put_task_struct(p); | ||
997 | read_unlock(&tasklist_lock); | 1005 | read_unlock(&tasklist_lock); |
998 | 1006 | ||
999 | /* | 1007 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 35fdde041f5c..46285d28e43b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -4675,11 +4675,11 @@ refill: | |||
4675 | /* Even if we own the page, we do not use atomic_set(). | 4675 | /* Even if we own the page, we do not use atomic_set(). |
4676 | * This would break get_page_unless_zero() users. | 4676 | * This would break get_page_unless_zero() users. |
4677 | */ | 4677 | */ |
4678 | page_ref_add(page, size - 1); | 4678 | page_ref_add(page, size); |
4679 | 4679 | ||
4680 | /* reset page count bias and offset to start of new frag */ | 4680 | /* reset page count bias and offset to start of new frag */ |
4681 | nc->pfmemalloc = page_is_pfmemalloc(page); | 4681 | nc->pfmemalloc = page_is_pfmemalloc(page); |
4682 | nc->pagecnt_bias = size; | 4682 | nc->pagecnt_bias = size + 1; |
4683 | nc->offset = size; | 4683 | nc->offset = size; |
4684 | } | 4684 | } |
4685 | 4685 | ||
@@ -4695,10 +4695,10 @@ refill: | |||
4695 | size = nc->size; | 4695 | size = nc->size; |
4696 | #endif | 4696 | #endif |
4697 | /* OK, page count is 0, we can safely set it */ | 4697 | /* OK, page count is 0, we can safely set it */ |
4698 | set_page_count(page, size); | 4698 | set_page_count(page, size + 1); |
4699 | 4699 | ||
4700 | /* reset page count bias and offset to start of new frag */ | 4700 | /* reset page count bias and offset to start of new frag */ |
4701 | nc->pagecnt_bias = size; | 4701 | nc->pagecnt_bias = size + 1; |
4702 | offset = size - fragsz; | 4702 | offset = size - fragsz; |
4703 | } | 4703 | } |
4704 | 4704 | ||
diff --git a/mm/page_ext.c b/mm/page_ext.c index ae44f7adbe07..8c78b8d45117 100644 --- a/mm/page_ext.c +++ b/mm/page_ext.c | |||
@@ -398,10 +398,8 @@ void __init page_ext_init(void) | |||
398 | * We know some arch can have a nodes layout such as | 398 | * We know some arch can have a nodes layout such as |
399 | * -------------pfn--------------> | 399 | * -------------pfn--------------> |
400 | * N0 | N1 | N2 | N0 | N1 | N2|.... | 400 | * N0 | N1 | N2 | N0 | N1 | N2|.... |
401 | * | ||
402 | * Take into account DEFERRED_STRUCT_PAGE_INIT. | ||
403 | */ | 401 | */ |
404 | if (early_pfn_to_nid(pfn) != nid) | 402 | if (pfn_to_nid(pfn) != nid) |
405 | continue; | 403 | continue; |
406 | if (init_section_page_ext(pfn, nid)) | 404 | if (init_section_page_ext(pfn, nid)) |
407 | goto oom; | 405 | goto oom; |
diff --git a/mm/vmscan.c b/mm/vmscan.c index a714c4f800e9..e979705bbf32 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -491,16 +491,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, | |||
491 | delta = freeable / 2; | 491 | delta = freeable / 2; |
492 | } | 492 | } |
493 | 493 | ||
494 | /* | ||
495 | * Make sure we apply some minimal pressure on default priority | ||
496 | * even on small cgroups. Stale objects are not only consuming memory | ||
497 | * by themselves, but can also hold a reference to a dying cgroup, | ||
498 | * preventing it from being reclaimed. A dying cgroup with all | ||
499 | * corresponding structures like per-cpu stats and kmem caches | ||
500 | * can be really big, so it may lead to a significant waste of memory. | ||
501 | */ | ||
502 | delta = max_t(unsigned long long, delta, min(freeable, batch_size)); | ||
503 | |||
504 | total_scan += delta; | 494 | total_scan += delta; |
505 | if (total_scan < 0) { | 495 | if (total_scan < 0) { |
506 | pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", | 496 | pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", |
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c index e8090f099eb8..ef0dec20c7d8 100644 --- a/net/batman-adv/bat_v_elp.c +++ b/net/batman-adv/bat_v_elp.c | |||
@@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh) | |||
104 | 104 | ||
105 | ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); | 105 | ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); |
106 | 106 | ||
107 | /* free the TID stats immediately */ | ||
108 | cfg80211_sinfo_release_content(&sinfo); | ||
109 | |||
107 | dev_put(real_netdev); | 110 | dev_put(real_netdev); |
108 | if (ret == -ENOENT) { | 111 | if (ret == -ENOENT) { |
109 | /* Node is not associated anymore! It would be | 112 | /* Node is not associated anymore! It would be |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index 508f4416dfc9..415d494cbe22 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include "main.h" | 20 | #include "main.h" |
21 | 21 | ||
22 | #include <linux/atomic.h> | 22 | #include <linux/atomic.h> |
23 | #include <linux/bug.h> | ||
24 | #include <linux/byteorder/generic.h> | 23 | #include <linux/byteorder/generic.h> |
25 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
26 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
@@ -179,8 +178,10 @@ static bool batadv_is_on_batman_iface(const struct net_device *net_dev) | |||
179 | parent_dev = __dev_get_by_index((struct net *)parent_net, | 178 | parent_dev = __dev_get_by_index((struct net *)parent_net, |
180 | dev_get_iflink(net_dev)); | 179 | dev_get_iflink(net_dev)); |
181 | /* if we got a NULL parent_dev there is something broken.. */ | 180 | /* if we got a NULL parent_dev there is something broken.. */ |
182 | if (WARN(!parent_dev, "Cannot find parent device")) | 181 | if (!parent_dev) { |
182 | pr_err("Cannot find parent device\n"); | ||
183 | return false; | 183 | return false; |
184 | } | ||
184 | 185 | ||
185 | if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) | 186 | if (batadv_mutual_parents(net_dev, net, parent_dev, parent_net)) |
186 | return false; | 187 | return false; |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 5db5a0a4c959..ffc83bebfe40 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -221,10 +221,14 @@ static netdev_tx_t batadv_interface_tx(struct sk_buff *skb, | |||
221 | 221 | ||
222 | netif_trans_update(soft_iface); | 222 | netif_trans_update(soft_iface); |
223 | vid = batadv_get_vid(skb, 0); | 223 | vid = batadv_get_vid(skb, 0); |
224 | |||
225 | skb_reset_mac_header(skb); | ||
224 | ethhdr = eth_hdr(skb); | 226 | ethhdr = eth_hdr(skb); |
225 | 227 | ||
226 | switch (ntohs(ethhdr->h_proto)) { | 228 | switch (ntohs(ethhdr->h_proto)) { |
227 | case ETH_P_8021Q: | 229 | case ETH_P_8021Q: |
230 | if (!pskb_may_pull(skb, sizeof(*vhdr))) | ||
231 | goto dropped; | ||
228 | vhdr = vlan_eth_hdr(skb); | 232 | vhdr = vlan_eth_hdr(skb); |
229 | 233 | ||
230 | /* drop batman-in-batman packets to prevent loops */ | 234 | /* drop batman-in-batman packets to prevent loops */ |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 5e55cef0cec3..6693e209efe8 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -2293,9 +2293,12 @@ static int compat_do_replace(struct net *net, void __user *user, | |||
2293 | 2293 | ||
2294 | xt_compat_lock(NFPROTO_BRIDGE); | 2294 | xt_compat_lock(NFPROTO_BRIDGE); |
2295 | 2295 | ||
2296 | ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); | 2296 | if (tmp.nentries) { |
2297 | if (ret < 0) | 2297 | ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries); |
2298 | goto out_unlock; | 2298 | if (ret < 0) |
2299 | goto out_unlock; | ||
2300 | } | ||
2301 | |||
2299 | ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); | 2302 | ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); |
2300 | if (ret < 0) | 2303 | if (ret < 0) |
2301 | goto out_unlock; | 2304 | goto out_unlock; |
diff --git a/net/core/dev.c b/net/core/dev.c index 82f20022259d..8e276e0192a1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -8712,6 +8712,9 @@ int init_dummy_netdev(struct net_device *dev) | |||
8712 | set_bit(__LINK_STATE_PRESENT, &dev->state); | 8712 | set_bit(__LINK_STATE_PRESENT, &dev->state); |
8713 | set_bit(__LINK_STATE_START, &dev->state); | 8713 | set_bit(__LINK_STATE_START, &dev->state); |
8714 | 8714 | ||
8715 | /* napi_busy_loop stats accounting wants this */ | ||
8716 | dev_net_set(dev, &init_net); | ||
8717 | |||
8715 | /* Note : We dont allocate pcpu_refcnt for dummy devices, | 8718 | /* Note : We dont allocate pcpu_refcnt for dummy devices, |
8716 | * because users of this 'device' dont need to change | 8719 | * because users of this 'device' dont need to change |
8717 | * its refcount. | 8720 | * its refcount. |
diff --git a/net/core/filter.c b/net/core/filter.c index 7559d6835ecb..7a54dc11ac2d 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -4112,10 +4112,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, | |||
4112 | /* Only some socketops are supported */ | 4112 | /* Only some socketops are supported */ |
4113 | switch (optname) { | 4113 | switch (optname) { |
4114 | case SO_RCVBUF: | 4114 | case SO_RCVBUF: |
4115 | val = min_t(u32, val, sysctl_rmem_max); | ||
4115 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | 4116 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; |
4116 | sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); | 4117 | sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); |
4117 | break; | 4118 | break; |
4118 | case SO_SNDBUF: | 4119 | case SO_SNDBUF: |
4120 | val = min_t(u32, val, sysctl_wmem_max); | ||
4119 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | 4121 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; |
4120 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); | 4122 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); |
4121 | break; | 4123 | break; |
diff --git a/net/core/skmsg.c b/net/core/skmsg.c index d6d5c20d7044..8c826603bf36 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c | |||
@@ -545,8 +545,7 @@ static void sk_psock_destroy_deferred(struct work_struct *gc) | |||
545 | struct sk_psock *psock = container_of(gc, struct sk_psock, gc); | 545 | struct sk_psock *psock = container_of(gc, struct sk_psock, gc); |
546 | 546 | ||
547 | /* No sk_callback_lock since already detached. */ | 547 | /* No sk_callback_lock since already detached. */ |
548 | if (psock->parser.enabled) | 548 | strp_done(&psock->parser.strp); |
549 | strp_done(&psock->parser.strp); | ||
550 | 549 | ||
551 | cancel_work_sync(&psock->work); | 550 | cancel_work_sync(&psock->work); |
552 | 551 | ||
diff --git a/net/core/sock.c b/net/core/sock.c index 6aa2e7e0b4fb..bc3512f230a3 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -2380,7 +2380,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) | |||
2380 | } | 2380 | } |
2381 | 2381 | ||
2382 | if (sk_has_memory_pressure(sk)) { | 2382 | if (sk_has_memory_pressure(sk)) { |
2383 | int alloc; | 2383 | u64 alloc; |
2384 | 2384 | ||
2385 | if (!sk_under_memory_pressure(sk)) | 2385 | if (!sk_under_memory_pressure(sk)) |
2386 | return 1; | 2386 | return 1; |
diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 6eb837a47b5c..baaaeb2b2c42 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h | |||
@@ -202,7 +202,7 @@ static inline void ccid_hc_tx_packet_recv(struct ccid *ccid, struct sock *sk, | |||
202 | static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, | 202 | static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, |
203 | u8 pkt, u8 opt, u8 *val, u8 len) | 203 | u8 pkt, u8 opt, u8 *val, u8 len) |
204 | { | 204 | { |
205 | if (ccid->ccid_ops->ccid_hc_tx_parse_options == NULL) | 205 | if (!ccid || !ccid->ccid_ops->ccid_hc_tx_parse_options) |
206 | return 0; | 206 | return 0; |
207 | return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); | 207 | return ccid->ccid_ops->ccid_hc_tx_parse_options(sk, pkt, opt, val, len); |
208 | } | 208 | } |
@@ -214,7 +214,7 @@ static inline int ccid_hc_tx_parse_options(struct ccid *ccid, struct sock *sk, | |||
214 | static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, | 214 | static inline int ccid_hc_rx_parse_options(struct ccid *ccid, struct sock *sk, |
215 | u8 pkt, u8 opt, u8 *val, u8 len) | 215 | u8 pkt, u8 opt, u8 *val, u8 len) |
216 | { | 216 | { |
217 | if (ccid->ccid_ops->ccid_hc_rx_parse_options == NULL) | 217 | if (!ccid || !ccid->ccid_ops->ccid_hc_rx_parse_options) |
218 | return 0; | 218 | return 0; |
219 | return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); | 219 | return ccid->ccid_ops->ccid_hc_rx_parse_options(sk, pkt, opt, val, len); |
220 | } | 220 | } |
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index d0b3e69c6b39..0962f9201baa 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c | |||
@@ -56,7 +56,7 @@ | |||
56 | #include <net/dn_neigh.h> | 56 | #include <net/dn_neigh.h> |
57 | #include <net/dn_fib.h> | 57 | #include <net/dn_fib.h> |
58 | 58 | ||
59 | #define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn)) | 59 | #define DN_IFREQ_SIZE (offsetof(struct ifreq, ifr_ifru) + sizeof(struct sockaddr_dn)) |
60 | 60 | ||
61 | static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; | 61 | static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00}; |
62 | static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00}; | 62 | static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00}; |
diff --git a/net/dsa/master.c b/net/dsa/master.c index 71bb15f491c8..54f5551fb799 100644 --- a/net/dsa/master.c +++ b/net/dsa/master.c | |||
@@ -205,6 +205,8 @@ static void dsa_master_reset_mtu(struct net_device *dev) | |||
205 | rtnl_unlock(); | 205 | rtnl_unlock(); |
206 | } | 206 | } |
207 | 207 | ||
208 | static struct lock_class_key dsa_master_addr_list_lock_key; | ||
209 | |||
208 | int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) | 210 | int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) |
209 | { | 211 | { |
210 | int ret; | 212 | int ret; |
@@ -218,6 +220,8 @@ int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp) | |||
218 | wmb(); | 220 | wmb(); |
219 | 221 | ||
220 | dev->dsa_ptr = cpu_dp; | 222 | dev->dsa_ptr = cpu_dp; |
223 | lockdep_set_class(&dev->addr_list_lock, | ||
224 | &dsa_master_addr_list_lock_key); | ||
221 | 225 | ||
222 | ret = dsa_master_ethtool_setup(dev); | 226 | ret = dsa_master_ethtool_setup(dev); |
223 | if (ret) | 227 | if (ret) |
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index a3fcc1d01615..a1c9fe155057 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
@@ -140,11 +140,14 @@ static int dsa_slave_close(struct net_device *dev) | |||
140 | static void dsa_slave_change_rx_flags(struct net_device *dev, int change) | 140 | static void dsa_slave_change_rx_flags(struct net_device *dev, int change) |
141 | { | 141 | { |
142 | struct net_device *master = dsa_slave_to_master(dev); | 142 | struct net_device *master = dsa_slave_to_master(dev); |
143 | 143 | if (dev->flags & IFF_UP) { | |
144 | if (change & IFF_ALLMULTI) | 144 | if (change & IFF_ALLMULTI) |
145 | dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1); | 145 | dev_set_allmulti(master, |
146 | if (change & IFF_PROMISC) | 146 | dev->flags & IFF_ALLMULTI ? 1 : -1); |
147 | dev_set_promiscuity(master, dev->flags & IFF_PROMISC ? 1 : -1); | 147 | if (change & IFF_PROMISC) |
148 | dev_set_promiscuity(master, | ||
149 | dev->flags & IFF_PROMISC ? 1 : -1); | ||
150 | } | ||
148 | } | 151 | } |
149 | 152 | ||
150 | static void dsa_slave_set_rx_mode(struct net_device *dev) | 153 | static void dsa_slave_set_rx_mode(struct net_device *dev) |
@@ -639,7 +642,7 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e) | |||
639 | int ret; | 642 | int ret; |
640 | 643 | ||
641 | /* Port's PHY and MAC both need to be EEE capable */ | 644 | /* Port's PHY and MAC both need to be EEE capable */ |
642 | if (!dev->phydev && !dp->pl) | 645 | if (!dev->phydev || !dp->pl) |
643 | return -ENODEV; | 646 | return -ENODEV; |
644 | 647 | ||
645 | if (!ds->ops->set_mac_eee) | 648 | if (!ds->ops->set_mac_eee) |
@@ -659,7 +662,7 @@ static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e) | |||
659 | int ret; | 662 | int ret; |
660 | 663 | ||
661 | /* Port's PHY and MAC both need to be EEE capable */ | 664 | /* Port's PHY and MAC both need to be EEE capable */ |
662 | if (!dev->phydev && !dp->pl) | 665 | if (!dev->phydev || !dp->pl) |
663 | return -ENODEV; | 666 | return -ENODEV; |
664 | 667 | ||
665 | if (!ds->ops->get_mac_eee) | 668 | if (!ds->ops->get_mac_eee) |
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 1a4e9ff02762..5731670c560b 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c | |||
@@ -108,6 +108,7 @@ static size_t inet_sk_attr_size(struct sock *sk, | |||
108 | + nla_total_size(1) /* INET_DIAG_TOS */ | 108 | + nla_total_size(1) /* INET_DIAG_TOS */ |
109 | + nla_total_size(1) /* INET_DIAG_TCLASS */ | 109 | + nla_total_size(1) /* INET_DIAG_TCLASS */ |
110 | + nla_total_size(4) /* INET_DIAG_MARK */ | 110 | + nla_total_size(4) /* INET_DIAG_MARK */ |
111 | + nla_total_size(4) /* INET_DIAG_CLASS_ID */ | ||
111 | + nla_total_size(sizeof(struct inet_diag_meminfo)) | 112 | + nla_total_size(sizeof(struct inet_diag_meminfo)) |
112 | + nla_total_size(sizeof(struct inet_diag_msg)) | 113 | + nla_total_size(sizeof(struct inet_diag_msg)) |
113 | + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) | 114 | + nla_total_size(SK_MEMINFO_VARS * sizeof(u32)) |
@@ -287,12 +288,19 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk, | |||
287 | goto errout; | 288 | goto errout; |
288 | } | 289 | } |
289 | 290 | ||
290 | if (ext & (1 << (INET_DIAG_CLASS_ID - 1))) { | 291 | if (ext & (1 << (INET_DIAG_CLASS_ID - 1)) || |
292 | ext & (1 << (INET_DIAG_TCLASS - 1))) { | ||
291 | u32 classid = 0; | 293 | u32 classid = 0; |
292 | 294 | ||
293 | #ifdef CONFIG_SOCK_CGROUP_DATA | 295 | #ifdef CONFIG_SOCK_CGROUP_DATA |
294 | classid = sock_cgroup_classid(&sk->sk_cgrp_data); | 296 | classid = sock_cgroup_classid(&sk->sk_cgrp_data); |
295 | #endif | 297 | #endif |
298 | /* Fallback to socket priority if class id isn't set. | ||
299 | * Classful qdiscs use it as direct reference to class. | ||
300 | * For cgroup2 classid is always zero. | ||
301 | */ | ||
302 | if (!classid) | ||
303 | classid = sk->sk_priority; | ||
296 | 304 | ||
297 | if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) | 305 | if (nla_put_u32(skb, INET_DIAG_CLASS_ID, classid)) |
298 | goto errout; | 306 | goto errout; |
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index d757b9642d0d..be778599bfed 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -216,6 +216,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, | |||
216 | atomic_set(&p->rid, 0); | 216 | atomic_set(&p->rid, 0); |
217 | p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; | 217 | p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; |
218 | p->rate_tokens = 0; | 218 | p->rate_tokens = 0; |
219 | p->n_redirects = 0; | ||
219 | /* 60*HZ is arbitrary, but chosen enough high so that the first | 220 | /* 60*HZ is arbitrary, but chosen enough high so that the first |
220 | * calculation of tokens is at its maximum. | 221 | * calculation of tokens is at its maximum. |
221 | */ | 222 | */ |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 20a64fe6254b..3978f807fa8b 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -1455,12 +1455,17 @@ static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
1455 | { | 1455 | { |
1456 | struct ip_tunnel *t = netdev_priv(dev); | 1456 | struct ip_tunnel *t = netdev_priv(dev); |
1457 | struct ip_tunnel_parm *p = &t->parms; | 1457 | struct ip_tunnel_parm *p = &t->parms; |
1458 | __be16 o_flags = p->o_flags; | ||
1459 | |||
1460 | if ((t->erspan_ver == 1 || t->erspan_ver == 2) && | ||
1461 | !t->collect_md) | ||
1462 | o_flags |= TUNNEL_KEY; | ||
1458 | 1463 | ||
1459 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || | 1464 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || |
1460 | nla_put_be16(skb, IFLA_GRE_IFLAGS, | 1465 | nla_put_be16(skb, IFLA_GRE_IFLAGS, |
1461 | gre_tnl_flags_to_gre_flags(p->i_flags)) || | 1466 | gre_tnl_flags_to_gre_flags(p->i_flags)) || |
1462 | nla_put_be16(skb, IFLA_GRE_OFLAGS, | 1467 | nla_put_be16(skb, IFLA_GRE_OFLAGS, |
1463 | gre_tnl_flags_to_gre_flags(p->o_flags)) || | 1468 | gre_tnl_flags_to_gre_flags(o_flags)) || |
1464 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || | 1469 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || |
1465 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || | 1470 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || |
1466 | nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || | 1471 | nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) || |
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index d7b43e700023..68a21bf75dd0 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c | |||
@@ -74,6 +74,33 @@ drop: | |||
74 | return 0; | 74 | return 0; |
75 | } | 75 | } |
76 | 76 | ||
77 | static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi, | ||
78 | int encap_type) | ||
79 | { | ||
80 | struct ip_tunnel *tunnel; | ||
81 | const struct iphdr *iph = ip_hdr(skb); | ||
82 | struct net *net = dev_net(skb->dev); | ||
83 | struct ip_tunnel_net *itn = net_generic(net, vti_net_id); | ||
84 | |||
85 | tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, | ||
86 | iph->saddr, iph->daddr, 0); | ||
87 | if (tunnel) { | ||
88 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | ||
89 | goto drop; | ||
90 | |||
91 | XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel; | ||
92 | |||
93 | skb->dev = tunnel->dev; | ||
94 | |||
95 | return xfrm_input(skb, nexthdr, spi, encap_type); | ||
96 | } | ||
97 | |||
98 | return -EINVAL; | ||
99 | drop: | ||
100 | kfree_skb(skb); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
77 | static int vti_rcv(struct sk_buff *skb) | 104 | static int vti_rcv(struct sk_buff *skb) |
78 | { | 105 | { |
79 | XFRM_SPI_SKB_CB(skb)->family = AF_INET; | 106 | XFRM_SPI_SKB_CB(skb)->family = AF_INET; |
@@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb) | |||
82 | return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); | 109 | return vti_input(skb, ip_hdr(skb)->protocol, 0, 0); |
83 | } | 110 | } |
84 | 111 | ||
112 | static int vti_rcv_ipip(struct sk_buff *skb) | ||
113 | { | ||
114 | XFRM_SPI_SKB_CB(skb)->family = AF_INET; | ||
115 | XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr); | ||
116 | |||
117 | return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0); | ||
118 | } | ||
119 | |||
85 | static int vti_rcv_cb(struct sk_buff *skb, int err) | 120 | static int vti_rcv_cb(struct sk_buff *skb, int err) |
86 | { | 121 | { |
87 | unsigned short family; | 122 | unsigned short family; |
@@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = { | |||
435 | .priority = 100, | 470 | .priority = 100, |
436 | }; | 471 | }; |
437 | 472 | ||
473 | static struct xfrm_tunnel ipip_handler __read_mostly = { | ||
474 | .handler = vti_rcv_ipip, | ||
475 | .err_handler = vti4_err, | ||
476 | .priority = 0, | ||
477 | }; | ||
478 | |||
438 | static int __net_init vti_init_net(struct net *net) | 479 | static int __net_init vti_init_net(struct net *net) |
439 | { | 480 | { |
440 | int err; | 481 | int err; |
@@ -603,6 +644,13 @@ static int __init vti_init(void) | |||
603 | if (err < 0) | 644 | if (err < 0) |
604 | goto xfrm_proto_comp_failed; | 645 | goto xfrm_proto_comp_failed; |
605 | 646 | ||
647 | msg = "ipip tunnel"; | ||
648 | err = xfrm4_tunnel_register(&ipip_handler, AF_INET); | ||
649 | if (err < 0) { | ||
650 | pr_info("%s: cant't register tunnel\n",__func__); | ||
651 | goto xfrm_tunnel_failed; | ||
652 | } | ||
653 | |||
606 | msg = "netlink interface"; | 654 | msg = "netlink interface"; |
607 | err = rtnl_link_register(&vti_link_ops); | 655 | err = rtnl_link_register(&vti_link_ops); |
608 | if (err < 0) | 656 | if (err < 0) |
@@ -612,6 +660,8 @@ static int __init vti_init(void) | |||
612 | 660 | ||
613 | rtnl_link_failed: | 661 | rtnl_link_failed: |
614 | xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); | 662 | xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP); |
663 | xfrm_tunnel_failed: | ||
664 | xfrm4_tunnel_deregister(&ipip_handler, AF_INET); | ||
615 | xfrm_proto_comp_failed: | 665 | xfrm_proto_comp_failed: |
616 | xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); | 666 | xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH); |
617 | xfrm_proto_ah_failed: | 667 | xfrm_proto_ah_failed: |
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index b61977db9b7f..2a909e5f9ba0 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
@@ -846,9 +846,9 @@ static int clusterip_net_init(struct net *net) | |||
846 | 846 | ||
847 | static void clusterip_net_exit(struct net *net) | 847 | static void clusterip_net_exit(struct net *net) |
848 | { | 848 | { |
849 | #ifdef CONFIG_PROC_FS | ||
849 | struct clusterip_net *cn = clusterip_pernet(net); | 850 | struct clusterip_net *cn = clusterip_pernet(net); |
850 | 851 | ||
851 | #ifdef CONFIG_PROC_FS | ||
852 | mutex_lock(&cn->mutex); | 852 | mutex_lock(&cn->mutex); |
853 | proc_remove(cn->procdir); | 853 | proc_remove(cn->procdir); |
854 | cn->procdir = NULL; | 854 | cn->procdir = NULL; |
diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c index 2687db015b6f..fa2ba7c500e4 100644 --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c | |||
@@ -215,6 +215,7 @@ int nf_nat_icmp_reply_translation(struct sk_buff *skb, | |||
215 | 215 | ||
216 | /* Change outer to look like the reply to an incoming packet */ | 216 | /* Change outer to look like the reply to an incoming packet */ |
217 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | 217 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); |
218 | target.dst.protonum = IPPROTO_ICMP; | ||
218 | if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) | 219 | if (!nf_nat_ipv4_manip_pkt(skb, 0, &target, manip)) |
219 | return 0; | 220 | return 0; |
220 | 221 | ||
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c index a0aa13bcabda..0a8a60c1bf9a 100644 --- a/net/ipv4/netfilter/nf_nat_snmp_basic_main.c +++ b/net/ipv4/netfilter/nf_nat_snmp_basic_main.c | |||
@@ -105,6 +105,8 @@ static void fast_csum(struct snmp_ctx *ctx, unsigned char offset) | |||
105 | int snmp_version(void *context, size_t hdrlen, unsigned char tag, | 105 | int snmp_version(void *context, size_t hdrlen, unsigned char tag, |
106 | const void *data, size_t datalen) | 106 | const void *data, size_t datalen) |
107 | { | 107 | { |
108 | if (datalen != 1) | ||
109 | return -EINVAL; | ||
108 | if (*(unsigned char *)data > 1) | 110 | if (*(unsigned char *)data > 1) |
109 | return -ENOTSUPP; | 111 | return -ENOTSUPP; |
110 | return 1; | 112 | return 1; |
@@ -114,8 +116,11 @@ int snmp_helper(void *context, size_t hdrlen, unsigned char tag, | |||
114 | const void *data, size_t datalen) | 116 | const void *data, size_t datalen) |
115 | { | 117 | { |
116 | struct snmp_ctx *ctx = (struct snmp_ctx *)context; | 118 | struct snmp_ctx *ctx = (struct snmp_ctx *)context; |
117 | __be32 *pdata = (__be32 *)data; | 119 | __be32 *pdata; |
118 | 120 | ||
121 | if (datalen != 4) | ||
122 | return -EINVAL; | ||
123 | pdata = (__be32 *)data; | ||
119 | if (*pdata == ctx->from) { | 124 | if (*pdata == ctx->from) { |
120 | pr_debug("%s: %pI4 to %pI4\n", __func__, | 125 | pr_debug("%s: %pI4 to %pI4\n", __func__, |
121 | (void *)&ctx->from, (void *)&ctx->to); | 126 | (void *)&ctx->from, (void *)&ctx->to); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ce92f73cf104..5163b64f8fb3 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -887,13 +887,15 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
887 | /* No redirected packets during ip_rt_redirect_silence; | 887 | /* No redirected packets during ip_rt_redirect_silence; |
888 | * reset the algorithm. | 888 | * reset the algorithm. |
889 | */ | 889 | */ |
890 | if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) | 890 | if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) { |
891 | peer->rate_tokens = 0; | 891 | peer->rate_tokens = 0; |
892 | peer->n_redirects = 0; | ||
893 | } | ||
892 | 894 | ||
893 | /* Too many ignored redirects; do not send anything | 895 | /* Too many ignored redirects; do not send anything |
894 | * set dst.rate_last to the last seen redirected packet. | 896 | * set dst.rate_last to the last seen redirected packet. |
895 | */ | 897 | */ |
896 | if (peer->rate_tokens >= ip_rt_redirect_number) { | 898 | if (peer->n_redirects >= ip_rt_redirect_number) { |
897 | peer->rate_last = jiffies; | 899 | peer->rate_last = jiffies; |
898 | goto out_put_peer; | 900 | goto out_put_peer; |
899 | } | 901 | } |
@@ -910,6 +912,7 @@ void ip_rt_send_redirect(struct sk_buff *skb) | |||
910 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); | 912 | icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw); |
911 | peer->rate_last = jiffies; | 913 | peer->rate_last = jiffies; |
912 | ++peer->rate_tokens; | 914 | ++peer->rate_tokens; |
915 | ++peer->n_redirects; | ||
913 | #ifdef CONFIG_IP_ROUTE_VERBOSE | 916 | #ifdef CONFIG_IP_ROUTE_VERBOSE |
914 | if (log_martians && | 917 | if (log_martians && |
915 | peer->rate_tokens == ip_rt_redirect_number) | 918 | peer->rate_tokens == ip_rt_redirect_number) |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 84c358804355..72ffd3d760ff 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -1165,7 +1165,8 @@ check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires) | |||
1165 | list_for_each_entry(ifa, &idev->addr_list, if_list) { | 1165 | list_for_each_entry(ifa, &idev->addr_list, if_list) { |
1166 | if (ifa == ifp) | 1166 | if (ifa == ifp) |
1167 | continue; | 1167 | continue; |
1168 | if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr, | 1168 | if (ifa->prefix_len != ifp->prefix_len || |
1169 | !ipv6_prefix_equal(&ifa->addr, &ifp->addr, | ||
1169 | ifp->prefix_len)) | 1170 | ifp->prefix_len)) |
1170 | continue; | 1171 | continue; |
1171 | if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) | 1172 | if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE)) |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 4416368dbd49..801a9a0c217e 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -2098,12 +2098,17 @@ static int ip6gre_fill_info(struct sk_buff *skb, const struct net_device *dev) | |||
2098 | { | 2098 | { |
2099 | struct ip6_tnl *t = netdev_priv(dev); | 2099 | struct ip6_tnl *t = netdev_priv(dev); |
2100 | struct __ip6_tnl_parm *p = &t->parms; | 2100 | struct __ip6_tnl_parm *p = &t->parms; |
2101 | __be16 o_flags = p->o_flags; | ||
2102 | |||
2103 | if ((p->erspan_ver == 1 || p->erspan_ver == 2) && | ||
2104 | !p->collect_md) | ||
2105 | o_flags |= TUNNEL_KEY; | ||
2101 | 2106 | ||
2102 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || | 2107 | if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) || |
2103 | nla_put_be16(skb, IFLA_GRE_IFLAGS, | 2108 | nla_put_be16(skb, IFLA_GRE_IFLAGS, |
2104 | gre_tnl_flags_to_gre_flags(p->i_flags)) || | 2109 | gre_tnl_flags_to_gre_flags(p->i_flags)) || |
2105 | nla_put_be16(skb, IFLA_GRE_OFLAGS, | 2110 | nla_put_be16(skb, IFLA_GRE_OFLAGS, |
2106 | gre_tnl_flags_to_gre_flags(p->o_flags)) || | 2111 | gre_tnl_flags_to_gre_flags(o_flags)) || |
2107 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || | 2112 | nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) || |
2108 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || | 2113 | nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) || |
2109 | nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || | 2114 | nla_put_in6_addr(skb, IFLA_GRE_LOCAL, &p->laddr) || |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 30337b38274b..cc01aa3f2b5e 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -1516,6 +1516,9 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all) | |||
1516 | continue; | 1516 | continue; |
1517 | rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); | 1517 | rhltable_remove(&mrt->mfc_hash, &c->mnode, ip6mr_rht_params); |
1518 | list_del_rcu(&c->list); | 1518 | list_del_rcu(&c->list); |
1519 | call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), | ||
1520 | FIB_EVENT_ENTRY_DEL, | ||
1521 | (struct mfc6_cache *)c, mrt->id); | ||
1519 | mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); | 1522 | mr6_netlink_event(mrt, (struct mfc6_cache *)c, RTM_DELROUTE); |
1520 | mr_cache_put(c); | 1523 | mr_cache_put(c); |
1521 | } | 1524 | } |
@@ -1524,10 +1527,6 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all) | |||
1524 | spin_lock_bh(&mfc_unres_lock); | 1527 | spin_lock_bh(&mfc_unres_lock); |
1525 | list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { | 1528 | list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) { |
1526 | list_del(&c->list); | 1529 | list_del(&c->list); |
1527 | call_ip6mr_mfc_entry_notifiers(read_pnet(&mrt->net), | ||
1528 | FIB_EVENT_ENTRY_DEL, | ||
1529 | (struct mfc6_cache *)c, | ||
1530 | mrt->id); | ||
1531 | mr6_netlink_event(mrt, (struct mfc6_cache *)c, | 1530 | mr6_netlink_event(mrt, (struct mfc6_cache *)c, |
1532 | RTM_DELROUTE); | 1531 | RTM_DELROUTE); |
1533 | ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c); | 1532 | ip6mr_destroy_unres(mrt, (struct mfc6_cache *)c); |
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 8b075f0bc351..6d0b1f3e927b 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c | |||
@@ -23,9 +23,11 @@ int ip6_route_me_harder(struct net *net, struct sk_buff *skb) | |||
23 | struct sock *sk = sk_to_full_sk(skb->sk); | 23 | struct sock *sk = sk_to_full_sk(skb->sk); |
24 | unsigned int hh_len; | 24 | unsigned int hh_len; |
25 | struct dst_entry *dst; | 25 | struct dst_entry *dst; |
26 | int strict = (ipv6_addr_type(&iph->daddr) & | ||
27 | (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)); | ||
26 | struct flowi6 fl6 = { | 28 | struct flowi6 fl6 = { |
27 | .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if : | 29 | .flowi6_oif = sk && sk->sk_bound_dev_if ? sk->sk_bound_dev_if : |
28 | rt6_need_strict(&iph->daddr) ? skb_dst(skb)->dev->ifindex : 0, | 30 | strict ? skb_dst(skb)->dev->ifindex : 0, |
29 | .flowi6_mark = skb->mark, | 31 | .flowi6_mark = skb->mark, |
30 | .flowi6_uid = sock_net_uid(net, sk), | 32 | .flowi6_uid = sock_net_uid(net, sk), |
31 | .daddr = iph->daddr, | 33 | .daddr = iph->daddr, |
diff --git a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c index 23022447eb49..7a41ee3c11b4 100644 --- a/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c +++ b/net/ipv6/netfilter/nf_nat_l3proto_ipv6.c | |||
@@ -226,6 +226,7 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, | |||
226 | } | 226 | } |
227 | 227 | ||
228 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); | 228 | nf_ct_invert_tuplepr(&target, &ct->tuplehash[!dir].tuple); |
229 | target.dst.protonum = IPPROTO_ICMPV6; | ||
229 | if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) | 230 | if (!nf_nat_ipv6_manip_pkt(skb, 0, &target, manip)) |
230 | return 0; | 231 | return 0; |
231 | 232 | ||
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index 8d0ba757a46c..9b2f272ca164 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c | |||
@@ -221,9 +221,7 @@ static int seg6_genl_get_tunsrc(struct sk_buff *skb, struct genl_info *info) | |||
221 | rcu_read_unlock(); | 221 | rcu_read_unlock(); |
222 | 222 | ||
223 | genlmsg_end(msg, hdr); | 223 | genlmsg_end(msg, hdr); |
224 | genlmsg_reply(msg, info); | 224 | return genlmsg_reply(msg, info); |
225 | |||
226 | return 0; | ||
227 | 225 | ||
228 | nla_put_failure: | 226 | nla_put_failure: |
229 | rcu_read_unlock(); | 227 | rcu_read_unlock(); |
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index 8181ee7e1e27..ee5403cbe655 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c | |||
@@ -146,6 +146,8 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) | |||
146 | } else { | 146 | } else { |
147 | ip6_flow_hdr(hdr, 0, flowlabel); | 147 | ip6_flow_hdr(hdr, 0, flowlabel); |
148 | hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); | 148 | hdr->hop_limit = ip6_dst_hoplimit(skb_dst(skb)); |
149 | |||
150 | memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); | ||
149 | } | 151 | } |
150 | 152 | ||
151 | hdr->nexthdr = NEXTHDR_ROUTING; | 153 | hdr->nexthdr = NEXTHDR_ROUTING; |
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 1e03305c0549..e8a1dabef803 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -546,7 +546,8 @@ static int ipip6_err(struct sk_buff *skb, u32 info) | |||
546 | } | 546 | } |
547 | 547 | ||
548 | err = 0; | 548 | err = 0; |
549 | if (!ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) | 549 | if (__in6_dev_get(skb->dev) && |
550 | !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4, type, data_len)) | ||
550 | goto out; | 551 | goto out; |
551 | 552 | ||
552 | if (t->parms.iph.daddr == 0) | 553 | if (t->parms.iph.daddr == 0) |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 26f1d435696a..fed6becc5daf 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -83,8 +83,7 @@ | |||
83 | #define L2TP_SLFLAG_S 0x40000000 | 83 | #define L2TP_SLFLAG_S 0x40000000 |
84 | #define L2TP_SL_SEQ_MASK 0x00ffffff | 84 | #define L2TP_SL_SEQ_MASK 0x00ffffff |
85 | 85 | ||
86 | #define L2TP_HDR_SIZE_SEQ 10 | 86 | #define L2TP_HDR_SIZE_MAX 14 |
87 | #define L2TP_HDR_SIZE_NOSEQ 6 | ||
88 | 87 | ||
89 | /* Default trace flags */ | 88 | /* Default trace flags */ |
90 | #define L2TP_DEFAULT_DEBUG_FLAGS 0 | 89 | #define L2TP_DEFAULT_DEBUG_FLAGS 0 |
@@ -808,7 +807,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) | |||
808 | __skb_pull(skb, sizeof(struct udphdr)); | 807 | __skb_pull(skb, sizeof(struct udphdr)); |
809 | 808 | ||
810 | /* Short packet? */ | 809 | /* Short packet? */ |
811 | if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) { | 810 | if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) { |
812 | l2tp_info(tunnel, L2TP_MSG_DATA, | 811 | l2tp_info(tunnel, L2TP_MSG_DATA, |
813 | "%s: recv short packet (len=%d)\n", | 812 | "%s: recv short packet (len=%d)\n", |
814 | tunnel->name, skb->len); | 813 | tunnel->name, skb->len); |
@@ -884,6 +883,10 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) | |||
884 | goto error; | 883 | goto error; |
885 | } | 884 | } |
886 | 885 | ||
886 | if (tunnel->version == L2TP_HDR_VER_3 && | ||
887 | l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) | ||
888 | goto error; | ||
889 | |||
887 | l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); | 890 | l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); |
888 | l2tp_session_dec_refcount(session); | 891 | l2tp_session_dec_refcount(session); |
889 | 892 | ||
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 9c9afe94d389..b2ce90260c35 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -301,6 +301,26 @@ static inline bool l2tp_tunnel_uses_xfrm(const struct l2tp_tunnel *tunnel) | |||
301 | } | 301 | } |
302 | #endif | 302 | #endif |
303 | 303 | ||
304 | static inline int l2tp_v3_ensure_opt_in_linear(struct l2tp_session *session, struct sk_buff *skb, | ||
305 | unsigned char **ptr, unsigned char **optr) | ||
306 | { | ||
307 | int opt_len = session->peer_cookie_len + l2tp_get_l2specific_len(session); | ||
308 | |||
309 | if (opt_len > 0) { | ||
310 | int off = *ptr - *optr; | ||
311 | |||
312 | if (!pskb_may_pull(skb, off + opt_len)) | ||
313 | return -1; | ||
314 | |||
315 | if (skb->data != *optr) { | ||
316 | *optr = skb->data; | ||
317 | *ptr = skb->data + off; | ||
318 | } | ||
319 | } | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
304 | #define l2tp_printk(ptr, type, func, fmt, ...) \ | 324 | #define l2tp_printk(ptr, type, func, fmt, ...) \ |
305 | do { \ | 325 | do { \ |
306 | if (((ptr)->debug) & (type)) \ | 326 | if (((ptr)->debug) & (type)) \ |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 35f6f86d4dcc..d4c60523c549 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -165,6 +165,9 @@ static int l2tp_ip_recv(struct sk_buff *skb) | |||
165 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); | 165 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); |
166 | } | 166 | } |
167 | 167 | ||
168 | if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) | ||
169 | goto discard_sess; | ||
170 | |||
168 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); | 171 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); |
169 | l2tp_session_dec_refcount(session); | 172 | l2tp_session_dec_refcount(session); |
170 | 173 | ||
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 237f1a4a0b0c..0ae6899edac0 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -178,6 +178,9 @@ static int l2tp_ip6_recv(struct sk_buff *skb) | |||
178 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); | 178 | print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, ptr, length); |
179 | } | 179 | } |
180 | 180 | ||
181 | if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) | ||
182 | goto discard_sess; | ||
183 | |||
181 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); | 184 | l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); |
182 | l2tp_session_dec_refcount(session); | 185 | l2tp_session_dec_refcount(session); |
183 | 186 | ||
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 69e831bc317b..54821fb1a960 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright 2007, Michael Wu <flamingice@sourmilk.net> |
9 | * Copyright 2007-2010, Intel Corporation | 9 | * Copyright 2007-2010, Intel Corporation |
10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH | 10 | * Copyright(c) 2015-2017 Intel Deutschland GmbH |
11 | * Copyright (C) 2018 Intel Corporation | 11 | * Copyright (C) 2018 - 2019 Intel Corporation |
12 | * | 12 | * |
13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License version 2 as | 14 | * it under the terms of the GNU General Public License version 2 as |
@@ -366,6 +366,8 @@ int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, | |||
366 | 366 | ||
367 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); | 367 | set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state); |
368 | 368 | ||
369 | ieee80211_agg_stop_txq(sta, tid); | ||
370 | |||
369 | spin_unlock_bh(&sta->lock); | 371 | spin_unlock_bh(&sta->lock); |
370 | 372 | ||
371 | ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", | 373 | ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n", |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f170d6c6629a..928f13a208b0 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1938,9 +1938,16 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, | |||
1938 | int head_need, bool may_encrypt) | 1938 | int head_need, bool may_encrypt) |
1939 | { | 1939 | { |
1940 | struct ieee80211_local *local = sdata->local; | 1940 | struct ieee80211_local *local = sdata->local; |
1941 | struct ieee80211_hdr *hdr; | ||
1942 | bool enc_tailroom; | ||
1941 | int tail_need = 0; | 1943 | int tail_need = 0; |
1942 | 1944 | ||
1943 | if (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt) { | 1945 | hdr = (struct ieee80211_hdr *) skb->data; |
1946 | enc_tailroom = may_encrypt && | ||
1947 | (sdata->crypto_tx_tailroom_needed_cnt || | ||
1948 | ieee80211_is_mgmt(hdr->frame_control)); | ||
1949 | |||
1950 | if (enc_tailroom) { | ||
1944 | tail_need = IEEE80211_ENCRYPT_TAILROOM; | 1951 | tail_need = IEEE80211_ENCRYPT_TAILROOM; |
1945 | tail_need -= skb_tailroom(skb); | 1952 | tail_need -= skb_tailroom(skb); |
1946 | tail_need = max_t(int, tail_need, 0); | 1953 | tail_need = max_t(int, tail_need, 0); |
@@ -1948,8 +1955,7 @@ static int ieee80211_skb_resize(struct ieee80211_sub_if_data *sdata, | |||
1948 | 1955 | ||
1949 | if (skb_cloned(skb) && | 1956 | if (skb_cloned(skb) && |
1950 | (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || | 1957 | (!ieee80211_hw_check(&local->hw, SUPPORTS_CLONED_SKBS) || |
1951 | !skb_clone_writable(skb, ETH_HLEN) || | 1958 | !skb_clone_writable(skb, ETH_HLEN) || enc_tailroom)) |
1952 | (may_encrypt && sdata->crypto_tx_tailroom_needed_cnt))) | ||
1953 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); | 1959 | I802_DEBUG_INC(local->tx_expand_skb_head_cloned); |
1954 | else if (head_need || tail_need) | 1960 | else if (head_need || tail_need) |
1955 | I802_DEBUG_INC(local->tx_expand_skb_head); | 1961 | I802_DEBUG_INC(local->tx_expand_skb_head); |
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index d0eb38b890aa..ba950ae974fc 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> | 5 | * Copyright 2007 Johannes Berg <johannes@sipsolutions.net> |
6 | * Copyright 2013-2014 Intel Mobile Communications GmbH | 6 | * Copyright 2013-2014 Intel Mobile Communications GmbH |
7 | * Copyright (C) 2015-2017 Intel Deutschland GmbH | 7 | * Copyright (C) 2015-2017 Intel Deutschland GmbH |
8 | * Copyright (C) 2018 Intel Corporation | 8 | * Copyright (C) 2018-2019 Intel Corporation |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
@@ -2146,6 +2146,10 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
2146 | case NL80211_IFTYPE_AP_VLAN: | 2146 | case NL80211_IFTYPE_AP_VLAN: |
2147 | case NL80211_IFTYPE_MONITOR: | 2147 | case NL80211_IFTYPE_MONITOR: |
2148 | break; | 2148 | break; |
2149 | case NL80211_IFTYPE_ADHOC: | ||
2150 | if (sdata->vif.bss_conf.ibss_joined) | ||
2151 | WARN_ON(drv_join_ibss(local, sdata)); | ||
2152 | /* fall through */ | ||
2149 | default: | 2153 | default: |
2150 | ieee80211_reconfig_stations(sdata); | 2154 | ieee80211_reconfig_stations(sdata); |
2151 | /* fall through */ | 2155 | /* fall through */ |
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index cad48d07c818..8401cefd9f65 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig | |||
@@ -29,6 +29,7 @@ config IP_VS_IPV6 | |||
29 | bool "IPv6 support for IPVS" | 29 | bool "IPv6 support for IPVS" |
30 | depends on IPV6 = y || IP_VS = IPV6 | 30 | depends on IPV6 = y || IP_VS = IPV6 |
31 | select IP6_NF_IPTABLES | 31 | select IP6_NF_IPTABLES |
32 | select NF_DEFRAG_IPV6 | ||
32 | ---help--- | 33 | ---help--- |
33 | Add IPv6 support to IPVS. | 34 | Add IPv6 support to IPVS. |
34 | 35 | ||
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index fe9abf3cc10a..235205c93e14 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c | |||
@@ -1536,14 +1536,12 @@ ip_vs_try_to_schedule(struct netns_ipvs *ipvs, int af, struct sk_buff *skb, | |||
1536 | /* sorry, all this trouble for a no-hit :) */ | 1536 | /* sorry, all this trouble for a no-hit :) */ |
1537 | IP_VS_DBG_PKT(12, af, pp, skb, iph->off, | 1537 | IP_VS_DBG_PKT(12, af, pp, skb, iph->off, |
1538 | "ip_vs_in: packet continues traversal as normal"); | 1538 | "ip_vs_in: packet continues traversal as normal"); |
1539 | if (iph->fragoffs) { | 1539 | |
1540 | /* Fragment that couldn't be mapped to a conn entry | 1540 | /* Fragment couldn't be mapped to a conn entry */ |
1541 | * is missing module nf_defrag_ipv6 | 1541 | if (iph->fragoffs) |
1542 | */ | ||
1543 | IP_VS_DBG_RL("Unhandled frag, load nf_defrag_ipv6\n"); | ||
1544 | IP_VS_DBG_PKT(7, af, pp, skb, iph->off, | 1542 | IP_VS_DBG_PKT(7, af, pp, skb, iph->off, |
1545 | "unhandled fragment"); | 1543 | "unhandled fragment"); |
1546 | } | 1544 | |
1547 | *verdict = NF_ACCEPT; | 1545 | *verdict = NF_ACCEPT; |
1548 | return 0; | 1546 | return 0; |
1549 | } | 1547 | } |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 432141f04af3..86afacb07e5f 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #ifdef CONFIG_IP_VS_IPV6 | 43 | #ifdef CONFIG_IP_VS_IPV6 |
44 | #include <net/ipv6.h> | 44 | #include <net/ipv6.h> |
45 | #include <net/ip6_route.h> | 45 | #include <net/ip6_route.h> |
46 | #include <net/netfilter/ipv6/nf_defrag_ipv6.h> | ||
46 | #endif | 47 | #endif |
47 | #include <net/route.h> | 48 | #include <net/route.h> |
48 | #include <net/sock.h> | 49 | #include <net/sock.h> |
@@ -895,6 +896,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, | |||
895 | { | 896 | { |
896 | struct ip_vs_dest *dest; | 897 | struct ip_vs_dest *dest; |
897 | unsigned int atype, i; | 898 | unsigned int atype, i; |
899 | int ret = 0; | ||
898 | 900 | ||
899 | EnterFunction(2); | 901 | EnterFunction(2); |
900 | 902 | ||
@@ -905,6 +907,10 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest, | |||
905 | atype & IPV6_ADDR_LINKLOCAL) && | 907 | atype & IPV6_ADDR_LINKLOCAL) && |
906 | !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6)) | 908 | !__ip_vs_addr_is_local_v6(svc->ipvs->net, &udest->addr.in6)) |
907 | return -EINVAL; | 909 | return -EINVAL; |
910 | |||
911 | ret = nf_defrag_ipv6_enable(svc->ipvs->net); | ||
912 | if (ret) | ||
913 | return ret; | ||
908 | } else | 914 | } else |
909 | #endif | 915 | #endif |
910 | { | 916 | { |
@@ -1228,6 +1234,10 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u, | |||
1228 | ret = -EINVAL; | 1234 | ret = -EINVAL; |
1229 | goto out_err; | 1235 | goto out_err; |
1230 | } | 1236 | } |
1237 | |||
1238 | ret = nf_defrag_ipv6_enable(ipvs->net); | ||
1239 | if (ret) | ||
1240 | goto out_err; | ||
1231 | } | 1241 | } |
1232 | #endif | 1242 | #endif |
1233 | 1243 | ||
@@ -2221,6 +2231,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user | |||
2221 | u->udp_timeout); | 2231 | u->udp_timeout); |
2222 | 2232 | ||
2223 | #ifdef CONFIG_IP_VS_PROTO_TCP | 2233 | #ifdef CONFIG_IP_VS_PROTO_TCP |
2234 | if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) || | ||
2235 | u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) { | ||
2236 | return -EINVAL; | ||
2237 | } | ||
2238 | #endif | ||
2239 | |||
2240 | #ifdef CONFIG_IP_VS_PROTO_UDP | ||
2241 | if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ)) | ||
2242 | return -EINVAL; | ||
2243 | #endif | ||
2244 | |||
2245 | #ifdef CONFIG_IP_VS_PROTO_TCP | ||
2224 | if (u->tcp_timeout) { | 2246 | if (u->tcp_timeout) { |
2225 | pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); | 2247 | pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP); |
2226 | pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] | 2248 | pd->timeout_table[IP_VS_TCP_S_ESTABLISHED] |
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 741b533148ba..db4d46332e86 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple, | |||
1007 | } | 1007 | } |
1008 | 1008 | ||
1009 | if (nf_ct_key_equal(h, tuple, zone, net)) { | 1009 | if (nf_ct_key_equal(h, tuple, zone, net)) { |
1010 | /* Tuple is taken already, so caller will need to find | ||
1011 | * a new source port to use. | ||
1012 | * | ||
1013 | * Only exception: | ||
1014 | * If the *original tuples* are identical, then both | ||
1015 | * conntracks refer to the same flow. | ||
1016 | * This is a rare situation, it can occur e.g. when | ||
1017 | * more than one UDP packet is sent from same socket | ||
1018 | * in different threads. | ||
1019 | * | ||
1020 | * Let nf_ct_resolve_clash() deal with this later. | ||
1021 | */ | ||
1022 | if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple, | ||
1023 | &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple)) | ||
1024 | continue; | ||
1025 | |||
1010 | NF_CT_STAT_INC_ATOMIC(net, found); | 1026 | NF_CT_STAT_INC_ATOMIC(net, found); |
1011 | rcu_read_unlock(); | 1027 | rcu_read_unlock(); |
1012 | return 1; | 1028 | return 1; |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index fb07f6cfc719..5a92f23f179f 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -116,6 +116,23 @@ static void nft_trans_destroy(struct nft_trans *trans) | |||
116 | kfree(trans); | 116 | kfree(trans); |
117 | } | 117 | } |
118 | 118 | ||
119 | static void nft_set_trans_bind(const struct nft_ctx *ctx, struct nft_set *set) | ||
120 | { | ||
121 | struct net *net = ctx->net; | ||
122 | struct nft_trans *trans; | ||
123 | |||
124 | if (!nft_set_is_anonymous(set)) | ||
125 | return; | ||
126 | |||
127 | list_for_each_entry_reverse(trans, &net->nft.commit_list, list) { | ||
128 | if (trans->msg_type == NFT_MSG_NEWSET && | ||
129 | nft_trans_set(trans) == set) { | ||
130 | nft_trans_set_bound(trans) = true; | ||
131 | break; | ||
132 | } | ||
133 | } | ||
134 | } | ||
135 | |||
119 | static int nf_tables_register_hook(struct net *net, | 136 | static int nf_tables_register_hook(struct net *net, |
120 | const struct nft_table *table, | 137 | const struct nft_table *table, |
121 | struct nft_chain *chain) | 138 | struct nft_chain *chain) |
@@ -211,18 +228,6 @@ static int nft_delchain(struct nft_ctx *ctx) | |||
211 | return err; | 228 | return err; |
212 | } | 229 | } |
213 | 230 | ||
214 | /* either expr ops provide both activate/deactivate, or neither */ | ||
215 | static bool nft_expr_check_ops(const struct nft_expr_ops *ops) | ||
216 | { | ||
217 | if (!ops) | ||
218 | return true; | ||
219 | |||
220 | if (WARN_ON_ONCE((!ops->activate ^ !ops->deactivate))) | ||
221 | return false; | ||
222 | |||
223 | return true; | ||
224 | } | ||
225 | |||
226 | static void nft_rule_expr_activate(const struct nft_ctx *ctx, | 231 | static void nft_rule_expr_activate(const struct nft_ctx *ctx, |
227 | struct nft_rule *rule) | 232 | struct nft_rule *rule) |
228 | { | 233 | { |
@@ -238,14 +243,15 @@ static void nft_rule_expr_activate(const struct nft_ctx *ctx, | |||
238 | } | 243 | } |
239 | 244 | ||
240 | static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, | 245 | static void nft_rule_expr_deactivate(const struct nft_ctx *ctx, |
241 | struct nft_rule *rule) | 246 | struct nft_rule *rule, |
247 | enum nft_trans_phase phase) | ||
242 | { | 248 | { |
243 | struct nft_expr *expr; | 249 | struct nft_expr *expr; |
244 | 250 | ||
245 | expr = nft_expr_first(rule); | 251 | expr = nft_expr_first(rule); |
246 | while (expr != nft_expr_last(rule) && expr->ops) { | 252 | while (expr != nft_expr_last(rule) && expr->ops) { |
247 | if (expr->ops->deactivate) | 253 | if (expr->ops->deactivate) |
248 | expr->ops->deactivate(ctx, expr); | 254 | expr->ops->deactivate(ctx, expr, phase); |
249 | 255 | ||
250 | expr = nft_expr_next(expr); | 256 | expr = nft_expr_next(expr); |
251 | } | 257 | } |
@@ -296,7 +302,7 @@ static int nft_delrule(struct nft_ctx *ctx, struct nft_rule *rule) | |||
296 | nft_trans_destroy(trans); | 302 | nft_trans_destroy(trans); |
297 | return err; | 303 | return err; |
298 | } | 304 | } |
299 | nft_rule_expr_deactivate(ctx, rule); | 305 | nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_PREPARE); |
300 | 306 | ||
301 | return 0; | 307 | return 0; |
302 | } | 308 | } |
@@ -1929,9 +1935,6 @@ static int nf_tables_delchain(struct net *net, struct sock *nlsk, | |||
1929 | */ | 1935 | */ |
1930 | int nft_register_expr(struct nft_expr_type *type) | 1936 | int nft_register_expr(struct nft_expr_type *type) |
1931 | { | 1937 | { |
1932 | if (!nft_expr_check_ops(type->ops)) | ||
1933 | return -EINVAL; | ||
1934 | |||
1935 | nfnl_lock(NFNL_SUBSYS_NFTABLES); | 1938 | nfnl_lock(NFNL_SUBSYS_NFTABLES); |
1936 | if (type->family == NFPROTO_UNSPEC) | 1939 | if (type->family == NFPROTO_UNSPEC) |
1937 | list_add_tail_rcu(&type->list, &nf_tables_expressions); | 1940 | list_add_tail_rcu(&type->list, &nf_tables_expressions); |
@@ -2079,10 +2082,6 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx, | |||
2079 | err = PTR_ERR(ops); | 2082 | err = PTR_ERR(ops); |
2080 | goto err1; | 2083 | goto err1; |
2081 | } | 2084 | } |
2082 | if (!nft_expr_check_ops(ops)) { | ||
2083 | err = -EINVAL; | ||
2084 | goto err1; | ||
2085 | } | ||
2086 | } else | 2085 | } else |
2087 | ops = type->ops; | 2086 | ops = type->ops; |
2088 | 2087 | ||
@@ -2511,7 +2510,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx, | |||
2511 | static void nf_tables_rule_release(const struct nft_ctx *ctx, | 2510 | static void nf_tables_rule_release(const struct nft_ctx *ctx, |
2512 | struct nft_rule *rule) | 2511 | struct nft_rule *rule) |
2513 | { | 2512 | { |
2514 | nft_rule_expr_deactivate(ctx, rule); | 2513 | nft_rule_expr_deactivate(ctx, rule, NFT_TRANS_RELEASE); |
2515 | nf_tables_rule_destroy(ctx, rule); | 2514 | nf_tables_rule_destroy(ctx, rule); |
2516 | } | 2515 | } |
2517 | 2516 | ||
@@ -3708,39 +3707,30 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, | |||
3708 | bind: | 3707 | bind: |
3709 | binding->chain = ctx->chain; | 3708 | binding->chain = ctx->chain; |
3710 | list_add_tail_rcu(&binding->list, &set->bindings); | 3709 | list_add_tail_rcu(&binding->list, &set->bindings); |
3710 | nft_set_trans_bind(ctx, set); | ||
3711 | |||
3711 | return 0; | 3712 | return 0; |
3712 | } | 3713 | } |
3713 | EXPORT_SYMBOL_GPL(nf_tables_bind_set); | 3714 | EXPORT_SYMBOL_GPL(nf_tables_bind_set); |
3714 | 3715 | ||
3715 | void nf_tables_rebind_set(const struct nft_ctx *ctx, struct nft_set *set, | ||
3716 | struct nft_set_binding *binding) | ||
3717 | { | ||
3718 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && | ||
3719 | nft_is_active(ctx->net, set)) | ||
3720 | list_add_tail_rcu(&set->list, &ctx->table->sets); | ||
3721 | |||
3722 | list_add_tail_rcu(&binding->list, &set->bindings); | ||
3723 | } | ||
3724 | EXPORT_SYMBOL_GPL(nf_tables_rebind_set); | ||
3725 | |||
3726 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, | 3716 | void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set, |
3727 | struct nft_set_binding *binding) | 3717 | struct nft_set_binding *binding, bool event) |
3728 | { | 3718 | { |
3729 | list_del_rcu(&binding->list); | 3719 | list_del_rcu(&binding->list); |
3730 | 3720 | ||
3731 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && | 3721 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) { |
3732 | nft_is_active(ctx->net, set)) | ||
3733 | list_del_rcu(&set->list); | 3722 | list_del_rcu(&set->list); |
3723 | if (event) | ||
3724 | nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, | ||
3725 | GFP_KERNEL); | ||
3726 | } | ||
3734 | } | 3727 | } |
3735 | EXPORT_SYMBOL_GPL(nf_tables_unbind_set); | 3728 | EXPORT_SYMBOL_GPL(nf_tables_unbind_set); |
3736 | 3729 | ||
3737 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) | 3730 | void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set) |
3738 | { | 3731 | { |
3739 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set) && | 3732 | if (list_empty(&set->bindings) && nft_set_is_anonymous(set)) |
3740 | nft_is_active(ctx->net, set)) { | ||
3741 | nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC); | ||
3742 | nft_set_destroy(set); | 3733 | nft_set_destroy(set); |
3743 | } | ||
3744 | } | 3734 | } |
3745 | EXPORT_SYMBOL_GPL(nf_tables_destroy_set); | 3735 | EXPORT_SYMBOL_GPL(nf_tables_destroy_set); |
3746 | 3736 | ||
@@ -6535,6 +6525,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) | |||
6535 | nf_tables_rule_notify(&trans->ctx, | 6525 | nf_tables_rule_notify(&trans->ctx, |
6536 | nft_trans_rule(trans), | 6526 | nft_trans_rule(trans), |
6537 | NFT_MSG_DELRULE); | 6527 | NFT_MSG_DELRULE); |
6528 | nft_rule_expr_deactivate(&trans->ctx, | ||
6529 | nft_trans_rule(trans), | ||
6530 | NFT_TRANS_COMMIT); | ||
6538 | break; | 6531 | break; |
6539 | case NFT_MSG_NEWSET: | 6532 | case NFT_MSG_NEWSET: |
6540 | nft_clear(net, nft_trans_set(trans)); | 6533 | nft_clear(net, nft_trans_set(trans)); |
@@ -6621,7 +6614,8 @@ static void nf_tables_abort_release(struct nft_trans *trans) | |||
6621 | nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); | 6614 | nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); |
6622 | break; | 6615 | break; |
6623 | case NFT_MSG_NEWSET: | 6616 | case NFT_MSG_NEWSET: |
6624 | nft_set_destroy(nft_trans_set(trans)); | 6617 | if (!nft_trans_set_bound(trans)) |
6618 | nft_set_destroy(nft_trans_set(trans)); | ||
6625 | break; | 6619 | break; |
6626 | case NFT_MSG_NEWSETELEM: | 6620 | case NFT_MSG_NEWSETELEM: |
6627 | nft_set_elem_destroy(nft_trans_elem_set(trans), | 6621 | nft_set_elem_destroy(nft_trans_elem_set(trans), |
@@ -6682,7 +6676,9 @@ static int __nf_tables_abort(struct net *net) | |||
6682 | case NFT_MSG_NEWRULE: | 6676 | case NFT_MSG_NEWRULE: |
6683 | trans->ctx.chain->use--; | 6677 | trans->ctx.chain->use--; |
6684 | list_del_rcu(&nft_trans_rule(trans)->list); | 6678 | list_del_rcu(&nft_trans_rule(trans)->list); |
6685 | nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans)); | 6679 | nft_rule_expr_deactivate(&trans->ctx, |
6680 | nft_trans_rule(trans), | ||
6681 | NFT_TRANS_ABORT); | ||
6686 | break; | 6682 | break; |
6687 | case NFT_MSG_DELRULE: | 6683 | case NFT_MSG_DELRULE: |
6688 | trans->ctx.chain->use++; | 6684 | trans->ctx.chain->use++; |
@@ -6692,7 +6688,8 @@ static int __nf_tables_abort(struct net *net) | |||
6692 | break; | 6688 | break; |
6693 | case NFT_MSG_NEWSET: | 6689 | case NFT_MSG_NEWSET: |
6694 | trans->ctx.table->use--; | 6690 | trans->ctx.table->use--; |
6695 | list_del_rcu(&nft_trans_set(trans)->list); | 6691 | if (!nft_trans_set_bound(trans)) |
6692 | list_del_rcu(&nft_trans_set(trans)->list); | ||
6696 | break; | 6693 | break; |
6697 | case NFT_MSG_DELSET: | 6694 | case NFT_MSG_DELSET: |
6698 | trans->ctx.table->use++; | 6695 | trans->ctx.table->use++; |
diff --git a/net/netfilter/nfnetlink_osf.c b/net/netfilter/nfnetlink_osf.c index 6f41dd74729d..1f1d90c1716b 100644 --- a/net/netfilter/nfnetlink_osf.c +++ b/net/netfilter/nfnetlink_osf.c | |||
@@ -66,6 +66,7 @@ static bool nf_osf_match_one(const struct sk_buff *skb, | |||
66 | int ttl_check, | 66 | int ttl_check, |
67 | struct nf_osf_hdr_ctx *ctx) | 67 | struct nf_osf_hdr_ctx *ctx) |
68 | { | 68 | { |
69 | const __u8 *optpinit = ctx->optp; | ||
69 | unsigned int check_WSS = 0; | 70 | unsigned int check_WSS = 0; |
70 | int fmatch = FMATCH_WRONG; | 71 | int fmatch = FMATCH_WRONG; |
71 | int foptsize, optnum; | 72 | int foptsize, optnum; |
@@ -155,6 +156,9 @@ static bool nf_osf_match_one(const struct sk_buff *skb, | |||
155 | } | 156 | } |
156 | } | 157 | } |
157 | 158 | ||
159 | if (fmatch != FMATCH_OK) | ||
160 | ctx->optp = optpinit; | ||
161 | |||
158 | return fmatch == FMATCH_OK; | 162 | return fmatch == FMATCH_OK; |
159 | } | 163 | } |
160 | 164 | ||
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 7334e0b80a5e..0a4bad55a8aa 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -22,11 +22,15 @@ | |||
22 | #include <linux/netfilter_bridge/ebtables.h> | 22 | #include <linux/netfilter_bridge/ebtables.h> |
23 | #include <linux/netfilter_arp/arp_tables.h> | 23 | #include <linux/netfilter_arp/arp_tables.h> |
24 | #include <net/netfilter/nf_tables.h> | 24 | #include <net/netfilter/nf_tables.h> |
25 | #include <net/netns/generic.h> | ||
25 | 26 | ||
26 | struct nft_xt { | 27 | struct nft_xt { |
27 | struct list_head head; | 28 | struct list_head head; |
28 | struct nft_expr_ops ops; | 29 | struct nft_expr_ops ops; |
29 | unsigned int refcnt; | 30 | refcount_t refcnt; |
31 | |||
32 | /* used only when transaction mutex is locked */ | ||
33 | unsigned int listcnt; | ||
30 | 34 | ||
31 | /* Unlike other expressions, ops doesn't have static storage duration. | 35 | /* Unlike other expressions, ops doesn't have static storage duration. |
32 | * nft core assumes they do. We use kfree_rcu so that nft core can | 36 | * nft core assumes they do. We use kfree_rcu so that nft core can |
@@ -43,10 +47,39 @@ struct nft_xt_match_priv { | |||
43 | void *info; | 47 | void *info; |
44 | }; | 48 | }; |
45 | 49 | ||
50 | struct nft_compat_net { | ||
51 | struct list_head nft_target_list; | ||
52 | struct list_head nft_match_list; | ||
53 | }; | ||
54 | |||
55 | static unsigned int nft_compat_net_id __read_mostly; | ||
56 | static struct nft_expr_type nft_match_type; | ||
57 | static struct nft_expr_type nft_target_type; | ||
58 | |||
59 | static struct nft_compat_net *nft_compat_pernet(struct net *net) | ||
60 | { | ||
61 | return net_generic(net, nft_compat_net_id); | ||
62 | } | ||
63 | |||
64 | static void nft_xt_get(struct nft_xt *xt) | ||
65 | { | ||
66 | /* refcount_inc() warns on 0 -> 1 transition, but we can't | ||
67 | * init the reference count to 1 in .select_ops -- we can't | ||
68 | * undo such an increase when another expression inside the same | ||
69 | * rule fails afterwards. | ||
70 | */ | ||
71 | if (xt->listcnt == 0) | ||
72 | refcount_set(&xt->refcnt, 1); | ||
73 | else | ||
74 | refcount_inc(&xt->refcnt); | ||
75 | |||
76 | xt->listcnt++; | ||
77 | } | ||
78 | |||
46 | static bool nft_xt_put(struct nft_xt *xt) | 79 | static bool nft_xt_put(struct nft_xt *xt) |
47 | { | 80 | { |
48 | if (--xt->refcnt == 0) { | 81 | if (refcount_dec_and_test(&xt->refcnt)) { |
49 | list_del(&xt->head); | 82 | WARN_ON_ONCE(!list_empty(&xt->head)); |
50 | kfree_rcu(xt, rcu_head); | 83 | kfree_rcu(xt, rcu_head); |
51 | return true; | 84 | return true; |
52 | } | 85 | } |
@@ -273,7 +306,7 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
273 | return -EINVAL; | 306 | return -EINVAL; |
274 | 307 | ||
275 | nft_xt = container_of(expr->ops, struct nft_xt, ops); | 308 | nft_xt = container_of(expr->ops, struct nft_xt, ops); |
276 | nft_xt->refcnt++; | 309 | nft_xt_get(nft_xt); |
277 | return 0; | 310 | return 0; |
278 | } | 311 | } |
279 | 312 | ||
@@ -282,6 +315,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
282 | { | 315 | { |
283 | struct xt_target *target = expr->ops->data; | 316 | struct xt_target *target = expr->ops->data; |
284 | void *info = nft_expr_priv(expr); | 317 | void *info = nft_expr_priv(expr); |
318 | struct module *me = target->me; | ||
285 | struct xt_tgdtor_param par; | 319 | struct xt_tgdtor_param par; |
286 | 320 | ||
287 | par.net = ctx->net; | 321 | par.net = ctx->net; |
@@ -292,7 +326,7 @@ nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
292 | par.target->destroy(&par); | 326 | par.target->destroy(&par); |
293 | 327 | ||
294 | if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) | 328 | if (nft_xt_put(container_of(expr->ops, struct nft_xt, ops))) |
295 | module_put(target->me); | 329 | module_put(me); |
296 | } | 330 | } |
297 | 331 | ||
298 | static int nft_extension_dump_info(struct sk_buff *skb, int attr, | 332 | static int nft_extension_dump_info(struct sk_buff *skb, int attr, |
@@ -486,7 +520,7 @@ __nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
486 | return ret; | 520 | return ret; |
487 | 521 | ||
488 | nft_xt = container_of(expr->ops, struct nft_xt, ops); | 522 | nft_xt = container_of(expr->ops, struct nft_xt, ops); |
489 | nft_xt->refcnt++; | 523 | nft_xt_get(nft_xt); |
490 | return 0; | 524 | return 0; |
491 | } | 525 | } |
492 | 526 | ||
@@ -540,6 +574,18 @@ nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | |||
540 | __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); | 574 | __nft_match_destroy(ctx, expr, nft_expr_priv(expr)); |
541 | } | 575 | } |
542 | 576 | ||
577 | static void nft_compat_deactivate(const struct nft_ctx *ctx, | ||
578 | const struct nft_expr *expr, | ||
579 | enum nft_trans_phase phase) | ||
580 | { | ||
581 | struct nft_xt *xt = container_of(expr->ops, struct nft_xt, ops); | ||
582 | |||
583 | if (phase == NFT_TRANS_ABORT || phase == NFT_TRANS_COMMIT) { | ||
584 | if (--xt->listcnt == 0) | ||
585 | list_del_init(&xt->head); | ||
586 | } | ||
587 | } | ||
588 | |||
543 | static void | 589 | static void |
544 | nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) | 590 | nft_match_large_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) |
545 | { | 591 | { |
@@ -734,10 +780,6 @@ static const struct nfnetlink_subsystem nfnl_compat_subsys = { | |||
734 | .cb = nfnl_nft_compat_cb, | 780 | .cb = nfnl_nft_compat_cb, |
735 | }; | 781 | }; |
736 | 782 | ||
737 | static LIST_HEAD(nft_match_list); | ||
738 | |||
739 | static struct nft_expr_type nft_match_type; | ||
740 | |||
741 | static bool nft_match_cmp(const struct xt_match *match, | 783 | static bool nft_match_cmp(const struct xt_match *match, |
742 | const char *name, u32 rev, u32 family) | 784 | const char *name, u32 rev, u32 family) |
743 | { | 785 | { |
@@ -749,6 +791,7 @@ static const struct nft_expr_ops * | |||
749 | nft_match_select_ops(const struct nft_ctx *ctx, | 791 | nft_match_select_ops(const struct nft_ctx *ctx, |
750 | const struct nlattr * const tb[]) | 792 | const struct nlattr * const tb[]) |
751 | { | 793 | { |
794 | struct nft_compat_net *cn; | ||
752 | struct nft_xt *nft_match; | 795 | struct nft_xt *nft_match; |
753 | struct xt_match *match; | 796 | struct xt_match *match; |
754 | unsigned int matchsize; | 797 | unsigned int matchsize; |
@@ -765,8 +808,10 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
765 | rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV])); | 808 | rev = ntohl(nla_get_be32(tb[NFTA_MATCH_REV])); |
766 | family = ctx->family; | 809 | family = ctx->family; |
767 | 810 | ||
811 | cn = nft_compat_pernet(ctx->net); | ||
812 | |||
768 | /* Re-use the existing match if it's already loaded. */ | 813 | /* Re-use the existing match if it's already loaded. */ |
769 | list_for_each_entry(nft_match, &nft_match_list, head) { | 814 | list_for_each_entry(nft_match, &cn->nft_match_list, head) { |
770 | struct xt_match *match = nft_match->ops.data; | 815 | struct xt_match *match = nft_match->ops.data; |
771 | 816 | ||
772 | if (nft_match_cmp(match, mt_name, rev, family)) | 817 | if (nft_match_cmp(match, mt_name, rev, family)) |
@@ -789,11 +834,12 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
789 | goto err; | 834 | goto err; |
790 | } | 835 | } |
791 | 836 | ||
792 | nft_match->refcnt = 0; | 837 | refcount_set(&nft_match->refcnt, 0); |
793 | nft_match->ops.type = &nft_match_type; | 838 | nft_match->ops.type = &nft_match_type; |
794 | nft_match->ops.eval = nft_match_eval; | 839 | nft_match->ops.eval = nft_match_eval; |
795 | nft_match->ops.init = nft_match_init; | 840 | nft_match->ops.init = nft_match_init; |
796 | nft_match->ops.destroy = nft_match_destroy; | 841 | nft_match->ops.destroy = nft_match_destroy; |
842 | nft_match->ops.deactivate = nft_compat_deactivate; | ||
797 | nft_match->ops.dump = nft_match_dump; | 843 | nft_match->ops.dump = nft_match_dump; |
798 | nft_match->ops.validate = nft_match_validate; | 844 | nft_match->ops.validate = nft_match_validate; |
799 | nft_match->ops.data = match; | 845 | nft_match->ops.data = match; |
@@ -810,7 +856,8 @@ nft_match_select_ops(const struct nft_ctx *ctx, | |||
810 | 856 | ||
811 | nft_match->ops.size = matchsize; | 857 | nft_match->ops.size = matchsize; |
812 | 858 | ||
813 | list_add(&nft_match->head, &nft_match_list); | 859 | nft_match->listcnt = 0; |
860 | list_add(&nft_match->head, &cn->nft_match_list); | ||
814 | 861 | ||
815 | return &nft_match->ops; | 862 | return &nft_match->ops; |
816 | err: | 863 | err: |
@@ -826,10 +873,6 @@ static struct nft_expr_type nft_match_type __read_mostly = { | |||
826 | .owner = THIS_MODULE, | 873 | .owner = THIS_MODULE, |
827 | }; | 874 | }; |
828 | 875 | ||
829 | static LIST_HEAD(nft_target_list); | ||
830 | |||
831 | static struct nft_expr_type nft_target_type; | ||
832 | |||
833 | static bool nft_target_cmp(const struct xt_target *tg, | 876 | static bool nft_target_cmp(const struct xt_target *tg, |
834 | const char *name, u32 rev, u32 family) | 877 | const char *name, u32 rev, u32 family) |
835 | { | 878 | { |
@@ -841,6 +884,7 @@ static const struct nft_expr_ops * | |||
841 | nft_target_select_ops(const struct nft_ctx *ctx, | 884 | nft_target_select_ops(const struct nft_ctx *ctx, |
842 | const struct nlattr * const tb[]) | 885 | const struct nlattr * const tb[]) |
843 | { | 886 | { |
887 | struct nft_compat_net *cn; | ||
844 | struct nft_xt *nft_target; | 888 | struct nft_xt *nft_target; |
845 | struct xt_target *target; | 889 | struct xt_target *target; |
846 | char *tg_name; | 890 | char *tg_name; |
@@ -861,8 +905,9 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
861 | strcmp(tg_name, "standard") == 0) | 905 | strcmp(tg_name, "standard") == 0) |
862 | return ERR_PTR(-EINVAL); | 906 | return ERR_PTR(-EINVAL); |
863 | 907 | ||
908 | cn = nft_compat_pernet(ctx->net); | ||
864 | /* Re-use the existing target if it's already loaded. */ | 909 | /* Re-use the existing target if it's already loaded. */ |
865 | list_for_each_entry(nft_target, &nft_target_list, head) { | 910 | list_for_each_entry(nft_target, &cn->nft_target_list, head) { |
866 | struct xt_target *target = nft_target->ops.data; | 911 | struct xt_target *target = nft_target->ops.data; |
867 | 912 | ||
868 | if (!target->target) | 913 | if (!target->target) |
@@ -893,11 +938,12 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
893 | goto err; | 938 | goto err; |
894 | } | 939 | } |
895 | 940 | ||
896 | nft_target->refcnt = 0; | 941 | refcount_set(&nft_target->refcnt, 0); |
897 | nft_target->ops.type = &nft_target_type; | 942 | nft_target->ops.type = &nft_target_type; |
898 | nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); | 943 | nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); |
899 | nft_target->ops.init = nft_target_init; | 944 | nft_target->ops.init = nft_target_init; |
900 | nft_target->ops.destroy = nft_target_destroy; | 945 | nft_target->ops.destroy = nft_target_destroy; |
946 | nft_target->ops.deactivate = nft_compat_deactivate; | ||
901 | nft_target->ops.dump = nft_target_dump; | 947 | nft_target->ops.dump = nft_target_dump; |
902 | nft_target->ops.validate = nft_target_validate; | 948 | nft_target->ops.validate = nft_target_validate; |
903 | nft_target->ops.data = target; | 949 | nft_target->ops.data = target; |
@@ -907,7 +953,8 @@ nft_target_select_ops(const struct nft_ctx *ctx, | |||
907 | else | 953 | else |
908 | nft_target->ops.eval = nft_target_eval_xt; | 954 | nft_target->ops.eval = nft_target_eval_xt; |
909 | 955 | ||
910 | list_add(&nft_target->head, &nft_target_list); | 956 | nft_target->listcnt = 0; |
957 | list_add(&nft_target->head, &cn->nft_target_list); | ||
911 | 958 | ||
912 | return &nft_target->ops; | 959 | return &nft_target->ops; |
913 | err: | 960 | err: |
@@ -923,13 +970,74 @@ static struct nft_expr_type nft_target_type __read_mostly = { | |||
923 | .owner = THIS_MODULE, | 970 | .owner = THIS_MODULE, |
924 | }; | 971 | }; |
925 | 972 | ||
973 | static int __net_init nft_compat_init_net(struct net *net) | ||
974 | { | ||
975 | struct nft_compat_net *cn = nft_compat_pernet(net); | ||
976 | |||
977 | INIT_LIST_HEAD(&cn->nft_target_list); | ||
978 | INIT_LIST_HEAD(&cn->nft_match_list); | ||
979 | |||
980 | return 0; | ||
981 | } | ||
982 | |||
983 | static void __net_exit nft_compat_exit_net(struct net *net) | ||
984 | { | ||
985 | struct nft_compat_net *cn = nft_compat_pernet(net); | ||
986 | struct nft_xt *xt, *next; | ||
987 | |||
988 | if (list_empty(&cn->nft_match_list) && | ||
989 | list_empty(&cn->nft_target_list)) | ||
990 | return; | ||
991 | |||
992 | /* If there was an error that caused nft_xt expr to not be initialized | ||
993 | * fully and noone else requested the same expression later, the lists | ||
994 | * contain 0-refcount entries that still hold module reference. | ||
995 | * | ||
996 | * Clean them here. | ||
997 | */ | ||
998 | mutex_lock(&net->nft.commit_mutex); | ||
999 | list_for_each_entry_safe(xt, next, &cn->nft_target_list, head) { | ||
1000 | struct xt_target *target = xt->ops.data; | ||
1001 | |||
1002 | list_del_init(&xt->head); | ||
1003 | |||
1004 | if (refcount_read(&xt->refcnt)) | ||
1005 | continue; | ||
1006 | module_put(target->me); | ||
1007 | kfree(xt); | ||
1008 | } | ||
1009 | |||
1010 | list_for_each_entry_safe(xt, next, &cn->nft_match_list, head) { | ||
1011 | struct xt_match *match = xt->ops.data; | ||
1012 | |||
1013 | list_del_init(&xt->head); | ||
1014 | |||
1015 | if (refcount_read(&xt->refcnt)) | ||
1016 | continue; | ||
1017 | module_put(match->me); | ||
1018 | kfree(xt); | ||
1019 | } | ||
1020 | mutex_unlock(&net->nft.commit_mutex); | ||
1021 | } | ||
1022 | |||
1023 | static struct pernet_operations nft_compat_net_ops = { | ||
1024 | .init = nft_compat_init_net, | ||
1025 | .exit = nft_compat_exit_net, | ||
1026 | .id = &nft_compat_net_id, | ||
1027 | .size = sizeof(struct nft_compat_net), | ||
1028 | }; | ||
1029 | |||
926 | static int __init nft_compat_module_init(void) | 1030 | static int __init nft_compat_module_init(void) |
927 | { | 1031 | { |
928 | int ret; | 1032 | int ret; |
929 | 1033 | ||
1034 | ret = register_pernet_subsys(&nft_compat_net_ops); | ||
1035 | if (ret < 0) | ||
1036 | goto err_target; | ||
1037 | |||
930 | ret = nft_register_expr(&nft_match_type); | 1038 | ret = nft_register_expr(&nft_match_type); |
931 | if (ret < 0) | 1039 | if (ret < 0) |
932 | return ret; | 1040 | goto err_pernet; |
933 | 1041 | ||
934 | ret = nft_register_expr(&nft_target_type); | 1042 | ret = nft_register_expr(&nft_target_type); |
935 | if (ret < 0) | 1043 | if (ret < 0) |
@@ -942,45 +1050,21 @@ static int __init nft_compat_module_init(void) | |||
942 | } | 1050 | } |
943 | 1051 | ||
944 | return ret; | 1052 | return ret; |
945 | |||
946 | err_target: | 1053 | err_target: |
947 | nft_unregister_expr(&nft_target_type); | 1054 | nft_unregister_expr(&nft_target_type); |
948 | err_match: | 1055 | err_match: |
949 | nft_unregister_expr(&nft_match_type); | 1056 | nft_unregister_expr(&nft_match_type); |
1057 | err_pernet: | ||
1058 | unregister_pernet_subsys(&nft_compat_net_ops); | ||
950 | return ret; | 1059 | return ret; |
951 | } | 1060 | } |
952 | 1061 | ||
953 | static void __exit nft_compat_module_exit(void) | 1062 | static void __exit nft_compat_module_exit(void) |
954 | { | 1063 | { |
955 | struct nft_xt *xt, *next; | ||
956 | |||
957 | /* list should be empty here, it can be non-empty only in case there | ||
958 | * was an error that caused nft_xt expr to not be initialized fully | ||
959 | * and noone else requested the same expression later. | ||
960 | * | ||
961 | * In this case, the lists contain 0-refcount entries that still | ||
962 | * hold module reference. | ||
963 | */ | ||
964 | list_for_each_entry_safe(xt, next, &nft_target_list, head) { | ||
965 | struct xt_target *target = xt->ops.data; | ||
966 | |||
967 | if (WARN_ON_ONCE(xt->refcnt)) | ||
968 | continue; | ||
969 | module_put(target->me); | ||
970 | kfree(xt); | ||
971 | } | ||
972 | |||
973 | list_for_each_entry_safe(xt, next, &nft_match_list, head) { | ||
974 | struct xt_match *match = xt->ops.data; | ||
975 | |||
976 | if (WARN_ON_ONCE(xt->refcnt)) | ||
977 | continue; | ||
978 | module_put(match->me); | ||
979 | kfree(xt); | ||
980 | } | ||
981 | nfnetlink_subsys_unregister(&nfnl_compat_subsys); | 1064 | nfnetlink_subsys_unregister(&nfnl_compat_subsys); |
982 | nft_unregister_expr(&nft_target_type); | 1065 | nft_unregister_expr(&nft_target_type); |
983 | nft_unregister_expr(&nft_match_type); | 1066 | nft_unregister_expr(&nft_match_type); |
1067 | unregister_pernet_subsys(&nft_compat_net_ops); | ||
984 | } | 1068 | } |
985 | 1069 | ||
986 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT); | 1070 | MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_NFT_COMPAT); |
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 07d4efd3d851..f1172f99752b 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
@@ -235,20 +235,17 @@ err1: | |||
235 | return err; | 235 | return err; |
236 | } | 236 | } |
237 | 237 | ||
238 | static void nft_dynset_activate(const struct nft_ctx *ctx, | ||
239 | const struct nft_expr *expr) | ||
240 | { | ||
241 | struct nft_dynset *priv = nft_expr_priv(expr); | ||
242 | |||
243 | nf_tables_rebind_set(ctx, priv->set, &priv->binding); | ||
244 | } | ||
245 | |||
246 | static void nft_dynset_deactivate(const struct nft_ctx *ctx, | 238 | static void nft_dynset_deactivate(const struct nft_ctx *ctx, |
247 | const struct nft_expr *expr) | 239 | const struct nft_expr *expr, |
240 | enum nft_trans_phase phase) | ||
248 | { | 241 | { |
249 | struct nft_dynset *priv = nft_expr_priv(expr); | 242 | struct nft_dynset *priv = nft_expr_priv(expr); |
250 | 243 | ||
251 | nf_tables_unbind_set(ctx, priv->set, &priv->binding); | 244 | if (phase == NFT_TRANS_PREPARE) |
245 | return; | ||
246 | |||
247 | nf_tables_unbind_set(ctx, priv->set, &priv->binding, | ||
248 | phase == NFT_TRANS_COMMIT); | ||
252 | } | 249 | } |
253 | 250 | ||
254 | static void nft_dynset_destroy(const struct nft_ctx *ctx, | 251 | static void nft_dynset_destroy(const struct nft_ctx *ctx, |
@@ -296,7 +293,6 @@ static const struct nft_expr_ops nft_dynset_ops = { | |||
296 | .eval = nft_dynset_eval, | 293 | .eval = nft_dynset_eval, |
297 | .init = nft_dynset_init, | 294 | .init = nft_dynset_init, |
298 | .destroy = nft_dynset_destroy, | 295 | .destroy = nft_dynset_destroy, |
299 | .activate = nft_dynset_activate, | ||
300 | .deactivate = nft_dynset_deactivate, | 296 | .deactivate = nft_dynset_deactivate, |
301 | .dump = nft_dynset_dump, | 297 | .dump = nft_dynset_dump, |
302 | }; | 298 | }; |
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 0777a93211e2..3f6d1d2a6281 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c | |||
@@ -72,10 +72,14 @@ static void nft_immediate_activate(const struct nft_ctx *ctx, | |||
72 | } | 72 | } |
73 | 73 | ||
74 | static void nft_immediate_deactivate(const struct nft_ctx *ctx, | 74 | static void nft_immediate_deactivate(const struct nft_ctx *ctx, |
75 | const struct nft_expr *expr) | 75 | const struct nft_expr *expr, |
76 | enum nft_trans_phase phase) | ||
76 | { | 77 | { |
77 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); | 78 | const struct nft_immediate_expr *priv = nft_expr_priv(expr); |
78 | 79 | ||
80 | if (phase == NFT_TRANS_COMMIT) | ||
81 | return; | ||
82 | |||
79 | return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); | 83 | return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg)); |
80 | } | 84 | } |
81 | 85 | ||
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index 227b2b15a19c..14496da5141d 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c | |||
@@ -121,20 +121,17 @@ static int nft_lookup_init(const struct nft_ctx *ctx, | |||
121 | return 0; | 121 | return 0; |
122 | } | 122 | } |
123 | 123 | ||
124 | static void nft_lookup_activate(const struct nft_ctx *ctx, | ||
125 | const struct nft_expr *expr) | ||
126 | { | ||
127 | struct nft_lookup *priv = nft_expr_priv(expr); | ||
128 | |||
129 | nf_tables_rebind_set(ctx, priv->set, &priv->binding); | ||
130 | } | ||
131 | |||
132 | static void nft_lookup_deactivate(const struct nft_ctx *ctx, | 124 | static void nft_lookup_deactivate(const struct nft_ctx *ctx, |
133 | const struct nft_expr *expr) | 125 | const struct nft_expr *expr, |
126 | enum nft_trans_phase phase) | ||
134 | { | 127 | { |
135 | struct nft_lookup *priv = nft_expr_priv(expr); | 128 | struct nft_lookup *priv = nft_expr_priv(expr); |
136 | 129 | ||
137 | nf_tables_unbind_set(ctx, priv->set, &priv->binding); | 130 | if (phase == NFT_TRANS_PREPARE) |
131 | return; | ||
132 | |||
133 | nf_tables_unbind_set(ctx, priv->set, &priv->binding, | ||
134 | phase == NFT_TRANS_COMMIT); | ||
138 | } | 135 | } |
139 | 136 | ||
140 | static void nft_lookup_destroy(const struct nft_ctx *ctx, | 137 | static void nft_lookup_destroy(const struct nft_ctx *ctx, |
@@ -225,7 +222,6 @@ static const struct nft_expr_ops nft_lookup_ops = { | |||
225 | .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), | 222 | .size = NFT_EXPR_SIZE(sizeof(struct nft_lookup)), |
226 | .eval = nft_lookup_eval, | 223 | .eval = nft_lookup_eval, |
227 | .init = nft_lookup_init, | 224 | .init = nft_lookup_init, |
228 | .activate = nft_lookup_activate, | ||
229 | .deactivate = nft_lookup_deactivate, | 225 | .deactivate = nft_lookup_deactivate, |
230 | .destroy = nft_lookup_destroy, | 226 | .destroy = nft_lookup_destroy, |
231 | .dump = nft_lookup_dump, | 227 | .dump = nft_lookup_dump, |
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index a3185ca2a3a9..ae178e914486 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c | |||
@@ -155,20 +155,17 @@ nla_put_failure: | |||
155 | return -1; | 155 | return -1; |
156 | } | 156 | } |
157 | 157 | ||
158 | static void nft_objref_map_activate(const struct nft_ctx *ctx, | ||
159 | const struct nft_expr *expr) | ||
160 | { | ||
161 | struct nft_objref_map *priv = nft_expr_priv(expr); | ||
162 | |||
163 | nf_tables_rebind_set(ctx, priv->set, &priv->binding); | ||
164 | } | ||
165 | |||
166 | static void nft_objref_map_deactivate(const struct nft_ctx *ctx, | 158 | static void nft_objref_map_deactivate(const struct nft_ctx *ctx, |
167 | const struct nft_expr *expr) | 159 | const struct nft_expr *expr, |
160 | enum nft_trans_phase phase) | ||
168 | { | 161 | { |
169 | struct nft_objref_map *priv = nft_expr_priv(expr); | 162 | struct nft_objref_map *priv = nft_expr_priv(expr); |
170 | 163 | ||
171 | nf_tables_unbind_set(ctx, priv->set, &priv->binding); | 164 | if (phase == NFT_TRANS_PREPARE) |
165 | return; | ||
166 | |||
167 | nf_tables_unbind_set(ctx, priv->set, &priv->binding, | ||
168 | phase == NFT_TRANS_COMMIT); | ||
172 | } | 169 | } |
173 | 170 | ||
174 | static void nft_objref_map_destroy(const struct nft_ctx *ctx, | 171 | static void nft_objref_map_destroy(const struct nft_ctx *ctx, |
@@ -185,7 +182,6 @@ static const struct nft_expr_ops nft_objref_map_ops = { | |||
185 | .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), | 182 | .size = NFT_EXPR_SIZE(sizeof(struct nft_objref_map)), |
186 | .eval = nft_objref_map_eval, | 183 | .eval = nft_objref_map_eval, |
187 | .init = nft_objref_map_init, | 184 | .init = nft_objref_map_init, |
188 | .activate = nft_objref_map_activate, | ||
189 | .deactivate = nft_objref_map_deactivate, | 185 | .deactivate = nft_objref_map_deactivate, |
190 | .destroy = nft_objref_map_destroy, | 186 | .destroy = nft_objref_map_destroy, |
191 | .dump = nft_objref_map_dump, | 187 | .dump = nft_objref_map_dump, |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index aecadd471e1d..13e1ac333fa4 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -1899,7 +1899,7 @@ static int __init xt_init(void) | |||
1899 | seqcount_init(&per_cpu(xt_recseq, i)); | 1899 | seqcount_init(&per_cpu(xt_recseq, i)); |
1900 | } | 1900 | } |
1901 | 1901 | ||
1902 | xt = kmalloc_array(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); | 1902 | xt = kcalloc(NFPROTO_NUMPROTO, sizeof(struct xt_af), GFP_KERNEL); |
1903 | if (!xt) | 1903 | if (!xt) |
1904 | return -ENOMEM; | 1904 | return -ENOMEM; |
1905 | 1905 | ||
diff --git a/net/netrom/nr_timer.c b/net/netrom/nr_timer.c index cbd51ed5a2d7..908e53ab47a4 100644 --- a/net/netrom/nr_timer.c +++ b/net/netrom/nr_timer.c | |||
@@ -52,21 +52,21 @@ void nr_start_t1timer(struct sock *sk) | |||
52 | { | 52 | { |
53 | struct nr_sock *nr = nr_sk(sk); | 53 | struct nr_sock *nr = nr_sk(sk); |
54 | 54 | ||
55 | mod_timer(&nr->t1timer, jiffies + nr->t1); | 55 | sk_reset_timer(sk, &nr->t1timer, jiffies + nr->t1); |
56 | } | 56 | } |
57 | 57 | ||
58 | void nr_start_t2timer(struct sock *sk) | 58 | void nr_start_t2timer(struct sock *sk) |
59 | { | 59 | { |
60 | struct nr_sock *nr = nr_sk(sk); | 60 | struct nr_sock *nr = nr_sk(sk); |
61 | 61 | ||
62 | mod_timer(&nr->t2timer, jiffies + nr->t2); | 62 | sk_reset_timer(sk, &nr->t2timer, jiffies + nr->t2); |
63 | } | 63 | } |
64 | 64 | ||
65 | void nr_start_t4timer(struct sock *sk) | 65 | void nr_start_t4timer(struct sock *sk) |
66 | { | 66 | { |
67 | struct nr_sock *nr = nr_sk(sk); | 67 | struct nr_sock *nr = nr_sk(sk); |
68 | 68 | ||
69 | mod_timer(&nr->t4timer, jiffies + nr->t4); | 69 | sk_reset_timer(sk, &nr->t4timer, jiffies + nr->t4); |
70 | } | 70 | } |
71 | 71 | ||
72 | void nr_start_idletimer(struct sock *sk) | 72 | void nr_start_idletimer(struct sock *sk) |
@@ -74,37 +74,37 @@ void nr_start_idletimer(struct sock *sk) | |||
74 | struct nr_sock *nr = nr_sk(sk); | 74 | struct nr_sock *nr = nr_sk(sk); |
75 | 75 | ||
76 | if (nr->idle > 0) | 76 | if (nr->idle > 0) |
77 | mod_timer(&nr->idletimer, jiffies + nr->idle); | 77 | sk_reset_timer(sk, &nr->idletimer, jiffies + nr->idle); |
78 | } | 78 | } |
79 | 79 | ||
80 | void nr_start_heartbeat(struct sock *sk) | 80 | void nr_start_heartbeat(struct sock *sk) |
81 | { | 81 | { |
82 | mod_timer(&sk->sk_timer, jiffies + 5 * HZ); | 82 | sk_reset_timer(sk, &sk->sk_timer, jiffies + 5 * HZ); |
83 | } | 83 | } |
84 | 84 | ||
85 | void nr_stop_t1timer(struct sock *sk) | 85 | void nr_stop_t1timer(struct sock *sk) |
86 | { | 86 | { |
87 | del_timer(&nr_sk(sk)->t1timer); | 87 | sk_stop_timer(sk, &nr_sk(sk)->t1timer); |
88 | } | 88 | } |
89 | 89 | ||
90 | void nr_stop_t2timer(struct sock *sk) | 90 | void nr_stop_t2timer(struct sock *sk) |
91 | { | 91 | { |
92 | del_timer(&nr_sk(sk)->t2timer); | 92 | sk_stop_timer(sk, &nr_sk(sk)->t2timer); |
93 | } | 93 | } |
94 | 94 | ||
95 | void nr_stop_t4timer(struct sock *sk) | 95 | void nr_stop_t4timer(struct sock *sk) |
96 | { | 96 | { |
97 | del_timer(&nr_sk(sk)->t4timer); | 97 | sk_stop_timer(sk, &nr_sk(sk)->t4timer); |
98 | } | 98 | } |
99 | 99 | ||
100 | void nr_stop_idletimer(struct sock *sk) | 100 | void nr_stop_idletimer(struct sock *sk) |
101 | { | 101 | { |
102 | del_timer(&nr_sk(sk)->idletimer); | 102 | sk_stop_timer(sk, &nr_sk(sk)->idletimer); |
103 | } | 103 | } |
104 | 104 | ||
105 | void nr_stop_heartbeat(struct sock *sk) | 105 | void nr_stop_heartbeat(struct sock *sk) |
106 | { | 106 | { |
107 | del_timer(&sk->sk_timer); | 107 | sk_stop_timer(sk, &sk->sk_timer); |
108 | } | 108 | } |
109 | 109 | ||
110 | int nr_t1timer_running(struct sock *sk) | 110 | int nr_t1timer_running(struct sock *sk) |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3b1a78906bc0..1cd1d83a4be0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -4292,7 +4292,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u, | |||
4292 | rb->frames_per_block = req->tp_block_size / req->tp_frame_size; | 4292 | rb->frames_per_block = req->tp_block_size / req->tp_frame_size; |
4293 | if (unlikely(rb->frames_per_block == 0)) | 4293 | if (unlikely(rb->frames_per_block == 0)) |
4294 | goto out; | 4294 | goto out; |
4295 | if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr)) | 4295 | if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr)) |
4296 | goto out; | 4296 | goto out; |
4297 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != | 4297 | if (unlikely((rb->frames_per_block * req->tp_block_nr) != |
4298 | req->tp_frame_nr)) | 4298 | req->tp_frame_nr)) |
diff --git a/net/rds/bind.c b/net/rds/bind.c index 762d2c6788a3..17c9d9f0c848 100644 --- a/net/rds/bind.c +++ b/net/rds/bind.c | |||
@@ -78,10 +78,10 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port, | |||
78 | __rds_create_bind_key(key, addr, port, scope_id); | 78 | __rds_create_bind_key(key, addr, port, scope_id); |
79 | rcu_read_lock(); | 79 | rcu_read_lock(); |
80 | rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); | 80 | rs = rhashtable_lookup(&bind_hash_table, key, ht_parms); |
81 | if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD)) | 81 | if (rs && (sock_flag(rds_rs_to_sk(rs), SOCK_DEAD) || |
82 | rds_sock_addref(rs); | 82 | !refcount_inc_not_zero(&rds_rs_to_sk(rs)->sk_refcnt))) |
83 | else | ||
84 | rs = NULL; | 83 | rs = NULL; |
84 | |||
85 | rcu_read_unlock(); | 85 | rcu_read_unlock(); |
86 | 86 | ||
87 | rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, | 87 | rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr, |
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c index 77e9f85a2c92..f2ff21d7df08 100644 --- a/net/rose/rose_route.c +++ b/net/rose/rose_route.c | |||
@@ -850,6 +850,7 @@ void rose_link_device_down(struct net_device *dev) | |||
850 | 850 | ||
851 | /* | 851 | /* |
852 | * Route a frame to an appropriate AX.25 connection. | 852 | * Route a frame to an appropriate AX.25 connection. |
853 | * A NULL ax25_cb indicates an internally generated frame. | ||
853 | */ | 854 | */ |
854 | int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) | 855 | int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) |
855 | { | 856 | { |
@@ -867,6 +868,10 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25) | |||
867 | 868 | ||
868 | if (skb->len < ROSE_MIN_LEN) | 869 | if (skb->len < ROSE_MIN_LEN) |
869 | return res; | 870 | return res; |
871 | |||
872 | if (!ax25) | ||
873 | return rose_loopback_queue(skb, NULL); | ||
874 | |||
870 | frametype = skb->data[2]; | 875 | frametype = skb->data[2]; |
871 | lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); | 876 | lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); |
872 | if (frametype == ROSE_CALL_REQUEST && | 877 | if (frametype == ROSE_CALL_REQUEST && |
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c index eaf19ebaa964..3f7bb11f3290 100644 --- a/net/rxrpc/recvmsg.c +++ b/net/rxrpc/recvmsg.c | |||
@@ -596,6 +596,7 @@ error_requeue_call: | |||
596 | } | 596 | } |
597 | error_no_call: | 597 | error_no_call: |
598 | release_sock(&rx->sk); | 598 | release_sock(&rx->sk); |
599 | error_trace: | ||
599 | trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); | 600 | trace_rxrpc_recvmsg(call, rxrpc_recvmsg_return, 0, 0, 0, ret); |
600 | return ret; | 601 | return ret; |
601 | 602 | ||
@@ -604,7 +605,7 @@ wait_interrupted: | |||
604 | wait_error: | 605 | wait_error: |
605 | finish_wait(sk_sleep(&rx->sk), &wait); | 606 | finish_wait(sk_sleep(&rx->sk), &wait); |
606 | call = NULL; | 607 | call = NULL; |
607 | goto error_no_call; | 608 | goto error_trace; |
608 | } | 609 | } |
609 | 610 | ||
610 | /** | 611 | /** |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index f6aa57fbbbaf..12ca9d13db83 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -1371,7 +1371,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
1371 | if (!tc_skip_hw(fnew->flags)) { | 1371 | if (!tc_skip_hw(fnew->flags)) { |
1372 | err = fl_hw_replace_filter(tp, fnew, extack); | 1372 | err = fl_hw_replace_filter(tp, fnew, extack); |
1373 | if (err) | 1373 | if (err) |
1374 | goto errout_mask; | 1374 | goto errout_mask_ht; |
1375 | } | 1375 | } |
1376 | 1376 | ||
1377 | if (!tc_in_hw(fnew->flags)) | 1377 | if (!tc_in_hw(fnew->flags)) |
@@ -1401,6 +1401,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
1401 | kfree(mask); | 1401 | kfree(mask); |
1402 | return 0; | 1402 | return 0; |
1403 | 1403 | ||
1404 | errout_mask_ht: | ||
1405 | rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node, | ||
1406 | fnew->mask->filter_ht_params); | ||
1407 | |||
1404 | errout_mask: | 1408 | errout_mask: |
1405 | fl_mask_put(head, fnew->mask, false); | 1409 | fl_mask_put(head, fnew->mask, false); |
1406 | 1410 | ||
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 9ccc93f257db..38bb882bb958 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c | |||
@@ -48,7 +48,7 @@ struct tcindex_data { | |||
48 | u32 hash; /* hash table size; 0 if undefined */ | 48 | u32 hash; /* hash table size; 0 if undefined */ |
49 | u32 alloc_hash; /* allocated size */ | 49 | u32 alloc_hash; /* allocated size */ |
50 | u32 fall_through; /* 0: only classify if explicit match */ | 50 | u32 fall_through; /* 0: only classify if explicit match */ |
51 | struct rcu_head rcu; | 51 | struct rcu_work rwork; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) | 54 | static inline int tcindex_filter_is_set(struct tcindex_filter_result *r) |
@@ -221,17 +221,11 @@ found: | |||
221 | return 0; | 221 | return 0; |
222 | } | 222 | } |
223 | 223 | ||
224 | static int tcindex_destroy_element(struct tcf_proto *tp, | 224 | static void tcindex_destroy_work(struct work_struct *work) |
225 | void *arg, struct tcf_walker *walker) | ||
226 | { | ||
227 | bool last; | ||
228 | |||
229 | return tcindex_delete(tp, arg, &last, NULL); | ||
230 | } | ||
231 | |||
232 | static void __tcindex_destroy(struct rcu_head *head) | ||
233 | { | 225 | { |
234 | struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); | 226 | struct tcindex_data *p = container_of(to_rcu_work(work), |
227 | struct tcindex_data, | ||
228 | rwork); | ||
235 | 229 | ||
236 | kfree(p->perfect); | 230 | kfree(p->perfect); |
237 | kfree(p->h); | 231 | kfree(p->h); |
@@ -258,9 +252,11 @@ static int tcindex_filter_result_init(struct tcindex_filter_result *r) | |||
258 | return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); | 252 | return tcf_exts_init(&r->exts, TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); |
259 | } | 253 | } |
260 | 254 | ||
261 | static void __tcindex_partial_destroy(struct rcu_head *head) | 255 | static void tcindex_partial_destroy_work(struct work_struct *work) |
262 | { | 256 | { |
263 | struct tcindex_data *p = container_of(head, struct tcindex_data, rcu); | 257 | struct tcindex_data *p = container_of(to_rcu_work(work), |
258 | struct tcindex_data, | ||
259 | rwork); | ||
264 | 260 | ||
265 | kfree(p->perfect); | 261 | kfree(p->perfect); |
266 | kfree(p); | 262 | kfree(p); |
@@ -275,7 +271,7 @@ static void tcindex_free_perfect_hash(struct tcindex_data *cp) | |||
275 | kfree(cp->perfect); | 271 | kfree(cp->perfect); |
276 | } | 272 | } |
277 | 273 | ||
278 | static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) | 274 | static int tcindex_alloc_perfect_hash(struct net *net, struct tcindex_data *cp) |
279 | { | 275 | { |
280 | int i, err = 0; | 276 | int i, err = 0; |
281 | 277 | ||
@@ -289,6 +285,9 @@ static int tcindex_alloc_perfect_hash(struct tcindex_data *cp) | |||
289 | TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); | 285 | TCA_TCINDEX_ACT, TCA_TCINDEX_POLICE); |
290 | if (err < 0) | 286 | if (err < 0) |
291 | goto errout; | 287 | goto errout; |
288 | #ifdef CONFIG_NET_CLS_ACT | ||
289 | cp->perfect[i].exts.net = net; | ||
290 | #endif | ||
292 | } | 291 | } |
293 | 292 | ||
294 | return 0; | 293 | return 0; |
@@ -305,9 +304,9 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
305 | struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) | 304 | struct nlattr *est, bool ovr, struct netlink_ext_ack *extack) |
306 | { | 305 | { |
307 | struct tcindex_filter_result new_filter_result, *old_r = r; | 306 | struct tcindex_filter_result new_filter_result, *old_r = r; |
308 | struct tcindex_filter_result cr; | ||
309 | struct tcindex_data *cp = NULL, *oldp; | 307 | struct tcindex_data *cp = NULL, *oldp; |
310 | struct tcindex_filter *f = NULL; /* make gcc behave */ | 308 | struct tcindex_filter *f = NULL; /* make gcc behave */ |
309 | struct tcf_result cr = {}; | ||
311 | int err, balloc = 0; | 310 | int err, balloc = 0; |
312 | struct tcf_exts e; | 311 | struct tcf_exts e; |
313 | 312 | ||
@@ -337,7 +336,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
337 | if (p->perfect) { | 336 | if (p->perfect) { |
338 | int i; | 337 | int i; |
339 | 338 | ||
340 | if (tcindex_alloc_perfect_hash(cp) < 0) | 339 | if (tcindex_alloc_perfect_hash(net, cp) < 0) |
341 | goto errout; | 340 | goto errout; |
342 | for (i = 0; i < cp->hash; i++) | 341 | for (i = 0; i < cp->hash; i++) |
343 | cp->perfect[i].res = p->perfect[i].res; | 342 | cp->perfect[i].res = p->perfect[i].res; |
@@ -348,11 +347,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
348 | err = tcindex_filter_result_init(&new_filter_result); | 347 | err = tcindex_filter_result_init(&new_filter_result); |
349 | if (err < 0) | 348 | if (err < 0) |
350 | goto errout1; | 349 | goto errout1; |
351 | err = tcindex_filter_result_init(&cr); | ||
352 | if (err < 0) | ||
353 | goto errout1; | ||
354 | if (old_r) | 350 | if (old_r) |
355 | cr.res = r->res; | 351 | cr = r->res; |
356 | 352 | ||
357 | if (tb[TCA_TCINDEX_HASH]) | 353 | if (tb[TCA_TCINDEX_HASH]) |
358 | cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); | 354 | cp->hash = nla_get_u32(tb[TCA_TCINDEX_HASH]); |
@@ -406,7 +402,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
406 | err = -ENOMEM; | 402 | err = -ENOMEM; |
407 | if (!cp->perfect && !cp->h) { | 403 | if (!cp->perfect && !cp->h) { |
408 | if (valid_perfect_hash(cp)) { | 404 | if (valid_perfect_hash(cp)) { |
409 | if (tcindex_alloc_perfect_hash(cp) < 0) | 405 | if (tcindex_alloc_perfect_hash(net, cp) < 0) |
410 | goto errout_alloc; | 406 | goto errout_alloc; |
411 | balloc = 1; | 407 | balloc = 1; |
412 | } else { | 408 | } else { |
@@ -443,8 +439,8 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
443 | } | 439 | } |
444 | 440 | ||
445 | if (tb[TCA_TCINDEX_CLASSID]) { | 441 | if (tb[TCA_TCINDEX_CLASSID]) { |
446 | cr.res.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); | 442 | cr.classid = nla_get_u32(tb[TCA_TCINDEX_CLASSID]); |
447 | tcf_bind_filter(tp, &cr.res, base); | 443 | tcf_bind_filter(tp, &cr, base); |
448 | } | 444 | } |
449 | 445 | ||
450 | if (old_r && old_r != r) { | 446 | if (old_r && old_r != r) { |
@@ -456,7 +452,7 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
456 | } | 452 | } |
457 | 453 | ||
458 | oldp = p; | 454 | oldp = p; |
459 | r->res = cr.res; | 455 | r->res = cr; |
460 | tcf_exts_change(&r->exts, &e); | 456 | tcf_exts_change(&r->exts, &e); |
461 | 457 | ||
462 | rcu_assign_pointer(tp->root, cp); | 458 | rcu_assign_pointer(tp->root, cp); |
@@ -475,10 +471,12 @@ tcindex_set_parms(struct net *net, struct tcf_proto *tp, unsigned long base, | |||
475 | ; /* nothing */ | 471 | ; /* nothing */ |
476 | 472 | ||
477 | rcu_assign_pointer(*fp, f); | 473 | rcu_assign_pointer(*fp, f); |
474 | } else { | ||
475 | tcf_exts_destroy(&new_filter_result.exts); | ||
478 | } | 476 | } |
479 | 477 | ||
480 | if (oldp) | 478 | if (oldp) |
481 | call_rcu(&oldp->rcu, __tcindex_partial_destroy); | 479 | tcf_queue_work(&oldp->rwork, tcindex_partial_destroy_work); |
482 | return 0; | 480 | return 0; |
483 | 481 | ||
484 | errout_alloc: | 482 | errout_alloc: |
@@ -487,7 +485,6 @@ errout_alloc: | |||
487 | else if (balloc == 2) | 485 | else if (balloc == 2) |
488 | kfree(cp->h); | 486 | kfree(cp->h); |
489 | errout1: | 487 | errout1: |
490 | tcf_exts_destroy(&cr.exts); | ||
491 | tcf_exts_destroy(&new_filter_result.exts); | 488 | tcf_exts_destroy(&new_filter_result.exts); |
492 | errout: | 489 | errout: |
493 | kfree(cp); | 490 | kfree(cp); |
@@ -562,15 +559,34 @@ static void tcindex_destroy(struct tcf_proto *tp, | |||
562 | struct netlink_ext_ack *extack) | 559 | struct netlink_ext_ack *extack) |
563 | { | 560 | { |
564 | struct tcindex_data *p = rtnl_dereference(tp->root); | 561 | struct tcindex_data *p = rtnl_dereference(tp->root); |
565 | struct tcf_walker walker; | 562 | int i; |
566 | 563 | ||
567 | pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); | 564 | pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p); |
568 | walker.count = 0; | ||
569 | walker.skip = 0; | ||
570 | walker.fn = tcindex_destroy_element; | ||
571 | tcindex_walk(tp, &walker); | ||
572 | 565 | ||
573 | call_rcu(&p->rcu, __tcindex_destroy); | 566 | if (p->perfect) { |
567 | for (i = 0; i < p->hash; i++) { | ||
568 | struct tcindex_filter_result *r = p->perfect + i; | ||
569 | |||
570 | tcf_unbind_filter(tp, &r->res); | ||
571 | if (tcf_exts_get_net(&r->exts)) | ||
572 | tcf_queue_work(&r->rwork, | ||
573 | tcindex_destroy_rexts_work); | ||
574 | else | ||
575 | __tcindex_destroy_rexts(r); | ||
576 | } | ||
577 | } | ||
578 | |||
579 | for (i = 0; p->h && i < p->hash; i++) { | ||
580 | struct tcindex_filter *f, *next; | ||
581 | bool last; | ||
582 | |||
583 | for (f = rtnl_dereference(p->h[i]); f; f = next) { | ||
584 | next = rtnl_dereference(f->next); | ||
585 | tcindex_delete(tp, &f->result, &last, NULL); | ||
586 | } | ||
587 | } | ||
588 | |||
589 | tcf_queue_work(&p->rwork, tcindex_destroy_work); | ||
574 | } | 590 | } |
575 | 591 | ||
576 | 592 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 66ba2ce2320f..968a85fe4d4a 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -500,7 +500,7 @@ static void dev_watchdog_down(struct net_device *dev) | |||
500 | * netif_carrier_on - set carrier | 500 | * netif_carrier_on - set carrier |
501 | * @dev: network device | 501 | * @dev: network device |
502 | * | 502 | * |
503 | * Device has detected that carrier. | 503 | * Device has detected acquisition of carrier. |
504 | */ | 504 | */ |
505 | void netif_carrier_on(struct net_device *dev) | 505 | void netif_carrier_on(struct net_device *dev) |
506 | { | 506 | { |
diff --git a/net/sctp/diag.c b/net/sctp/diag.c index 078f01a8d582..435847d98b51 100644 --- a/net/sctp/diag.c +++ b/net/sctp/diag.c | |||
@@ -256,6 +256,7 @@ static size_t inet_assoc_attr_size(struct sctp_association *asoc) | |||
256 | + nla_total_size(1) /* INET_DIAG_TOS */ | 256 | + nla_total_size(1) /* INET_DIAG_TOS */ |
257 | + nla_total_size(1) /* INET_DIAG_TCLASS */ | 257 | + nla_total_size(1) /* INET_DIAG_TCLASS */ |
258 | + nla_total_size(4) /* INET_DIAG_MARK */ | 258 | + nla_total_size(4) /* INET_DIAG_MARK */ |
259 | + nla_total_size(4) /* INET_DIAG_CLASS_ID */ | ||
259 | + nla_total_size(addrlen * asoc->peer.transport_count) | 260 | + nla_total_size(addrlen * asoc->peer.transport_count) |
260 | + nla_total_size(addrlen * addrcnt) | 261 | + nla_total_size(addrlen * addrcnt) |
261 | + nla_total_size(sizeof(struct inet_diag_meminfo)) | 262 | + nla_total_size(sizeof(struct inet_diag_meminfo)) |
diff --git a/net/sctp/offload.c b/net/sctp/offload.c index 123e9f2dc226..edfcf16e704c 100644 --- a/net/sctp/offload.c +++ b/net/sctp/offload.c | |||
@@ -36,6 +36,7 @@ static __le32 sctp_gso_make_checksum(struct sk_buff *skb) | |||
36 | { | 36 | { |
37 | skb->ip_summed = CHECKSUM_NONE; | 37 | skb->ip_summed = CHECKSUM_NONE; |
38 | skb->csum_not_inet = 0; | 38 | skb->csum_not_inet = 0; |
39 | gso_reset_checksum(skb, ~0); | ||
39 | return sctp_compute_cksum(skb, skb_transport_offset(skb)); | 40 | return sctp_compute_cksum(skb, skb_transport_offset(skb)); |
40 | } | 41 | } |
41 | 42 | ||
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index f93c3cf9e567..65d6d04546ae 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -2027,7 +2027,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
2027 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; | 2027 | struct sctp_endpoint *ep = sctp_sk(sk)->ep; |
2028 | struct sctp_transport *transport = NULL; | 2028 | struct sctp_transport *transport = NULL; |
2029 | struct sctp_sndrcvinfo _sinfo, *sinfo; | 2029 | struct sctp_sndrcvinfo _sinfo, *sinfo; |
2030 | struct sctp_association *asoc; | 2030 | struct sctp_association *asoc, *tmp; |
2031 | struct sctp_cmsgs cmsgs; | 2031 | struct sctp_cmsgs cmsgs; |
2032 | union sctp_addr *daddr; | 2032 | union sctp_addr *daddr; |
2033 | bool new = false; | 2033 | bool new = false; |
@@ -2053,7 +2053,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len) | |||
2053 | 2053 | ||
2054 | /* SCTP_SENDALL process */ | 2054 | /* SCTP_SENDALL process */ |
2055 | if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) { | 2055 | if ((sflags & SCTP_SENDALL) && sctp_style(sk, UDP)) { |
2056 | list_for_each_entry(asoc, &ep->asocs, asocs) { | 2056 | list_for_each_entry_safe(asoc, tmp, &ep->asocs, asocs) { |
2057 | err = sctp_sendmsg_check_sflags(asoc, sflags, msg, | 2057 | err = sctp_sendmsg_check_sflags(asoc, sflags, msg, |
2058 | msg_len); | 2058 | msg_len); |
2059 | if (err == 0) | 2059 | if (err == 0) |
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 80e0ae5534ec..2936ed17bf9e 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
@@ -84,6 +84,19 @@ static void fa_zero(struct flex_array *fa, size_t index, size_t count) | |||
84 | } | 84 | } |
85 | } | 85 | } |
86 | 86 | ||
87 | static size_t fa_index(struct flex_array *fa, void *elem, size_t count) | ||
88 | { | ||
89 | size_t index = 0; | ||
90 | |||
91 | while (count--) { | ||
92 | if (elem == flex_array_get(fa, index)) | ||
93 | break; | ||
94 | index++; | ||
95 | } | ||
96 | |||
97 | return index; | ||
98 | } | ||
99 | |||
87 | /* Migrates chunks from stream queues to new stream queues if needed, | 100 | /* Migrates chunks from stream queues to new stream queues if needed, |
88 | * but not across associations. Also, removes those chunks to streams | 101 | * but not across associations. Also, removes those chunks to streams |
89 | * higher than the new max. | 102 | * higher than the new max. |
@@ -131,8 +144,10 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream, | |||
131 | } | 144 | } |
132 | } | 145 | } |
133 | 146 | ||
134 | for (i = outcnt; i < stream->outcnt; i++) | 147 | for (i = outcnt; i < stream->outcnt; i++) { |
135 | kfree(SCTP_SO(stream, i)->ext); | 148 | kfree(SCTP_SO(stream, i)->ext); |
149 | SCTP_SO(stream, i)->ext = NULL; | ||
150 | } | ||
136 | } | 151 | } |
137 | 152 | ||
138 | static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, | 153 | static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, |
@@ -147,6 +162,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt, | |||
147 | 162 | ||
148 | if (stream->out) { | 163 | if (stream->out) { |
149 | fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); | 164 | fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt)); |
165 | if (stream->out_curr) { | ||
166 | size_t index = fa_index(stream->out, stream->out_curr, | ||
167 | stream->outcnt); | ||
168 | |||
169 | BUG_ON(index == stream->outcnt); | ||
170 | stream->out_curr = flex_array_get(out, index); | ||
171 | } | ||
150 | fa_free(stream->out); | 172 | fa_free(stream->out); |
151 | } | 173 | } |
152 | 174 | ||
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index c4e56602e0c6..b04a813fc865 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c | |||
@@ -1505,6 +1505,11 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
1505 | 1505 | ||
1506 | smc = smc_sk(sk); | 1506 | smc = smc_sk(sk); |
1507 | lock_sock(sk); | 1507 | lock_sock(sk); |
1508 | if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { | ||
1509 | /* socket was connected before, no more data to read */ | ||
1510 | rc = 0; | ||
1511 | goto out; | ||
1512 | } | ||
1508 | if ((sk->sk_state == SMC_INIT) || | 1513 | if ((sk->sk_state == SMC_INIT) || |
1509 | (sk->sk_state == SMC_LISTEN) || | 1514 | (sk->sk_state == SMC_LISTEN) || |
1510 | (sk->sk_state == SMC_CLOSED)) | 1515 | (sk->sk_state == SMC_CLOSED)) |
@@ -1840,7 +1845,11 @@ static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, | |||
1840 | 1845 | ||
1841 | smc = smc_sk(sk); | 1846 | smc = smc_sk(sk); |
1842 | lock_sock(sk); | 1847 | lock_sock(sk); |
1843 | 1848 | if (sk->sk_state == SMC_CLOSED && (sk->sk_shutdown & RCV_SHUTDOWN)) { | |
1849 | /* socket was connected before, no more data to read */ | ||
1850 | rc = 0; | ||
1851 | goto out; | ||
1852 | } | ||
1844 | if (sk->sk_state == SMC_INIT || | 1853 | if (sk->sk_state == SMC_INIT || |
1845 | sk->sk_state == SMC_LISTEN || | 1854 | sk->sk_state == SMC_LISTEN || |
1846 | sk->sk_state == SMC_CLOSED) | 1855 | sk->sk_state == SMC_CLOSED) |
diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index db83332ac1c8..fb07ad8d69a6 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c | |||
@@ -21,13 +21,6 @@ | |||
21 | 21 | ||
22 | /********************************** send *************************************/ | 22 | /********************************** send *************************************/ |
23 | 23 | ||
24 | struct smc_cdc_tx_pend { | ||
25 | struct smc_connection *conn; /* socket connection */ | ||
26 | union smc_host_cursor cursor; /* tx sndbuf cursor sent */ | ||
27 | union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ | ||
28 | u16 ctrl_seq; /* conn. tx sequence # */ | ||
29 | }; | ||
30 | |||
31 | /* handler for send/transmission completion of a CDC msg */ | 24 | /* handler for send/transmission completion of a CDC msg */ |
32 | static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, | 25 | static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, |
33 | struct smc_link *link, | 26 | struct smc_link *link, |
@@ -61,12 +54,14 @@ static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd, | |||
61 | 54 | ||
62 | int smc_cdc_get_free_slot(struct smc_connection *conn, | 55 | int smc_cdc_get_free_slot(struct smc_connection *conn, |
63 | struct smc_wr_buf **wr_buf, | 56 | struct smc_wr_buf **wr_buf, |
57 | struct smc_rdma_wr **wr_rdma_buf, | ||
64 | struct smc_cdc_tx_pend **pend) | 58 | struct smc_cdc_tx_pend **pend) |
65 | { | 59 | { |
66 | struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; | 60 | struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; |
67 | int rc; | 61 | int rc; |
68 | 62 | ||
69 | rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, | 63 | rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf, |
64 | wr_rdma_buf, | ||
70 | (struct smc_wr_tx_pend_priv **)pend); | 65 | (struct smc_wr_tx_pend_priv **)pend); |
71 | if (!conn->alert_token_local) | 66 | if (!conn->alert_token_local) |
72 | /* abnormal termination */ | 67 | /* abnormal termination */ |
@@ -96,6 +91,7 @@ int smc_cdc_msg_send(struct smc_connection *conn, | |||
96 | struct smc_wr_buf *wr_buf, | 91 | struct smc_wr_buf *wr_buf, |
97 | struct smc_cdc_tx_pend *pend) | 92 | struct smc_cdc_tx_pend *pend) |
98 | { | 93 | { |
94 | union smc_host_cursor cfed; | ||
99 | struct smc_link *link; | 95 | struct smc_link *link; |
100 | int rc; | 96 | int rc; |
101 | 97 | ||
@@ -105,12 +101,10 @@ int smc_cdc_msg_send(struct smc_connection *conn, | |||
105 | 101 | ||
106 | conn->tx_cdc_seq++; | 102 | conn->tx_cdc_seq++; |
107 | conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; | 103 | conn->local_tx_ctrl.seqno = conn->tx_cdc_seq; |
108 | smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, | 104 | smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf, conn, &cfed); |
109 | &conn->local_tx_ctrl, conn); | ||
110 | rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); | 105 | rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend); |
111 | if (!rc) | 106 | if (!rc) |
112 | smc_curs_copy(&conn->rx_curs_confirmed, | 107 | smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn); |
113 | &conn->local_tx_ctrl.cons, conn); | ||
114 | 108 | ||
115 | return rc; | 109 | return rc; |
116 | } | 110 | } |
@@ -121,11 +115,14 @@ static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn) | |||
121 | struct smc_wr_buf *wr_buf; | 115 | struct smc_wr_buf *wr_buf; |
122 | int rc; | 116 | int rc; |
123 | 117 | ||
124 | rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); | 118 | rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend); |
125 | if (rc) | 119 | if (rc) |
126 | return rc; | 120 | return rc; |
127 | 121 | ||
128 | return smc_cdc_msg_send(conn, wr_buf, pend); | 122 | spin_lock_bh(&conn->send_lock); |
123 | rc = smc_cdc_msg_send(conn, wr_buf, pend); | ||
124 | spin_unlock_bh(&conn->send_lock); | ||
125 | return rc; | ||
129 | } | 126 | } |
130 | 127 | ||
131 | int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) | 128 | int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn) |
diff --git a/net/smc/smc_cdc.h b/net/smc/smc_cdc.h index b5bfe38c7f9b..f1cdde9d4b89 100644 --- a/net/smc/smc_cdc.h +++ b/net/smc/smc_cdc.h | |||
@@ -160,7 +160,9 @@ static inline void smcd_curs_copy(union smcd_cdc_cursor *tgt, | |||
160 | #endif | 160 | #endif |
161 | } | 161 | } |
162 | 162 | ||
163 | /* calculate cursor difference between old and new, where old <= new */ | 163 | /* calculate cursor difference between old and new, where old <= new and |
164 | * difference cannot exceed size | ||
165 | */ | ||
164 | static inline int smc_curs_diff(unsigned int size, | 166 | static inline int smc_curs_diff(unsigned int size, |
165 | union smc_host_cursor *old, | 167 | union smc_host_cursor *old, |
166 | union smc_host_cursor *new) | 168 | union smc_host_cursor *new) |
@@ -185,28 +187,51 @@ static inline int smc_curs_comp(unsigned int size, | |||
185 | return smc_curs_diff(size, old, new); | 187 | return smc_curs_diff(size, old, new); |
186 | } | 188 | } |
187 | 189 | ||
190 | /* calculate cursor difference between old and new, where old <= new and | ||
191 | * difference may exceed size | ||
192 | */ | ||
193 | static inline int smc_curs_diff_large(unsigned int size, | ||
194 | union smc_host_cursor *old, | ||
195 | union smc_host_cursor *new) | ||
196 | { | ||
197 | if (old->wrap < new->wrap) | ||
198 | return min_t(int, | ||
199 | (size - old->count) + new->count + | ||
200 | (new->wrap - old->wrap - 1) * size, | ||
201 | size); | ||
202 | |||
203 | if (old->wrap > new->wrap) /* wrap has switched from 0xffff to 0x0000 */ | ||
204 | return min_t(int, | ||
205 | (size - old->count) + new->count + | ||
206 | (new->wrap + 0xffff - old->wrap) * size, | ||
207 | size); | ||
208 | |||
209 | return max_t(int, 0, (new->count - old->count)); | ||
210 | } | ||
211 | |||
188 | static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, | 212 | static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer, |
189 | union smc_host_cursor *local, | 213 | union smc_host_cursor *local, |
214 | union smc_host_cursor *save, | ||
190 | struct smc_connection *conn) | 215 | struct smc_connection *conn) |
191 | { | 216 | { |
192 | union smc_host_cursor temp; | 217 | smc_curs_copy(save, local, conn); |
193 | 218 | peer->count = htonl(save->count); | |
194 | smc_curs_copy(&temp, local, conn); | 219 | peer->wrap = htons(save->wrap); |
195 | peer->count = htonl(temp.count); | ||
196 | peer->wrap = htons(temp.wrap); | ||
197 | /* peer->reserved = htons(0); must be ensured by caller */ | 220 | /* peer->reserved = htons(0); must be ensured by caller */ |
198 | } | 221 | } |
199 | 222 | ||
200 | static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer, | 223 | static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer, |
201 | struct smc_host_cdc_msg *local, | 224 | struct smc_connection *conn, |
202 | struct smc_connection *conn) | 225 | union smc_host_cursor *save) |
203 | { | 226 | { |
227 | struct smc_host_cdc_msg *local = &conn->local_tx_ctrl; | ||
228 | |||
204 | peer->common.type = local->common.type; | 229 | peer->common.type = local->common.type; |
205 | peer->len = local->len; | 230 | peer->len = local->len; |
206 | peer->seqno = htons(local->seqno); | 231 | peer->seqno = htons(local->seqno); |
207 | peer->token = htonl(local->token); | 232 | peer->token = htonl(local->token); |
208 | smc_host_cursor_to_cdc(&peer->prod, &local->prod, conn); | 233 | smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn); |
209 | smc_host_cursor_to_cdc(&peer->cons, &local->cons, conn); | 234 | smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn); |
210 | peer->prod_flags = local->prod_flags; | 235 | peer->prod_flags = local->prod_flags; |
211 | peer->conn_state_flags = local->conn_state_flags; | 236 | peer->conn_state_flags = local->conn_state_flags; |
212 | } | 237 | } |
@@ -270,10 +295,16 @@ static inline void smc_cdc_msg_to_host(struct smc_host_cdc_msg *local, | |||
270 | smcr_cdc_msg_to_host(local, peer, conn); | 295 | smcr_cdc_msg_to_host(local, peer, conn); |
271 | } | 296 | } |
272 | 297 | ||
273 | struct smc_cdc_tx_pend; | 298 | struct smc_cdc_tx_pend { |
299 | struct smc_connection *conn; /* socket connection */ | ||
300 | union smc_host_cursor cursor; /* tx sndbuf cursor sent */ | ||
301 | union smc_host_cursor p_cursor; /* rx RMBE cursor produced */ | ||
302 | u16 ctrl_seq; /* conn. tx sequence # */ | ||
303 | }; | ||
274 | 304 | ||
275 | int smc_cdc_get_free_slot(struct smc_connection *conn, | 305 | int smc_cdc_get_free_slot(struct smc_connection *conn, |
276 | struct smc_wr_buf **wr_buf, | 306 | struct smc_wr_buf **wr_buf, |
307 | struct smc_rdma_wr **wr_rdma_buf, | ||
277 | struct smc_cdc_tx_pend **pend); | 308 | struct smc_cdc_tx_pend **pend); |
278 | void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); | 309 | void smc_cdc_tx_dismiss_slots(struct smc_connection *conn); |
279 | int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, | 310 | int smc_cdc_msg_send(struct smc_connection *conn, struct smc_wr_buf *wr_buf, |
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c index 776e9dfc915d..d53fd588d1f5 100644 --- a/net/smc/smc_clc.c +++ b/net/smc/smc_clc.c | |||
@@ -378,7 +378,7 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info) | |||
378 | vec.iov_len = sizeof(struct smc_clc_msg_decline); | 378 | vec.iov_len = sizeof(struct smc_clc_msg_decline); |
379 | len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, | 379 | len = kernel_sendmsg(smc->clcsock, &msg, &vec, 1, |
380 | sizeof(struct smc_clc_msg_decline)); | 380 | sizeof(struct smc_clc_msg_decline)); |
381 | if (len < sizeof(struct smc_clc_msg_decline)) | 381 | if (len < 0 || len < sizeof(struct smc_clc_msg_decline)) |
382 | len = -EPROTO; | 382 | len = -EPROTO; |
383 | return len > 0 ? 0 : len; | 383 | return len > 0 ? 0 : len; |
384 | } | 384 | } |
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c index ea2b87f29469..e39cadda1bf5 100644 --- a/net/smc/smc_close.c +++ b/net/smc/smc_close.c | |||
@@ -345,14 +345,7 @@ static void smc_close_passive_work(struct work_struct *work) | |||
345 | 345 | ||
346 | switch (sk->sk_state) { | 346 | switch (sk->sk_state) { |
347 | case SMC_INIT: | 347 | case SMC_INIT: |
348 | if (atomic_read(&conn->bytes_to_rcv) || | 348 | sk->sk_state = SMC_APPCLOSEWAIT1; |
349 | (rxflags->peer_done_writing && | ||
350 | !smc_cdc_rxed_any_close(conn))) { | ||
351 | sk->sk_state = SMC_APPCLOSEWAIT1; | ||
352 | } else { | ||
353 | sk->sk_state = SMC_CLOSED; | ||
354 | sock_put(sk); /* passive closing */ | ||
355 | } | ||
356 | break; | 349 | break; |
357 | case SMC_ACTIVE: | 350 | case SMC_ACTIVE: |
358 | sk->sk_state = SMC_APPCLOSEWAIT1; | 351 | sk->sk_state = SMC_APPCLOSEWAIT1; |
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index 35c1cdc93e1c..aa1c551cee81 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c | |||
@@ -128,6 +128,8 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn) | |||
128 | { | 128 | { |
129 | struct smc_link_group *lgr = conn->lgr; | 129 | struct smc_link_group *lgr = conn->lgr; |
130 | 130 | ||
131 | if (!lgr) | ||
132 | return; | ||
131 | write_lock_bh(&lgr->conns_lock); | 133 | write_lock_bh(&lgr->conns_lock); |
132 | if (conn->alert_token_local) { | 134 | if (conn->alert_token_local) { |
133 | __smc_lgr_unregister_conn(conn); | 135 | __smc_lgr_unregister_conn(conn); |
@@ -300,13 +302,13 @@ static void smc_buf_unuse(struct smc_connection *conn, | |||
300 | conn->sndbuf_desc->used = 0; | 302 | conn->sndbuf_desc->used = 0; |
301 | if (conn->rmb_desc) { | 303 | if (conn->rmb_desc) { |
302 | if (!conn->rmb_desc->regerr) { | 304 | if (!conn->rmb_desc->regerr) { |
303 | conn->rmb_desc->used = 0; | ||
304 | if (!lgr->is_smcd) { | 305 | if (!lgr->is_smcd) { |
305 | /* unregister rmb with peer */ | 306 | /* unregister rmb with peer */ |
306 | smc_llc_do_delete_rkey( | 307 | smc_llc_do_delete_rkey( |
307 | &lgr->lnk[SMC_SINGLE_LINK], | 308 | &lgr->lnk[SMC_SINGLE_LINK], |
308 | conn->rmb_desc); | 309 | conn->rmb_desc); |
309 | } | 310 | } |
311 | conn->rmb_desc->used = 0; | ||
310 | } else { | 312 | } else { |
311 | /* buf registration failed, reuse not possible */ | 313 | /* buf registration failed, reuse not possible */ |
312 | write_lock_bh(&lgr->rmbs_lock); | 314 | write_lock_bh(&lgr->rmbs_lock); |
@@ -628,6 +630,8 @@ int smc_conn_create(struct smc_sock *smc, bool is_smcd, int srv_first_contact, | |||
628 | local_contact = SMC_REUSE_CONTACT; | 630 | local_contact = SMC_REUSE_CONTACT; |
629 | conn->lgr = lgr; | 631 | conn->lgr = lgr; |
630 | smc_lgr_register_conn(conn); /* add smc conn to lgr */ | 632 | smc_lgr_register_conn(conn); /* add smc conn to lgr */ |
633 | if (delayed_work_pending(&lgr->free_work)) | ||
634 | cancel_delayed_work(&lgr->free_work); | ||
631 | write_unlock_bh(&lgr->conns_lock); | 635 | write_unlock_bh(&lgr->conns_lock); |
632 | break; | 636 | break; |
633 | } | 637 | } |
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h index b00287989a3d..8806d2afa6ed 100644 --- a/net/smc/smc_core.h +++ b/net/smc/smc_core.h | |||
@@ -52,6 +52,24 @@ enum smc_wr_reg_state { | |||
52 | FAILED /* ib_wr_reg_mr response: failure */ | 52 | FAILED /* ib_wr_reg_mr response: failure */ |
53 | }; | 53 | }; |
54 | 54 | ||
55 | struct smc_rdma_sge { /* sges for RDMA writes */ | ||
56 | struct ib_sge wr_tx_rdma_sge[SMC_IB_MAX_SEND_SGE]; | ||
57 | }; | ||
58 | |||
59 | #define SMC_MAX_RDMA_WRITES 2 /* max. # of RDMA writes per | ||
60 | * message send | ||
61 | */ | ||
62 | |||
63 | struct smc_rdma_sges { /* sges per message send */ | ||
64 | struct smc_rdma_sge tx_rdma_sge[SMC_MAX_RDMA_WRITES]; | ||
65 | }; | ||
66 | |||
67 | struct smc_rdma_wr { /* work requests per message | ||
68 | * send | ||
69 | */ | ||
70 | struct ib_rdma_wr wr_tx_rdma[SMC_MAX_RDMA_WRITES]; | ||
71 | }; | ||
72 | |||
55 | struct smc_link { | 73 | struct smc_link { |
56 | struct smc_ib_device *smcibdev; /* ib-device */ | 74 | struct smc_ib_device *smcibdev; /* ib-device */ |
57 | u8 ibport; /* port - values 1 | 2 */ | 75 | u8 ibport; /* port - values 1 | 2 */ |
@@ -64,6 +82,8 @@ struct smc_link { | |||
64 | struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ | 82 | struct smc_wr_buf *wr_tx_bufs; /* WR send payload buffers */ |
65 | struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ | 83 | struct ib_send_wr *wr_tx_ibs; /* WR send meta data */ |
66 | struct ib_sge *wr_tx_sges; /* WR send gather meta data */ | 84 | struct ib_sge *wr_tx_sges; /* WR send gather meta data */ |
85 | struct smc_rdma_sges *wr_tx_rdma_sges;/*RDMA WRITE gather meta data*/ | ||
86 | struct smc_rdma_wr *wr_tx_rdmas; /* WR RDMA WRITE */ | ||
67 | struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ | 87 | struct smc_wr_tx_pend *wr_tx_pends; /* WR send waiting for CQE */ |
68 | /* above four vectors have wr_tx_cnt elements and use the same index */ | 88 | /* above four vectors have wr_tx_cnt elements and use the same index */ |
69 | dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ | 89 | dma_addr_t wr_tx_dma_addr; /* DMA address of wr_tx_bufs */ |
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c index e519ef29c0ff..76487a16934e 100644 --- a/net/smc/smc_ib.c +++ b/net/smc/smc_ib.c | |||
@@ -289,8 +289,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk) | |||
289 | 289 | ||
290 | static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) | 290 | static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) |
291 | { | 291 | { |
292 | struct smc_ib_device *smcibdev = | 292 | struct smc_link *lnk = (struct smc_link *)priv; |
293 | (struct smc_ib_device *)ibevent->device; | 293 | struct smc_ib_device *smcibdev = lnk->smcibdev; |
294 | u8 port_idx; | 294 | u8 port_idx; |
295 | 295 | ||
296 | switch (ibevent->event) { | 296 | switch (ibevent->event) { |
@@ -298,7 +298,7 @@ static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv) | |||
298 | case IB_EVENT_GID_CHANGE: | 298 | case IB_EVENT_GID_CHANGE: |
299 | case IB_EVENT_PORT_ERR: | 299 | case IB_EVENT_PORT_ERR: |
300 | case IB_EVENT_QP_ACCESS_ERR: | 300 | case IB_EVENT_QP_ACCESS_ERR: |
301 | port_idx = ibevent->element.port_num - 1; | 301 | port_idx = ibevent->element.qp->port - 1; |
302 | set_bit(port_idx, &smcibdev->port_event_mask); | 302 | set_bit(port_idx, &smcibdev->port_event_mask); |
303 | schedule_work(&smcibdev->port_event_work); | 303 | schedule_work(&smcibdev->port_event_work); |
304 | break; | 304 | break; |
diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index a6d3623d06f4..4fd60c522802 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c | |||
@@ -166,7 +166,8 @@ static int smc_llc_add_pending_send(struct smc_link *link, | |||
166 | { | 166 | { |
167 | int rc; | 167 | int rc; |
168 | 168 | ||
169 | rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, pend); | 169 | rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL, |
170 | pend); | ||
170 | if (rc < 0) | 171 | if (rc < 0) |
171 | return rc; | 172 | return rc; |
172 | BUILD_BUG_ON_MSG( | 173 | BUILD_BUG_ON_MSG( |
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c index 7cb3e4f07c10..632c3109dee5 100644 --- a/net/smc/smc_pnet.c +++ b/net/smc/smc_pnet.c | |||
@@ -27,7 +27,7 @@ | |||
27 | static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { | 27 | static struct nla_policy smc_pnet_policy[SMC_PNETID_MAX + 1] = { |
28 | [SMC_PNETID_NAME] = { | 28 | [SMC_PNETID_NAME] = { |
29 | .type = NLA_NUL_STRING, | 29 | .type = NLA_NUL_STRING, |
30 | .len = SMC_MAX_PNETID_LEN - 1 | 30 | .len = SMC_MAX_PNETID_LEN |
31 | }, | 31 | }, |
32 | [SMC_PNETID_ETHNAME] = { | 32 | [SMC_PNETID_ETHNAME] = { |
33 | .type = NLA_NUL_STRING, | 33 | .type = NLA_NUL_STRING, |
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c index d8366ed51757..f93f3580c100 100644 --- a/net/smc/smc_tx.c +++ b/net/smc/smc_tx.c | |||
@@ -165,12 +165,11 @@ int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len) | |||
165 | conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; | 165 | conn->local_tx_ctrl.prod_flags.urg_data_pending = 1; |
166 | 166 | ||
167 | if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { | 167 | if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) { |
168 | if (send_done) | ||
169 | return send_done; | ||
168 | rc = smc_tx_wait(smc, msg->msg_flags); | 170 | rc = smc_tx_wait(smc, msg->msg_flags); |
169 | if (rc) { | 171 | if (rc) |
170 | if (send_done) | ||
171 | return send_done; | ||
172 | goto out_err; | 172 | goto out_err; |
173 | } | ||
174 | continue; | 173 | continue; |
175 | } | 174 | } |
176 | 175 | ||
@@ -267,27 +266,23 @@ int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len, | |||
267 | 266 | ||
268 | /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ | 267 | /* sndbuf consumer: actual data transfer of one target chunk with RDMA write */ |
269 | static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, | 268 | static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset, |
270 | int num_sges, struct ib_sge sges[]) | 269 | int num_sges, struct ib_rdma_wr *rdma_wr) |
271 | { | 270 | { |
272 | struct smc_link_group *lgr = conn->lgr; | 271 | struct smc_link_group *lgr = conn->lgr; |
273 | struct ib_rdma_wr rdma_wr; | ||
274 | struct smc_link *link; | 272 | struct smc_link *link; |
275 | int rc; | 273 | int rc; |
276 | 274 | ||
277 | memset(&rdma_wr, 0, sizeof(rdma_wr)); | ||
278 | link = &lgr->lnk[SMC_SINGLE_LINK]; | 275 | link = &lgr->lnk[SMC_SINGLE_LINK]; |
279 | rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link); | 276 | rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link); |
280 | rdma_wr.wr.sg_list = sges; | 277 | rdma_wr->wr.num_sge = num_sges; |
281 | rdma_wr.wr.num_sge = num_sges; | 278 | rdma_wr->remote_addr = |
282 | rdma_wr.wr.opcode = IB_WR_RDMA_WRITE; | ||
283 | rdma_wr.remote_addr = | ||
284 | lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + | 279 | lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr + |
285 | /* RMBE within RMB */ | 280 | /* RMBE within RMB */ |
286 | conn->tx_off + | 281 | conn->tx_off + |
287 | /* offset within RMBE */ | 282 | /* offset within RMBE */ |
288 | peer_rmbe_offset; | 283 | peer_rmbe_offset; |
289 | rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; | 284 | rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey; |
290 | rc = ib_post_send(link->roce_qp, &rdma_wr.wr, NULL); | 285 | rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL); |
291 | if (rc) { | 286 | if (rc) { |
292 | conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; | 287 | conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1; |
293 | smc_lgr_terminate(lgr); | 288 | smc_lgr_terminate(lgr); |
@@ -314,24 +309,25 @@ static inline void smc_tx_advance_cursors(struct smc_connection *conn, | |||
314 | /* SMC-R helper for smc_tx_rdma_writes() */ | 309 | /* SMC-R helper for smc_tx_rdma_writes() */ |
315 | static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, | 310 | static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, |
316 | size_t src_off, size_t src_len, | 311 | size_t src_off, size_t src_len, |
317 | size_t dst_off, size_t dst_len) | 312 | size_t dst_off, size_t dst_len, |
313 | struct smc_rdma_wr *wr_rdma_buf) | ||
318 | { | 314 | { |
319 | dma_addr_t dma_addr = | 315 | dma_addr_t dma_addr = |
320 | sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); | 316 | sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl); |
321 | struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK]; | ||
322 | int src_len_sum = src_len, dst_len_sum = dst_len; | 317 | int src_len_sum = src_len, dst_len_sum = dst_len; |
323 | struct ib_sge sges[SMC_IB_MAX_SEND_SGE]; | ||
324 | int sent_count = src_off; | 318 | int sent_count = src_off; |
325 | int srcchunk, dstchunk; | 319 | int srcchunk, dstchunk; |
326 | int num_sges; | 320 | int num_sges; |
327 | int rc; | 321 | int rc; |
328 | 322 | ||
329 | for (dstchunk = 0; dstchunk < 2; dstchunk++) { | 323 | for (dstchunk = 0; dstchunk < 2; dstchunk++) { |
324 | struct ib_sge *sge = | ||
325 | wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list; | ||
326 | |||
330 | num_sges = 0; | 327 | num_sges = 0; |
331 | for (srcchunk = 0; srcchunk < 2; srcchunk++) { | 328 | for (srcchunk = 0; srcchunk < 2; srcchunk++) { |
332 | sges[srcchunk].addr = dma_addr + src_off; | 329 | sge[srcchunk].addr = dma_addr + src_off; |
333 | sges[srcchunk].length = src_len; | 330 | sge[srcchunk].length = src_len; |
334 | sges[srcchunk].lkey = link->roce_pd->local_dma_lkey; | ||
335 | num_sges++; | 331 | num_sges++; |
336 | 332 | ||
337 | src_off += src_len; | 333 | src_off += src_len; |
@@ -344,7 +340,8 @@ static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len, | |||
344 | src_len = dst_len - src_len; /* remainder */ | 340 | src_len = dst_len - src_len; /* remainder */ |
345 | src_len_sum += src_len; | 341 | src_len_sum += src_len; |
346 | } | 342 | } |
347 | rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges); | 343 | rc = smc_tx_rdma_write(conn, dst_off, num_sges, |
344 | &wr_rdma_buf->wr_tx_rdma[dstchunk]); | ||
348 | if (rc) | 345 | if (rc) |
349 | return rc; | 346 | return rc; |
350 | if (dst_len_sum == len) | 347 | if (dst_len_sum == len) |
@@ -403,7 +400,8 @@ static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len, | |||
403 | /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; | 400 | /* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit; |
404 | * usable snd_wnd as max transmit | 401 | * usable snd_wnd as max transmit |
405 | */ | 402 | */ |
406 | static int smc_tx_rdma_writes(struct smc_connection *conn) | 403 | static int smc_tx_rdma_writes(struct smc_connection *conn, |
404 | struct smc_rdma_wr *wr_rdma_buf) | ||
407 | { | 405 | { |
408 | size_t len, src_len, dst_off, dst_len; /* current chunk values */ | 406 | size_t len, src_len, dst_off, dst_len; /* current chunk values */ |
409 | union smc_host_cursor sent, prep, prod, cons; | 407 | union smc_host_cursor sent, prep, prod, cons; |
@@ -464,7 +462,7 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) | |||
464 | dst_off, dst_len); | 462 | dst_off, dst_len); |
465 | else | 463 | else |
466 | rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, | 464 | rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len, |
467 | dst_off, dst_len); | 465 | dst_off, dst_len, wr_rdma_buf); |
468 | if (rc) | 466 | if (rc) |
469 | return rc; | 467 | return rc; |
470 | 468 | ||
@@ -485,31 +483,30 @@ static int smc_tx_rdma_writes(struct smc_connection *conn) | |||
485 | static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) | 483 | static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn) |
486 | { | 484 | { |
487 | struct smc_cdc_producer_flags *pflags; | 485 | struct smc_cdc_producer_flags *pflags; |
486 | struct smc_rdma_wr *wr_rdma_buf; | ||
488 | struct smc_cdc_tx_pend *pend; | 487 | struct smc_cdc_tx_pend *pend; |
489 | struct smc_wr_buf *wr_buf; | 488 | struct smc_wr_buf *wr_buf; |
490 | int rc; | 489 | int rc; |
491 | 490 | ||
492 | spin_lock_bh(&conn->send_lock); | 491 | rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend); |
493 | rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend); | ||
494 | if (rc < 0) { | 492 | if (rc < 0) { |
495 | if (rc == -EBUSY) { | 493 | if (rc == -EBUSY) { |
496 | struct smc_sock *smc = | 494 | struct smc_sock *smc = |
497 | container_of(conn, struct smc_sock, conn); | 495 | container_of(conn, struct smc_sock, conn); |
498 | 496 | ||
499 | if (smc->sk.sk_err == ECONNABORTED) { | 497 | if (smc->sk.sk_err == ECONNABORTED) |
500 | rc = sock_error(&smc->sk); | 498 | return sock_error(&smc->sk); |
501 | goto out_unlock; | ||
502 | } | ||
503 | rc = 0; | 499 | rc = 0; |
504 | if (conn->alert_token_local) /* connection healthy */ | 500 | if (conn->alert_token_local) /* connection healthy */ |
505 | mod_delayed_work(system_wq, &conn->tx_work, | 501 | mod_delayed_work(system_wq, &conn->tx_work, |
506 | SMC_TX_WORK_DELAY); | 502 | SMC_TX_WORK_DELAY); |
507 | } | 503 | } |
508 | goto out_unlock; | 504 | return rc; |
509 | } | 505 | } |
510 | 506 | ||
507 | spin_lock_bh(&conn->send_lock); | ||
511 | if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { | 508 | if (!conn->local_tx_ctrl.prod_flags.urg_data_present) { |
512 | rc = smc_tx_rdma_writes(conn); | 509 | rc = smc_tx_rdma_writes(conn, wr_rdma_buf); |
513 | if (rc) { | 510 | if (rc) { |
514 | smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], | 511 | smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK], |
515 | (struct smc_wr_tx_pend_priv *)pend); | 512 | (struct smc_wr_tx_pend_priv *)pend); |
@@ -536,7 +533,7 @@ static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn) | |||
536 | 533 | ||
537 | spin_lock_bh(&conn->send_lock); | 534 | spin_lock_bh(&conn->send_lock); |
538 | if (!pflags->urg_data_present) | 535 | if (!pflags->urg_data_present) |
539 | rc = smc_tx_rdma_writes(conn); | 536 | rc = smc_tx_rdma_writes(conn, NULL); |
540 | if (!rc) | 537 | if (!rc) |
541 | rc = smcd_cdc_msg_send(conn); | 538 | rc = smcd_cdc_msg_send(conn); |
542 | 539 | ||
@@ -598,7 +595,8 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force) | |||
598 | if (to_confirm > conn->rmbe_update_limit) { | 595 | if (to_confirm > conn->rmbe_update_limit) { |
599 | smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); | 596 | smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn); |
600 | sender_free = conn->rmb_desc->len - | 597 | sender_free = conn->rmb_desc->len - |
601 | smc_curs_diff(conn->rmb_desc->len, &prod, &cfed); | 598 | smc_curs_diff_large(conn->rmb_desc->len, |
599 | &cfed, &prod); | ||
602 | } | 600 | } |
603 | 601 | ||
604 | if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || | 602 | if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req || |
diff --git a/net/smc/smc_wr.c b/net/smc/smc_wr.c index c2694750a6a8..253aa75dc2b6 100644 --- a/net/smc/smc_wr.c +++ b/net/smc/smc_wr.c | |||
@@ -160,6 +160,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) | |||
160 | * @link: Pointer to smc_link used to later send the message. | 160 | * @link: Pointer to smc_link used to later send the message. |
161 | * @handler: Send completion handler function pointer. | 161 | * @handler: Send completion handler function pointer. |
162 | * @wr_buf: Out value returns pointer to message buffer. | 162 | * @wr_buf: Out value returns pointer to message buffer. |
163 | * @wr_rdma_buf: Out value returns pointer to rdma work request. | ||
163 | * @wr_pend_priv: Out value returns pointer serving as handler context. | 164 | * @wr_pend_priv: Out value returns pointer serving as handler context. |
164 | * | 165 | * |
165 | * Return: 0 on success, or -errno on error. | 166 | * Return: 0 on success, or -errno on error. |
@@ -167,6 +168,7 @@ static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) | |||
167 | int smc_wr_tx_get_free_slot(struct smc_link *link, | 168 | int smc_wr_tx_get_free_slot(struct smc_link *link, |
168 | smc_wr_tx_handler handler, | 169 | smc_wr_tx_handler handler, |
169 | struct smc_wr_buf **wr_buf, | 170 | struct smc_wr_buf **wr_buf, |
171 | struct smc_rdma_wr **wr_rdma_buf, | ||
170 | struct smc_wr_tx_pend_priv **wr_pend_priv) | 172 | struct smc_wr_tx_pend_priv **wr_pend_priv) |
171 | { | 173 | { |
172 | struct smc_wr_tx_pend *wr_pend; | 174 | struct smc_wr_tx_pend *wr_pend; |
@@ -204,6 +206,8 @@ int smc_wr_tx_get_free_slot(struct smc_link *link, | |||
204 | wr_ib = &link->wr_tx_ibs[idx]; | 206 | wr_ib = &link->wr_tx_ibs[idx]; |
205 | wr_ib->wr_id = wr_id; | 207 | wr_ib->wr_id = wr_id; |
206 | *wr_buf = &link->wr_tx_bufs[idx]; | 208 | *wr_buf = &link->wr_tx_bufs[idx]; |
209 | if (wr_rdma_buf) | ||
210 | *wr_rdma_buf = &link->wr_tx_rdmas[idx]; | ||
207 | *wr_pend_priv = &wr_pend->priv; | 211 | *wr_pend_priv = &wr_pend->priv; |
208 | return 0; | 212 | return 0; |
209 | } | 213 | } |
@@ -218,10 +222,10 @@ int smc_wr_tx_put_slot(struct smc_link *link, | |||
218 | u32 idx = pend->idx; | 222 | u32 idx = pend->idx; |
219 | 223 | ||
220 | /* clear the full struct smc_wr_tx_pend including .priv */ | 224 | /* clear the full struct smc_wr_tx_pend including .priv */ |
221 | memset(&link->wr_tx_pends[pend->idx], 0, | 225 | memset(&link->wr_tx_pends[idx], 0, |
222 | sizeof(link->wr_tx_pends[pend->idx])); | 226 | sizeof(link->wr_tx_pends[idx])); |
223 | memset(&link->wr_tx_bufs[pend->idx], 0, | 227 | memset(&link->wr_tx_bufs[idx], 0, |
224 | sizeof(link->wr_tx_bufs[pend->idx])); | 228 | sizeof(link->wr_tx_bufs[idx])); |
225 | test_and_clear_bit(idx, link->wr_tx_mask); | 229 | test_and_clear_bit(idx, link->wr_tx_mask); |
226 | return 1; | 230 | return 1; |
227 | } | 231 | } |
@@ -465,12 +469,26 @@ static void smc_wr_init_sge(struct smc_link *lnk) | |||
465 | lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; | 469 | lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; |
466 | lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; | 470 | lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; |
467 | lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; | 471 | lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; |
472 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey = | ||
473 | lnk->roce_pd->local_dma_lkey; | ||
474 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey = | ||
475 | lnk->roce_pd->local_dma_lkey; | ||
476 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey = | ||
477 | lnk->roce_pd->local_dma_lkey; | ||
478 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey = | ||
479 | lnk->roce_pd->local_dma_lkey; | ||
468 | lnk->wr_tx_ibs[i].next = NULL; | 480 | lnk->wr_tx_ibs[i].next = NULL; |
469 | lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; | 481 | lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; |
470 | lnk->wr_tx_ibs[i].num_sge = 1; | 482 | lnk->wr_tx_ibs[i].num_sge = 1; |
471 | lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; | 483 | lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; |
472 | lnk->wr_tx_ibs[i].send_flags = | 484 | lnk->wr_tx_ibs[i].send_flags = |
473 | IB_SEND_SIGNALED | IB_SEND_SOLICITED; | 485 | IB_SEND_SIGNALED | IB_SEND_SOLICITED; |
486 | lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE; | ||
487 | lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE; | ||
488 | lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list = | ||
489 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge; | ||
490 | lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list = | ||
491 | lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge; | ||
474 | } | 492 | } |
475 | for (i = 0; i < lnk->wr_rx_cnt; i++) { | 493 | for (i = 0; i < lnk->wr_rx_cnt; i++) { |
476 | lnk->wr_rx_sges[i].addr = | 494 | lnk->wr_rx_sges[i].addr = |
@@ -521,8 +539,12 @@ void smc_wr_free_link_mem(struct smc_link *lnk) | |||
521 | lnk->wr_tx_mask = NULL; | 539 | lnk->wr_tx_mask = NULL; |
522 | kfree(lnk->wr_tx_sges); | 540 | kfree(lnk->wr_tx_sges); |
523 | lnk->wr_tx_sges = NULL; | 541 | lnk->wr_tx_sges = NULL; |
542 | kfree(lnk->wr_tx_rdma_sges); | ||
543 | lnk->wr_tx_rdma_sges = NULL; | ||
524 | kfree(lnk->wr_rx_sges); | 544 | kfree(lnk->wr_rx_sges); |
525 | lnk->wr_rx_sges = NULL; | 545 | lnk->wr_rx_sges = NULL; |
546 | kfree(lnk->wr_tx_rdmas); | ||
547 | lnk->wr_tx_rdmas = NULL; | ||
526 | kfree(lnk->wr_rx_ibs); | 548 | kfree(lnk->wr_rx_ibs); |
527 | lnk->wr_rx_ibs = NULL; | 549 | lnk->wr_rx_ibs = NULL; |
528 | kfree(lnk->wr_tx_ibs); | 550 | kfree(lnk->wr_tx_ibs); |
@@ -552,10 +574,20 @@ int smc_wr_alloc_link_mem(struct smc_link *link) | |||
552 | GFP_KERNEL); | 574 | GFP_KERNEL); |
553 | if (!link->wr_rx_ibs) | 575 | if (!link->wr_rx_ibs) |
554 | goto no_mem_wr_tx_ibs; | 576 | goto no_mem_wr_tx_ibs; |
577 | link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT, | ||
578 | sizeof(link->wr_tx_rdmas[0]), | ||
579 | GFP_KERNEL); | ||
580 | if (!link->wr_tx_rdmas) | ||
581 | goto no_mem_wr_rx_ibs; | ||
582 | link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT, | ||
583 | sizeof(link->wr_tx_rdma_sges[0]), | ||
584 | GFP_KERNEL); | ||
585 | if (!link->wr_tx_rdma_sges) | ||
586 | goto no_mem_wr_tx_rdmas; | ||
555 | link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), | 587 | link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), |
556 | GFP_KERNEL); | 588 | GFP_KERNEL); |
557 | if (!link->wr_tx_sges) | 589 | if (!link->wr_tx_sges) |
558 | goto no_mem_wr_rx_ibs; | 590 | goto no_mem_wr_tx_rdma_sges; |
559 | link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, | 591 | link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, |
560 | sizeof(link->wr_rx_sges[0]), | 592 | sizeof(link->wr_rx_sges[0]), |
561 | GFP_KERNEL); | 593 | GFP_KERNEL); |
@@ -579,6 +611,10 @@ no_mem_wr_rx_sges: | |||
579 | kfree(link->wr_rx_sges); | 611 | kfree(link->wr_rx_sges); |
580 | no_mem_wr_tx_sges: | 612 | no_mem_wr_tx_sges: |
581 | kfree(link->wr_tx_sges); | 613 | kfree(link->wr_tx_sges); |
614 | no_mem_wr_tx_rdma_sges: | ||
615 | kfree(link->wr_tx_rdma_sges); | ||
616 | no_mem_wr_tx_rdmas: | ||
617 | kfree(link->wr_tx_rdmas); | ||
582 | no_mem_wr_rx_ibs: | 618 | no_mem_wr_rx_ibs: |
583 | kfree(link->wr_rx_ibs); | 619 | kfree(link->wr_rx_ibs); |
584 | no_mem_wr_tx_ibs: | 620 | no_mem_wr_tx_ibs: |
diff --git a/net/smc/smc_wr.h b/net/smc/smc_wr.h index 1d85bb14fd6f..09bf32fd3959 100644 --- a/net/smc/smc_wr.h +++ b/net/smc/smc_wr.h | |||
@@ -85,6 +85,7 @@ void smc_wr_add_dev(struct smc_ib_device *smcibdev); | |||
85 | 85 | ||
86 | int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, | 86 | int smc_wr_tx_get_free_slot(struct smc_link *link, smc_wr_tx_handler handler, |
87 | struct smc_wr_buf **wr_buf, | 87 | struct smc_wr_buf **wr_buf, |
88 | struct smc_rdma_wr **wrs, | ||
88 | struct smc_wr_tx_pend_priv **wr_pend_priv); | 89 | struct smc_wr_tx_pend_priv **wr_pend_priv); |
89 | int smc_wr_tx_put_slot(struct smc_link *link, | 90 | int smc_wr_tx_put_slot(struct smc_link *link, |
90 | struct smc_wr_tx_pend_priv *wr_pend_priv); | 91 | struct smc_wr_tx_pend_priv *wr_pend_priv); |
diff --git a/net/socket.c b/net/socket.c index e89884e2197b..d80d87a395ea 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -941,8 +941,7 @@ void dlci_ioctl_set(int (*hook) (unsigned int, void __user *)) | |||
941 | EXPORT_SYMBOL(dlci_ioctl_set); | 941 | EXPORT_SYMBOL(dlci_ioctl_set); |
942 | 942 | ||
943 | static long sock_do_ioctl(struct net *net, struct socket *sock, | 943 | static long sock_do_ioctl(struct net *net, struct socket *sock, |
944 | unsigned int cmd, unsigned long arg, | 944 | unsigned int cmd, unsigned long arg) |
945 | unsigned int ifreq_size) | ||
946 | { | 945 | { |
947 | int err; | 946 | int err; |
948 | void __user *argp = (void __user *)arg; | 947 | void __user *argp = (void __user *)arg; |
@@ -968,11 +967,11 @@ static long sock_do_ioctl(struct net *net, struct socket *sock, | |||
968 | } else { | 967 | } else { |
969 | struct ifreq ifr; | 968 | struct ifreq ifr; |
970 | bool need_copyout; | 969 | bool need_copyout; |
971 | if (copy_from_user(&ifr, argp, ifreq_size)) | 970 | if (copy_from_user(&ifr, argp, sizeof(struct ifreq))) |
972 | return -EFAULT; | 971 | return -EFAULT; |
973 | err = dev_ioctl(net, cmd, &ifr, &need_copyout); | 972 | err = dev_ioctl(net, cmd, &ifr, &need_copyout); |
974 | if (!err && need_copyout) | 973 | if (!err && need_copyout) |
975 | if (copy_to_user(argp, &ifr, ifreq_size)) | 974 | if (copy_to_user(argp, &ifr, sizeof(struct ifreq))) |
976 | return -EFAULT; | 975 | return -EFAULT; |
977 | } | 976 | } |
978 | return err; | 977 | return err; |
@@ -1071,8 +1070,7 @@ static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
1071 | err = open_related_ns(&net->ns, get_net_ns); | 1070 | err = open_related_ns(&net->ns, get_net_ns); |
1072 | break; | 1071 | break; |
1073 | default: | 1072 | default: |
1074 | err = sock_do_ioctl(net, sock, cmd, arg, | 1073 | err = sock_do_ioctl(net, sock, cmd, arg); |
1075 | sizeof(struct ifreq)); | ||
1076 | break; | 1074 | break; |
1077 | } | 1075 | } |
1078 | return err; | 1076 | return err; |
@@ -2780,8 +2778,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock, | |||
2780 | int err; | 2778 | int err; |
2781 | 2779 | ||
2782 | set_fs(KERNEL_DS); | 2780 | set_fs(KERNEL_DS); |
2783 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv, | 2781 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); |
2784 | sizeof(struct compat_ifreq)); | ||
2785 | set_fs(old_fs); | 2782 | set_fs(old_fs); |
2786 | if (!err) | 2783 | if (!err) |
2787 | err = compat_put_timeval(&ktv, up); | 2784 | err = compat_put_timeval(&ktv, up); |
@@ -2797,8 +2794,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock, | |||
2797 | int err; | 2794 | int err; |
2798 | 2795 | ||
2799 | set_fs(KERNEL_DS); | 2796 | set_fs(KERNEL_DS); |
2800 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts, | 2797 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); |
2801 | sizeof(struct compat_ifreq)); | ||
2802 | set_fs(old_fs); | 2798 | set_fs(old_fs); |
2803 | if (!err) | 2799 | if (!err) |
2804 | err = compat_put_timespec(&kts, up); | 2800 | err = compat_put_timespec(&kts, up); |
@@ -2994,6 +2990,54 @@ static int compat_ifr_data_ioctl(struct net *net, unsigned int cmd, | |||
2994 | return dev_ioctl(net, cmd, &ifreq, NULL); | 2990 | return dev_ioctl(net, cmd, &ifreq, NULL); |
2995 | } | 2991 | } |
2996 | 2992 | ||
2993 | static int compat_ifreq_ioctl(struct net *net, struct socket *sock, | ||
2994 | unsigned int cmd, | ||
2995 | struct compat_ifreq __user *uifr32) | ||
2996 | { | ||
2997 | struct ifreq __user *uifr; | ||
2998 | int err; | ||
2999 | |||
3000 | /* Handle the fact that while struct ifreq has the same *layout* on | ||
3001 | * 32/64 for everything but ifreq::ifru_ifmap and ifreq::ifru_data, | ||
3002 | * which are handled elsewhere, it still has different *size* due to | ||
3003 | * ifreq::ifru_ifmap (which is 16 bytes on 32 bit, 24 bytes on 64-bit, | ||
3004 | * resulting in struct ifreq being 32 and 40 bytes respectively). | ||
3005 | * As a result, if the struct happens to be at the end of a page and | ||
3006 | * the next page isn't readable/writable, we get a fault. To prevent | ||
3007 | * that, copy back and forth to the full size. | ||
3008 | */ | ||
3009 | |||
3010 | uifr = compat_alloc_user_space(sizeof(*uifr)); | ||
3011 | if (copy_in_user(uifr, uifr32, sizeof(*uifr32))) | ||
3012 | return -EFAULT; | ||
3013 | |||
3014 | err = sock_do_ioctl(net, sock, cmd, (unsigned long)uifr); | ||
3015 | |||
3016 | if (!err) { | ||
3017 | switch (cmd) { | ||
3018 | case SIOCGIFFLAGS: | ||
3019 | case SIOCGIFMETRIC: | ||
3020 | case SIOCGIFMTU: | ||
3021 | case SIOCGIFMEM: | ||
3022 | case SIOCGIFHWADDR: | ||
3023 | case SIOCGIFINDEX: | ||
3024 | case SIOCGIFADDR: | ||
3025 | case SIOCGIFBRDADDR: | ||
3026 | case SIOCGIFDSTADDR: | ||
3027 | case SIOCGIFNETMASK: | ||
3028 | case SIOCGIFPFLAGS: | ||
3029 | case SIOCGIFTXQLEN: | ||
3030 | case SIOCGMIIPHY: | ||
3031 | case SIOCGMIIREG: | ||
3032 | case SIOCGIFNAME: | ||
3033 | if (copy_in_user(uifr32, uifr, sizeof(*uifr32))) | ||
3034 | err = -EFAULT; | ||
3035 | break; | ||
3036 | } | ||
3037 | } | ||
3038 | return err; | ||
3039 | } | ||
3040 | |||
2997 | static int compat_sioc_ifmap(struct net *net, unsigned int cmd, | 3041 | static int compat_sioc_ifmap(struct net *net, unsigned int cmd, |
2998 | struct compat_ifreq __user *uifr32) | 3042 | struct compat_ifreq __user *uifr32) |
2999 | { | 3043 | { |
@@ -3109,8 +3153,7 @@ static int routing_ioctl(struct net *net, struct socket *sock, | |||
3109 | } | 3153 | } |
3110 | 3154 | ||
3111 | set_fs(KERNEL_DS); | 3155 | set_fs(KERNEL_DS); |
3112 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r, | 3156 | ret = sock_do_ioctl(net, sock, cmd, (unsigned long) r); |
3113 | sizeof(struct compat_ifreq)); | ||
3114 | set_fs(old_fs); | 3157 | set_fs(old_fs); |
3115 | 3158 | ||
3116 | out: | 3159 | out: |
@@ -3210,21 +3253,22 @@ static int compat_sock_ioctl_trans(struct file *file, struct socket *sock, | |||
3210 | case SIOCSIFTXQLEN: | 3253 | case SIOCSIFTXQLEN: |
3211 | case SIOCBRADDIF: | 3254 | case SIOCBRADDIF: |
3212 | case SIOCBRDELIF: | 3255 | case SIOCBRDELIF: |
3256 | case SIOCGIFNAME: | ||
3213 | case SIOCSIFNAME: | 3257 | case SIOCSIFNAME: |
3214 | case SIOCGMIIPHY: | 3258 | case SIOCGMIIPHY: |
3215 | case SIOCGMIIREG: | 3259 | case SIOCGMIIREG: |
3216 | case SIOCSMIIREG: | 3260 | case SIOCSMIIREG: |
3217 | case SIOCSARP: | ||
3218 | case SIOCGARP: | ||
3219 | case SIOCDARP: | ||
3220 | case SIOCATMARK: | ||
3221 | case SIOCBONDENSLAVE: | 3261 | case SIOCBONDENSLAVE: |
3222 | case SIOCBONDRELEASE: | 3262 | case SIOCBONDRELEASE: |
3223 | case SIOCBONDSETHWADDR: | 3263 | case SIOCBONDSETHWADDR: |
3224 | case SIOCBONDCHANGEACTIVE: | 3264 | case SIOCBONDCHANGEACTIVE: |
3225 | case SIOCGIFNAME: | 3265 | return compat_ifreq_ioctl(net, sock, cmd, argp); |
3226 | return sock_do_ioctl(net, sock, cmd, arg, | 3266 | |
3227 | sizeof(struct compat_ifreq)); | 3267 | case SIOCSARP: |
3268 | case SIOCGARP: | ||
3269 | case SIOCDARP: | ||
3270 | case SIOCATMARK: | ||
3271 | return sock_do_ioctl(net, sock, cmd, arg); | ||
3228 | } | 3272 | } |
3229 | 3273 | ||
3230 | return -ENOIOCTLCMD; | 3274 | return -ENOIOCTLCMD; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index cf51b8f9b15f..1f200119268c 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
@@ -537,6 +537,99 @@ void svc_rdma_sync_reply_hdr(struct svcxprt_rdma *rdma, | |||
537 | DMA_TO_DEVICE); | 537 | DMA_TO_DEVICE); |
538 | } | 538 | } |
539 | 539 | ||
540 | /* If the xdr_buf has more elements than the device can | ||
541 | * transmit in a single RDMA Send, then the reply will | ||
542 | * have to be copied into a bounce buffer. | ||
543 | */ | ||
544 | static bool svc_rdma_pull_up_needed(struct svcxprt_rdma *rdma, | ||
545 | struct xdr_buf *xdr, | ||
546 | __be32 *wr_lst) | ||
547 | { | ||
548 | int elements; | ||
549 | |||
550 | /* xdr->head */ | ||
551 | elements = 1; | ||
552 | |||
553 | /* xdr->pages */ | ||
554 | if (!wr_lst) { | ||
555 | unsigned int remaining; | ||
556 | unsigned long pageoff; | ||
557 | |||
558 | pageoff = xdr->page_base & ~PAGE_MASK; | ||
559 | remaining = xdr->page_len; | ||
560 | while (remaining) { | ||
561 | ++elements; | ||
562 | remaining -= min_t(u32, PAGE_SIZE - pageoff, | ||
563 | remaining); | ||
564 | pageoff = 0; | ||
565 | } | ||
566 | } | ||
567 | |||
568 | /* xdr->tail */ | ||
569 | if (xdr->tail[0].iov_len) | ||
570 | ++elements; | ||
571 | |||
572 | /* assume 1 SGE is needed for the transport header */ | ||
573 | return elements >= rdma->sc_max_send_sges; | ||
574 | } | ||
575 | |||
576 | /* The device is not capable of sending the reply directly. | ||
577 | * Assemble the elements of @xdr into the transport header | ||
578 | * buffer. | ||
579 | */ | ||
580 | static int svc_rdma_pull_up_reply_msg(struct svcxprt_rdma *rdma, | ||
581 | struct svc_rdma_send_ctxt *ctxt, | ||
582 | struct xdr_buf *xdr, __be32 *wr_lst) | ||
583 | { | ||
584 | unsigned char *dst, *tailbase; | ||
585 | unsigned int taillen; | ||
586 | |||
587 | dst = ctxt->sc_xprt_buf; | ||
588 | dst += ctxt->sc_sges[0].length; | ||
589 | |||
590 | memcpy(dst, xdr->head[0].iov_base, xdr->head[0].iov_len); | ||
591 | dst += xdr->head[0].iov_len; | ||
592 | |||
593 | tailbase = xdr->tail[0].iov_base; | ||
594 | taillen = xdr->tail[0].iov_len; | ||
595 | if (wr_lst) { | ||
596 | u32 xdrpad; | ||
597 | |||
598 | xdrpad = xdr_padsize(xdr->page_len); | ||
599 | if (taillen && xdrpad) { | ||
600 | tailbase += xdrpad; | ||
601 | taillen -= xdrpad; | ||
602 | } | ||
603 | } else { | ||
604 | unsigned int len, remaining; | ||
605 | unsigned long pageoff; | ||
606 | struct page **ppages; | ||
607 | |||
608 | ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT); | ||
609 | pageoff = xdr->page_base & ~PAGE_MASK; | ||
610 | remaining = xdr->page_len; | ||
611 | while (remaining) { | ||
612 | len = min_t(u32, PAGE_SIZE - pageoff, remaining); | ||
613 | |||
614 | memcpy(dst, page_address(*ppages), len); | ||
615 | remaining -= len; | ||
616 | dst += len; | ||
617 | pageoff = 0; | ||
618 | } | ||
619 | } | ||
620 | |||
621 | if (taillen) | ||
622 | memcpy(dst, tailbase, taillen); | ||
623 | |||
624 | ctxt->sc_sges[0].length += xdr->len; | ||
625 | ib_dma_sync_single_for_device(rdma->sc_pd->device, | ||
626 | ctxt->sc_sges[0].addr, | ||
627 | ctxt->sc_sges[0].length, | ||
628 | DMA_TO_DEVICE); | ||
629 | |||
630 | return 0; | ||
631 | } | ||
632 | |||
540 | /* svc_rdma_map_reply_msg - Map the buffer holding RPC message | 633 | /* svc_rdma_map_reply_msg - Map the buffer holding RPC message |
541 | * @rdma: controlling transport | 634 | * @rdma: controlling transport |
542 | * @ctxt: send_ctxt for the Send WR | 635 | * @ctxt: send_ctxt for the Send WR |
@@ -559,8 +652,10 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, | |||
559 | u32 xdr_pad; | 652 | u32 xdr_pad; |
560 | int ret; | 653 | int ret; |
561 | 654 | ||
562 | if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) | 655 | if (svc_rdma_pull_up_needed(rdma, xdr, wr_lst)) |
563 | return -EIO; | 656 | return svc_rdma_pull_up_reply_msg(rdma, ctxt, xdr, wr_lst); |
657 | |||
658 | ++ctxt->sc_cur_sge_no; | ||
564 | ret = svc_rdma_dma_map_buf(rdma, ctxt, | 659 | ret = svc_rdma_dma_map_buf(rdma, ctxt, |
565 | xdr->head[0].iov_base, | 660 | xdr->head[0].iov_base, |
566 | xdr->head[0].iov_len); | 661 | xdr->head[0].iov_len); |
@@ -591,8 +686,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, | |||
591 | while (remaining) { | 686 | while (remaining) { |
592 | len = min_t(u32, PAGE_SIZE - page_off, remaining); | 687 | len = min_t(u32, PAGE_SIZE - page_off, remaining); |
593 | 688 | ||
594 | if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) | 689 | ++ctxt->sc_cur_sge_no; |
595 | return -EIO; | ||
596 | ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++, | 690 | ret = svc_rdma_dma_map_page(rdma, ctxt, *ppages++, |
597 | page_off, len); | 691 | page_off, len); |
598 | if (ret < 0) | 692 | if (ret < 0) |
@@ -606,8 +700,7 @@ int svc_rdma_map_reply_msg(struct svcxprt_rdma *rdma, | |||
606 | len = xdr->tail[0].iov_len; | 700 | len = xdr->tail[0].iov_len; |
607 | tail: | 701 | tail: |
608 | if (len) { | 702 | if (len) { |
609 | if (++ctxt->sc_cur_sge_no >= rdma->sc_max_send_sges) | 703 | ++ctxt->sc_cur_sge_no; |
610 | return -EIO; | ||
611 | ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); | 704 | ret = svc_rdma_dma_map_buf(rdma, ctxt, base, len); |
612 | if (ret < 0) | 705 | if (ret < 0) |
613 | return ret; | 706 | return ret; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 924c17d46903..57f86c63a463 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -419,12 +419,9 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
419 | /* Transport header, head iovec, tail iovec */ | 419 | /* Transport header, head iovec, tail iovec */ |
420 | newxprt->sc_max_send_sges = 3; | 420 | newxprt->sc_max_send_sges = 3; |
421 | /* Add one SGE per page list entry */ | 421 | /* Add one SGE per page list entry */ |
422 | newxprt->sc_max_send_sges += svcrdma_max_req_size / PAGE_SIZE; | 422 | newxprt->sc_max_send_sges += (svcrdma_max_req_size / PAGE_SIZE) + 1; |
423 | if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) { | 423 | if (newxprt->sc_max_send_sges > dev->attrs.max_send_sge) |
424 | pr_err("svcrdma: too few Send SGEs available (%d needed)\n", | 424 | newxprt->sc_max_send_sges = dev->attrs.max_send_sge; |
425 | newxprt->sc_max_send_sges); | ||
426 | goto errout; | ||
427 | } | ||
428 | newxprt->sc_max_req_size = svcrdma_max_req_size; | 425 | newxprt->sc_max_req_size = svcrdma_max_req_size; |
429 | newxprt->sc_max_requests = svcrdma_max_requests; | 426 | newxprt->sc_max_requests = svcrdma_max_requests; |
430 | newxprt->sc_max_bc_requests = svcrdma_max_bc_requests; | 427 | newxprt->sc_max_bc_requests = svcrdma_max_bc_requests; |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 2792a3cae682..85ad5c0678d0 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1145,7 +1145,7 @@ static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb, | |||
1145 | default: | 1145 | default: |
1146 | pr_warn("Dropping received illegal msg type\n"); | 1146 | pr_warn("Dropping received illegal msg type\n"); |
1147 | kfree_skb(skb); | 1147 | kfree_skb(skb); |
1148 | return false; | 1148 | return true; |
1149 | }; | 1149 | }; |
1150 | } | 1150 | } |
1151 | 1151 | ||
@@ -1425,6 +1425,10 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, | |||
1425 | l->rcv_unacked = 0; | 1425 | l->rcv_unacked = 0; |
1426 | } else { | 1426 | } else { |
1427 | /* RESET_MSG or ACTIVATE_MSG */ | 1427 | /* RESET_MSG or ACTIVATE_MSG */ |
1428 | if (mtyp == ACTIVATE_MSG) { | ||
1429 | msg_set_dest_session_valid(hdr, 1); | ||
1430 | msg_set_dest_session(hdr, l->peer_session); | ||
1431 | } | ||
1428 | msg_set_max_pkt(hdr, l->advertised_mtu); | 1432 | msg_set_max_pkt(hdr, l->advertised_mtu); |
1429 | strcpy(data, l->if_name); | 1433 | strcpy(data, l->if_name); |
1430 | msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); | 1434 | msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME); |
@@ -1642,6 +1646,17 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, | |||
1642 | rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); | 1646 | rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); |
1643 | break; | 1647 | break; |
1644 | } | 1648 | } |
1649 | |||
1650 | /* If this endpoint was re-created while peer was ESTABLISHING | ||
1651 | * it doesn't know current session number. Force re-synch. | ||
1652 | */ | ||
1653 | if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) && | ||
1654 | l->session != msg_dest_session(hdr)) { | ||
1655 | if (less(l->session, msg_dest_session(hdr))) | ||
1656 | l->session = msg_dest_session(hdr) + 1; | ||
1657 | break; | ||
1658 | } | ||
1659 | |||
1645 | /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ | 1660 | /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ |
1646 | if (mtyp == RESET_MSG || !link_is_up(l)) | 1661 | if (mtyp == RESET_MSG || !link_is_up(l)) |
1647 | rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); | 1662 | rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); |
diff --git a/net/tipc/msg.h b/net/tipc/msg.h index a0924956bb61..d7e4b8b93f9d 100644 --- a/net/tipc/msg.h +++ b/net/tipc/msg.h | |||
@@ -360,6 +360,28 @@ static inline void msg_set_bcast_ack(struct tipc_msg *m, u16 n) | |||
360 | msg_set_bits(m, 1, 0, 0xffff, n); | 360 | msg_set_bits(m, 1, 0, 0xffff, n); |
361 | } | 361 | } |
362 | 362 | ||
363 | /* Note: reusing bits in word 1 for ACTIVATE_MSG only, to re-synch | ||
364 | * link peer session number | ||
365 | */ | ||
366 | static inline bool msg_dest_session_valid(struct tipc_msg *m) | ||
367 | { | ||
368 | return msg_bits(m, 1, 16, 0x1); | ||
369 | } | ||
370 | |||
371 | static inline void msg_set_dest_session_valid(struct tipc_msg *m, bool valid) | ||
372 | { | ||
373 | msg_set_bits(m, 1, 16, 0x1, valid); | ||
374 | } | ||
375 | |||
376 | static inline u16 msg_dest_session(struct tipc_msg *m) | ||
377 | { | ||
378 | return msg_bits(m, 1, 0, 0xffff); | ||
379 | } | ||
380 | |||
381 | static inline void msg_set_dest_session(struct tipc_msg *m, u16 n) | ||
382 | { | ||
383 | msg_set_bits(m, 1, 0, 0xffff, n); | ||
384 | } | ||
363 | 385 | ||
364 | /* | 386 | /* |
365 | * Word 2 | 387 | * Word 2 |
diff --git a/net/tipc/node.c b/net/tipc/node.c index db2a6c3e0be9..2dc4919ab23c 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -830,15 +830,16 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) | |||
830 | tipc_node_write_lock(n); | 830 | tipc_node_write_lock(n); |
831 | if (!tipc_link_is_establishing(l)) { | 831 | if (!tipc_link_is_establishing(l)) { |
832 | __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); | 832 | __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); |
833 | if (delete) { | ||
834 | kfree(l); | ||
835 | le->link = NULL; | ||
836 | n->link_cnt--; | ||
837 | } | ||
838 | } else { | 833 | } else { |
839 | /* Defuse pending tipc_node_link_up() */ | 834 | /* Defuse pending tipc_node_link_up() */ |
835 | tipc_link_reset(l); | ||
840 | tipc_link_fsm_evt(l, LINK_RESET_EVT); | 836 | tipc_link_fsm_evt(l, LINK_RESET_EVT); |
841 | } | 837 | } |
838 | if (delete) { | ||
839 | kfree(l); | ||
840 | le->link = NULL; | ||
841 | n->link_cnt--; | ||
842 | } | ||
842 | trace_tipc_node_link_down(n, true, "node link down or deleted!"); | 843 | trace_tipc_node_link_down(n, true, "node link down or deleted!"); |
843 | tipc_node_write_unlock(n); | 844 | tipc_node_write_unlock(n); |
844 | if (delete) | 845 | if (delete) |
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c index 11cdc8f7db63..bf5b54b513bc 100644 --- a/net/tls/tls_sw.c +++ b/net/tls/tls_sw.c | |||
@@ -439,6 +439,8 @@ static int tls_do_encryption(struct sock *sk, | |||
439 | struct scatterlist *sge = sk_msg_elem(msg_en, start); | 439 | struct scatterlist *sge = sk_msg_elem(msg_en, start); |
440 | int rc; | 440 | int rc; |
441 | 441 | ||
442 | memcpy(rec->iv_data, tls_ctx->tx.iv, sizeof(rec->iv_data)); | ||
443 | |||
442 | sge->offset += tls_ctx->tx.prepend_size; | 444 | sge->offset += tls_ctx->tx.prepend_size; |
443 | sge->length -= tls_ctx->tx.prepend_size; | 445 | sge->length -= tls_ctx->tx.prepend_size; |
444 | 446 | ||
@@ -448,7 +450,7 @@ static int tls_do_encryption(struct sock *sk, | |||
448 | aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); | 450 | aead_request_set_ad(aead_req, TLS_AAD_SPACE_SIZE); |
449 | aead_request_set_crypt(aead_req, rec->sg_aead_in, | 451 | aead_request_set_crypt(aead_req, rec->sg_aead_in, |
450 | rec->sg_aead_out, | 452 | rec->sg_aead_out, |
451 | data_len, tls_ctx->tx.iv); | 453 | data_len, rec->iv_data); |
452 | 454 | ||
453 | aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, | 455 | aead_request_set_callback(aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, |
454 | tls_encrypt_done, sk); | 456 | tls_encrypt_done, sk); |
@@ -1792,7 +1794,9 @@ void tls_sw_free_resources_tx(struct sock *sk) | |||
1792 | if (atomic_read(&ctx->encrypt_pending)) | 1794 | if (atomic_read(&ctx->encrypt_pending)) |
1793 | crypto_wait_req(-EINPROGRESS, &ctx->async_wait); | 1795 | crypto_wait_req(-EINPROGRESS, &ctx->async_wait); |
1794 | 1796 | ||
1797 | release_sock(sk); | ||
1795 | cancel_delayed_work_sync(&ctx->tx_work.work); | 1798 | cancel_delayed_work_sync(&ctx->tx_work.work); |
1799 | lock_sock(sk); | ||
1796 | 1800 | ||
1797 | /* Tx whatever records we can transmit and abandon the rest */ | 1801 | /* Tx whatever records we can transmit and abandon the rest */ |
1798 | tls_tx_records(sk, -1); | 1802 | tls_tx_records(sk, -1); |
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c index 5d3cce9e8744..15eb5d3d4750 100644 --- a/net/vmw_vsock/virtio_transport.c +++ b/net/vmw_vsock/virtio_transport.c | |||
@@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void) | |||
75 | { | 75 | { |
76 | struct virtio_vsock *vsock = virtio_vsock_get(); | 76 | struct virtio_vsock *vsock = virtio_vsock_get(); |
77 | 77 | ||
78 | if (!vsock) | ||
79 | return VMADDR_CID_ANY; | ||
80 | |||
78 | return vsock->guest_cid; | 81 | return vsock->guest_cid; |
79 | } | 82 | } |
80 | 83 | ||
@@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) | |||
584 | 587 | ||
585 | virtio_vsock_update_guest_cid(vsock); | 588 | virtio_vsock_update_guest_cid(vsock); |
586 | 589 | ||
587 | ret = vsock_core_init(&virtio_transport.transport); | ||
588 | if (ret < 0) | ||
589 | goto out_vqs; | ||
590 | |||
591 | vsock->rx_buf_nr = 0; | 590 | vsock->rx_buf_nr = 0; |
592 | vsock->rx_buf_max_nr = 0; | 591 | vsock->rx_buf_max_nr = 0; |
593 | atomic_set(&vsock->queued_replies, 0); | 592 | atomic_set(&vsock->queued_replies, 0); |
@@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev) | |||
618 | mutex_unlock(&the_virtio_vsock_mutex); | 617 | mutex_unlock(&the_virtio_vsock_mutex); |
619 | return 0; | 618 | return 0; |
620 | 619 | ||
621 | out_vqs: | ||
622 | vsock->vdev->config->del_vqs(vsock->vdev); | ||
623 | out: | 620 | out: |
624 | kfree(vsock); | 621 | kfree(vsock); |
625 | mutex_unlock(&the_virtio_vsock_mutex); | 622 | mutex_unlock(&the_virtio_vsock_mutex); |
@@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev) | |||
637 | flush_work(&vsock->event_work); | 634 | flush_work(&vsock->event_work); |
638 | flush_work(&vsock->send_pkt_work); | 635 | flush_work(&vsock->send_pkt_work); |
639 | 636 | ||
637 | /* Reset all connected sockets when the device disappear */ | ||
638 | vsock_for_each_connected_socket(virtio_vsock_reset_sock); | ||
639 | |||
640 | vdev->config->reset(vdev); | 640 | vdev->config->reset(vdev); |
641 | 641 | ||
642 | mutex_lock(&vsock->rx_lock); | 642 | mutex_lock(&vsock->rx_lock); |
@@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev) | |||
669 | 669 | ||
670 | mutex_lock(&the_virtio_vsock_mutex); | 670 | mutex_lock(&the_virtio_vsock_mutex); |
671 | the_virtio_vsock = NULL; | 671 | the_virtio_vsock = NULL; |
672 | vsock_core_exit(); | ||
673 | mutex_unlock(&the_virtio_vsock_mutex); | 672 | mutex_unlock(&the_virtio_vsock_mutex); |
674 | 673 | ||
675 | vdev->config->del_vqs(vdev); | 674 | vdev->config->del_vqs(vdev); |
@@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void) | |||
702 | virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); | 701 | virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0); |
703 | if (!virtio_vsock_workqueue) | 702 | if (!virtio_vsock_workqueue) |
704 | return -ENOMEM; | 703 | return -ENOMEM; |
704 | |||
705 | ret = register_virtio_driver(&virtio_vsock_driver); | 705 | ret = register_virtio_driver(&virtio_vsock_driver); |
706 | if (ret) | 706 | if (ret) |
707 | destroy_workqueue(virtio_vsock_workqueue); | 707 | goto out_wq; |
708 | |||
709 | ret = vsock_core_init(&virtio_transport.transport); | ||
710 | if (ret) | ||
711 | goto out_vdr; | ||
712 | |||
713 | return 0; | ||
714 | |||
715 | out_vdr: | ||
716 | unregister_virtio_driver(&virtio_vsock_driver); | ||
717 | out_wq: | ||
718 | destroy_workqueue(virtio_vsock_workqueue); | ||
708 | return ret; | 719 | return ret; |
720 | |||
709 | } | 721 | } |
710 | 722 | ||
711 | static void __exit virtio_vsock_exit(void) | 723 | static void __exit virtio_vsock_exit(void) |
712 | { | 724 | { |
725 | vsock_core_exit(); | ||
713 | unregister_virtio_driver(&virtio_vsock_driver); | 726 | unregister_virtio_driver(&virtio_vsock_driver); |
714 | destroy_workqueue(virtio_vsock_workqueue); | 727 | destroy_workqueue(virtio_vsock_workqueue); |
715 | } | 728 | } |
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index c361ce782412..c3d5ab01fba7 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c | |||
@@ -1651,6 +1651,10 @@ static void vmci_transport_cleanup(struct work_struct *work) | |||
1651 | 1651 | ||
1652 | static void vmci_transport_destruct(struct vsock_sock *vsk) | 1652 | static void vmci_transport_destruct(struct vsock_sock *vsk) |
1653 | { | 1653 | { |
1654 | /* transport can be NULL if we hit a failure at init() time */ | ||
1655 | if (!vmci_trans(vsk)) | ||
1656 | return; | ||
1657 | |||
1654 | /* Ensure that the detach callback doesn't use the sk/vsk | 1658 | /* Ensure that the detach callback doesn't use the sk/vsk |
1655 | * we are about to destruct. | 1659 | * we are about to destruct. |
1656 | */ | 1660 | */ |
diff --git a/net/wireless/ap.c b/net/wireless/ap.c index 882d97bdc6bf..550ac9d827fe 100644 --- a/net/wireless/ap.c +++ b/net/wireless/ap.c | |||
@@ -41,6 +41,8 @@ int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev, | |||
41 | cfg80211_sched_dfs_chan_update(rdev); | 41 | cfg80211_sched_dfs_chan_update(rdev); |
42 | } | 42 | } |
43 | 43 | ||
44 | schedule_work(&cfg80211_disconnect_work); | ||
45 | |||
44 | return err; | 46 | return err; |
45 | } | 47 | } |
46 | 48 | ||
diff --git a/net/wireless/core.c b/net/wireless/core.c index 623dfe5e211c..b36ad8efb5e5 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -1068,6 +1068,8 @@ static void __cfg80211_unregister_wdev(struct wireless_dev *wdev, bool sync) | |||
1068 | 1068 | ||
1069 | ASSERT_RTNL(); | 1069 | ASSERT_RTNL(); |
1070 | 1070 | ||
1071 | flush_work(&wdev->pmsr_free_wk); | ||
1072 | |||
1071 | nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); | 1073 | nl80211_notify_iface(rdev, wdev, NL80211_CMD_DEL_INTERFACE); |
1072 | 1074 | ||
1073 | list_del_rcu(&wdev->list); | 1075 | list_del_rcu(&wdev->list); |
diff --git a/net/wireless/core.h b/net/wireless/core.h index c5d6f3418601..f6b40563dc63 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -445,6 +445,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev); | |||
445 | bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, | 445 | bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range, |
446 | u32 center_freq_khz, u32 bw_khz); | 446 | u32 center_freq_khz, u32 bw_khz); |
447 | 447 | ||
448 | extern struct work_struct cfg80211_disconnect_work; | ||
449 | |||
448 | /** | 450 | /** |
449 | * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable | 451 | * cfg80211_chandef_dfs_usable - checks if chandef is DFS usable |
450 | * @wiphy: the wiphy to validate against | 452 | * @wiphy: the wiphy to validate against |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 74150ad95823..d91a408db113 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -250,7 +250,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = { | |||
250 | [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = | 250 | [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = |
251 | NLA_POLICY_MAX(NLA_U8, 15), | 251 | NLA_POLICY_MAX(NLA_U8, 15), |
252 | [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = | 252 | [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = |
253 | NLA_POLICY_MAX(NLA_U8, 15), | 253 | NLA_POLICY_MAX(NLA_U8, 31), |
254 | [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, | 254 | [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, |
255 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, | 255 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, |
256 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, | 256 | [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, |
diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c index de9286703280..0216ab555249 100644 --- a/net/wireless/pmsr.c +++ b/net/wireless/pmsr.c | |||
@@ -256,8 +256,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) | |||
256 | if (err) | 256 | if (err) |
257 | goto out_err; | 257 | goto out_err; |
258 | } else { | 258 | } else { |
259 | memcpy(req->mac_addr, nla_data(info->attrs[NL80211_ATTR_MAC]), | 259 | memcpy(req->mac_addr, wdev_address(wdev), ETH_ALEN); |
260 | ETH_ALEN); | ||
261 | memset(req->mac_addr_mask, 0xff, ETH_ALEN); | 260 | memset(req->mac_addr_mask, 0xff, ETH_ALEN); |
262 | } | 261 | } |
263 | 262 | ||
@@ -272,6 +271,7 @@ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info) | |||
272 | 271 | ||
273 | req->n_peers = count; | 272 | req->n_peers = count; |
274 | req->cookie = cfg80211_assign_cookie(rdev); | 273 | req->cookie = cfg80211_assign_cookie(rdev); |
274 | req->nl_portid = info->snd_portid; | ||
275 | 275 | ||
276 | err = rdev_start_pmsr(rdev, wdev, req); | 276 | err = rdev_start_pmsr(rdev, wdev, req); |
277 | if (err) | 277 | if (err) |
@@ -530,14 +530,14 @@ free: | |||
530 | } | 530 | } |
531 | EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); | 531 | EXPORT_SYMBOL_GPL(cfg80211_pmsr_report); |
532 | 532 | ||
533 | void cfg80211_pmsr_free_wk(struct work_struct *work) | 533 | static void cfg80211_pmsr_process_abort(struct wireless_dev *wdev) |
534 | { | 534 | { |
535 | struct wireless_dev *wdev = container_of(work, struct wireless_dev, | ||
536 | pmsr_free_wk); | ||
537 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); | 535 | struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy); |
538 | struct cfg80211_pmsr_request *req, *tmp; | 536 | struct cfg80211_pmsr_request *req, *tmp; |
539 | LIST_HEAD(free_list); | 537 | LIST_HEAD(free_list); |
540 | 538 | ||
539 | lockdep_assert_held(&wdev->mtx); | ||
540 | |||
541 | spin_lock_bh(&wdev->pmsr_lock); | 541 | spin_lock_bh(&wdev->pmsr_lock); |
542 | list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { | 542 | list_for_each_entry_safe(req, tmp, &wdev->pmsr_list, list) { |
543 | if (req->nl_portid) | 543 | if (req->nl_portid) |
@@ -547,14 +547,22 @@ void cfg80211_pmsr_free_wk(struct work_struct *work) | |||
547 | spin_unlock_bh(&wdev->pmsr_lock); | 547 | spin_unlock_bh(&wdev->pmsr_lock); |
548 | 548 | ||
549 | list_for_each_entry_safe(req, tmp, &free_list, list) { | 549 | list_for_each_entry_safe(req, tmp, &free_list, list) { |
550 | wdev_lock(wdev); | ||
551 | rdev_abort_pmsr(rdev, wdev, req); | 550 | rdev_abort_pmsr(rdev, wdev, req); |
552 | wdev_unlock(wdev); | ||
553 | 551 | ||
554 | kfree(req); | 552 | kfree(req); |
555 | } | 553 | } |
556 | } | 554 | } |
557 | 555 | ||
556 | void cfg80211_pmsr_free_wk(struct work_struct *work) | ||
557 | { | ||
558 | struct wireless_dev *wdev = container_of(work, struct wireless_dev, | ||
559 | pmsr_free_wk); | ||
560 | |||
561 | wdev_lock(wdev); | ||
562 | cfg80211_pmsr_process_abort(wdev); | ||
563 | wdev_unlock(wdev); | ||
564 | } | ||
565 | |||
558 | void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) | 566 | void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) |
559 | { | 567 | { |
560 | struct cfg80211_pmsr_request *req; | 568 | struct cfg80211_pmsr_request *req; |
@@ -568,8 +576,8 @@ void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev) | |||
568 | spin_unlock_bh(&wdev->pmsr_lock); | 576 | spin_unlock_bh(&wdev->pmsr_lock); |
569 | 577 | ||
570 | if (found) | 578 | if (found) |
571 | schedule_work(&wdev->pmsr_free_wk); | 579 | cfg80211_pmsr_process_abort(wdev); |
572 | flush_work(&wdev->pmsr_free_wk); | 580 | |
573 | WARN_ON(!list_empty(&wdev->pmsr_list)); | 581 | WARN_ON(!list_empty(&wdev->pmsr_list)); |
574 | } | 582 | } |
575 | 583 | ||
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f741d8376a46..7d34cb884840 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -667,7 +667,7 @@ static void disconnect_work(struct work_struct *work) | |||
667 | rtnl_unlock(); | 667 | rtnl_unlock(); |
668 | } | 668 | } |
669 | 669 | ||
670 | static DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); | 670 | DECLARE_WORK(cfg80211_disconnect_work, disconnect_work); |
671 | 671 | ||
672 | 672 | ||
673 | /* | 673 | /* |
diff --git a/net/wireless/util.c b/net/wireless/util.c index cd48cdd582c0..ec30e3732c7b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -5,7 +5,7 @@ | |||
5 | * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> | 5 | * Copyright 2007-2009 Johannes Berg <johannes@sipsolutions.net> |
6 | * Copyright 2013-2014 Intel Mobile Communications GmbH | 6 | * Copyright 2013-2014 Intel Mobile Communications GmbH |
7 | * Copyright 2017 Intel Deutschland GmbH | 7 | * Copyright 2017 Intel Deutschland GmbH |
8 | * Copyright (C) 2018 Intel Corporation | 8 | * Copyright (C) 2018-2019 Intel Corporation |
9 | */ | 9 | */ |
10 | #include <linux/export.h> | 10 | #include <linux/export.h> |
11 | #include <linux/bitops.h> | 11 | #include <linux/bitops.h> |
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/mpls.h> | 19 | #include <linux/mpls.h> |
20 | #include <linux/gcd.h> | 20 | #include <linux/gcd.h> |
21 | #include <linux/bitfield.h> | 21 | #include <linux/bitfield.h> |
22 | #include <linux/nospec.h> | ||
22 | #include "core.h" | 23 | #include "core.h" |
23 | #include "rdev-ops.h" | 24 | #include "rdev-ops.h" |
24 | 25 | ||
@@ -715,20 +716,25 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, | |||
715 | { | 716 | { |
716 | unsigned int dscp; | 717 | unsigned int dscp; |
717 | unsigned char vlan_priority; | 718 | unsigned char vlan_priority; |
719 | unsigned int ret; | ||
718 | 720 | ||
719 | /* skb->priority values from 256->263 are magic values to | 721 | /* skb->priority values from 256->263 are magic values to |
720 | * directly indicate a specific 802.1d priority. This is used | 722 | * directly indicate a specific 802.1d priority. This is used |
721 | * to allow 802.1d priority to be passed directly in from VLAN | 723 | * to allow 802.1d priority to be passed directly in from VLAN |
722 | * tags, etc. | 724 | * tags, etc. |
723 | */ | 725 | */ |
724 | if (skb->priority >= 256 && skb->priority <= 263) | 726 | if (skb->priority >= 256 && skb->priority <= 263) { |
725 | return skb->priority - 256; | 727 | ret = skb->priority - 256; |
728 | goto out; | ||
729 | } | ||
726 | 730 | ||
727 | if (skb_vlan_tag_present(skb)) { | 731 | if (skb_vlan_tag_present(skb)) { |
728 | vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) | 732 | vlan_priority = (skb_vlan_tag_get(skb) & VLAN_PRIO_MASK) |
729 | >> VLAN_PRIO_SHIFT; | 733 | >> VLAN_PRIO_SHIFT; |
730 | if (vlan_priority > 0) | 734 | if (vlan_priority > 0) { |
731 | return vlan_priority; | 735 | ret = vlan_priority; |
736 | goto out; | ||
737 | } | ||
732 | } | 738 | } |
733 | 739 | ||
734 | switch (skb->protocol) { | 740 | switch (skb->protocol) { |
@@ -747,8 +753,9 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, | |||
747 | if (!mpls) | 753 | if (!mpls) |
748 | return 0; | 754 | return 0; |
749 | 755 | ||
750 | return (ntohl(mpls->entry) & MPLS_LS_TC_MASK) | 756 | ret = (ntohl(mpls->entry) & MPLS_LS_TC_MASK) |
751 | >> MPLS_LS_TC_SHIFT; | 757 | >> MPLS_LS_TC_SHIFT; |
758 | goto out; | ||
752 | } | 759 | } |
753 | case htons(ETH_P_80221): | 760 | case htons(ETH_P_80221): |
754 | /* 802.21 is always network control traffic */ | 761 | /* 802.21 is always network control traffic */ |
@@ -761,18 +768,24 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb, | |||
761 | unsigned int i, tmp_dscp = dscp >> 2; | 768 | unsigned int i, tmp_dscp = dscp >> 2; |
762 | 769 | ||
763 | for (i = 0; i < qos_map->num_des; i++) { | 770 | for (i = 0; i < qos_map->num_des; i++) { |
764 | if (tmp_dscp == qos_map->dscp_exception[i].dscp) | 771 | if (tmp_dscp == qos_map->dscp_exception[i].dscp) { |
765 | return qos_map->dscp_exception[i].up; | 772 | ret = qos_map->dscp_exception[i].up; |
773 | goto out; | ||
774 | } | ||
766 | } | 775 | } |
767 | 776 | ||
768 | for (i = 0; i < 8; i++) { | 777 | for (i = 0; i < 8; i++) { |
769 | if (tmp_dscp >= qos_map->up[i].low && | 778 | if (tmp_dscp >= qos_map->up[i].low && |
770 | tmp_dscp <= qos_map->up[i].high) | 779 | tmp_dscp <= qos_map->up[i].high) { |
771 | return i; | 780 | ret = i; |
781 | goto out; | ||
782 | } | ||
772 | } | 783 | } |
773 | } | 784 | } |
774 | 785 | ||
775 | return dscp >> 5; | 786 | ret = dscp >> 5; |
787 | out: | ||
788 | return array_index_nospec(ret, IEEE80211_NUM_TIDS); | ||
776 | } | 789 | } |
777 | EXPORT_SYMBOL(cfg80211_classify8021d); | 790 | EXPORT_SYMBOL(cfg80211_classify8021d); |
778 | 791 | ||
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index 5121729b8b63..ec3a828672ef 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -352,17 +352,15 @@ static unsigned int x25_new_lci(struct x25_neigh *nb) | |||
352 | unsigned int lci = 1; | 352 | unsigned int lci = 1; |
353 | struct sock *sk; | 353 | struct sock *sk; |
354 | 354 | ||
355 | read_lock_bh(&x25_list_lock); | 355 | while ((sk = x25_find_socket(lci, nb)) != NULL) { |
356 | |||
357 | while ((sk = __x25_find_socket(lci, nb)) != NULL) { | ||
358 | sock_put(sk); | 356 | sock_put(sk); |
359 | if (++lci == 4096) { | 357 | if (++lci == 4096) { |
360 | lci = 0; | 358 | lci = 0; |
361 | break; | 359 | break; |
362 | } | 360 | } |
361 | cond_resched(); | ||
363 | } | 362 | } |
364 | 363 | ||
365 | read_unlock_bh(&x25_list_lock); | ||
366 | return lci; | 364 | return lci; |
367 | } | 365 | } |
368 | 366 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 934492bad8e0..ba0a4048c846 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -680,16 +680,6 @@ static void xfrm_hash_resize(struct work_struct *work) | |||
680 | mutex_unlock(&hash_resize_mutex); | 680 | mutex_unlock(&hash_resize_mutex); |
681 | } | 681 | } |
682 | 682 | ||
683 | static void xfrm_hash_reset_inexact_table(struct net *net) | ||
684 | { | ||
685 | struct xfrm_pol_inexact_bin *b; | ||
686 | |||
687 | lockdep_assert_held(&net->xfrm.xfrm_policy_lock); | ||
688 | |||
689 | list_for_each_entry(b, &net->xfrm.inexact_bins, inexact_bins) | ||
690 | INIT_HLIST_HEAD(&b->hhead); | ||
691 | } | ||
692 | |||
693 | /* Make sure *pol can be inserted into fastbin. | 683 | /* Make sure *pol can be inserted into fastbin. |
694 | * Useful to check that later insert requests will be sucessful | 684 | * Useful to check that later insert requests will be sucessful |
695 | * (provided xfrm_policy_lock is held throughout). | 685 | * (provided xfrm_policy_lock is held throughout). |
@@ -833,13 +823,13 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net, | |||
833 | u16 family) | 823 | u16 family) |
834 | { | 824 | { |
835 | unsigned int matched_s, matched_d; | 825 | unsigned int matched_s, matched_d; |
836 | struct hlist_node *newpos = NULL; | ||
837 | struct xfrm_policy *policy, *p; | 826 | struct xfrm_policy *policy, *p; |
838 | 827 | ||
839 | matched_s = 0; | 828 | matched_s = 0; |
840 | matched_d = 0; | 829 | matched_d = 0; |
841 | 830 | ||
842 | list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { | 831 | list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { |
832 | struct hlist_node *newpos = NULL; | ||
843 | bool matches_s, matches_d; | 833 | bool matches_s, matches_d; |
844 | 834 | ||
845 | if (!policy->bydst_reinsert) | 835 | if (!policy->bydst_reinsert) |
@@ -849,16 +839,19 @@ static void xfrm_policy_inexact_list_reinsert(struct net *net, | |||
849 | 839 | ||
850 | policy->bydst_reinsert = false; | 840 | policy->bydst_reinsert = false; |
851 | hlist_for_each_entry(p, &n->hhead, bydst) { | 841 | hlist_for_each_entry(p, &n->hhead, bydst) { |
852 | if (policy->priority >= p->priority) | 842 | if (policy->priority > p->priority) |
843 | newpos = &p->bydst; | ||
844 | else if (policy->priority == p->priority && | ||
845 | policy->pos > p->pos) | ||
853 | newpos = &p->bydst; | 846 | newpos = &p->bydst; |
854 | else | 847 | else |
855 | break; | 848 | break; |
856 | } | 849 | } |
857 | 850 | ||
858 | if (newpos) | 851 | if (newpos) |
859 | hlist_add_behind(&policy->bydst, newpos); | 852 | hlist_add_behind_rcu(&policy->bydst, newpos); |
860 | else | 853 | else |
861 | hlist_add_head(&policy->bydst, &n->hhead); | 854 | hlist_add_head_rcu(&policy->bydst, &n->hhead); |
862 | 855 | ||
863 | /* paranoia checks follow. | 856 | /* paranoia checks follow. |
864 | * Check that the reinserted policy matches at least | 857 | * Check that the reinserted policy matches at least |
@@ -893,12 +886,13 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net, | |||
893 | struct rb_root *new, | 886 | struct rb_root *new, |
894 | u16 family) | 887 | u16 family) |
895 | { | 888 | { |
896 | struct rb_node **p, *parent = NULL; | ||
897 | struct xfrm_pol_inexact_node *node; | 889 | struct xfrm_pol_inexact_node *node; |
890 | struct rb_node **p, *parent; | ||
898 | 891 | ||
899 | /* we should not have another subtree here */ | 892 | /* we should not have another subtree here */ |
900 | WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root)); | 893 | WARN_ON_ONCE(!RB_EMPTY_ROOT(&n->root)); |
901 | 894 | restart: | |
895 | parent = NULL; | ||
902 | p = &new->rb_node; | 896 | p = &new->rb_node; |
903 | while (*p) { | 897 | while (*p) { |
904 | u8 prefixlen; | 898 | u8 prefixlen; |
@@ -918,12 +912,11 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net, | |||
918 | } else { | 912 | } else { |
919 | struct xfrm_policy *tmp; | 913 | struct xfrm_policy *tmp; |
920 | 914 | ||
921 | hlist_for_each_entry(tmp, &node->hhead, bydst) | 915 | hlist_for_each_entry(tmp, &n->hhead, bydst) { |
922 | tmp->bydst_reinsert = true; | ||
923 | hlist_for_each_entry(tmp, &n->hhead, bydst) | ||
924 | tmp->bydst_reinsert = true; | 916 | tmp->bydst_reinsert = true; |
917 | hlist_del_rcu(&tmp->bydst); | ||
918 | } | ||
925 | 919 | ||
926 | INIT_HLIST_HEAD(&node->hhead); | ||
927 | xfrm_policy_inexact_list_reinsert(net, node, family); | 920 | xfrm_policy_inexact_list_reinsert(net, node, family); |
928 | 921 | ||
929 | if (node->prefixlen == n->prefixlen) { | 922 | if (node->prefixlen == n->prefixlen) { |
@@ -935,8 +928,7 @@ static void xfrm_policy_inexact_node_reinsert(struct net *net, | |||
935 | kfree_rcu(n, rcu); | 928 | kfree_rcu(n, rcu); |
936 | n = node; | 929 | n = node; |
937 | n->prefixlen = prefixlen; | 930 | n->prefixlen = prefixlen; |
938 | *p = new->rb_node; | 931 | goto restart; |
939 | parent = NULL; | ||
940 | } | 932 | } |
941 | } | 933 | } |
942 | 934 | ||
@@ -965,12 +957,11 @@ static void xfrm_policy_inexact_node_merge(struct net *net, | |||
965 | family); | 957 | family); |
966 | } | 958 | } |
967 | 959 | ||
968 | hlist_for_each_entry(tmp, &v->hhead, bydst) | 960 | hlist_for_each_entry(tmp, &v->hhead, bydst) { |
969 | tmp->bydst_reinsert = true; | ||
970 | hlist_for_each_entry(tmp, &n->hhead, bydst) | ||
971 | tmp->bydst_reinsert = true; | 961 | tmp->bydst_reinsert = true; |
962 | hlist_del_rcu(&tmp->bydst); | ||
963 | } | ||
972 | 964 | ||
973 | INIT_HLIST_HEAD(&n->hhead); | ||
974 | xfrm_policy_inexact_list_reinsert(net, n, family); | 965 | xfrm_policy_inexact_list_reinsert(net, n, family); |
975 | } | 966 | } |
976 | 967 | ||
@@ -1235,6 +1226,7 @@ static void xfrm_hash_rebuild(struct work_struct *work) | |||
1235 | } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq)); | 1226 | } while (read_seqretry(&net->xfrm.policy_hthresh.lock, seq)); |
1236 | 1227 | ||
1237 | spin_lock_bh(&net->xfrm.xfrm_policy_lock); | 1228 | spin_lock_bh(&net->xfrm.xfrm_policy_lock); |
1229 | write_seqcount_begin(&xfrm_policy_hash_generation); | ||
1238 | 1230 | ||
1239 | /* make sure that we can insert the indirect policies again before | 1231 | /* make sure that we can insert the indirect policies again before |
1240 | * we start with destructive action. | 1232 | * we start with destructive action. |
@@ -1278,10 +1270,14 @@ static void xfrm_hash_rebuild(struct work_struct *work) | |||
1278 | } | 1270 | } |
1279 | 1271 | ||
1280 | /* reset the bydst and inexact table in all directions */ | 1272 | /* reset the bydst and inexact table in all directions */ |
1281 | xfrm_hash_reset_inexact_table(net); | ||
1282 | |||
1283 | for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { | 1273 | for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { |
1284 | INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]); | 1274 | struct hlist_node *n; |
1275 | |||
1276 | hlist_for_each_entry_safe(policy, n, | ||
1277 | &net->xfrm.policy_inexact[dir], | ||
1278 | bydst_inexact_list) | ||
1279 | hlist_del_init(&policy->bydst_inexact_list); | ||
1280 | |||
1285 | hmask = net->xfrm.policy_bydst[dir].hmask; | 1281 | hmask = net->xfrm.policy_bydst[dir].hmask; |
1286 | odst = net->xfrm.policy_bydst[dir].table; | 1282 | odst = net->xfrm.policy_bydst[dir].table; |
1287 | for (i = hmask; i >= 0; i--) | 1283 | for (i = hmask; i >= 0; i--) |
@@ -1313,6 +1309,9 @@ static void xfrm_hash_rebuild(struct work_struct *work) | |||
1313 | newpos = NULL; | 1309 | newpos = NULL; |
1314 | chain = policy_hash_bysel(net, &policy->selector, | 1310 | chain = policy_hash_bysel(net, &policy->selector, |
1315 | policy->family, dir); | 1311 | policy->family, dir); |
1312 | |||
1313 | hlist_del_rcu(&policy->bydst); | ||
1314 | |||
1316 | if (!chain) { | 1315 | if (!chain) { |
1317 | void *p = xfrm_policy_inexact_insert(policy, dir, 0); | 1316 | void *p = xfrm_policy_inexact_insert(policy, dir, 0); |
1318 | 1317 | ||
@@ -1334,6 +1333,7 @@ static void xfrm_hash_rebuild(struct work_struct *work) | |||
1334 | 1333 | ||
1335 | out_unlock: | 1334 | out_unlock: |
1336 | __xfrm_policy_inexact_flush(net); | 1335 | __xfrm_policy_inexact_flush(net); |
1336 | write_seqcount_end(&xfrm_policy_hash_generation); | ||
1337 | spin_unlock_bh(&net->xfrm.xfrm_policy_lock); | 1337 | spin_unlock_bh(&net->xfrm.xfrm_policy_lock); |
1338 | 1338 | ||
1339 | mutex_unlock(&hash_resize_mutex); | 1339 | mutex_unlock(&hash_resize_mutex); |
@@ -2600,7 +2600,10 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
2600 | dst_copy_metrics(dst1, dst); | 2600 | dst_copy_metrics(dst1, dst); |
2601 | 2601 | ||
2602 | if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { | 2602 | if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { |
2603 | __u32 mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); | 2603 | __u32 mark = 0; |
2604 | |||
2605 | if (xfrm[i]->props.smark.v || xfrm[i]->props.smark.m) | ||
2606 | mark = xfrm_smark_get(fl->flowi_mark, xfrm[i]); | ||
2604 | 2607 | ||
2605 | family = xfrm[i]->props.family; | 2608 | family = xfrm[i]->props.family; |
2606 | dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif, | 2609 | dst = xfrm_dst_lookup(xfrm[i], tos, fl->flowi_oif, |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 277c1c46fe94..c6d26afcf89d 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -1488,10 +1488,15 @@ static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family) | |||
1488 | if (!ut[i].family) | 1488 | if (!ut[i].family) |
1489 | ut[i].family = family; | 1489 | ut[i].family = family; |
1490 | 1490 | ||
1491 | if ((ut[i].mode == XFRM_MODE_TRANSPORT) && | 1491 | switch (ut[i].mode) { |
1492 | (ut[i].family != prev_family)) | 1492 | case XFRM_MODE_TUNNEL: |
1493 | return -EINVAL; | 1493 | case XFRM_MODE_BEET: |
1494 | 1494 | break; | |
1495 | default: | ||
1496 | if (ut[i].family != prev_family) | ||
1497 | return -EINVAL; | ||
1498 | break; | ||
1499 | } | ||
1495 | if (ut[i].mode >= XFRM_MODE_MAX) | 1500 | if (ut[i].mode >= XFRM_MODE_MAX) |
1496 | return -EINVAL; | 1501 | return -EINVAL; |
1497 | 1502 | ||
diff --git a/samples/mei/mei-amt-version.c b/samples/mei/mei-amt-version.c index 33e67bd1dc34..32234481ad7d 100644 --- a/samples/mei/mei-amt-version.c +++ b/samples/mei/mei-amt-version.c | |||
@@ -117,7 +117,7 @@ static bool mei_init(struct mei *me, const uuid_le *guid, | |||
117 | 117 | ||
118 | me->verbose = verbose; | 118 | me->verbose = verbose; |
119 | 119 | ||
120 | me->fd = open("/dev/mei", O_RDWR); | 120 | me->fd = open("/dev/mei0", O_RDWR); |
121 | if (me->fd == -1) { | 121 | if (me->fd == -1) { |
122 | mei_err(me, "Cannot establish a handle to the Intel MEI driver\n"); | 122 | mei_err(me, "Cannot establish a handle to the Intel MEI driver\n"); |
123 | goto err; | 123 | goto err; |
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index 08c88de0ffda..11975ec8d566 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c | |||
@@ -1444,7 +1444,10 @@ check: | |||
1444 | new = aa_label_merge(label, target, GFP_KERNEL); | 1444 | new = aa_label_merge(label, target, GFP_KERNEL); |
1445 | if (IS_ERR_OR_NULL(new)) { | 1445 | if (IS_ERR_OR_NULL(new)) { |
1446 | info = "failed to build target label"; | 1446 | info = "failed to build target label"; |
1447 | error = PTR_ERR(new); | 1447 | if (!new) |
1448 | error = -ENOMEM; | ||
1449 | else | ||
1450 | error = PTR_ERR(new); | ||
1448 | new = NULL; | 1451 | new = NULL; |
1449 | perms.allow = 0; | 1452 | perms.allow = 0; |
1450 | goto audit; | 1453 | goto audit; |
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 2c010874329f..8db1731d046a 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c | |||
@@ -1599,12 +1599,14 @@ static unsigned int apparmor_ipv4_postroute(void *priv, | |||
1599 | return apparmor_ip_postroute(priv, skb, state); | 1599 | return apparmor_ip_postroute(priv, skb, state); |
1600 | } | 1600 | } |
1601 | 1601 | ||
1602 | #if IS_ENABLED(CONFIG_IPV6) | ||
1602 | static unsigned int apparmor_ipv6_postroute(void *priv, | 1603 | static unsigned int apparmor_ipv6_postroute(void *priv, |
1603 | struct sk_buff *skb, | 1604 | struct sk_buff *skb, |
1604 | const struct nf_hook_state *state) | 1605 | const struct nf_hook_state *state) |
1605 | { | 1606 | { |
1606 | return apparmor_ip_postroute(priv, skb, state); | 1607 | return apparmor_ip_postroute(priv, skb, state); |
1607 | } | 1608 | } |
1609 | #endif | ||
1608 | 1610 | ||
1609 | static const struct nf_hook_ops apparmor_nf_ops[] = { | 1611 | static const struct nf_hook_ops apparmor_nf_ops[] = { |
1610 | { | 1612 | { |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index 40013b26f671..6c0b30391ba9 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -2177,16 +2177,11 @@ snd_pcm_sframes_t __snd_pcm_lib_xfer(struct snd_pcm_substream *substream, | |||
2177 | snd_pcm_update_hw_ptr(substream); | 2177 | snd_pcm_update_hw_ptr(substream); |
2178 | 2178 | ||
2179 | if (!is_playback && | 2179 | if (!is_playback && |
2180 | runtime->status->state == SNDRV_PCM_STATE_PREPARED) { | 2180 | runtime->status->state == SNDRV_PCM_STATE_PREPARED && |
2181 | if (size >= runtime->start_threshold) { | 2181 | size >= runtime->start_threshold) { |
2182 | err = snd_pcm_start(substream); | 2182 | err = snd_pcm_start(substream); |
2183 | if (err < 0) | 2183 | if (err < 0) |
2184 | goto _end_unlock; | ||
2185 | } else { | ||
2186 | /* nothing to do */ | ||
2187 | err = 0; | ||
2188 | goto _end_unlock; | 2184 | goto _end_unlock; |
2189 | } | ||
2190 | } | 2185 | } |
2191 | 2186 | ||
2192 | avail = snd_pcm_avail(substream); | 2187 | avail = snd_pcm_avail(substream); |
diff --git a/sound/pci/hda/hda_bind.c b/sound/pci/hda/hda_bind.c index 9174f1b3a987..1ec706ced75c 100644 --- a/sound/pci/hda/hda_bind.c +++ b/sound/pci/hda/hda_bind.c | |||
@@ -115,7 +115,8 @@ static int hda_codec_driver_probe(struct device *dev) | |||
115 | err = snd_hda_codec_build_controls(codec); | 115 | err = snd_hda_codec_build_controls(codec); |
116 | if (err < 0) | 116 | if (err < 0) |
117 | goto error_module; | 117 | goto error_module; |
118 | if (codec->card->registered) { | 118 | /* only register after the bus probe finished; otherwise it's racy */ |
119 | if (!codec->bus->bus_probing && codec->card->registered) { | ||
119 | err = snd_card_register(codec->card); | 120 | err = snd_card_register(codec->card); |
120 | if (err < 0) | 121 | if (err < 0) |
121 | goto error_module; | 122 | goto error_module; |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index e784130ea4e0..e5c49003e75f 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2185,6 +2185,7 @@ static int azx_probe_continue(struct azx *chip) | |||
2185 | int dev = chip->dev_index; | 2185 | int dev = chip->dev_index; |
2186 | int err; | 2186 | int err; |
2187 | 2187 | ||
2188 | to_hda_bus(bus)->bus_probing = 1; | ||
2188 | hda->probe_continued = 1; | 2189 | hda->probe_continued = 1; |
2189 | 2190 | ||
2190 | /* bind with i915 if needed */ | 2191 | /* bind with i915 if needed */ |
@@ -2269,6 +2270,7 @@ out_free: | |||
2269 | if (err < 0) | 2270 | if (err < 0) |
2270 | hda->init_failed = 1; | 2271 | hda->init_failed = 1; |
2271 | complete_all(&hda->probe_wait); | 2272 | complete_all(&hda->probe_wait); |
2273 | to_hda_bus(bus)->bus_probing = 0; | ||
2272 | return err; | 2274 | return err; |
2273 | } | 2275 | } |
2274 | 2276 | ||
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index e5bdbc245682..29882bda7632 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c | |||
@@ -8451,8 +8451,10 @@ static void ca0132_free(struct hda_codec *codec) | |||
8451 | ca0132_exit_chip(codec); | 8451 | ca0132_exit_chip(codec); |
8452 | 8452 | ||
8453 | snd_hda_power_down(codec); | 8453 | snd_hda_power_down(codec); |
8454 | if (IS_ENABLED(CONFIG_PCI) && spec->mem_base) | 8454 | #ifdef CONFIG_PCI |
8455 | if (spec->mem_base) | ||
8455 | pci_iounmap(codec->bus->pci, spec->mem_base); | 8456 | pci_iounmap(codec->bus->pci, spec->mem_base); |
8457 | #endif | ||
8456 | kfree(spec->spec_init_verbs); | 8458 | kfree(spec->spec_init_verbs); |
8457 | kfree(codec->spec); | 8459 | kfree(codec->spec); |
8458 | } | 8460 | } |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 152f54137082..a4ee7656d9ee 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -924,6 +924,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = { | |||
924 | SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), | 924 | SND_PCI_QUIRK(0x103c, 0x807C, "HP EliteBook 820 G3", CXT_FIXUP_HP_DOCK), |
925 | SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), | 925 | SND_PCI_QUIRK(0x103c, 0x80FD, "HP ProBook 640 G2", CXT_FIXUP_HP_DOCK), |
926 | SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), | 926 | SND_PCI_QUIRK(0x103c, 0x828c, "HP EliteBook 840 G4", CXT_FIXUP_HP_DOCK), |
927 | SND_PCI_QUIRK(0x103c, 0x83b2, "HP EliteBook 840 G5", CXT_FIXUP_HP_DOCK), | ||
927 | SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), | 928 | SND_PCI_QUIRK(0x103c, 0x83b3, "HP EliteBook 830 G5", CXT_FIXUP_HP_DOCK), |
928 | SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), | 929 | SND_PCI_QUIRK(0x103c, 0x83d3, "HP ProBook 640 G4", CXT_FIXUP_HP_DOCK), |
929 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), | 930 | SND_PCI_QUIRK(0x103c, 0x8174, "HP Spectre x360", CXT_FIXUP_HP_SPECTRE), |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index b4f472157ebd..6df758adff84 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -117,6 +117,7 @@ struct alc_spec { | |||
117 | int codec_variant; /* flag for other variants */ | 117 | int codec_variant; /* flag for other variants */ |
118 | unsigned int has_alc5505_dsp:1; | 118 | unsigned int has_alc5505_dsp:1; |
119 | unsigned int no_depop_delay:1; | 119 | unsigned int no_depop_delay:1; |
120 | unsigned int done_hp_init:1; | ||
120 | 121 | ||
121 | /* for PLL fix */ | 122 | /* for PLL fix */ |
122 | hda_nid_t pll_nid; | 123 | hda_nid_t pll_nid; |
@@ -514,6 +515,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type) | |||
514 | } | 515 | } |
515 | } | 516 | } |
516 | 517 | ||
518 | /* get a primary headphone pin if available */ | ||
519 | static hda_nid_t alc_get_hp_pin(struct alc_spec *spec) | ||
520 | { | ||
521 | if (spec->gen.autocfg.hp_pins[0]) | ||
522 | return spec->gen.autocfg.hp_pins[0]; | ||
523 | if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT) | ||
524 | return spec->gen.autocfg.line_out_pins[0]; | ||
525 | return 0; | ||
526 | } | ||
517 | 527 | ||
518 | /* | 528 | /* |
519 | * Realtek SSID verification | 529 | * Realtek SSID verification |
@@ -724,9 +734,7 @@ do_sku: | |||
724 | * 15 : 1 --> enable the function "Mute internal speaker | 734 | * 15 : 1 --> enable the function "Mute internal speaker |
725 | * when the external headphone out jack is plugged" | 735 | * when the external headphone out jack is plugged" |
726 | */ | 736 | */ |
727 | if (!spec->gen.autocfg.hp_pins[0] && | 737 | if (!alc_get_hp_pin(spec)) { |
728 | !(spec->gen.autocfg.line_out_pins[0] && | ||
729 | spec->gen.autocfg.line_out_type == AUTO_PIN_HP_OUT)) { | ||
730 | hda_nid_t nid; | 738 | hda_nid_t nid; |
731 | tmp = (ass >> 11) & 0x3; /* HP to chassis */ | 739 | tmp = (ass >> 11) & 0x3; /* HP to chassis */ |
732 | nid = ports[tmp]; | 740 | nid = ports[tmp]; |
@@ -2958,7 +2966,7 @@ static void alc282_restore_default_value(struct hda_codec *codec) | |||
2958 | static void alc282_init(struct hda_codec *codec) | 2966 | static void alc282_init(struct hda_codec *codec) |
2959 | { | 2967 | { |
2960 | struct alc_spec *spec = codec->spec; | 2968 | struct alc_spec *spec = codec->spec; |
2961 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 2969 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
2962 | bool hp_pin_sense; | 2970 | bool hp_pin_sense; |
2963 | int coef78; | 2971 | int coef78; |
2964 | 2972 | ||
@@ -2995,7 +3003,7 @@ static void alc282_init(struct hda_codec *codec) | |||
2995 | static void alc282_shutup(struct hda_codec *codec) | 3003 | static void alc282_shutup(struct hda_codec *codec) |
2996 | { | 3004 | { |
2997 | struct alc_spec *spec = codec->spec; | 3005 | struct alc_spec *spec = codec->spec; |
2998 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3006 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
2999 | bool hp_pin_sense; | 3007 | bool hp_pin_sense; |
3000 | int coef78; | 3008 | int coef78; |
3001 | 3009 | ||
@@ -3073,14 +3081,9 @@ static void alc283_restore_default_value(struct hda_codec *codec) | |||
3073 | static void alc283_init(struct hda_codec *codec) | 3081 | static void alc283_init(struct hda_codec *codec) |
3074 | { | 3082 | { |
3075 | struct alc_spec *spec = codec->spec; | 3083 | struct alc_spec *spec = codec->spec; |
3076 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3084 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3077 | bool hp_pin_sense; | 3085 | bool hp_pin_sense; |
3078 | 3086 | ||
3079 | if (!spec->gen.autocfg.hp_outs) { | ||
3080 | if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT) | ||
3081 | hp_pin = spec->gen.autocfg.line_out_pins[0]; | ||
3082 | } | ||
3083 | |||
3084 | alc283_restore_default_value(codec); | 3087 | alc283_restore_default_value(codec); |
3085 | 3088 | ||
3086 | if (!hp_pin) | 3089 | if (!hp_pin) |
@@ -3114,14 +3117,9 @@ static void alc283_init(struct hda_codec *codec) | |||
3114 | static void alc283_shutup(struct hda_codec *codec) | 3117 | static void alc283_shutup(struct hda_codec *codec) |
3115 | { | 3118 | { |
3116 | struct alc_spec *spec = codec->spec; | 3119 | struct alc_spec *spec = codec->spec; |
3117 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3120 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3118 | bool hp_pin_sense; | 3121 | bool hp_pin_sense; |
3119 | 3122 | ||
3120 | if (!spec->gen.autocfg.hp_outs) { | ||
3121 | if (spec->gen.autocfg.line_out_type == AC_JACK_HP_OUT) | ||
3122 | hp_pin = spec->gen.autocfg.line_out_pins[0]; | ||
3123 | } | ||
3124 | |||
3125 | if (!hp_pin) { | 3123 | if (!hp_pin) { |
3126 | alc269_shutup(codec); | 3124 | alc269_shutup(codec); |
3127 | return; | 3125 | return; |
@@ -3155,7 +3153,7 @@ static void alc283_shutup(struct hda_codec *codec) | |||
3155 | static void alc256_init(struct hda_codec *codec) | 3153 | static void alc256_init(struct hda_codec *codec) |
3156 | { | 3154 | { |
3157 | struct alc_spec *spec = codec->spec; | 3155 | struct alc_spec *spec = codec->spec; |
3158 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3156 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3159 | bool hp_pin_sense; | 3157 | bool hp_pin_sense; |
3160 | 3158 | ||
3161 | if (!hp_pin) | 3159 | if (!hp_pin) |
@@ -3191,7 +3189,7 @@ static void alc256_init(struct hda_codec *codec) | |||
3191 | static void alc256_shutup(struct hda_codec *codec) | 3189 | static void alc256_shutup(struct hda_codec *codec) |
3192 | { | 3190 | { |
3193 | struct alc_spec *spec = codec->spec; | 3191 | struct alc_spec *spec = codec->spec; |
3194 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3192 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3195 | bool hp_pin_sense; | 3193 | bool hp_pin_sense; |
3196 | 3194 | ||
3197 | if (!hp_pin) { | 3195 | if (!hp_pin) { |
@@ -3227,7 +3225,7 @@ static void alc256_shutup(struct hda_codec *codec) | |||
3227 | static void alc225_init(struct hda_codec *codec) | 3225 | static void alc225_init(struct hda_codec *codec) |
3228 | { | 3226 | { |
3229 | struct alc_spec *spec = codec->spec; | 3227 | struct alc_spec *spec = codec->spec; |
3230 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3228 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3231 | bool hp1_pin_sense, hp2_pin_sense; | 3229 | bool hp1_pin_sense, hp2_pin_sense; |
3232 | 3230 | ||
3233 | if (!hp_pin) | 3231 | if (!hp_pin) |
@@ -3270,7 +3268,7 @@ static void alc225_init(struct hda_codec *codec) | |||
3270 | static void alc225_shutup(struct hda_codec *codec) | 3268 | static void alc225_shutup(struct hda_codec *codec) |
3271 | { | 3269 | { |
3272 | struct alc_spec *spec = codec->spec; | 3270 | struct alc_spec *spec = codec->spec; |
3273 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3271 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3274 | bool hp1_pin_sense, hp2_pin_sense; | 3272 | bool hp1_pin_sense, hp2_pin_sense; |
3275 | 3273 | ||
3276 | if (!hp_pin) { | 3274 | if (!hp_pin) { |
@@ -3314,7 +3312,7 @@ static void alc225_shutup(struct hda_codec *codec) | |||
3314 | static void alc_default_init(struct hda_codec *codec) | 3312 | static void alc_default_init(struct hda_codec *codec) |
3315 | { | 3313 | { |
3316 | struct alc_spec *spec = codec->spec; | 3314 | struct alc_spec *spec = codec->spec; |
3317 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3315 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3318 | bool hp_pin_sense; | 3316 | bool hp_pin_sense; |
3319 | 3317 | ||
3320 | if (!hp_pin) | 3318 | if (!hp_pin) |
@@ -3343,7 +3341,7 @@ static void alc_default_init(struct hda_codec *codec) | |||
3343 | static void alc_default_shutup(struct hda_codec *codec) | 3341 | static void alc_default_shutup(struct hda_codec *codec) |
3344 | { | 3342 | { |
3345 | struct alc_spec *spec = codec->spec; | 3343 | struct alc_spec *spec = codec->spec; |
3346 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 3344 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
3347 | bool hp_pin_sense; | 3345 | bool hp_pin_sense; |
3348 | 3346 | ||
3349 | if (!hp_pin) { | 3347 | if (!hp_pin) { |
@@ -3372,6 +3370,48 @@ static void alc_default_shutup(struct hda_codec *codec) | |||
3372 | snd_hda_shutup_pins(codec); | 3370 | snd_hda_shutup_pins(codec); |
3373 | } | 3371 | } |
3374 | 3372 | ||
3373 | static void alc294_hp_init(struct hda_codec *codec) | ||
3374 | { | ||
3375 | struct alc_spec *spec = codec->spec; | ||
3376 | hda_nid_t hp_pin = alc_get_hp_pin(spec); | ||
3377 | int i, val; | ||
3378 | |||
3379 | if (!hp_pin) | ||
3380 | return; | ||
3381 | |||
3382 | snd_hda_codec_write(codec, hp_pin, 0, | ||
3383 | AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); | ||
3384 | |||
3385 | msleep(100); | ||
3386 | |||
3387 | snd_hda_codec_write(codec, hp_pin, 0, | ||
3388 | AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); | ||
3389 | |||
3390 | alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */ | ||
3391 | alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */ | ||
3392 | |||
3393 | /* Wait for depop procedure finish */ | ||
3394 | val = alc_read_coefex_idx(codec, 0x58, 0x01); | ||
3395 | for (i = 0; i < 20 && val & 0x0080; i++) { | ||
3396 | msleep(50); | ||
3397 | val = alc_read_coefex_idx(codec, 0x58, 0x01); | ||
3398 | } | ||
3399 | /* Set HP depop to auto mode */ | ||
3400 | alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b); | ||
3401 | msleep(50); | ||
3402 | } | ||
3403 | |||
3404 | static void alc294_init(struct hda_codec *codec) | ||
3405 | { | ||
3406 | struct alc_spec *spec = codec->spec; | ||
3407 | |||
3408 | if (!spec->done_hp_init) { | ||
3409 | alc294_hp_init(codec); | ||
3410 | spec->done_hp_init = true; | ||
3411 | } | ||
3412 | alc_default_init(codec); | ||
3413 | } | ||
3414 | |||
3375 | static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg, | 3415 | static void alc5505_coef_set(struct hda_codec *codec, unsigned int index_reg, |
3376 | unsigned int val) | 3416 | unsigned int val) |
3377 | { | 3417 | { |
@@ -4737,7 +4777,7 @@ static void alc_update_headset_mode(struct hda_codec *codec) | |||
4737 | struct alc_spec *spec = codec->spec; | 4777 | struct alc_spec *spec = codec->spec; |
4738 | 4778 | ||
4739 | hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]]; | 4779 | hda_nid_t mux_pin = spec->gen.imux_pins[spec->gen.cur_mux[0]]; |
4740 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | 4780 | hda_nid_t hp_pin = alc_get_hp_pin(spec); |
4741 | 4781 | ||
4742 | int new_headset_mode; | 4782 | int new_headset_mode; |
4743 | 4783 | ||
@@ -5016,7 +5056,7 @@ static void alc_fixup_tpt470_dock(struct hda_codec *codec, | |||
5016 | static void alc_shutup_dell_xps13(struct hda_codec *codec) | 5056 | static void alc_shutup_dell_xps13(struct hda_codec *codec) |
5017 | { | 5057 | { |
5018 | struct alc_spec *spec = codec->spec; | 5058 | struct alc_spec *spec = codec->spec; |
5019 | int hp_pin = spec->gen.autocfg.hp_pins[0]; | 5059 | int hp_pin = alc_get_hp_pin(spec); |
5020 | 5060 | ||
5021 | /* Prevent pop noises when headphones are plugged in */ | 5061 | /* Prevent pop noises when headphones are plugged in */ |
5022 | snd_hda_codec_write(codec, hp_pin, 0, | 5062 | snd_hda_codec_write(codec, hp_pin, 0, |
@@ -5109,7 +5149,7 @@ static void alc271_hp_gate_mic_jack(struct hda_codec *codec, | |||
5109 | 5149 | ||
5110 | if (action == HDA_FIXUP_ACT_PROBE) { | 5150 | if (action == HDA_FIXUP_ACT_PROBE) { |
5111 | int mic_pin = find_ext_mic_pin(codec); | 5151 | int mic_pin = find_ext_mic_pin(codec); |
5112 | int hp_pin = spec->gen.autocfg.hp_pins[0]; | 5152 | int hp_pin = alc_get_hp_pin(spec); |
5113 | 5153 | ||
5114 | if (snd_BUG_ON(!mic_pin || !hp_pin)) | 5154 | if (snd_BUG_ON(!mic_pin || !hp_pin)) |
5115 | return; | 5155 | return; |
@@ -5591,6 +5631,7 @@ enum { | |||
5591 | ALC294_FIXUP_ASUS_HEADSET_MIC, | 5631 | ALC294_FIXUP_ASUS_HEADSET_MIC, |
5592 | ALC294_FIXUP_ASUS_SPK, | 5632 | ALC294_FIXUP_ASUS_SPK, |
5593 | ALC225_FIXUP_HEADSET_JACK, | 5633 | ALC225_FIXUP_HEADSET_JACK, |
5634 | ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE, | ||
5594 | }; | 5635 | }; |
5595 | 5636 | ||
5596 | static const struct hda_fixup alc269_fixups[] = { | 5637 | static const struct hda_fixup alc269_fixups[] = { |
@@ -6537,6 +6578,15 @@ static const struct hda_fixup alc269_fixups[] = { | |||
6537 | .type = HDA_FIXUP_FUNC, | 6578 | .type = HDA_FIXUP_FUNC, |
6538 | .v.func = alc_fixup_headset_jack, | 6579 | .v.func = alc_fixup_headset_jack, |
6539 | }, | 6580 | }, |
6581 | [ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE] = { | ||
6582 | .type = HDA_FIXUP_PINS, | ||
6583 | .v.pins = (const struct hda_pintbl[]) { | ||
6584 | { 0x1a, 0x01a1913c }, /* use as headset mic, without its own jack detect */ | ||
6585 | { } | ||
6586 | }, | ||
6587 | .chained = true, | ||
6588 | .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC | ||
6589 | }, | ||
6540 | }; | 6590 | }; |
6541 | 6591 | ||
6542 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { | 6592 | static const struct snd_pci_quirk alc269_fixup_tbl[] = { |
@@ -6715,6 +6765,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
6715 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), | 6765 | SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), |
6716 | SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), | 6766 | SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), |
6717 | SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), | 6767 | SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), |
6768 | SND_PCI_QUIRK(0x1558, 0x1325, "System76 Darter Pro (darp5)", ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE), | ||
6718 | SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), | 6769 | SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC233_FIXUP_LENOVO_MULTI_CODECS), |
6719 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), | 6770 | SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE), |
6720 | SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), | 6771 | SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE), |
@@ -7373,37 +7424,6 @@ static void alc269_fill_coef(struct hda_codec *codec) | |||
7373 | alc_update_coef_idx(codec, 0x4, 0, 1<<11); | 7424 | alc_update_coef_idx(codec, 0x4, 0, 1<<11); |
7374 | } | 7425 | } |
7375 | 7426 | ||
7376 | static void alc294_hp_init(struct hda_codec *codec) | ||
7377 | { | ||
7378 | struct alc_spec *spec = codec->spec; | ||
7379 | hda_nid_t hp_pin = spec->gen.autocfg.hp_pins[0]; | ||
7380 | int i, val; | ||
7381 | |||
7382 | if (!hp_pin) | ||
7383 | return; | ||
7384 | |||
7385 | snd_hda_codec_write(codec, hp_pin, 0, | ||
7386 | AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); | ||
7387 | |||
7388 | msleep(100); | ||
7389 | |||
7390 | snd_hda_codec_write(codec, hp_pin, 0, | ||
7391 | AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0); | ||
7392 | |||
7393 | alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */ | ||
7394 | alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */ | ||
7395 | |||
7396 | /* Wait for depop procedure finish */ | ||
7397 | val = alc_read_coefex_idx(codec, 0x58, 0x01); | ||
7398 | for (i = 0; i < 20 && val & 0x0080; i++) { | ||
7399 | msleep(50); | ||
7400 | val = alc_read_coefex_idx(codec, 0x58, 0x01); | ||
7401 | } | ||
7402 | /* Set HP depop to auto mode */ | ||
7403 | alc_update_coef_idx(codec, 0x6f, 0x000f, 0x000b); | ||
7404 | msleep(50); | ||
7405 | } | ||
7406 | |||
7407 | /* | 7427 | /* |
7408 | */ | 7428 | */ |
7409 | static int patch_alc269(struct hda_codec *codec) | 7429 | static int patch_alc269(struct hda_codec *codec) |
@@ -7529,7 +7549,7 @@ static int patch_alc269(struct hda_codec *codec) | |||
7529 | spec->codec_variant = ALC269_TYPE_ALC294; | 7549 | spec->codec_variant = ALC269_TYPE_ALC294; |
7530 | spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ | 7550 | spec->gen.mixer_nid = 0; /* ALC2x4 does not have any loopback mixer path */ |
7531 | alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ | 7551 | alc_update_coef_idx(codec, 0x6b, 0x0018, (1<<4) | (1<<3)); /* UAJ MIC Vref control by verb */ |
7532 | alc294_hp_init(codec); | 7552 | spec->init_hook = alc294_init; |
7533 | break; | 7553 | break; |
7534 | case 0x10ec0300: | 7554 | case 0x10ec0300: |
7535 | spec->codec_variant = ALC269_TYPE_ALC300; | 7555 | spec->codec_variant = ALC269_TYPE_ALC300; |
@@ -7541,7 +7561,7 @@ static int patch_alc269(struct hda_codec *codec) | |||
7541 | spec->codec_variant = ALC269_TYPE_ALC700; | 7561 | spec->codec_variant = ALC269_TYPE_ALC700; |
7542 | spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ | 7562 | spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */ |
7543 | alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ | 7563 | alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */ |
7544 | alc294_hp_init(codec); | 7564 | spec->init_hook = alc294_init; |
7545 | break; | 7565 | break; |
7546 | 7566 | ||
7547 | } | 7567 | } |
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index d00734d31e04..e5b6769b9797 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c | |||
@@ -795,6 +795,8 @@ static int hdmi_codec_probe(struct platform_device *pdev) | |||
795 | if (hcd->spdif) | 795 | if (hcd->spdif) |
796 | hcp->daidrv[i] = hdmi_spdif_dai; | 796 | hcp->daidrv[i] = hdmi_spdif_dai; |
797 | 797 | ||
798 | dev_set_drvdata(dev, hcp); | ||
799 | |||
798 | ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv, | 800 | ret = devm_snd_soc_register_component(dev, &hdmi_driver, hcp->daidrv, |
799 | dai_count); | 801 | dai_count); |
800 | if (ret) { | 802 | if (ret) { |
@@ -802,8 +804,6 @@ static int hdmi_codec_probe(struct platform_device *pdev) | |||
802 | __func__, ret); | 804 | __func__, ret); |
803 | return ret; | 805 | return ret; |
804 | } | 806 | } |
805 | |||
806 | dev_set_drvdata(dev, hcp); | ||
807 | return 0; | 807 | return 0; |
808 | } | 808 | } |
809 | 809 | ||
diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 89c43b26c379..a9b91bcfcc09 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c | |||
@@ -1778,7 +1778,9 @@ static const struct snd_soc_dapm_route rt5682_dapm_routes[] = { | |||
1778 | {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc}, | 1778 | {"ADC Stereo1 Filter", NULL, "ADC STO1 ASRC", is_using_asrc}, |
1779 | {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, | 1779 | {"DAC Stereo1 Filter", NULL, "DAC STO1 ASRC", is_using_asrc}, |
1780 | {"ADC STO1 ASRC", NULL, "AD ASRC"}, | 1780 | {"ADC STO1 ASRC", NULL, "AD ASRC"}, |
1781 | {"ADC STO1 ASRC", NULL, "DA ASRC"}, | ||
1781 | {"ADC STO1 ASRC", NULL, "CLKDET"}, | 1782 | {"ADC STO1 ASRC", NULL, "CLKDET"}, |
1783 | {"DAC STO1 ASRC", NULL, "AD ASRC"}, | ||
1782 | {"DAC STO1 ASRC", NULL, "DA ASRC"}, | 1784 | {"DAC STO1 ASRC", NULL, "DA ASRC"}, |
1783 | {"DAC STO1 ASRC", NULL, "CLKDET"}, | 1785 | {"DAC STO1 ASRC", NULL, "CLKDET"}, |
1784 | 1786 | ||
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c index d6c62aa13041..ce00fe2f6aae 100644 --- a/sound/soc/samsung/i2s.c +++ b/sound/soc/samsung/i2s.c | |||
@@ -700,6 +700,7 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, | |||
700 | { | 700 | { |
701 | struct i2s_dai *i2s = to_info(dai); | 701 | struct i2s_dai *i2s = to_info(dai); |
702 | u32 mod, mask = 0, val = 0; | 702 | u32 mod, mask = 0, val = 0; |
703 | struct clk *rclksrc; | ||
703 | unsigned long flags; | 704 | unsigned long flags; |
704 | 705 | ||
705 | WARN_ON(!pm_runtime_active(dai->dev)); | 706 | WARN_ON(!pm_runtime_active(dai->dev)); |
@@ -782,6 +783,10 @@ static int i2s_hw_params(struct snd_pcm_substream *substream, | |||
782 | 783 | ||
783 | i2s->frmclk = params_rate(params); | 784 | i2s->frmclk = params_rate(params); |
784 | 785 | ||
786 | rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC]; | ||
787 | if (rclksrc && !IS_ERR(rclksrc)) | ||
788 | i2s->rclk_srcrate = clk_get_rate(rclksrc); | ||
789 | |||
785 | return 0; | 790 | return 0; |
786 | } | 791 | } |
787 | 792 | ||
@@ -886,11 +891,6 @@ static int config_setup(struct i2s_dai *i2s) | |||
886 | return 0; | 891 | return 0; |
887 | 892 | ||
888 | if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { | 893 | if (!(i2s->quirks & QUIRK_NO_MUXPSR)) { |
889 | struct clk *rclksrc = i2s->clk_table[CLK_I2S_RCLK_SRC]; | ||
890 | |||
891 | if (rclksrc && !IS_ERR(rclksrc)) | ||
892 | i2s->rclk_srcrate = clk_get_rate(rclksrc); | ||
893 | |||
894 | psr = i2s->rclk_srcrate / i2s->frmclk / rfs; | 894 | psr = i2s->rclk_srcrate / i2s->frmclk / rfs; |
895 | writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); | 895 | writel(((psr - 1) << 8) | PSR_PSREN, i2s->addr + I2SPSR); |
896 | dev_dbg(&i2s->pdev->dev, | 896 | dev_dbg(&i2s->pdev->dev, |
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c index 59e250cc2e9d..e819e965e1db 100644 --- a/sound/soc/sh/rcar/core.c +++ b/sound/soc/sh/rcar/core.c | |||
@@ -1526,14 +1526,14 @@ int rsnd_kctrl_new(struct rsnd_mod *mod, | |||
1526 | int ret; | 1526 | int ret; |
1527 | 1527 | ||
1528 | /* | 1528 | /* |
1529 | * 1) Avoid duplicate register (ex. MIXer case) | 1529 | * 1) Avoid duplicate register for DVC with MIX case |
1530 | * 2) re-register if card was rebinded | 1530 | * 2) Allow duplicate register for MIX |
1531 | * 3) re-register if card was rebinded | ||
1531 | */ | 1532 | */ |
1532 | list_for_each_entry(kctrl, &card->controls, list) { | 1533 | list_for_each_entry(kctrl, &card->controls, list) { |
1533 | struct rsnd_kctrl_cfg *c = kctrl->private_data; | 1534 | struct rsnd_kctrl_cfg *c = kctrl->private_data; |
1534 | 1535 | ||
1535 | if (strcmp(kctrl->id.name, name) == 0 && | 1536 | if (c == cfg) |
1536 | c->mod == mod) | ||
1537 | return 0; | 1537 | return 0; |
1538 | } | 1538 | } |
1539 | 1539 | ||
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c index 45ef295743ec..f5afab631abb 100644 --- a/sound/soc/sh/rcar/ssi.c +++ b/sound/soc/sh/rcar/ssi.c | |||
@@ -286,7 +286,7 @@ static int rsnd_ssi_master_clk_start(struct rsnd_mod *mod, | |||
286 | if (rsnd_ssi_is_multi_slave(mod, io)) | 286 | if (rsnd_ssi_is_multi_slave(mod, io)) |
287 | return 0; | 287 | return 0; |
288 | 288 | ||
289 | if (ssi->usrcnt > 1) { | 289 | if (ssi->usrcnt > 0) { |
290 | if (ssi->rate != rate) { | 290 | if (ssi->rate != rate) { |
291 | dev_err(dev, "SSI parent/child should use same rate\n"); | 291 | dev_err(dev, "SSI parent/child should use same rate\n"); |
292 | return -EINVAL; | 292 | return -EINVAL; |
diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c index c5934adcfd01..c74991dd18ab 100644 --- a/sound/soc/sh/rcar/ssiu.c +++ b/sound/soc/sh/rcar/ssiu.c | |||
@@ -79,7 +79,7 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod, | |||
79 | break; | 79 | break; |
80 | case 9: | 80 | case 9: |
81 | for (i = 0; i < 4; i++) | 81 | for (i = 0; i < 4; i++) |
82 | rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << (id * 4)); | 82 | rsnd_mod_write(mod, SSI_SYS_STATUS((i * 2) + 1), 0xf << 4); |
83 | break; | 83 | break; |
84 | } | 84 | } |
85 | 85 | ||
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index aae450ba4f08..50617db05c46 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -735,12 +735,17 @@ static struct snd_soc_component *soc_find_component( | |||
735 | const struct device_node *of_node, const char *name) | 735 | const struct device_node *of_node, const char *name) |
736 | { | 736 | { |
737 | struct snd_soc_component *component; | 737 | struct snd_soc_component *component; |
738 | struct device_node *component_of_node; | ||
738 | 739 | ||
739 | lockdep_assert_held(&client_mutex); | 740 | lockdep_assert_held(&client_mutex); |
740 | 741 | ||
741 | for_each_component(component) { | 742 | for_each_component(component) { |
742 | if (of_node) { | 743 | if (of_node) { |
743 | if (component->dev->of_node == of_node) | 744 | component_of_node = component->dev->of_node; |
745 | if (!component_of_node && component->dev->parent) | ||
746 | component_of_node = component->dev->parent->of_node; | ||
747 | |||
748 | if (component_of_node == of_node) | ||
744 | return component; | 749 | return component; |
745 | } else if (name && strcmp(component->name, name) == 0) { | 750 | } else if (name && strcmp(component->name, name) == 0) { |
746 | return component; | 751 | return component; |
@@ -951,7 +956,7 @@ static void soc_remove_dai(struct snd_soc_dai *dai, int order) | |||
951 | { | 956 | { |
952 | int err; | 957 | int err; |
953 | 958 | ||
954 | if (!dai || !dai->probed || | 959 | if (!dai || !dai->probed || !dai->driver || |
955 | dai->driver->remove_order != order) | 960 | dai->driver->remove_order != order) |
956 | return; | 961 | return; |
957 | 962 | ||
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 2c4c13419539..20bad755888b 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -70,12 +70,16 @@ static int dapm_up_seq[] = { | |||
70 | [snd_soc_dapm_clock_supply] = 1, | 70 | [snd_soc_dapm_clock_supply] = 1, |
71 | [snd_soc_dapm_supply] = 2, | 71 | [snd_soc_dapm_supply] = 2, |
72 | [snd_soc_dapm_micbias] = 3, | 72 | [snd_soc_dapm_micbias] = 3, |
73 | [snd_soc_dapm_vmid] = 3, | ||
73 | [snd_soc_dapm_dai_link] = 2, | 74 | [snd_soc_dapm_dai_link] = 2, |
74 | [snd_soc_dapm_dai_in] = 4, | 75 | [snd_soc_dapm_dai_in] = 4, |
75 | [snd_soc_dapm_dai_out] = 4, | 76 | [snd_soc_dapm_dai_out] = 4, |
76 | [snd_soc_dapm_aif_in] = 4, | 77 | [snd_soc_dapm_aif_in] = 4, |
77 | [snd_soc_dapm_aif_out] = 4, | 78 | [snd_soc_dapm_aif_out] = 4, |
78 | [snd_soc_dapm_mic] = 5, | 79 | [snd_soc_dapm_mic] = 5, |
80 | [snd_soc_dapm_siggen] = 5, | ||
81 | [snd_soc_dapm_input] = 5, | ||
82 | [snd_soc_dapm_output] = 5, | ||
79 | [snd_soc_dapm_mux] = 6, | 83 | [snd_soc_dapm_mux] = 6, |
80 | [snd_soc_dapm_demux] = 6, | 84 | [snd_soc_dapm_demux] = 6, |
81 | [snd_soc_dapm_dac] = 7, | 85 | [snd_soc_dapm_dac] = 7, |
@@ -83,11 +87,19 @@ static int dapm_up_seq[] = { | |||
83 | [snd_soc_dapm_mixer] = 8, | 87 | [snd_soc_dapm_mixer] = 8, |
84 | [snd_soc_dapm_mixer_named_ctl] = 8, | 88 | [snd_soc_dapm_mixer_named_ctl] = 8, |
85 | [snd_soc_dapm_pga] = 9, | 89 | [snd_soc_dapm_pga] = 9, |
90 | [snd_soc_dapm_buffer] = 9, | ||
91 | [snd_soc_dapm_scheduler] = 9, | ||
92 | [snd_soc_dapm_effect] = 9, | ||
93 | [snd_soc_dapm_src] = 9, | ||
94 | [snd_soc_dapm_asrc] = 9, | ||
95 | [snd_soc_dapm_encoder] = 9, | ||
96 | [snd_soc_dapm_decoder] = 9, | ||
86 | [snd_soc_dapm_adc] = 10, | 97 | [snd_soc_dapm_adc] = 10, |
87 | [snd_soc_dapm_out_drv] = 11, | 98 | [snd_soc_dapm_out_drv] = 11, |
88 | [snd_soc_dapm_hp] = 11, | 99 | [snd_soc_dapm_hp] = 11, |
89 | [snd_soc_dapm_spk] = 11, | 100 | [snd_soc_dapm_spk] = 11, |
90 | [snd_soc_dapm_line] = 11, | 101 | [snd_soc_dapm_line] = 11, |
102 | [snd_soc_dapm_sink] = 11, | ||
91 | [snd_soc_dapm_kcontrol] = 12, | 103 | [snd_soc_dapm_kcontrol] = 12, |
92 | [snd_soc_dapm_post] = 13, | 104 | [snd_soc_dapm_post] = 13, |
93 | }; | 105 | }; |
@@ -100,13 +112,25 @@ static int dapm_down_seq[] = { | |||
100 | [snd_soc_dapm_spk] = 3, | 112 | [snd_soc_dapm_spk] = 3, |
101 | [snd_soc_dapm_line] = 3, | 113 | [snd_soc_dapm_line] = 3, |
102 | [snd_soc_dapm_out_drv] = 3, | 114 | [snd_soc_dapm_out_drv] = 3, |
115 | [snd_soc_dapm_sink] = 3, | ||
103 | [snd_soc_dapm_pga] = 4, | 116 | [snd_soc_dapm_pga] = 4, |
117 | [snd_soc_dapm_buffer] = 4, | ||
118 | [snd_soc_dapm_scheduler] = 4, | ||
119 | [snd_soc_dapm_effect] = 4, | ||
120 | [snd_soc_dapm_src] = 4, | ||
121 | [snd_soc_dapm_asrc] = 4, | ||
122 | [snd_soc_dapm_encoder] = 4, | ||
123 | [snd_soc_dapm_decoder] = 4, | ||
104 | [snd_soc_dapm_switch] = 5, | 124 | [snd_soc_dapm_switch] = 5, |
105 | [snd_soc_dapm_mixer_named_ctl] = 5, | 125 | [snd_soc_dapm_mixer_named_ctl] = 5, |
106 | [snd_soc_dapm_mixer] = 5, | 126 | [snd_soc_dapm_mixer] = 5, |
107 | [snd_soc_dapm_dac] = 6, | 127 | [snd_soc_dapm_dac] = 6, |
108 | [snd_soc_dapm_mic] = 7, | 128 | [snd_soc_dapm_mic] = 7, |
129 | [snd_soc_dapm_siggen] = 7, | ||
130 | [snd_soc_dapm_input] = 7, | ||
131 | [snd_soc_dapm_output] = 7, | ||
109 | [snd_soc_dapm_micbias] = 8, | 132 | [snd_soc_dapm_micbias] = 8, |
133 | [snd_soc_dapm_vmid] = 8, | ||
110 | [snd_soc_dapm_mux] = 9, | 134 | [snd_soc_dapm_mux] = 9, |
111 | [snd_soc_dapm_demux] = 9, | 135 | [snd_soc_dapm_demux] = 9, |
112 | [snd_soc_dapm_aif_in] = 10, | 136 | [snd_soc_dapm_aif_in] = 10, |
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index 045ef136903d..fc79ec6927e3 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c | |||
@@ -502,6 +502,7 @@ static void remove_dai(struct snd_soc_component *comp, | |||
502 | { | 502 | { |
503 | struct snd_soc_dai_driver *dai_drv = | 503 | struct snd_soc_dai_driver *dai_drv = |
504 | container_of(dobj, struct snd_soc_dai_driver, dobj); | 504 | container_of(dobj, struct snd_soc_dai_driver, dobj); |
505 | struct snd_soc_dai *dai; | ||
505 | 506 | ||
506 | if (pass != SOC_TPLG_PASS_PCM_DAI) | 507 | if (pass != SOC_TPLG_PASS_PCM_DAI) |
507 | return; | 508 | return; |
@@ -509,6 +510,10 @@ static void remove_dai(struct snd_soc_component *comp, | |||
509 | if (dobj->ops && dobj->ops->dai_unload) | 510 | if (dobj->ops && dobj->ops->dai_unload) |
510 | dobj->ops->dai_unload(comp, dobj); | 511 | dobj->ops->dai_unload(comp, dobj); |
511 | 512 | ||
513 | list_for_each_entry(dai, &comp->dai_list, list) | ||
514 | if (dai->driver == dai_drv) | ||
515 | dai->driver = NULL; | ||
516 | |||
512 | kfree(dai_drv->name); | 517 | kfree(dai_drv->name); |
513 | list_del(&dobj->list); | 518 | list_del(&dobj->list); |
514 | kfree(dai_drv); | 519 | kfree(dai_drv); |
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 382847154227..db114f3977e0 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
@@ -314,6 +314,9 @@ static int search_roland_implicit_fb(struct usb_device *dev, int ifnum, | |||
314 | return 0; | 314 | return 0; |
315 | } | 315 | } |
316 | 316 | ||
317 | /* Setup an implicit feedback endpoint from a quirk. Returns 0 if no quirk | ||
318 | * applies. Returns 1 if a quirk was found. | ||
319 | */ | ||
317 | static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, | 320 | static int set_sync_ep_implicit_fb_quirk(struct snd_usb_substream *subs, |
318 | struct usb_device *dev, | 321 | struct usb_device *dev, |
319 | struct usb_interface_descriptor *altsd, | 322 | struct usb_interface_descriptor *altsd, |
@@ -384,7 +387,7 @@ add_sync_ep: | |||
384 | 387 | ||
385 | subs->data_endpoint->sync_master = subs->sync_endpoint; | 388 | subs->data_endpoint->sync_master = subs->sync_endpoint; |
386 | 389 | ||
387 | return 0; | 390 | return 1; |
388 | } | 391 | } |
389 | 392 | ||
390 | static int set_sync_endpoint(struct snd_usb_substream *subs, | 393 | static int set_sync_endpoint(struct snd_usb_substream *subs, |
@@ -423,6 +426,10 @@ static int set_sync_endpoint(struct snd_usb_substream *subs, | |||
423 | if (err < 0) | 426 | if (err < 0) |
424 | return err; | 427 | return err; |
425 | 428 | ||
429 | /* endpoint set by quirk */ | ||
430 | if (err > 0) | ||
431 | return 0; | ||
432 | |||
426 | if (altsd->bNumEndpoints < 2) | 433 | if (altsd->bNumEndpoints < 2) |
427 | return 0; | 434 | return 0; |
428 | 435 | ||
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index ebbadb3a7094..7e65fe853ee3 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -1492,6 +1492,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, | |||
1492 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; | 1492 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; |
1493 | break; | 1493 | break; |
1494 | 1494 | ||
1495 | case USB_ID(0x10cb, 0x0103): /* The Bit Opus #3; with fp->dsd_raw */ | ||
1495 | case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */ | 1496 | case USB_ID(0x152a, 0x85de): /* SMSL D1 DAC */ |
1496 | case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */ | 1497 | case USB_ID(0x16d0, 0x09dd): /* Encore mDSD */ |
1497 | case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */ | 1498 | case USB_ID(0x0d8c, 0x0316): /* Hegel HD12 DSD */ |
@@ -1566,6 +1567,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, | |||
1566 | case 0x20b1: /* XMOS based devices */ | 1567 | case 0x20b1: /* XMOS based devices */ |
1567 | case 0x152a: /* Thesycon devices */ | 1568 | case 0x152a: /* Thesycon devices */ |
1568 | case 0x25ce: /* Mytek devices */ | 1569 | case 0x25ce: /* Mytek devices */ |
1570 | case 0x2ab6: /* T+A devices */ | ||
1569 | if (fp->dsd_raw) | 1571 | if (fp->dsd_raw) |
1570 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; | 1572 | return SNDRV_PCM_FMTBIT_DSD_U32_BE; |
1571 | break; | 1573 | break; |
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 897483457bf0..f7261fad45c1 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c | |||
@@ -297,10 +297,8 @@ char *get_fdinfo(int fd, const char *key) | |||
297 | snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd); | 297 | snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd); |
298 | 298 | ||
299 | fdi = fopen(path, "r"); | 299 | fdi = fopen(path, "r"); |
300 | if (!fdi) { | 300 | if (!fdi) |
301 | p_err("can't open fdinfo: %s", strerror(errno)); | ||
302 | return NULL; | 301 | return NULL; |
303 | } | ||
304 | 302 | ||
305 | while ((n = getline(&line, &line_n, fdi)) > 0) { | 303 | while ((n = getline(&line, &line_n, fdi)) > 0) { |
306 | char *value; | 304 | char *value; |
@@ -313,7 +311,6 @@ char *get_fdinfo(int fd, const char *key) | |||
313 | 311 | ||
314 | value = strchr(line, '\t'); | 312 | value = strchr(line, '\t'); |
315 | if (!value || !value[1]) { | 313 | if (!value || !value[1]) { |
316 | p_err("malformed fdinfo!?"); | ||
317 | free(line); | 314 | free(line); |
318 | return NULL; | 315 | return NULL; |
319 | } | 316 | } |
@@ -326,7 +323,6 @@ char *get_fdinfo(int fd, const char *key) | |||
326 | return line; | 323 | return line; |
327 | } | 324 | } |
328 | 325 | ||
329 | p_err("key '%s' not found in fdinfo", key); | ||
330 | free(line); | 326 | free(line); |
331 | fclose(fdi); | 327 | fclose(fdi); |
332 | return NULL; | 328 | return NULL; |
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c index 2037e3dc864b..1ef1ee2280a2 100644 --- a/tools/bpf/bpftool/map.c +++ b/tools/bpf/bpftool/map.c | |||
@@ -347,6 +347,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val, | |||
347 | return argv + i; | 347 | return argv + i; |
348 | } | 348 | } |
349 | 349 | ||
350 | /* on per cpu maps we must copy the provided value on all value instances */ | ||
351 | static void fill_per_cpu_value(struct bpf_map_info *info, void *value) | ||
352 | { | ||
353 | unsigned int i, n, step; | ||
354 | |||
355 | if (!map_is_per_cpu(info->type)) | ||
356 | return; | ||
357 | |||
358 | n = get_possible_cpus(); | ||
359 | step = round_up(info->value_size, 8); | ||
360 | for (i = 1; i < n; i++) | ||
361 | memcpy(value + i * step, value, info->value_size); | ||
362 | } | ||
363 | |||
350 | static int parse_elem(char **argv, struct bpf_map_info *info, | 364 | static int parse_elem(char **argv, struct bpf_map_info *info, |
351 | void *key, void *value, __u32 key_size, __u32 value_size, | 365 | void *key, void *value, __u32 key_size, __u32 value_size, |
352 | __u32 *flags, __u32 **value_fd) | 366 | __u32 *flags, __u32 **value_fd) |
@@ -426,6 +440,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info, | |||
426 | argv = parse_bytes(argv, "value", value, value_size); | 440 | argv = parse_bytes(argv, "value", value, value_size); |
427 | if (!argv) | 441 | if (!argv) |
428 | return -1; | 442 | return -1; |
443 | |||
444 | fill_per_cpu_value(info, value); | ||
429 | } | 445 | } |
430 | 446 | ||
431 | return parse_elem(argv, info, key, NULL, key_size, value_size, | 447 | return parse_elem(argv, info, key, NULL, key_size, value_size, |
@@ -497,10 +513,9 @@ static int show_map_close_json(int fd, struct bpf_map_info *info) | |||
497 | jsonw_uint_field(json_wtr, "owner_prog_type", | 513 | jsonw_uint_field(json_wtr, "owner_prog_type", |
498 | prog_type); | 514 | prog_type); |
499 | } | 515 | } |
500 | if (atoi(owner_jited)) | 516 | if (owner_jited) |
501 | jsonw_bool_field(json_wtr, "owner_jited", true); | 517 | jsonw_bool_field(json_wtr, "owner_jited", |
502 | else | 518 | !!atoi(owner_jited)); |
503 | jsonw_bool_field(json_wtr, "owner_jited", false); | ||
504 | 519 | ||
505 | free(owner_prog_type); | 520 | free(owner_prog_type); |
506 | free(owner_jited); | 521 | free(owner_jited); |
@@ -553,7 +568,8 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) | |||
553 | char *owner_prog_type = get_fdinfo(fd, "owner_prog_type"); | 568 | char *owner_prog_type = get_fdinfo(fd, "owner_prog_type"); |
554 | char *owner_jited = get_fdinfo(fd, "owner_jited"); | 569 | char *owner_jited = get_fdinfo(fd, "owner_jited"); |
555 | 570 | ||
556 | printf("\n\t"); | 571 | if (owner_prog_type || owner_jited) |
572 | printf("\n\t"); | ||
557 | if (owner_prog_type) { | 573 | if (owner_prog_type) { |
558 | unsigned int prog_type = atoi(owner_prog_type); | 574 | unsigned int prog_type = atoi(owner_prog_type); |
559 | 575 | ||
@@ -563,10 +579,9 @@ static int show_map_close_plain(int fd, struct bpf_map_info *info) | |||
563 | else | 579 | else |
564 | printf("owner_prog_type %d ", prog_type); | 580 | printf("owner_prog_type %d ", prog_type); |
565 | } | 581 | } |
566 | if (atoi(owner_jited)) | 582 | if (owner_jited) |
567 | printf("owner jited"); | 583 | printf("owner%s jited", |
568 | else | 584 | atoi(owner_jited) ? "" : " not"); |
569 | printf("owner not jited"); | ||
570 | 585 | ||
571 | free(owner_prog_type); | 586 | free(owner_prog_type); |
572 | free(owner_jited); | 587 | free(owner_jited); |
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 2d1bb7d6ff51..b54ed82b9589 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c | |||
@@ -78,13 +78,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size) | |||
78 | 78 | ||
79 | static int prog_fd_by_tag(unsigned char *tag) | 79 | static int prog_fd_by_tag(unsigned char *tag) |
80 | { | 80 | { |
81 | struct bpf_prog_info info = {}; | ||
82 | __u32 len = sizeof(info); | ||
83 | unsigned int id = 0; | 81 | unsigned int id = 0; |
84 | int err; | 82 | int err; |
85 | int fd; | 83 | int fd; |
86 | 84 | ||
87 | while (true) { | 85 | while (true) { |
86 | struct bpf_prog_info info = {}; | ||
87 | __u32 len = sizeof(info); | ||
88 | |||
88 | err = bpf_prog_get_next_id(id, &id); | 89 | err = bpf_prog_get_next_id(id, &id); |
89 | if (err) { | 90 | if (err) { |
90 | p_err("%s", strerror(errno)); | 91 | p_err("%s", strerror(errno)); |
diff --git a/tools/iio/iio_generic_buffer.c b/tools/iio/iio_generic_buffer.c index 3040830d7797..84545666a09c 100644 --- a/tools/iio/iio_generic_buffer.c +++ b/tools/iio/iio_generic_buffer.c | |||
@@ -330,7 +330,7 @@ static const struct option longopts[] = { | |||
330 | 330 | ||
331 | int main(int argc, char **argv) | 331 | int main(int argc, char **argv) |
332 | { | 332 | { |
333 | unsigned long long num_loops = 2; | 333 | long long num_loops = 2; |
334 | unsigned long timedelay = 1000000; | 334 | unsigned long timedelay = 1000000; |
335 | unsigned long buf_len = 128; | 335 | unsigned long buf_len = 128; |
336 | 336 | ||
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h index fd92ce8388fc..57aaeaf8e192 100644 --- a/tools/include/uapi/asm/bitsperlong.h +++ b/tools/include/uapi/asm/bitsperlong.h | |||
@@ -15,6 +15,8 @@ | |||
15 | #include "../../arch/ia64/include/uapi/asm/bitsperlong.h" | 15 | #include "../../arch/ia64/include/uapi/asm/bitsperlong.h" |
16 | #elif defined(__riscv) | 16 | #elif defined(__riscv) |
17 | #include "../../arch/riscv/include/uapi/asm/bitsperlong.h" | 17 | #include "../../arch/riscv/include/uapi/asm/bitsperlong.h" |
18 | #elif defined(__alpha__) | ||
19 | #include "../../arch/alpha/include/uapi/asm/bitsperlong.h" | ||
18 | #else | 20 | #else |
19 | #include <asm-generic/bitsperlong.h> | 21 | #include <asm-generic/bitsperlong.h> |
20 | #endif | 22 | #endif |
diff --git a/tools/include/uapi/linux/in.h b/tools/include/uapi/linux/in.h index f6052e70bf40..a55cb8b10165 100644 --- a/tools/include/uapi/linux/in.h +++ b/tools/include/uapi/linux/in.h | |||
@@ -268,7 +268,7 @@ struct sockaddr_in { | |||
268 | #define IN_MULTICAST(a) IN_CLASSD(a) | 268 | #define IN_MULTICAST(a) IN_CLASSD(a) |
269 | #define IN_MULTICAST_NET 0xe0000000 | 269 | #define IN_MULTICAST_NET 0xe0000000 |
270 | 270 | ||
271 | #define IN_BADCLASS(a) ((((long int) (a) ) == 0xffffffff) | 271 | #define IN_BADCLASS(a) (((long int) (a) ) == (long int)0xffffffff) |
272 | #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) | 272 | #define IN_EXPERIMENTAL(a) IN_BADCLASS((a)) |
273 | 273 | ||
274 | #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) | 274 | #define IN_CLASSE(a) ((((long int) (a)) & 0xf0000000) == 0xf0000000) |
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt index 095aebdc5bb7..e6150f21267d 100644 --- a/tools/perf/Documentation/perf-c2c.txt +++ b/tools/perf/Documentation/perf-c2c.txt | |||
@@ -19,8 +19,11 @@ C2C stands for Cache To Cache. | |||
19 | The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows | 19 | The perf c2c tool provides means for Shared Data C2C/HITM analysis. It allows |
20 | you to track down the cacheline contentions. | 20 | you to track down the cacheline contentions. |
21 | 21 | ||
22 | The tool is based on x86's load latency and precise store facility events | 22 | On x86, the tool is based on load latency and precise store facility events |
23 | provided by Intel CPUs. These events provide: | 23 | provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling |
24 | with thresholding feature. | ||
25 | |||
26 | These events provide: | ||
24 | - memory address of the access | 27 | - memory address of the access |
25 | - type of the access (load and store details) | 28 | - type of the access (load and store details) |
26 | - latency (in cycles) of the load access | 29 | - latency (in cycles) of the load access |
@@ -46,7 +49,7 @@ RECORD OPTIONS | |||
46 | 49 | ||
47 | -l:: | 50 | -l:: |
48 | --ldlat:: | 51 | --ldlat:: |
49 | Configure mem-loads latency. | 52 | Configure mem-loads latency. (x86 only) |
50 | 53 | ||
51 | -k:: | 54 | -k:: |
52 | --all-kernel:: | 55 | --all-kernel:: |
@@ -119,11 +122,16 @@ Following perf record options are configured by default: | |||
119 | -W,-d,--phys-data,--sample-cpu | 122 | -W,-d,--phys-data,--sample-cpu |
120 | 123 | ||
121 | Unless specified otherwise with '-e' option, following events are monitored by | 124 | Unless specified otherwise with '-e' option, following events are monitored by |
122 | default: | 125 | default on x86: |
123 | 126 | ||
124 | cpu/mem-loads,ldlat=30/P | 127 | cpu/mem-loads,ldlat=30/P |
125 | cpu/mem-stores/P | 128 | cpu/mem-stores/P |
126 | 129 | ||
130 | and following on PowerPC: | ||
131 | |||
132 | cpu/mem-loads/ | ||
133 | cpu/mem-stores/ | ||
134 | |||
127 | User can pass any 'perf record' option behind '--' mark, like (to enable | 135 | User can pass any 'perf record' option behind '--' mark, like (to enable |
128 | callchains and system wide monitoring): | 136 | callchains and system wide monitoring): |
129 | 137 | ||
diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt index f8d2167cf3e7..199ea0f0a6c0 100644 --- a/tools/perf/Documentation/perf-mem.txt +++ b/tools/perf/Documentation/perf-mem.txt | |||
@@ -82,7 +82,7 @@ RECORD OPTIONS | |||
82 | Be more verbose (show counter open errors, etc) | 82 | Be more verbose (show counter open errors, etc) |
83 | 83 | ||
84 | --ldlat <n>:: | 84 | --ldlat <n>:: |
85 | Specify desired latency for loads event. | 85 | Specify desired latency for loads event. (x86 only) |
86 | 86 | ||
87 | In addition, for report all perf report options are valid, and for record | 87 | In addition, for report all perf report options are valid, and for record |
88 | all perf record options. | 88 | all perf record options. |
diff --git a/tools/perf/arch/powerpc/util/Build b/tools/perf/arch/powerpc/util/Build index 2e6595310420..ba98bd006488 100644 --- a/tools/perf/arch/powerpc/util/Build +++ b/tools/perf/arch/powerpc/util/Build | |||
@@ -2,6 +2,7 @@ libperf-y += header.o | |||
2 | libperf-y += sym-handling.o | 2 | libperf-y += sym-handling.o |
3 | libperf-y += kvm-stat.o | 3 | libperf-y += kvm-stat.o |
4 | libperf-y += perf_regs.o | 4 | libperf-y += perf_regs.o |
5 | libperf-y += mem-events.o | ||
5 | 6 | ||
6 | libperf-$(CONFIG_DWARF) += dwarf-regs.o | 7 | libperf-$(CONFIG_DWARF) += dwarf-regs.o |
7 | libperf-$(CONFIG_DWARF) += skip-callchain-idx.o | 8 | libperf-$(CONFIG_DWARF) += skip-callchain-idx.o |
diff --git a/tools/perf/arch/powerpc/util/mem-events.c b/tools/perf/arch/powerpc/util/mem-events.c new file mode 100644 index 000000000000..d08311f04e95 --- /dev/null +++ b/tools/perf/arch/powerpc/util/mem-events.c | |||
@@ -0,0 +1,11 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include "mem-events.h" | ||
3 | |||
4 | /* PowerPC does not support 'ldlat' parameter. */ | ||
5 | char *perf_mem_events__name(int i) | ||
6 | { | ||
7 | if (i == PERF_MEM_EVENTS__LOAD) | ||
8 | return (char *) "cpu/mem-loads/"; | ||
9 | |||
10 | return (char *) "cpu/mem-stores/"; | ||
11 | } | ||
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index d079f36d342d..ac221f137ed2 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -1681,13 +1681,8 @@ static void perf_sample__fprint_metric(struct perf_script *script, | |||
1681 | .force_header = false, | 1681 | .force_header = false, |
1682 | }; | 1682 | }; |
1683 | struct perf_evsel *ev2; | 1683 | struct perf_evsel *ev2; |
1684 | static bool init; | ||
1685 | u64 val; | 1684 | u64 val; |
1686 | 1685 | ||
1687 | if (!init) { | ||
1688 | perf_stat__init_shadow_stats(); | ||
1689 | init = true; | ||
1690 | } | ||
1691 | if (!evsel->stats) | 1686 | if (!evsel->stats) |
1692 | perf_evlist__alloc_stats(script->session->evlist, false); | 1687 | perf_evlist__alloc_stats(script->session->evlist, false); |
1693 | if (evsel_script(evsel->leader)->gnum++ == 0) | 1688 | if (evsel_script(evsel->leader)->gnum++ == 0) |
@@ -1794,7 +1789,7 @@ static void process_event(struct perf_script *script, | |||
1794 | return; | 1789 | return; |
1795 | } | 1790 | } |
1796 | 1791 | ||
1797 | if (PRINT_FIELD(TRACE)) { | 1792 | if (PRINT_FIELD(TRACE) && sample->raw_data) { |
1798 | event_format__fprintf(evsel->tp_format, sample->cpu, | 1793 | event_format__fprintf(evsel->tp_format, sample->cpu, |
1799 | sample->raw_data, sample->raw_size, fp); | 1794 | sample->raw_data, sample->raw_size, fp); |
1800 | } | 1795 | } |
@@ -2359,6 +2354,8 @@ static int __cmd_script(struct perf_script *script) | |||
2359 | 2354 | ||
2360 | signal(SIGINT, sig_handler); | 2355 | signal(SIGINT, sig_handler); |
2361 | 2356 | ||
2357 | perf_stat__init_shadow_stats(); | ||
2358 | |||
2362 | /* override event processing functions */ | 2359 | /* override event processing functions */ |
2363 | if (script->show_task_events) { | 2360 | if (script->show_task_events) { |
2364 | script->tool.comm = process_comm_event; | 2361 | script->tool.comm = process_comm_event; |
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c index ed4583128b9c..b36061cd1ab8 100644 --- a/tools/perf/builtin-trace.c +++ b/tools/perf/builtin-trace.c | |||
@@ -2514,19 +2514,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp); | |||
2514 | 2514 | ||
2515 | static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist) | 2515 | static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist) |
2516 | { | 2516 | { |
2517 | struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname"); | 2517 | bool found = false; |
2518 | struct perf_evsel *evsel, *tmp; | ||
2519 | struct parse_events_error err = { .idx = 0, }; | ||
2520 | int ret = parse_events(evlist, "probe:vfs_getname*", &err); | ||
2518 | 2521 | ||
2519 | if (IS_ERR(evsel)) | 2522 | if (ret) |
2520 | return false; | 2523 | return false; |
2521 | 2524 | ||
2522 | if (perf_evsel__field(evsel, "pathname") == NULL) { | 2525 | evlist__for_each_entry_safe(evlist, evsel, tmp) { |
2526 | if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname")) | ||
2527 | continue; | ||
2528 | |||
2529 | if (perf_evsel__field(evsel, "pathname")) { | ||
2530 | evsel->handler = trace__vfs_getname; | ||
2531 | found = true; | ||
2532 | continue; | ||
2533 | } | ||
2534 | |||
2535 | list_del_init(&evsel->node); | ||
2536 | evsel->evlist = NULL; | ||
2523 | perf_evsel__delete(evsel); | 2537 | perf_evsel__delete(evsel); |
2524 | return false; | ||
2525 | } | 2538 | } |
2526 | 2539 | ||
2527 | evsel->handler = trace__vfs_getname; | 2540 | return found; |
2528 | perf_evlist__add(evlist, evsel); | ||
2529 | return true; | ||
2530 | } | 2541 | } |
2531 | 2542 | ||
2532 | static struct perf_evsel *perf_evsel__new_pgfault(u64 config) | 2543 | static struct perf_evsel *perf_evsel__new_pgfault(u64 config) |
diff --git a/tools/perf/tests/attr.py b/tools/perf/tests/attr.py index 44090a9a19f3..e952127e4fb0 100644 --- a/tools/perf/tests/attr.py +++ b/tools/perf/tests/attr.py | |||
@@ -1,6 +1,8 @@ | |||
1 | #! /usr/bin/python | 1 | #! /usr/bin/python |
2 | # SPDX-License-Identifier: GPL-2.0 | 2 | # SPDX-License-Identifier: GPL-2.0 |
3 | 3 | ||
4 | from __future__ import print_function | ||
5 | |||
4 | import os | 6 | import os |
5 | import sys | 7 | import sys |
6 | import glob | 8 | import glob |
@@ -8,7 +10,11 @@ import optparse | |||
8 | import tempfile | 10 | import tempfile |
9 | import logging | 11 | import logging |
10 | import shutil | 12 | import shutil |
11 | import ConfigParser | 13 | |
14 | try: | ||
15 | import configparser | ||
16 | except ImportError: | ||
17 | import ConfigParser as configparser | ||
12 | 18 | ||
13 | def data_equal(a, b): | 19 | def data_equal(a, b): |
14 | # Allow multiple values in assignment separated by '|' | 20 | # Allow multiple values in assignment separated by '|' |
@@ -100,20 +106,20 @@ class Event(dict): | |||
100 | def equal(self, other): | 106 | def equal(self, other): |
101 | for t in Event.terms: | 107 | for t in Event.terms: |
102 | log.debug(" [%s] %s %s" % (t, self[t], other[t])); | 108 | log.debug(" [%s] %s %s" % (t, self[t], other[t])); |
103 | if not self.has_key(t) or not other.has_key(t): | 109 | if t not in self or t not in other: |
104 | return False | 110 | return False |
105 | if not data_equal(self[t], other[t]): | 111 | if not data_equal(self[t], other[t]): |
106 | return False | 112 | return False |
107 | return True | 113 | return True |
108 | 114 | ||
109 | def optional(self): | 115 | def optional(self): |
110 | if self.has_key('optional') and self['optional'] == '1': | 116 | if 'optional' in self and self['optional'] == '1': |
111 | return True | 117 | return True |
112 | return False | 118 | return False |
113 | 119 | ||
114 | def diff(self, other): | 120 | def diff(self, other): |
115 | for t in Event.terms: | 121 | for t in Event.terms: |
116 | if not self.has_key(t) or not other.has_key(t): | 122 | if t not in self or t not in other: |
117 | continue | 123 | continue |
118 | if not data_equal(self[t], other[t]): | 124 | if not data_equal(self[t], other[t]): |
119 | log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) | 125 | log.warning("expected %s=%s, got %s" % (t, self[t], other[t])) |
@@ -134,7 +140,7 @@ class Event(dict): | |||
134 | # - expected values assignments | 140 | # - expected values assignments |
135 | class Test(object): | 141 | class Test(object): |
136 | def __init__(self, path, options): | 142 | def __init__(self, path, options): |
137 | parser = ConfigParser.SafeConfigParser() | 143 | parser = configparser.SafeConfigParser() |
138 | parser.read(path) | 144 | parser.read(path) |
139 | 145 | ||
140 | log.warning("running '%s'" % path) | 146 | log.warning("running '%s'" % path) |
@@ -193,7 +199,7 @@ class Test(object): | |||
193 | return True | 199 | return True |
194 | 200 | ||
195 | def load_events(self, path, events): | 201 | def load_events(self, path, events): |
196 | parser_event = ConfigParser.SafeConfigParser() | 202 | parser_event = configparser.SafeConfigParser() |
197 | parser_event.read(path) | 203 | parser_event.read(path) |
198 | 204 | ||
199 | # The event record section header contains 'event' word, | 205 | # The event record section header contains 'event' word, |
@@ -207,7 +213,7 @@ class Test(object): | |||
207 | # Read parent event if there's any | 213 | # Read parent event if there's any |
208 | if (':' in section): | 214 | if (':' in section): |
209 | base = section[section.index(':') + 1:] | 215 | base = section[section.index(':') + 1:] |
210 | parser_base = ConfigParser.SafeConfigParser() | 216 | parser_base = configparser.SafeConfigParser() |
211 | parser_base.read(self.test_dir + '/' + base) | 217 | parser_base.read(self.test_dir + '/' + base) |
212 | base_items = parser_base.items('event') | 218 | base_items = parser_base.items('event') |
213 | 219 | ||
@@ -322,9 +328,9 @@ def run_tests(options): | |||
322 | for f in glob.glob(options.test_dir + '/' + options.test): | 328 | for f in glob.glob(options.test_dir + '/' + options.test): |
323 | try: | 329 | try: |
324 | Test(f, options).run() | 330 | Test(f, options).run() |
325 | except Unsup, obj: | 331 | except Unsup as obj: |
326 | log.warning("unsupp %s" % obj.getMsg()) | 332 | log.warning("unsupp %s" % obj.getMsg()) |
327 | except Notest, obj: | 333 | except Notest as obj: |
328 | log.warning("skipped %s" % obj.getMsg()) | 334 | log.warning("skipped %s" % obj.getMsg()) |
329 | 335 | ||
330 | def setup_log(verbose): | 336 | def setup_log(verbose): |
@@ -363,7 +369,7 @@ def main(): | |||
363 | parser.add_option("-p", "--perf", | 369 | parser.add_option("-p", "--perf", |
364 | action="store", type="string", dest="perf") | 370 | action="store", type="string", dest="perf") |
365 | parser.add_option("-v", "--verbose", | 371 | parser.add_option("-v", "--verbose", |
366 | action="count", dest="verbose") | 372 | default=0, action="count", dest="verbose") |
367 | 373 | ||
368 | options, args = parser.parse_args() | 374 | options, args = parser.parse_args() |
369 | if args: | 375 | if args: |
@@ -373,7 +379,7 @@ def main(): | |||
373 | setup_log(options.verbose) | 379 | setup_log(options.verbose) |
374 | 380 | ||
375 | if not options.test_dir: | 381 | if not options.test_dir: |
376 | print 'FAILED no -d option specified' | 382 | print('FAILED no -d option specified') |
377 | sys.exit(-1) | 383 | sys.exit(-1) |
378 | 384 | ||
379 | if not options.test: | 385 | if not options.test: |
@@ -382,8 +388,8 @@ def main(): | |||
382 | try: | 388 | try: |
383 | run_tests(options) | 389 | run_tests(options) |
384 | 390 | ||
385 | except Fail, obj: | 391 | except Fail as obj: |
386 | print "FAILED %s" % obj.getMsg(); | 392 | print("FAILED %s" % obj.getMsg()) |
387 | sys.exit(-1) | 393 | sys.exit(-1) |
388 | 394 | ||
389 | sys.exit(0) | 395 | sys.exit(0) |
diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c index 5f8501c68da4..5cbba70bcdd0 100644 --- a/tools/perf/tests/evsel-tp-sched.c +++ b/tools/perf/tests/evsel-tp-sched.c | |||
@@ -17,7 +17,7 @@ static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, | |||
17 | return -1; | 17 | return -1; |
18 | } | 18 | } |
19 | 19 | ||
20 | is_signed = !!(field->flags | TEP_FIELD_IS_SIGNED); | 20 | is_signed = !!(field->flags & TEP_FIELD_IS_SIGNED); |
21 | if (should_be_signed && !is_signed) { | 21 | if (should_be_signed && !is_signed) { |
22 | pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", | 22 | pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", |
23 | evsel->name, name, is_signed, should_be_signed); | 23 | evsel->name, name, is_signed, should_be_signed); |
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 1d00e5ec7906..82e16bf84466 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c | |||
@@ -224,20 +224,24 @@ static unsigned int annotate_browser__refresh(struct ui_browser *browser) | |||
224 | return ret; | 224 | return ret; |
225 | } | 225 | } |
226 | 226 | ||
227 | static int disasm__cmp(struct annotation_line *a, struct annotation_line *b) | 227 | static double disasm__cmp(struct annotation_line *a, struct annotation_line *b, |
228 | int percent_type) | ||
228 | { | 229 | { |
229 | int i; | 230 | int i; |
230 | 231 | ||
231 | for (i = 0; i < a->data_nr; i++) { | 232 | for (i = 0; i < a->data_nr; i++) { |
232 | if (a->data[i].percent == b->data[i].percent) | 233 | if (a->data[i].percent[percent_type] == b->data[i].percent[percent_type]) |
233 | continue; | 234 | continue; |
234 | return a->data[i].percent < b->data[i].percent; | 235 | return a->data[i].percent[percent_type] - |
236 | b->data[i].percent[percent_type]; | ||
235 | } | 237 | } |
236 | return 0; | 238 | return 0; |
237 | } | 239 | } |
238 | 240 | ||
239 | static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line *al) | 241 | static void disasm_rb_tree__insert(struct annotate_browser *browser, |
242 | struct annotation_line *al) | ||
240 | { | 243 | { |
244 | struct rb_root *root = &browser->entries; | ||
241 | struct rb_node **p = &root->rb_node; | 245 | struct rb_node **p = &root->rb_node; |
242 | struct rb_node *parent = NULL; | 246 | struct rb_node *parent = NULL; |
243 | struct annotation_line *l; | 247 | struct annotation_line *l; |
@@ -246,7 +250,7 @@ static void disasm_rb_tree__insert(struct rb_root *root, struct annotation_line | |||
246 | parent = *p; | 250 | parent = *p; |
247 | l = rb_entry(parent, struct annotation_line, rb_node); | 251 | l = rb_entry(parent, struct annotation_line, rb_node); |
248 | 252 | ||
249 | if (disasm__cmp(al, l)) | 253 | if (disasm__cmp(al, l, browser->opts->percent_type) < 0) |
250 | p = &(*p)->rb_left; | 254 | p = &(*p)->rb_left; |
251 | else | 255 | else |
252 | p = &(*p)->rb_right; | 256 | p = &(*p)->rb_right; |
@@ -329,7 +333,7 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser, | |||
329 | RB_CLEAR_NODE(&pos->al.rb_node); | 333 | RB_CLEAR_NODE(&pos->al.rb_node); |
330 | continue; | 334 | continue; |
331 | } | 335 | } |
332 | disasm_rb_tree__insert(&browser->entries, &pos->al); | 336 | disasm_rb_tree__insert(browser, &pos->al); |
333 | } | 337 | } |
334 | pthread_mutex_unlock(¬es->lock); | 338 | pthread_mutex_unlock(¬es->lock); |
335 | 339 | ||
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp index 89512504551b..39c0004f2886 100644 --- a/tools/perf/util/c++/clang.cpp +++ b/tools/perf/util/c++/clang.cpp | |||
@@ -160,7 +160,7 @@ getBPFObjectFromModule(llvm::Module *Module) | |||
160 | } | 160 | } |
161 | PM.run(*Module); | 161 | PM.run(*Module); |
162 | 162 | ||
163 | return std::move(Buffer); | 163 | return Buffer; |
164 | } | 164 | } |
165 | 165 | ||
166 | } | 166 | } |
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c index 1ccbd3342069..383674f448fc 100644 --- a/tools/perf/util/cpumap.c +++ b/tools/perf/util/cpumap.c | |||
@@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list) | |||
134 | if (!cpu_list) | 134 | if (!cpu_list) |
135 | return cpu_map__read_all_cpu_map(); | 135 | return cpu_map__read_all_cpu_map(); |
136 | 136 | ||
137 | if (!isdigit(*cpu_list)) | 137 | /* |
138 | * must handle the case of empty cpumap to cover | ||
139 | * TOPOLOGY header for NUMA nodes with no CPU | ||
140 | * ( e.g., because of CPU hotplug) | ||
141 | */ | ||
142 | if (!isdigit(*cpu_list) && *cpu_list != '\0') | ||
138 | goto out; | 143 | goto out; |
139 | 144 | ||
140 | while (isdigit(*cpu_list)) { | 145 | while (isdigit(*cpu_list)) { |
@@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list) | |||
181 | 186 | ||
182 | if (nr_cpus > 0) | 187 | if (nr_cpus > 0) |
183 | cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); | 188 | cpus = cpu_map__trim_new(nr_cpus, tmp_cpus); |
184 | else | 189 | else if (*cpu_list != '\0') |
185 | cpus = cpu_map__default_new(); | 190 | cpus = cpu_map__default_new(); |
191 | else | ||
192 | cpus = cpu_map__dummy_new(); | ||
186 | invalid: | 193 | invalid: |
187 | free(tmp_cpus); | 194 | free(tmp_cpus); |
188 | out: | 195 | out: |
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c index 93f74d8d3cdd..42c3e5a229d2 100644 --- a/tools/perf/util/mem-events.c +++ b/tools/perf/util/mem-events.c | |||
@@ -28,7 +28,7 @@ struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = { | |||
28 | static char mem_loads_name[100]; | 28 | static char mem_loads_name[100]; |
29 | static bool mem_loads_name__init; | 29 | static bool mem_loads_name__init; |
30 | 30 | ||
31 | char *perf_mem_events__name(int i) | 31 | char * __weak perf_mem_events__name(int i) |
32 | { | 32 | { |
33 | if (i == PERF_MEM_EVENTS__LOAD) { | 33 | if (i == PERF_MEM_EVENTS__LOAD) { |
34 | if (!mem_loads_name__init) { | 34 | if (!mem_loads_name__init) { |
diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c index 897589507d97..ea523d3b248f 100644 --- a/tools/perf/util/ordered-events.c +++ b/tools/perf/util/ordered-events.c | |||
@@ -391,8 +391,10 @@ void ordered_events__free(struct ordered_events *oe) | |||
391 | * Current buffer might not have all the events allocated | 391 | * Current buffer might not have all the events allocated |
392 | * yet, we need to free only allocated ones ... | 392 | * yet, we need to free only allocated ones ... |
393 | */ | 393 | */ |
394 | list_del(&oe->buffer->list); | 394 | if (oe->buffer) { |
395 | ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe); | 395 | list_del(&oe->buffer->list); |
396 | ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe); | ||
397 | } | ||
396 | 398 | ||
397 | /* ... and continue with the rest */ | 399 | /* ... and continue with the rest */ |
398 | list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) { | 400 | list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) { |
diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index 63f758c655d5..64d1f36dee99 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py | |||
@@ -17,6 +17,8 @@ if cc == "clang": | |||
17 | vars[var] = sub("-mcet", "", vars[var]) | 17 | vars[var] = sub("-mcet", "", vars[var]) |
18 | if not clang_has_option("-fcf-protection"): | 18 | if not clang_has_option("-fcf-protection"): |
19 | vars[var] = sub("-fcf-protection", "", vars[var]) | 19 | vars[var] = sub("-fcf-protection", "", vars[var]) |
20 | if not clang_has_option("-fstack-clash-protection"): | ||
21 | vars[var] = sub("-fstack-clash-protection", "", vars[var]) | ||
20 | 22 | ||
21 | from distutils.core import setup, Extension | 23 | from distutils.core import setup, Extension |
22 | 24 | ||
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index 66a84d5846c8..dca7dfae69ad 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c | |||
@@ -19,6 +19,20 @@ | |||
19 | #define EM_AARCH64 183 /* ARM 64 bit */ | 19 | #define EM_AARCH64 183 /* ARM 64 bit */ |
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | #ifndef ELF32_ST_VISIBILITY | ||
23 | #define ELF32_ST_VISIBILITY(o) ((o) & 0x03) | ||
24 | #endif | ||
25 | |||
26 | /* For ELF64 the definitions are the same. */ | ||
27 | #ifndef ELF64_ST_VISIBILITY | ||
28 | #define ELF64_ST_VISIBILITY(o) ELF32_ST_VISIBILITY (o) | ||
29 | #endif | ||
30 | |||
31 | /* How to extract information held in the st_other field. */ | ||
32 | #ifndef GELF_ST_VISIBILITY | ||
33 | #define GELF_ST_VISIBILITY(val) ELF64_ST_VISIBILITY (val) | ||
34 | #endif | ||
35 | |||
22 | typedef Elf64_Nhdr GElf_Nhdr; | 36 | typedef Elf64_Nhdr GElf_Nhdr; |
23 | 37 | ||
24 | #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT | 38 | #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT |
@@ -87,6 +101,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym) | |||
87 | return GELF_ST_TYPE(sym->st_info); | 101 | return GELF_ST_TYPE(sym->st_info); |
88 | } | 102 | } |
89 | 103 | ||
104 | static inline uint8_t elf_sym__visibility(const GElf_Sym *sym) | ||
105 | { | ||
106 | return GELF_ST_VISIBILITY(sym->st_other); | ||
107 | } | ||
108 | |||
90 | #ifndef STT_GNU_IFUNC | 109 | #ifndef STT_GNU_IFUNC |
91 | #define STT_GNU_IFUNC 10 | 110 | #define STT_GNU_IFUNC 10 |
92 | #endif | 111 | #endif |
@@ -111,7 +130,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym) | |||
111 | return elf_sym__type(sym) == STT_NOTYPE && | 130 | return elf_sym__type(sym) == STT_NOTYPE && |
112 | sym->st_name != 0 && | 131 | sym->st_name != 0 && |
113 | sym->st_shndx != SHN_UNDEF && | 132 | sym->st_shndx != SHN_UNDEF && |
114 | sym->st_shndx != SHN_ABS; | 133 | sym->st_shndx != SHN_ABS && |
134 | elf_sym__visibility(sym) != STV_HIDDEN && | ||
135 | elf_sym__visibility(sym) != STV_INTERNAL; | ||
115 | } | 136 | } |
116 | 137 | ||
117 | static bool elf_sym__filter(GElf_Sym *sym) | 138 | static bool elf_sym__filter(GElf_Sym *sym) |
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile index 1a2bd15c5b6e..400ee81a3043 100644 --- a/tools/testing/selftests/Makefile +++ b/tools/testing/selftests/Makefile | |||
@@ -10,6 +10,7 @@ TARGETS += drivers/dma-buf | |||
10 | TARGETS += efivarfs | 10 | TARGETS += efivarfs |
11 | TARGETS += exec | 11 | TARGETS += exec |
12 | TARGETS += filesystems | 12 | TARGETS += filesystems |
13 | TARGETS += filesystems/binderfs | ||
13 | TARGETS += firmware | 14 | TARGETS += firmware |
14 | TARGETS += ftrace | 15 | TARGETS += ftrace |
15 | TARGETS += futex | 16 | TARGETS += futex |
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h index 315a44fa32af..84fd6f1bf33e 100644 --- a/tools/testing/selftests/bpf/bpf_util.h +++ b/tools/testing/selftests/bpf/bpf_util.h | |||
@@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void) | |||
13 | unsigned int start, end, possible_cpus = 0; | 13 | unsigned int start, end, possible_cpus = 0; |
14 | char buff[128]; | 14 | char buff[128]; |
15 | FILE *fp; | 15 | FILE *fp; |
16 | int n; | 16 | int len, n, i, j = 0; |
17 | 17 | ||
18 | fp = fopen(fcpu, "r"); | 18 | fp = fopen(fcpu, "r"); |
19 | if (!fp) { | 19 | if (!fp) { |
@@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void) | |||
21 | exit(1); | 21 | exit(1); |
22 | } | 22 | } |
23 | 23 | ||
24 | while (fgets(buff, sizeof(buff), fp)) { | 24 | if (!fgets(buff, sizeof(buff), fp)) { |
25 | n = sscanf(buff, "%u-%u", &start, &end); | 25 | printf("Failed to read %s!\n", fcpu); |
26 | if (n == 0) { | 26 | exit(1); |
27 | printf("Failed to retrieve # possible CPUs!\n"); | 27 | } |
28 | exit(1); | 28 | |
29 | } else if (n == 1) { | 29 | len = strlen(buff); |
30 | end = start; | 30 | for (i = 0; i <= len; i++) { |
31 | if (buff[i] == ',' || buff[i] == '\0') { | ||
32 | buff[i] = '\0'; | ||
33 | n = sscanf(&buff[j], "%u-%u", &start, &end); | ||
34 | if (n <= 0) { | ||
35 | printf("Failed to retrieve # possible CPUs!\n"); | ||
36 | exit(1); | ||
37 | } else if (n == 1) { | ||
38 | end = start; | ||
39 | } | ||
40 | possible_cpus += end - start + 1; | ||
41 | j = i + 1; | ||
31 | } | 42 | } |
32 | possible_cpus = start == 0 ? end + 1 : 0; | ||
33 | break; | ||
34 | } | 43 | } |
44 | |||
35 | fclose(fp); | 45 | fclose(fp); |
36 | 46 | ||
37 | return possible_cpus; | 47 | return possible_cpus; |
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index a0bd04befe87..91420fa83b08 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c | |||
@@ -1881,13 +1881,12 @@ static struct btf_raw_test raw_tests[] = { | |||
1881 | }, | 1881 | }, |
1882 | 1882 | ||
1883 | { | 1883 | { |
1884 | .descr = "func proto (CONST=>TYPEDEF=>FUNC_PROTO)", | 1884 | .descr = "func proto (TYPEDEF=>FUNC_PROTO)", |
1885 | .raw_types = { | 1885 | .raw_types = { |
1886 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | 1886 | BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ |
1887 | BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ | 1887 | BTF_TYPE_INT_ENC(0, 0, 0, 32, 4), /* [2] */ |
1888 | BTF_CONST_ENC(4), /* [3] */ | 1888 | BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */ |
1889 | BTF_TYPEDEF_ENC(NAME_TBD, 5), /* [4] */ | 1889 | BTF_FUNC_PROTO_ENC(0, 2), /* [4] */ |
1890 | BTF_FUNC_PROTO_ENC(0, 2), /* [5] */ | ||
1891 | BTF_FUNC_PROTO_ARG_ENC(0, 1), | 1890 | BTF_FUNC_PROTO_ARG_ENC(0, 1), |
1892 | BTF_FUNC_PROTO_ARG_ENC(0, 2), | 1891 | BTF_FUNC_PROTO_ARG_ENC(0, 2), |
1893 | BTF_END_RAW, | 1892 | BTF_END_RAW, |
@@ -1901,8 +1900,6 @@ static struct btf_raw_test raw_tests[] = { | |||
1901 | .key_type_id = 1, | 1900 | .key_type_id = 1, |
1902 | .value_type_id = 1, | 1901 | .value_type_id = 1, |
1903 | .max_entries = 4, | 1902 | .max_entries = 4, |
1904 | .btf_load_err = true, | ||
1905 | .err_str = "Invalid type_id", | ||
1906 | }, | 1903 | }, |
1907 | 1904 | ||
1908 | { | 1905 | { |
diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh index bab13dd025a6..0d26b5e3f966 100755 --- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh +++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh | |||
@@ -37,6 +37,10 @@ prerequisite() | |||
37 | exit $ksft_skip | 37 | exit $ksft_skip |
38 | fi | 38 | fi |
39 | 39 | ||
40 | present_cpus=`cat $SYSFS/devices/system/cpu/present` | ||
41 | present_max=${present_cpus##*-} | ||
42 | echo "present_cpus = $present_cpus present_max = $present_max" | ||
43 | |||
40 | echo -e "\t Cpus in online state: $online_cpus" | 44 | echo -e "\t Cpus in online state: $online_cpus" |
41 | 45 | ||
42 | offline_cpus=`cat $SYSFS/devices/system/cpu/offline` | 46 | offline_cpus=`cat $SYSFS/devices/system/cpu/offline` |
@@ -151,6 +155,8 @@ online_cpus=0 | |||
151 | online_max=0 | 155 | online_max=0 |
152 | offline_cpus=0 | 156 | offline_cpus=0 |
153 | offline_max=0 | 157 | offline_max=0 |
158 | present_cpus=0 | ||
159 | present_max=0 | ||
154 | 160 | ||
155 | while getopts e:ahp: opt; do | 161 | while getopts e:ahp: opt; do |
156 | case $opt in | 162 | case $opt in |
@@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then | |||
190 | online_cpu_expect_success $online_max | 196 | online_cpu_expect_success $online_max |
191 | 197 | ||
192 | if [[ $offline_cpus -gt 0 ]]; then | 198 | if [[ $offline_cpus -gt 0 ]]; then |
193 | echo -e "\t offline to online to offline: cpu $offline_max" | 199 | echo -e "\t offline to online to offline: cpu $present_max" |
194 | online_cpu_expect_success $offline_max | 200 | online_cpu_expect_success $present_max |
195 | offline_cpu_expect_success $offline_max | 201 | offline_cpu_expect_success $present_max |
202 | online_cpu $present_max | ||
196 | fi | 203 | fi |
197 | exit 0 | 204 | exit 0 |
198 | else | 205 | else |
diff --git a/tools/testing/selftests/filesystems/binderfs/.gitignore b/tools/testing/selftests/filesystems/binderfs/.gitignore new file mode 100644 index 000000000000..8a5d9bf63dd4 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/.gitignore | |||
@@ -0,0 +1 @@ | |||
binderfs_test | |||
diff --git a/tools/testing/selftests/filesystems/binderfs/Makefile b/tools/testing/selftests/filesystems/binderfs/Makefile new file mode 100644 index 000000000000..58cb659b56b4 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | CFLAGS += -I../../../../../usr/include/ | ||
4 | TEST_GEN_PROGS := binderfs_test | ||
5 | |||
6 | include ../../lib.mk | ||
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c new file mode 100644 index 000000000000..8c2ed962e1c7 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c | |||
@@ -0,0 +1,275 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | |||
3 | #define _GNU_SOURCE | ||
4 | #include <errno.h> | ||
5 | #include <fcntl.h> | ||
6 | #include <sched.h> | ||
7 | #include <stdbool.h> | ||
8 | #include <stdio.h> | ||
9 | #include <stdlib.h> | ||
10 | #include <string.h> | ||
11 | #include <sys/ioctl.h> | ||
12 | #include <sys/mount.h> | ||
13 | #include <sys/stat.h> | ||
14 | #include <sys/types.h> | ||
15 | #include <unistd.h> | ||
16 | #include <linux/android/binder.h> | ||
17 | #include <linux/android/binderfs.h> | ||
18 | #include "../../kselftest.h" | ||
19 | |||
20 | static ssize_t write_nointr(int fd, const void *buf, size_t count) | ||
21 | { | ||
22 | ssize_t ret; | ||
23 | again: | ||
24 | ret = write(fd, buf, count); | ||
25 | if (ret < 0 && errno == EINTR) | ||
26 | goto again; | ||
27 | |||
28 | return ret; | ||
29 | } | ||
30 | |||
31 | static void write_to_file(const char *filename, const void *buf, size_t count, | ||
32 | int allowed_errno) | ||
33 | { | ||
34 | int fd, saved_errno; | ||
35 | ssize_t ret; | ||
36 | |||
37 | fd = open(filename, O_WRONLY | O_CLOEXEC); | ||
38 | if (fd < 0) | ||
39 | ksft_exit_fail_msg("%s - Failed to open file %s\n", | ||
40 | strerror(errno), filename); | ||
41 | |||
42 | ret = write_nointr(fd, buf, count); | ||
43 | if (ret < 0) { | ||
44 | if (allowed_errno && (errno == allowed_errno)) { | ||
45 | close(fd); | ||
46 | return; | ||
47 | } | ||
48 | |||
49 | goto on_error; | ||
50 | } | ||
51 | |||
52 | if ((size_t)ret != count) | ||
53 | goto on_error; | ||
54 | |||
55 | close(fd); | ||
56 | return; | ||
57 | |||
58 | on_error: | ||
59 | saved_errno = errno; | ||
60 | close(fd); | ||
61 | errno = saved_errno; | ||
62 | |||
63 | if (ret < 0) | ||
64 | ksft_exit_fail_msg("%s - Failed to write to file %s\n", | ||
65 | strerror(errno), filename); | ||
66 | |||
67 | ksft_exit_fail_msg("Failed to write to file %s\n", filename); | ||
68 | } | ||
69 | |||
70 | static void change_to_userns(void) | ||
71 | { | ||
72 | int ret; | ||
73 | uid_t uid; | ||
74 | gid_t gid; | ||
75 | /* {g,u}id_map files only allow a max of 4096 bytes written to them */ | ||
76 | char idmap[4096]; | ||
77 | |||
78 | uid = getuid(); | ||
79 | gid = getgid(); | ||
80 | |||
81 | ret = unshare(CLONE_NEWUSER); | ||
82 | if (ret < 0) | ||
83 | ksft_exit_fail_msg("%s - Failed to unshare user namespace\n", | ||
84 | strerror(errno)); | ||
85 | |||
86 | write_to_file("/proc/self/setgroups", "deny", strlen("deny"), ENOENT); | ||
87 | |||
88 | ret = snprintf(idmap, sizeof(idmap), "0 %d 1", uid); | ||
89 | if (ret < 0 || (size_t)ret >= sizeof(idmap)) | ||
90 | ksft_exit_fail_msg("%s - Failed to prepare uid mapping\n", | ||
91 | strerror(errno)); | ||
92 | |||
93 | write_to_file("/proc/self/uid_map", idmap, strlen(idmap), 0); | ||
94 | |||
95 | ret = snprintf(idmap, sizeof(idmap), "0 %d 1", gid); | ||
96 | if (ret < 0 || (size_t)ret >= sizeof(idmap)) | ||
97 | ksft_exit_fail_msg("%s - Failed to prepare gid mapping\n", | ||
98 | strerror(errno)); | ||
99 | |||
100 | write_to_file("/proc/self/gid_map", idmap, strlen(idmap), 0); | ||
101 | |||
102 | ret = setgid(0); | ||
103 | if (ret) | ||
104 | ksft_exit_fail_msg("%s - Failed to setgid(0)\n", | ||
105 | strerror(errno)); | ||
106 | |||
107 | ret = setuid(0); | ||
108 | if (ret) | ||
109 | ksft_exit_fail_msg("%s - Failed to setuid(0)\n", | ||
110 | strerror(errno)); | ||
111 | } | ||
112 | |||
113 | static void change_to_mountns(void) | ||
114 | { | ||
115 | int ret; | ||
116 | |||
117 | ret = unshare(CLONE_NEWNS); | ||
118 | if (ret < 0) | ||
119 | ksft_exit_fail_msg("%s - Failed to unshare mount namespace\n", | ||
120 | strerror(errno)); | ||
121 | |||
122 | ret = mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, 0); | ||
123 | if (ret < 0) | ||
124 | ksft_exit_fail_msg("%s - Failed to mount / as private\n", | ||
125 | strerror(errno)); | ||
126 | } | ||
127 | |||
128 | static void rmdir_protect_errno(const char *dir) | ||
129 | { | ||
130 | int saved_errno = errno; | ||
131 | (void)rmdir(dir); | ||
132 | errno = saved_errno; | ||
133 | } | ||
134 | |||
135 | static void __do_binderfs_test(void) | ||
136 | { | ||
137 | int fd, ret, saved_errno; | ||
138 | size_t len; | ||
139 | ssize_t wret; | ||
140 | bool keep = false; | ||
141 | struct binderfs_device device = { 0 }; | ||
142 | struct binder_version version = { 0 }; | ||
143 | |||
144 | change_to_mountns(); | ||
145 | |||
146 | ret = mkdir("/dev/binderfs", 0755); | ||
147 | if (ret < 0) { | ||
148 | if (errno != EEXIST) | ||
149 | ksft_exit_fail_msg( | ||
150 | "%s - Failed to create binderfs mountpoint\n", | ||
151 | strerror(errno)); | ||
152 | |||
153 | keep = true; | ||
154 | } | ||
155 | |||
156 | ret = mount(NULL, "/dev/binderfs", "binder", 0, 0); | ||
157 | if (ret < 0) { | ||
158 | if (errno != ENODEV) | ||
159 | ksft_exit_fail_msg("%s - Failed to mount binderfs\n", | ||
160 | strerror(errno)); | ||
161 | |||
162 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
163 | ksft_exit_skip( | ||
164 | "The Android binderfs filesystem is not available\n"); | ||
165 | } | ||
166 | |||
167 | /* binderfs mount test passed */ | ||
168 | ksft_inc_pass_cnt(); | ||
169 | |||
170 | memcpy(device.name, "my-binder", strlen("my-binder")); | ||
171 | |||
172 | fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC); | ||
173 | if (fd < 0) | ||
174 | ksft_exit_fail_msg( | ||
175 | "%s - Failed to open binder-control device\n", | ||
176 | strerror(errno)); | ||
177 | |||
178 | ret = ioctl(fd, BINDER_CTL_ADD, &device); | ||
179 | saved_errno = errno; | ||
180 | close(fd); | ||
181 | errno = saved_errno; | ||
182 | if (ret < 0) { | ||
183 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
184 | ksft_exit_fail_msg( | ||
185 | "%s - Failed to allocate new binder device\n", | ||
186 | strerror(errno)); | ||
187 | } | ||
188 | |||
189 | ksft_print_msg( | ||
190 | "Allocated new binder device with major %d, minor %d, and name %s\n", | ||
191 | device.major, device.minor, device.name); | ||
192 | |||
193 | /* binder device allocation test passed */ | ||
194 | ksft_inc_pass_cnt(); | ||
195 | |||
196 | fd = open("/dev/binderfs/my-binder", O_CLOEXEC | O_RDONLY); | ||
197 | if (fd < 0) { | ||
198 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
199 | ksft_exit_fail_msg("%s - Failed to open my-binder device\n", | ||
200 | strerror(errno)); | ||
201 | } | ||
202 | |||
203 | ret = ioctl(fd, BINDER_VERSION, &version); | ||
204 | saved_errno = errno; | ||
205 | close(fd); | ||
206 | errno = saved_errno; | ||
207 | if (ret < 0) { | ||
208 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
209 | ksft_exit_fail_msg( | ||
210 | "%s - Failed to perform BINDER_VERSION request\n", | ||
211 | strerror(errno)); | ||
212 | } | ||
213 | |||
214 | ksft_print_msg("Detected binder version: %d\n", | ||
215 | version.protocol_version); | ||
216 | |||
217 | /* binder transaction with binderfs binder device passed */ | ||
218 | ksft_inc_pass_cnt(); | ||
219 | |||
220 | ret = unlink("/dev/binderfs/my-binder"); | ||
221 | if (ret < 0) { | ||
222 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
223 | ksft_exit_fail_msg("%s - Failed to delete binder device\n", | ||
224 | strerror(errno)); | ||
225 | } | ||
226 | |||
227 | /* binder device removal passed */ | ||
228 | ksft_inc_pass_cnt(); | ||
229 | |||
230 | ret = unlink("/dev/binderfs/binder-control"); | ||
231 | if (!ret) { | ||
232 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
233 | ksft_exit_fail_msg("Managed to delete binder-control device\n"); | ||
234 | } else if (errno != EPERM) { | ||
235 | keep ? : rmdir_protect_errno("/dev/binderfs"); | ||
236 | ksft_exit_fail_msg( | ||
237 | "%s - Failed to delete binder-control device but exited with unexpected error code\n", | ||
238 | strerror(errno)); | ||
239 | } | ||
240 | |||
241 | /* binder-control device removal failed as expected */ | ||
242 | ksft_inc_xfail_cnt(); | ||
243 | |||
244 | on_error: | ||
245 | ret = umount2("/dev/binderfs", MNT_DETACH); | ||
246 | keep ?: rmdir_protect_errno("/dev/binderfs"); | ||
247 | if (ret < 0) | ||
248 | ksft_exit_fail_msg("%s - Failed to unmount binderfs\n", | ||
249 | strerror(errno)); | ||
250 | |||
251 | /* binderfs unmount test passed */ | ||
252 | ksft_inc_pass_cnt(); | ||
253 | } | ||
254 | |||
255 | static void binderfs_test_privileged() | ||
256 | { | ||
257 | if (geteuid() != 0) | ||
258 | ksft_print_msg( | ||
259 | "Tests are not run as root. Skipping privileged tests\n"); | ||
260 | else | ||
261 | __do_binderfs_test(); | ||
262 | } | ||
263 | |||
264 | static void binderfs_test_unprivileged() | ||
265 | { | ||
266 | change_to_userns(); | ||
267 | __do_binderfs_test(); | ||
268 | } | ||
269 | |||
270 | int main(int argc, char *argv[]) | ||
271 | { | ||
272 | binderfs_test_privileged(); | ||
273 | binderfs_test_unprivileged(); | ||
274 | ksft_exit_pass(); | ||
275 | } | ||
diff --git a/tools/testing/selftests/filesystems/binderfs/config b/tools/testing/selftests/filesystems/binderfs/config new file mode 100644 index 000000000000..02dd6cc9cf99 --- /dev/null +++ b/tools/testing/selftests/filesystems/binderfs/config | |||
@@ -0,0 +1,3 @@ | |||
1 | CONFIG_ANDROID=y | ||
2 | CONFIG_ANDROID_BINDERFS=y | ||
3 | CONFIG_ANDROID_BINDER_IPC=y | ||
diff --git a/tools/testing/selftests/ir/Makefile b/tools/testing/selftests/ir/Makefile index f4ba8eb84b95..ad06489c22a5 100644 --- a/tools/testing/selftests/ir/Makefile +++ b/tools/testing/selftests/ir/Makefile | |||
@@ -1,5 +1,7 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | TEST_PROGS := ir_loopback.sh | 2 | TEST_PROGS := ir_loopback.sh |
3 | TEST_GEN_PROGS_EXTENDED := ir_loopback | 3 | TEST_GEN_PROGS_EXTENDED := ir_loopback |
4 | APIDIR := ../../../include/uapi | ||
5 | CFLAGS += -Wall -O2 -I$(APIDIR) | ||
4 | 6 | ||
5 | include ../lib.mk | 7 | include ../lib.mk |
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index f8f3e90700c0..1e6d14d2825c 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile | |||
@@ -21,6 +21,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls | |||
21 | KSFT_KHDR_INSTALL := 1 | 21 | KSFT_KHDR_INSTALL := 1 |
22 | include ../lib.mk | 22 | include ../lib.mk |
23 | 23 | ||
24 | $(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma | 24 | $(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma |
25 | $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread | 25 | $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread |
26 | $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread | 26 | $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread |
diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh index 8db35b99457c..71d7fdc513c1 100755 --- a/tools/testing/selftests/net/xfrm_policy.sh +++ b/tools/testing/selftests/net/xfrm_policy.sh | |||
@@ -28,6 +28,19 @@ KEY_AES=0x0123456789abcdef0123456789012345 | |||
28 | SPI1=0x1 | 28 | SPI1=0x1 |
29 | SPI2=0x2 | 29 | SPI2=0x2 |
30 | 30 | ||
31 | do_esp_policy() { | ||
32 | local ns=$1 | ||
33 | local me=$2 | ||
34 | local remote=$3 | ||
35 | local lnet=$4 | ||
36 | local rnet=$5 | ||
37 | |||
38 | # to encrypt packets as they go out (includes forwarded packets that need encapsulation) | ||
39 | ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow | ||
40 | # to fwd decrypted packets after esp processing: | ||
41 | ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow | ||
42 | } | ||
43 | |||
31 | do_esp() { | 44 | do_esp() { |
32 | local ns=$1 | 45 | local ns=$1 |
33 | local me=$2 | 46 | local me=$2 |
@@ -40,10 +53,59 @@ do_esp() { | |||
40 | ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet | 53 | ip -net $ns xfrm state add src $remote dst $me proto esp spi $spi_in enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $rnet dst $lnet |
41 | ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet | 54 | ip -net $ns xfrm state add src $me dst $remote proto esp spi $spi_out enc aes $KEY_AES auth sha1 $KEY_SHA mode tunnel sel src $lnet dst $rnet |
42 | 55 | ||
43 | # to encrypt packets as they go out (includes forwarded packets that need encapsulation) | 56 | do_esp_policy $ns $me $remote $lnet $rnet |
44 | ip -net $ns xfrm policy add src $lnet dst $rnet dir out tmpl src $me dst $remote proto esp mode tunnel priority 100 action allow | 57 | } |
45 | # to fwd decrypted packets after esp processing: | 58 | |
46 | ip -net $ns xfrm policy add src $rnet dst $lnet dir fwd tmpl src $remote dst $me proto esp mode tunnel priority 100 action allow | 59 | # add policies with different netmasks, to make sure kernel carries |
60 | # the policies contained within new netmask over when search tree is | ||
61 | # re-built. | ||
62 | # peer netns that are supposed to be encapsulated via esp have addresses | ||
63 | # in the 10.0.1.0/24 and 10.0.2.0/24 subnets, respectively. | ||
64 | # | ||
65 | # Adding a policy for '10.0.1.0/23' will make it necessary to | ||
66 | # alter the prefix of 10.0.1.0 subnet. | ||
67 | # In case new prefix overlaps with existing node, the node and all | ||
68 | # policies it carries need to be merged with the existing one(s). | ||
69 | # | ||
70 | # Do that here. | ||
71 | do_overlap() | ||
72 | { | ||
73 | local ns=$1 | ||
74 | |||
75 | # adds new nodes to tree (neither network exists yet in policy database). | ||
76 | ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block | ||
77 | |||
78 | # adds a new node in the 10.0.0.0/24 tree (dst node exists). | ||
79 | ip -net $ns xfrm policy add src 10.2.0.0/24 dst 10.0.0.0/24 dir fwd priority 200 action block | ||
80 | |||
81 | # adds a 10.2.0.0/23 node, but for different dst. | ||
82 | ip -net $ns xfrm policy add src 10.2.0.0/23 dst 10.0.1.0/24 dir fwd priority 200 action block | ||
83 | |||
84 | # dst now overlaps with the 10.0.1.0/24 ESP policy in fwd. | ||
85 | # kernel must 'promote' existing one (10.0.0.0/24) to 10.0.0.0/23. | ||
86 | # But 10.0.0.0/23 also includes existing 10.0.1.0/24, so that node | ||
87 | # also has to be merged too, including source-sorted subtrees. | ||
88 | # old: | ||
89 | # 10.0.0.0/24 (node 1 in dst tree of the bin) | ||
90 | # 10.1.0.0/24 (node in src tree of dst node 1) | ||
91 | # 10.2.0.0/24 (node in src tree of dst node 1) | ||
92 | # 10.0.1.0/24 (node 2 in dst tree of the bin) | ||
93 | # 10.0.2.0/24 (node in src tree of dst node 2) | ||
94 | # 10.2.0.0/24 (node in src tree of dst node 2) | ||
95 | # | ||
96 | # The next 'policy add' adds dst '10.0.0.0/23', which means | ||
97 | # that dst node 1 and dst node 2 have to be merged including | ||
98 | # the sub-tree. As no duplicates are allowed, policies in | ||
99 | # the two '10.0.2.0/24' are also merged. | ||
100 | # | ||
101 | # after the 'add', internal search tree should look like this: | ||
102 | # 10.0.0.0/23 (node in dst tree of bin) | ||
103 | # 10.0.2.0/24 (node in src tree of dst node) | ||
104 | # 10.1.0.0/24 (node in src tree of dst node) | ||
105 | # 10.2.0.0/24 (node in src tree of dst node) | ||
106 | # | ||
107 | # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23. | ||
108 | ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block | ||
47 | } | 109 | } |
48 | 110 | ||
49 | do_esp_policy_get_check() { | 111 | do_esp_policy_get_check() { |
@@ -160,6 +222,41 @@ check_xfrm() { | |||
160 | return $lret | 222 | return $lret |
161 | } | 223 | } |
162 | 224 | ||
225 | check_exceptions() | ||
226 | { | ||
227 | logpostfix="$1" | ||
228 | local lret=0 | ||
229 | |||
230 | # ping to .254 should be excluded from the tunnel (exception is in place). | ||
231 | check_xfrm 0 254 | ||
232 | if [ $? -ne 0 ]; then | ||
233 | echo "FAIL: expected ping to .254 to fail ($logpostfix)" | ||
234 | lret=1 | ||
235 | else | ||
236 | echo "PASS: ping to .254 bypassed ipsec tunnel ($logpostfix)" | ||
237 | fi | ||
238 | |||
239 | # ping to .253 should use ipsec due to direct policy exception. | ||
240 | check_xfrm 1 253 | ||
241 | if [ $? -ne 0 ]; then | ||
242 | echo "FAIL: expected ping to .253 to use ipsec tunnel ($logpostfix)" | ||
243 | lret=1 | ||
244 | else | ||
245 | echo "PASS: direct policy matches ($logpostfix)" | ||
246 | fi | ||
247 | |||
248 | # ping to .2 should use ipsec. | ||
249 | check_xfrm 1 2 | ||
250 | if [ $? -ne 0 ]; then | ||
251 | echo "FAIL: expected ping to .2 to use ipsec tunnel ($logpostfix)" | ||
252 | lret=1 | ||
253 | else | ||
254 | echo "PASS: policy matches ($logpostfix)" | ||
255 | fi | ||
256 | |||
257 | return $lret | ||
258 | } | ||
259 | |||
163 | #check for needed privileges | 260 | #check for needed privileges |
164 | if [ "$(id -u)" -ne 0 ];then | 261 | if [ "$(id -u)" -ne 0 ];then |
165 | echo "SKIP: Need root privileges" | 262 | echo "SKIP: Need root privileges" |
@@ -270,33 +367,45 @@ do_exception ns4 10.0.3.10 10.0.3.1 10.0.1.253 10.0.1.240/28 | |||
270 | do_exception ns3 dead:3::1 dead:3::10 dead:2::fd dead:2:f0::/96 | 367 | do_exception ns3 dead:3::1 dead:3::10 dead:2::fd dead:2:f0::/96 |
271 | do_exception ns4 dead:3::10 dead:3::1 dead:1::fd dead:1:f0::/96 | 368 | do_exception ns4 dead:3::10 dead:3::1 dead:1::fd dead:1:f0::/96 |
272 | 369 | ||
273 | # ping to .254 should now be excluded from the tunnel | 370 | check_exceptions "exceptions" |
274 | check_xfrm 0 254 | ||
275 | if [ $? -ne 0 ]; then | 371 | if [ $? -ne 0 ]; then |
276 | echo "FAIL: expected ping to .254 to fail" | ||
277 | ret=1 | 372 | ret=1 |
278 | else | ||
279 | echo "PASS: ping to .254 bypassed ipsec tunnel" | ||
280 | fi | 373 | fi |
281 | 374 | ||
282 | # ping to .253 should use use ipsec due to direct policy exception. | 375 | # insert block policies with adjacent/overlapping netmasks |
283 | check_xfrm 1 253 | 376 | do_overlap ns3 |
284 | if [ $? -ne 0 ]; then | ||
285 | echo "FAIL: expected ping to .253 to use ipsec tunnel" | ||
286 | ret=1 | ||
287 | else | ||
288 | echo "PASS: direct policy matches" | ||
289 | fi | ||
290 | 377 | ||
291 | # ping to .2 should use ipsec. | 378 | check_exceptions "exceptions and block policies" |
292 | check_xfrm 1 2 | ||
293 | if [ $? -ne 0 ]; then | 379 | if [ $? -ne 0 ]; then |
294 | echo "FAIL: expected ping to .2 to use ipsec tunnel" | ||
295 | ret=1 | 380 | ret=1 |
296 | else | ||
297 | echo "PASS: policy matches" | ||
298 | fi | 381 | fi |
299 | 382 | ||
383 | for n in ns3 ns4;do | ||
384 | ip -net $n xfrm policy set hthresh4 28 24 hthresh6 126 125 | ||
385 | sleep $((RANDOM%5)) | ||
386 | done | ||
387 | |||
388 | check_exceptions "exceptions and block policies after hthresh changes" | ||
389 | |||
390 | # full flush of policy db, check everything gets freed incl. internal meta data | ||
391 | ip -net ns3 xfrm policy flush | ||
392 | |||
393 | do_esp_policy ns3 10.0.3.1 10.0.3.10 10.0.1.0/24 10.0.2.0/24 | ||
394 | do_exception ns3 10.0.3.1 10.0.3.10 10.0.2.253 10.0.2.240/28 | ||
395 | |||
396 | # move inexact policies to hash table | ||
397 | ip -net ns3 xfrm policy set hthresh4 16 16 | ||
398 | |||
399 | sleep $((RANDOM%5)) | ||
400 | check_exceptions "exceptions and block policies after hthresh change in ns3" | ||
401 | |||
402 | # restore original hthresh settings -- move policies back to tables | ||
403 | for n in ns3 ns4;do | ||
404 | ip -net $n xfrm policy set hthresh4 32 32 hthresh6 128 128 | ||
405 | sleep $((RANDOM%5)) | ||
406 | done | ||
407 | check_exceptions "exceptions and block policies after hthresh change to normal" | ||
408 | |||
300 | for i in 1 2 3 4;do ip netns del ns$i;done | 409 | for i in 1 2 3 4;do ip netns del ns$i;done |
301 | 410 | ||
302 | exit $ret | 411 | exit $ret |
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile index 47ed6cef93fb..c9ff2b47bd1c 100644 --- a/tools/testing/selftests/netfilter/Makefile +++ b/tools/testing/selftests/netfilter/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | # Makefile for netfilter selftests | 2 | # Makefile for netfilter selftests |
3 | 3 | ||
4 | TEST_PROGS := nft_trans_stress.sh | 4 | TEST_PROGS := nft_trans_stress.sh nft_nat.sh |
5 | 5 | ||
6 | include ../lib.mk | 6 | include ../lib.mk |
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config index 1017313e41a8..59caa8f71cd8 100644 --- a/tools/testing/selftests/netfilter/config +++ b/tools/testing/selftests/netfilter/config | |||
@@ -1,2 +1,2 @@ | |||
1 | CONFIG_NET_NS=y | 1 | CONFIG_NET_NS=y |
2 | NF_TABLES_INET=y | 2 | CONFIG_NF_TABLES_INET=y |
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh new file mode 100755 index 000000000000..8ec76681605c --- /dev/null +++ b/tools/testing/selftests/netfilter/nft_nat.sh | |||
@@ -0,0 +1,762 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # This test is for basic NAT functionality: snat, dnat, redirect, masquerade. | ||
4 | # | ||
5 | |||
6 | # Kselftest framework requirement - SKIP code is 4. | ||
7 | ksft_skip=4 | ||
8 | ret=0 | ||
9 | |||
10 | nft --version > /dev/null 2>&1 | ||
11 | if [ $? -ne 0 ];then | ||
12 | echo "SKIP: Could not run test without nft tool" | ||
13 | exit $ksft_skip | ||
14 | fi | ||
15 | |||
16 | ip -Version > /dev/null 2>&1 | ||
17 | if [ $? -ne 0 ];then | ||
18 | echo "SKIP: Could not run test without ip tool" | ||
19 | exit $ksft_skip | ||
20 | fi | ||
21 | |||
22 | ip netns add ns0 | ||
23 | ip netns add ns1 | ||
24 | ip netns add ns2 | ||
25 | |||
26 | ip link add veth0 netns ns0 type veth peer name eth0 netns ns1 | ||
27 | ip link add veth1 netns ns0 type veth peer name eth0 netns ns2 | ||
28 | |||
29 | ip -net ns0 link set lo up | ||
30 | ip -net ns0 link set veth0 up | ||
31 | ip -net ns0 addr add 10.0.1.1/24 dev veth0 | ||
32 | ip -net ns0 addr add dead:1::1/64 dev veth0 | ||
33 | |||
34 | ip -net ns0 link set veth1 up | ||
35 | ip -net ns0 addr add 10.0.2.1/24 dev veth1 | ||
36 | ip -net ns0 addr add dead:2::1/64 dev veth1 | ||
37 | |||
38 | for i in 1 2; do | ||
39 | ip -net ns$i link set lo up | ||
40 | ip -net ns$i link set eth0 up | ||
41 | ip -net ns$i addr add 10.0.$i.99/24 dev eth0 | ||
42 | ip -net ns$i route add default via 10.0.$i.1 | ||
43 | ip -net ns$i addr add dead:$i::99/64 dev eth0 | ||
44 | ip -net ns$i route add default via dead:$i::1 | ||
45 | done | ||
46 | |||
47 | bad_counter() | ||
48 | { | ||
49 | local ns=$1 | ||
50 | local counter=$2 | ||
51 | local expect=$3 | ||
52 | |||
53 | echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2 | ||
54 | ip netns exec $ns nft list counter inet filter $counter 1>&2 | ||
55 | } | ||
56 | |||
57 | check_counters() | ||
58 | { | ||
59 | ns=$1 | ||
60 | local lret=0 | ||
61 | |||
62 | cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84") | ||
63 | if [ $? -ne 0 ]; then | ||
64 | bad_counter $ns ns0in "packets 1 bytes 84" | ||
65 | lret=1 | ||
66 | fi | ||
67 | cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84") | ||
68 | if [ $? -ne 0 ]; then | ||
69 | bad_counter $ns ns0out "packets 1 bytes 84" | ||
70 | lret=1 | ||
71 | fi | ||
72 | |||
73 | expect="packets 1 bytes 104" | ||
74 | cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect") | ||
75 | if [ $? -ne 0 ]; then | ||
76 | bad_counter $ns ns0in6 "$expect" | ||
77 | lret=1 | ||
78 | fi | ||
79 | cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect") | ||
80 | if [ $? -ne 0 ]; then | ||
81 | bad_counter $ns ns0out6 "$expect" | ||
82 | lret=1 | ||
83 | fi | ||
84 | |||
85 | return $lret | ||
86 | } | ||
87 | |||
88 | check_ns0_counters() | ||
89 | { | ||
90 | local ns=$1 | ||
91 | local lret=0 | ||
92 | |||
93 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0") | ||
94 | if [ $? -ne 0 ]; then | ||
95 | bad_counter ns0 ns0in "packets 0 bytes 0" | ||
96 | lret=1 | ||
97 | fi | ||
98 | |||
99 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0") | ||
100 | if [ $? -ne 0 ]; then | ||
101 | bad_counter ns0 ns0in6 "packets 0 bytes 0" | ||
102 | lret=1 | ||
103 | fi | ||
104 | |||
105 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0") | ||
106 | if [ $? -ne 0 ]; then | ||
107 | bad_counter ns0 ns0out "packets 0 bytes 0" | ||
108 | lret=1 | ||
109 | fi | ||
110 | cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0") | ||
111 | if [ $? -ne 0 ]; then | ||
112 | bad_counter ns0 ns0out6 "packets 0 bytes 0" | ||
113 | lret=1 | ||
114 | fi | ||
115 | |||
116 | for dir in "in" "out" ; do | ||
117 | expect="packets 1 bytes 84" | ||
118 | cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect") | ||
119 | if [ $? -ne 0 ]; then | ||
120 | bad_counter ns0 $ns$dir "$expect" | ||
121 | lret=1 | ||
122 | fi | ||
123 | |||
124 | expect="packets 1 bytes 104" | ||
125 | cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect") | ||
126 | if [ $? -ne 0 ]; then | ||
127 | bad_counter ns0 $ns$dir6 "$expect" | ||
128 | lret=1 | ||
129 | fi | ||
130 | done | ||
131 | |||
132 | return $lret | ||
133 | } | ||
134 | |||
135 | reset_counters() | ||
136 | { | ||
137 | for i in 0 1 2;do | ||
138 | ip netns exec ns$i nft reset counters inet > /dev/null | ||
139 | done | ||
140 | } | ||
141 | |||
142 | test_local_dnat6() | ||
143 | { | ||
144 | local lret=0 | ||
145 | ip netns exec ns0 nft -f - <<EOF | ||
146 | table ip6 nat { | ||
147 | chain output { | ||
148 | type nat hook output priority 0; policy accept; | ||
149 | ip6 daddr dead:1::99 dnat to dead:2::99 | ||
150 | } | ||
151 | } | ||
152 | EOF | ||
153 | if [ $? -ne 0 ]; then | ||
154 | echo "SKIP: Could not add ip6 dnat hook" | ||
155 | return $ksft_skip | ||
156 | fi | ||
157 | |||
158 | # ping netns1, expect rewrite to netns2 | ||
159 | ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null | ||
160 | if [ $? -ne 0 ]; then | ||
161 | lret=1 | ||
162 | echo "ERROR: ping6 failed" | ||
163 | return $lret | ||
164 | fi | ||
165 | |||
166 | expect="packets 0 bytes 0" | ||
167 | for dir in "in6" "out6" ; do | ||
168 | cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
169 | if [ $? -ne 0 ]; then | ||
170 | bad_counter ns0 ns1$dir "$expect" | ||
171 | lret=1 | ||
172 | fi | ||
173 | done | ||
174 | |||
175 | expect="packets 1 bytes 104" | ||
176 | for dir in "in6" "out6" ; do | ||
177 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
178 | if [ $? -ne 0 ]; then | ||
179 | bad_counter ns0 ns2$dir "$expect" | ||
180 | lret=1 | ||
181 | fi | ||
182 | done | ||
183 | |||
184 | # expect 0 count in ns1 | ||
185 | expect="packets 0 bytes 0" | ||
186 | for dir in "in6" "out6" ; do | ||
187 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
188 | if [ $? -ne 0 ]; then | ||
189 | bad_counter ns1 ns0$dir "$expect" | ||
190 | lret=1 | ||
191 | fi | ||
192 | done | ||
193 | |||
194 | # expect 1 packet in ns2 | ||
195 | expect="packets 1 bytes 104" | ||
196 | for dir in "in6" "out6" ; do | ||
197 | cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
198 | if [ $? -ne 0 ]; then | ||
199 | bad_counter ns2 ns0$dir "$expect" | ||
200 | lret=1 | ||
201 | fi | ||
202 | done | ||
203 | |||
204 | test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2" | ||
205 | ip netns exec ns0 nft flush chain ip6 nat output | ||
206 | |||
207 | return $lret | ||
208 | } | ||
209 | |||
210 | test_local_dnat() | ||
211 | { | ||
212 | local lret=0 | ||
213 | ip netns exec ns0 nft -f - <<EOF | ||
214 | table ip nat { | ||
215 | chain output { | ||
216 | type nat hook output priority 0; policy accept; | ||
217 | ip daddr 10.0.1.99 dnat to 10.0.2.99 | ||
218 | } | ||
219 | } | ||
220 | EOF | ||
221 | # ping netns1, expect rewrite to netns2 | ||
222 | ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null | ||
223 | if [ $? -ne 0 ]; then | ||
224 | lret=1 | ||
225 | echo "ERROR: ping failed" | ||
226 | return $lret | ||
227 | fi | ||
228 | |||
229 | expect="packets 0 bytes 0" | ||
230 | for dir in "in" "out" ; do | ||
231 | cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
232 | if [ $? -ne 0 ]; then | ||
233 | bad_counter ns0 ns1$dir "$expect" | ||
234 | lret=1 | ||
235 | fi | ||
236 | done | ||
237 | |||
238 | expect="packets 1 bytes 84" | ||
239 | for dir in "in" "out" ; do | ||
240 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
241 | if [ $? -ne 0 ]; then | ||
242 | bad_counter ns0 ns2$dir "$expect" | ||
243 | lret=1 | ||
244 | fi | ||
245 | done | ||
246 | |||
247 | # expect 0 count in ns1 | ||
248 | expect="packets 0 bytes 0" | ||
249 | for dir in "in" "out" ; do | ||
250 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
251 | if [ $? -ne 0 ]; then | ||
252 | bad_counter ns1 ns0$dir "$expect" | ||
253 | lret=1 | ||
254 | fi | ||
255 | done | ||
256 | |||
257 | # expect 1 packet in ns2 | ||
258 | expect="packets 1 bytes 84" | ||
259 | for dir in "in" "out" ; do | ||
260 | cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
261 | if [ $? -ne 0 ]; then | ||
262 | bad_counter ns2 ns0$dir "$expect" | ||
263 | lret=1 | ||
264 | fi | ||
265 | done | ||
266 | |||
267 | test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2" | ||
268 | |||
269 | ip netns exec ns0 nft flush chain ip nat output | ||
270 | |||
271 | reset_counters | ||
272 | ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null | ||
273 | if [ $? -ne 0 ]; then | ||
274 | lret=1 | ||
275 | echo "ERROR: ping failed" | ||
276 | return $lret | ||
277 | fi | ||
278 | |||
279 | expect="packets 1 bytes 84" | ||
280 | for dir in "in" "out" ; do | ||
281 | cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect") | ||
282 | if [ $? -ne 0 ]; then | ||
283 | bad_counter ns1 ns1$dir "$expect" | ||
284 | lret=1 | ||
285 | fi | ||
286 | done | ||
287 | expect="packets 0 bytes 0" | ||
288 | for dir in "in" "out" ; do | ||
289 | cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect") | ||
290 | if [ $? -ne 0 ]; then | ||
291 | bad_counter ns0 ns2$dir "$expect" | ||
292 | lret=1 | ||
293 | fi | ||
294 | done | ||
295 | |||
296 | # expect 1 count in ns1 | ||
297 | expect="packets 1 bytes 84" | ||
298 | for dir in "in" "out" ; do | ||
299 | cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
300 | if [ $? -ne 0 ]; then | ||
301 | bad_counter ns0 ns0$dir "$expect" | ||
302 | lret=1 | ||
303 | fi | ||
304 | done | ||
305 | |||
306 | # expect 0 packet in ns2 | ||
307 | expect="packets 0 bytes 0" | ||
308 | for dir in "in" "out" ; do | ||
309 | cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect") | ||
310 | if [ $? -ne 0 ]; then | ||
311 | bad_counter ns2 ns2$dir "$expect" | ||
312 | lret=1 | ||
313 | fi | ||
314 | done | ||
315 | |||
316 | test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush" | ||
317 | |||
318 | return $lret | ||
319 | } | ||
320 | |||
321 | |||
# Verify IPv6 masquerading: after adding a masquerade rule in router ns0,
# ns1 must see ns2's pings as coming from ns0, and must see no packets
# attributed to ns2 itself.
test_masquerade6()
{
	local lret=0

	ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null

	# Baseline: plain routed ping must work before NAT is configured.
	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 via ipv6"
		return 1
	fi

	# Without NAT both sides account the echo pair to each other.
	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

	# add masquerading rule
	ip netns exec ns0 nft -f - <<EOF
table ip6 nat {
	chain postrouting {
		type nat hook postrouting priority 0; policy accept;
		meta oif veth0 masquerade
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
		lret=1
	fi

	# ns1 should have seen packets from ns0, due to masquerade
	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns0$dir "$expect"
			lret=1
		fi

		# ns2 is unaware of the NAT and still accounts ns1 directly.
		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	# ns1 should not have seen packets from ns2, due to masquerade
	expect="packets 0 bytes 0"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft flush chain ip6 nat postrouting
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"

	return $lret
}
410 | |||
# Verify IPv4 masquerading: after adding a masquerade rule in router ns0,
# ns1 must see ns2's pings as coming from ns0, and must see no packets
# attributed to ns2 itself.
test_masquerade()
{
	local lret=0

	ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
	ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null

	# Baseline: plain routed ping must work before NAT is configured.
	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2"
		lret=1
	fi

	# Without NAT both sides account the echo pair to each other.
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

	# add masquerading rule
	ip netns exec ns0 nft -f - <<EOF
table ip nat {
	chain postrouting {
		type nat hook postrouting priority 0; policy accept;
		meta oif veth0 masquerade
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
		lret=1
	fi

	# ns1 should have seen packets from ns0, due to masquerade
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns0$dir "$expect"
			lret=1
		fi

		# ns2 is unaware of the NAT and still accounts ns1 directly.
		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	# ns1 should not have seen packets from ns2, due to masquerade
	expect="packets 0 bytes 0"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft flush chain ip nat postrouting
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not flush nat postrouting" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IP masquerade for ns2"

	return $lret
}
498 | |||
# Verify IPv6 redirect: with a redirect rule in router ns0, ns2's pings
# to ns1 are answered by ns0 itself, so ns1 sees nothing and ns0's
# counters account the traffic from ns2.
test_redirect6()
{
	local lret=0

	ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null

	# Baseline: plain routed ping must work before the redirect rule.
	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 via ipv6"
		lret=1
	fi

	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

	# add redirect rule
	ip netns exec ns0 nft -f - <<EOF
table ip6 nat {
	chain prerouting {
		type nat hook prerouting priority 0; policy accept;
		meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
		lret=1
	fi

	# ns1 should have seen no packets from ns2, due to redirection
	expect="packets 0 bytes 0"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	# ns0 should have seen packets from ns2, due to redirection
	expect="packets 1 bytes 104"
	for dir in "in6" "out6" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft delete table ip6 nat
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not delete ip6 nat table" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"

	return $lret
}
573 | |||
# Verify IPv4 redirect: with a redirect rule in router ns0, ns2's pings
# to ns1 are answered by ns0 itself, so ns1 sees nothing and ns0's
# counters account the traffic from ns2.
test_redirect()
{
	local lret=0

	ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
	ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null

	# Baseline: plain routed ping must work before the redirect rule.
	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2"
		lret=1
	fi

	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi

		cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns2 ns1$dir "$expect"
			lret=1
		fi
	done

	reset_counters

	# add redirect rule
	ip netns exec ns0 nft -f - <<EOF
table ip nat {
	chain prerouting {
		type nat hook prerouting priority 0; policy accept;
		meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
	}
}
EOF
	ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
	if [ $? -ne 0 ] ; then
		echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
		lret=1
	fi

	# ns1 should have seen no packets from ns2, due to redirection
	expect="packets 0 bytes 0"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns1 ns2$dir "$expect"
			lret=1
		fi
	done

	# ns0 should have seen packets from ns2, due to redirection
	expect="packets 1 bytes 84"
	for dir in "in" "out" ; do
		cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
		if [ $? -ne 0 ]; then
			bad_counter ns0 ns2$dir "$expect"
			lret=1
		fi
	done

	ip netns exec ns0 nft delete table ip nat
	if [ $? -ne 0 ]; then
		echo "ERROR: Could not delete nat table" 1>&2
		lret=1
	fi

	test $lret -eq 0 && echo "PASS: IP redirection for ns2"

	return $lret
}
650 | |||
651 | |||
# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
# Install the same "inet filter" accounting ruleset in every namespace:
# one named counter per peer and direction (v4 and v6), bumped from the
# input/output hooks via verdict maps keyed on source/destination address.
for i in 0 1 2; do
ip netns exec ns$i nft -f - <<EOF
table inet filter {
	counter ns0in {}
	counter ns1in {}
	counter ns2in {}

	counter ns0out {}
	counter ns1out {}
	counter ns2out {}

	counter ns0in6 {}
	counter ns1in6 {}
	counter ns2in6 {}

	counter ns0out6 {}
	counter ns1out6 {}
	counter ns2out6 {}

	map nsincounter {
		type ipv4_addr : counter
		elements = { 10.0.1.1 : "ns0in",
			     10.0.2.1 : "ns0in",
			     10.0.1.99 : "ns1in",
			     10.0.2.99 : "ns2in" }
	}

	map nsincounter6 {
		type ipv6_addr : counter
		elements = { dead:1::1 : "ns0in6",
			     dead:2::1 : "ns0in6",
			     dead:1::99 : "ns1in6",
			     dead:2::99 : "ns2in6" }
	}

	map nsoutcounter {
		type ipv4_addr : counter
		elements = { 10.0.1.1 : "ns0out",
			     10.0.2.1 : "ns0out",
			     10.0.1.99: "ns1out",
			     10.0.2.99: "ns2out" }
	}

	map nsoutcounter6 {
		type ipv6_addr : counter
		elements = { dead:1::1 : "ns0out6",
			     dead:2::1 : "ns0out6",
			     dead:1::99 : "ns1out6",
			     dead:2::99 : "ns2out6" }
	}

	chain input {
		type filter hook input priority 0; policy accept;
		counter name ip saddr map @nsincounter
		icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
	}
	chain output {
		type filter hook output priority 0; policy accept;
		counter name ip daddr map @nsoutcounter
		icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
	}
}
EOF
done

# NOTE(review): fixed settle delay before the first ping — presumably for
# address/neighbour setup to complete; confirm whether it can be shortened.
sleep 3
# test basic connectivity
for i in 1 2; do
  # ns0 must reach both peers directly, over v4 and v6.
  ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
  if [ $? -ne 0 ];then
  	echo "ERROR: Could not reach other namespace(s)" 1>&2
  	ret=1
  fi

  ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
  if [ $? -ne 0 ];then
  	echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
  	ret=1
  fi
  # Each side must have accounted exactly the echo request/reply pair.
  check_counters ns$i
  if [ $? -ne 0 ]; then
	ret=1
  fi

  check_ns0_counters ns$i
  if [ $? -ne 0 ]; then
	ret=1
  fi
  reset_counters
done

if [ $ret -eq 0 ];then
	echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
fi

# NAT test cases; counters are reset between groups so each test starts
# from a clean accounting state.
reset_counters
test_local_dnat
test_local_dnat6

reset_counters
test_masquerade
test_masquerade6

reset_counters
test_redirect
test_redirect6

# Delete the test namespaces.
for i in 0 1 2; do ip netns del ns$i;done

exit $ret
diff --git a/tools/testing/selftests/networking/timestamping/Makefile b/tools/testing/selftests/networking/timestamping/Makefile index 9050eeea5f5f..1de8bd8ccf5d 100644 --- a/tools/testing/selftests/networking/timestamping/Makefile +++ b/tools/testing/selftests/networking/timestamping/Makefile | |||
@@ -9,6 +9,3 @@ all: $(TEST_PROGS) | |||
9 | top_srcdir = ../../../../.. | 9 | top_srcdir = ../../../../.. |
10 | KSFT_KHDR_INSTALL := 1 | 10 | KSFT_KHDR_INSTALL := 1 |
11 | include ../../lib.mk | 11 | include ../../lib.mk |
12 | |||
13 | clean: | ||
14 | rm -fr $(TEST_GEN_FILES) | ||
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore index 82121a81681f..29bac5ef9a93 100644 --- a/tools/testing/selftests/proc/.gitignore +++ b/tools/testing/selftests/proc/.gitignore | |||
@@ -10,4 +10,5 @@ | |||
10 | /proc-uptime-002 | 10 | /proc-uptime-002 |
11 | /read | 11 | /read |
12 | /self | 12 | /self |
13 | /setns-dcache | ||
13 | /thread-self | 14 | /thread-self |
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile index 1c12c34cf85d..434d033ee067 100644 --- a/tools/testing/selftests/proc/Makefile +++ b/tools/testing/selftests/proc/Makefile | |||
@@ -14,6 +14,7 @@ TEST_GEN_PROGS += proc-uptime-001 | |||
14 | TEST_GEN_PROGS += proc-uptime-002 | 14 | TEST_GEN_PROGS += proc-uptime-002 |
15 | TEST_GEN_PROGS += read | 15 | TEST_GEN_PROGS += read |
16 | TEST_GEN_PROGS += self | 16 | TEST_GEN_PROGS += self |
17 | TEST_GEN_PROGS += setns-dcache | ||
17 | TEST_GEN_PROGS += thread-self | 18 | TEST_GEN_PROGS += thread-self |
18 | 19 | ||
19 | include ../lib.mk | 20 | include ../lib.mk |
diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c new file mode 100644 index 000000000000..60ab197a73fc --- /dev/null +++ b/tools/testing/selftests/proc/setns-dcache.c | |||
@@ -0,0 +1,129 @@ | |||
1 | /* | ||
2 | * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com> | ||
3 | * | ||
4 | * Permission to use, copy, modify, and distribute this software for any | ||
5 | * purpose with or without fee is hereby granted, provided that the above | ||
6 | * copyright notice and this permission notice appear in all copies. | ||
7 | * | ||
8 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES | ||
9 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF | ||
10 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR | ||
11 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES | ||
12 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN | ||
13 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF | ||
14 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. | ||
15 | */ | ||
16 | /* | ||
17 | * Test that setns(CLONE_NEWNET) points to new /proc/net content even | ||
18 | * if old one is in dcache. | ||
19 | * | ||
20 | * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled. | ||
21 | */ | ||
22 | #undef NDEBUG | ||
23 | #include <assert.h> | ||
24 | #include <errno.h> | ||
25 | #include <sched.h> | ||
26 | #include <signal.h> | ||
27 | #include <stdio.h> | ||
28 | #include <stdlib.h> | ||
29 | #include <string.h> | ||
30 | #include <unistd.h> | ||
31 | #include <sys/types.h> | ||
32 | #include <sys/stat.h> | ||
33 | #include <fcntl.h> | ||
34 | #include <sys/socket.h> | ||
35 | |||
/* PID of the forked child; -1 until fork() succeeds, 0 once reaped. */
static pid_t pid = -1;

/* atexit() hook: make sure the paused child does not outlive the test. */
static void f(void)
{
	if (pid <= 0)
		return;
	kill(pid, SIGTERM);
}
44 | |||
45 | int main(void) | ||
46 | { | ||
47 | int fd[2]; | ||
48 | char _ = 0; | ||
49 | int nsfd; | ||
50 | |||
51 | atexit(f); | ||
52 | |||
53 | /* Check for priviledges and syscall availability straight away. */ | ||
54 | if (unshare(CLONE_NEWNET) == -1) { | ||
55 | if (errno == ENOSYS || errno == EPERM) { | ||
56 | return 4; | ||
57 | } | ||
58 | return 1; | ||
59 | } | ||
60 | /* Distinguisher between two otherwise empty net namespaces. */ | ||
61 | if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) { | ||
62 | return 1; | ||
63 | } | ||
64 | |||
65 | if (pipe(fd) == -1) { | ||
66 | return 1; | ||
67 | } | ||
68 | |||
69 | pid = fork(); | ||
70 | if (pid == -1) { | ||
71 | return 1; | ||
72 | } | ||
73 | |||
74 | if (pid == 0) { | ||
75 | if (unshare(CLONE_NEWNET) == -1) { | ||
76 | return 1; | ||
77 | } | ||
78 | |||
79 | if (write(fd[1], &_, 1) != 1) { | ||
80 | return 1; | ||
81 | } | ||
82 | |||
83 | pause(); | ||
84 | |||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | if (read(fd[0], &_, 1) != 1) { | ||
89 | return 1; | ||
90 | } | ||
91 | |||
92 | { | ||
93 | char buf[64]; | ||
94 | snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid); | ||
95 | nsfd = open(buf, O_RDONLY); | ||
96 | if (nsfd == -1) { | ||
97 | return 1; | ||
98 | } | ||
99 | } | ||
100 | |||
101 | /* Reliably pin dentry into dcache. */ | ||
102 | (void)open("/proc/net/unix", O_RDONLY); | ||
103 | |||
104 | if (setns(nsfd, CLONE_NEWNET) == -1) { | ||
105 | return 1; | ||
106 | } | ||
107 | |||
108 | kill(pid, SIGTERM); | ||
109 | pid = 0; | ||
110 | |||
111 | { | ||
112 | char buf[4096]; | ||
113 | ssize_t rv; | ||
114 | int fd; | ||
115 | |||
116 | fd = open("/proc/net/unix", O_RDONLY); | ||
117 | if (fd == -1) { | ||
118 | return 1; | ||
119 | } | ||
120 | |||
121 | #define S "Num RefCount Protocol Flags Type St Inode Path\n" | ||
122 | rv = read(fd, buf, sizeof(buf)); | ||
123 | |||
124 | assert(rv == strlen(S)); | ||
125 | assert(memcmp(buf, S, strlen(S)) == 0); | ||
126 | } | ||
127 | |||
128 | return 0; | ||
129 | } | ||
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c index 496a9a8c773a..7e632b465ab4 100644 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c | |||
@@ -1608,7 +1608,16 @@ TEST_F(TRACE_poke, getpid_runs_normally) | |||
1608 | #ifdef SYSCALL_NUM_RET_SHARE_REG | 1608 | #ifdef SYSCALL_NUM_RET_SHARE_REG |
1609 | # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action) | 1609 | # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(-1, action) |
1610 | #else | 1610 | #else |
1611 | # define EXPECT_SYSCALL_RETURN(val, action) EXPECT_EQ(val, action) | 1611 | # define EXPECT_SYSCALL_RETURN(val, action) \ |
1612 | do { \ | ||
1613 | errno = 0; \ | ||
1614 | if (val < 0) { \ | ||
1615 | EXPECT_EQ(-1, action); \ | ||
1616 | EXPECT_EQ(-(val), errno); \ | ||
1617 | } else { \ | ||
1618 | EXPECT_EQ(val, action); \ | ||
1619 | } \ | ||
1620 | } while (0) | ||
1612 | #endif | 1621 | #endif |
1613 | 1622 | ||
1614 | /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for | 1623 | /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for |
@@ -1647,7 +1656,7 @@ int get_syscall(struct __test_metadata *_metadata, pid_t tracee) | |||
1647 | 1656 | ||
1648 | /* Architecture-specific syscall changing routine. */ | 1657 | /* Architecture-specific syscall changing routine. */ |
1649 | void change_syscall(struct __test_metadata *_metadata, | 1658 | void change_syscall(struct __test_metadata *_metadata, |
1650 | pid_t tracee, int syscall) | 1659 | pid_t tracee, int syscall, int result) |
1651 | { | 1660 | { |
1652 | int ret; | 1661 | int ret; |
1653 | ARCH_REGS regs; | 1662 | ARCH_REGS regs; |
@@ -1706,7 +1715,7 @@ void change_syscall(struct __test_metadata *_metadata, | |||
1706 | #ifdef SYSCALL_NUM_RET_SHARE_REG | 1715 | #ifdef SYSCALL_NUM_RET_SHARE_REG |
1707 | TH_LOG("Can't modify syscall return on this architecture"); | 1716 | TH_LOG("Can't modify syscall return on this architecture"); |
1708 | #else | 1717 | #else |
1709 | regs.SYSCALL_RET = EPERM; | 1718 | regs.SYSCALL_RET = result; |
1710 | #endif | 1719 | #endif |
1711 | 1720 | ||
1712 | #ifdef HAVE_GETREGS | 1721 | #ifdef HAVE_GETREGS |
@@ -1734,14 +1743,19 @@ void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee, | |||
1734 | case 0x1002: | 1743 | case 0x1002: |
1735 | /* change getpid to getppid. */ | 1744 | /* change getpid to getppid. */ |
1736 | EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); | 1745 | EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); |
1737 | change_syscall(_metadata, tracee, __NR_getppid); | 1746 | change_syscall(_metadata, tracee, __NR_getppid, 0); |
1738 | break; | 1747 | break; |
1739 | case 0x1003: | 1748 | case 0x1003: |
1740 | /* skip gettid. */ | 1749 | /* skip gettid with valid return code. */ |
1741 | EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); | 1750 | EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); |
1742 | change_syscall(_metadata, tracee, -1); | 1751 | change_syscall(_metadata, tracee, -1, 45000); |
1743 | break; | 1752 | break; |
1744 | case 0x1004: | 1753 | case 0x1004: |
1754 | /* skip openat with error. */ | ||
1755 | EXPECT_EQ(__NR_openat, get_syscall(_metadata, tracee)); | ||
1756 | change_syscall(_metadata, tracee, -1, -ESRCH); | ||
1757 | break; | ||
1758 | case 0x1005: | ||
1745 | /* do nothing (allow getppid) */ | 1759 | /* do nothing (allow getppid) */ |
1746 | EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); | 1760 | EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); |
1747 | break; | 1761 | break; |
@@ -1774,9 +1788,11 @@ void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, | |||
1774 | nr = get_syscall(_metadata, tracee); | 1788 | nr = get_syscall(_metadata, tracee); |
1775 | 1789 | ||
1776 | if (nr == __NR_getpid) | 1790 | if (nr == __NR_getpid) |
1777 | change_syscall(_metadata, tracee, __NR_getppid); | 1791 | change_syscall(_metadata, tracee, __NR_getppid, 0); |
1792 | if (nr == __NR_gettid) | ||
1793 | change_syscall(_metadata, tracee, -1, 45000); | ||
1778 | if (nr == __NR_openat) | 1794 | if (nr == __NR_openat) |
1779 | change_syscall(_metadata, tracee, -1); | 1795 | change_syscall(_metadata, tracee, -1, -ESRCH); |
1780 | } | 1796 | } |
1781 | 1797 | ||
1782 | FIXTURE_DATA(TRACE_syscall) { | 1798 | FIXTURE_DATA(TRACE_syscall) { |
@@ -1793,8 +1809,10 @@ FIXTURE_SETUP(TRACE_syscall) | |||
1793 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), | 1809 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), |
1794 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), | 1810 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), |
1795 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), | 1811 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), |
1796 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), | 1812 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_openat, 0, 1), |
1797 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), | 1813 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), |
1814 | BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), | ||
1815 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1005), | ||
1798 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), | 1816 | BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), |
1799 | }; | 1817 | }; |
1800 | 1818 | ||
@@ -1842,15 +1860,26 @@ TEST_F(TRACE_syscall, ptrace_syscall_redirected) | |||
1842 | EXPECT_NE(self->mypid, syscall(__NR_getpid)); | 1860 | EXPECT_NE(self->mypid, syscall(__NR_getpid)); |
1843 | } | 1861 | } |
1844 | 1862 | ||
1845 | TEST_F(TRACE_syscall, ptrace_syscall_dropped) | 1863 | TEST_F(TRACE_syscall, ptrace_syscall_errno) |
1864 | { | ||
1865 | /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ | ||
1866 | teardown_trace_fixture(_metadata, self->tracer); | ||
1867 | self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, | ||
1868 | true); | ||
1869 | |||
1870 | /* Tracer should skip the open syscall, resulting in ESRCH. */ | ||
1871 | EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat)); | ||
1872 | } | ||
1873 | |||
1874 | TEST_F(TRACE_syscall, ptrace_syscall_faked) | ||
1846 | { | 1875 | { |
1847 | /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ | 1876 | /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ |
1848 | teardown_trace_fixture(_metadata, self->tracer); | 1877 | teardown_trace_fixture(_metadata, self->tracer); |
1849 | self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, | 1878 | self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, |
1850 | true); | 1879 | true); |
1851 | 1880 | ||
1852 | /* Tracer should skip the open syscall, resulting in EPERM. */ | 1881 | /* Tracer should skip the gettid syscall, resulting fake pid. */ |
1853 | EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_openat)); | 1882 | EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid)); |
1854 | } | 1883 | } |
1855 | 1884 | ||
1856 | TEST_F(TRACE_syscall, syscall_allowed) | 1885 | TEST_F(TRACE_syscall, syscall_allowed) |
@@ -1883,7 +1912,21 @@ TEST_F(TRACE_syscall, syscall_redirected) | |||
1883 | EXPECT_NE(self->mypid, syscall(__NR_getpid)); | 1912 | EXPECT_NE(self->mypid, syscall(__NR_getpid)); |
1884 | } | 1913 | } |
1885 | 1914 | ||
1886 | TEST_F(TRACE_syscall, syscall_dropped) | 1915 | TEST_F(TRACE_syscall, syscall_errno) |
1916 | { | ||
1917 | long ret; | ||
1918 | |||
1919 | ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | ||
1920 | ASSERT_EQ(0, ret); | ||
1921 | |||
1922 | ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); | ||
1923 | ASSERT_EQ(0, ret); | ||
1924 | |||
1925 | /* openat has been skipped and an errno return. */ | ||
1926 | EXPECT_SYSCALL_RETURN(-ESRCH, syscall(__NR_openat)); | ||
1927 | } | ||
1928 | |||
1929 | TEST_F(TRACE_syscall, syscall_faked) | ||
1887 | { | 1930 | { |
1888 | long ret; | 1931 | long ret; |
1889 | 1932 | ||
@@ -1894,8 +1937,7 @@ TEST_F(TRACE_syscall, syscall_dropped) | |||
1894 | ASSERT_EQ(0, ret); | 1937 | ASSERT_EQ(0, ret); |
1895 | 1938 | ||
1896 | /* gettid has been skipped and an altered return value stored. */ | 1939 | /* gettid has been skipped and an altered return value stored. */ |
1897 | EXPECT_SYSCALL_RETURN(EPERM, syscall(__NR_gettid)); | 1940 | EXPECT_SYSCALL_RETURN(45000, syscall(__NR_gettid)); |
1898 | EXPECT_NE(self->mytid, syscall(__NR_gettid)); | ||
1899 | } | 1941 | } |
1900 | 1942 | ||
1901 | TEST_F(TRACE_syscall, skip_after_RET_TRACE) | 1943 | TEST_F(TRACE_syscall, skip_after_RET_TRACE) |
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile index c02683cfb6c9..7656c7ce79d9 100644 --- a/tools/testing/selftests/timers/Makefile +++ b/tools/testing/selftests/timers/Makefile | |||
@@ -1,6 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
2 | CFLAGS += -O3 -Wl,-no-as-needed -Wall | 2 | CFLAGS += -O3 -Wl,-no-as-needed -Wall |
3 | LDFLAGS += -lrt -lpthread -lm | 3 | LDLIBS += -lrt -lpthread -lm |
4 | 4 | ||
5 | # these are all "safe" tests that don't modify | 5 | # these are all "safe" tests that don't modify |
6 | # system time or require escalated privileges | 6 | # system time or require escalated privileges |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5ecea812cb6a..585845203db8 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
@@ -3000,8 +3000,10 @@ static int kvm_ioctl_create_device(struct kvm *kvm, | |||
3000 | if (ops->init) | 3000 | if (ops->init) |
3001 | ops->init(dev); | 3001 | ops->init(dev); |
3002 | 3002 | ||
3003 | kvm_get_kvm(kvm); | ||
3003 | ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); | 3004 | ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC); |
3004 | if (ret < 0) { | 3005 | if (ret < 0) { |
3006 | kvm_put_kvm(kvm); | ||
3005 | mutex_lock(&kvm->lock); | 3007 | mutex_lock(&kvm->lock); |
3006 | list_del(&dev->vm_node); | 3008 | list_del(&dev->vm_node); |
3007 | mutex_unlock(&kvm->lock); | 3009 | mutex_unlock(&kvm->lock); |
@@ -3009,7 +3011,6 @@ static int kvm_ioctl_create_device(struct kvm *kvm, | |||
3009 | return ret; | 3011 | return ret; |
3010 | } | 3012 | } |
3011 | 3013 | ||
3012 | kvm_get_kvm(kvm); | ||
3013 | cd->fd = ret; | 3014 | cd->fd = ret; |
3014 | return 0; | 3015 | return 0; |
3015 | } | 3016 | } |