author	David S. Miller <davem@davemloft.net>	2019-09-15 08:17:27 -0400
committer	David S. Miller <davem@davemloft.net>	2019-09-15 08:17:27 -0400
commit	aa2eaa8c272a3211dec07ce9c6c863a7e355c10e (patch)
tree	8454a23d36b2ff36133c276ee0ba80eabc00850e
parent	a3d3c74da49c65fc63a937fa559186b0e16adca3 (diff)
parent	1609d7604b847a9820e63393d1a3b6cac7286d40 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Minor overlapping changes in the btusb and ixgbe drivers.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  .clang-format | 17
-rw-r--r--  Documentation/process/embargoed-hardware-issues.rst | 14
-rw-r--r--  Documentation/riscv/boot-image-header.txt | 13
-rw-r--r--  MAINTAINERS | 3
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm64/boot/dts/renesas/hihope-common.dtsi | 1
-rw-r--r--  arch/arm64/boot/dts/renesas/r8a77995-draak.dts | 6
-rw-r--r--  arch/powerpc/kernel/process.c | 21
-rw-r--r--  arch/powerpc/mm/nohash/tlb.c | 1
-rw-r--r--  arch/riscv/include/asm/image.h | 12
-rw-r--r--  arch/riscv/kernel/head.S | 4
-rw-r--r--  arch/s390/kvm/interrupt.c | 10
-rw-r--r--  arch/s390/kvm/kvm-s390.c | 4
-rw-r--r--  arch/sparc/kernel/sys_sparc_64.c | 33
-rw-r--r--  arch/x86/hyperv/mmu.c | 8
-rw-r--r--  arch/x86/include/asm/bootparam_utils.h | 1
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 2
-rw-r--r--  arch/x86/include/asm/uaccess.h | 4
-rw-r--r--  arch/x86/kernel/apic/apic.c | 8
-rw-r--r--  arch/x86/kvm/mmu.c | 101
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 4
-rw-r--r--  arch/x86/kvm/x86.c | 7
-rw-r--r--  arch/x86/purgatory/Makefile | 35
-rw-r--r--  drivers/bluetooth/bpa10x.c | 2
-rw-r--r--  drivers/bluetooth/btusb.c | 8
-rw-r--r--  drivers/bluetooth/hci_qca.c | 10
-rw-r--r--  drivers/dma/sh/rcar-dmac.c | 28
-rw-r--r--  drivers/dma/sprd-dma.c | 10
-rw-r--r--  drivers/dma/ti/dma-crossbar.c | 4
-rw-r--r--  drivers/dma/ti/omap-dma.c | 4
-rw-r--r--  drivers/gpio/gpio-mockup.c | 1
-rw-r--r--  drivers/gpio/gpio-pca953x.c | 15
-rw-r--r--  drivers/gpio/gpiolib-acpi.c | 42
-rw-r--r--  drivers/gpio/gpiolib-of.c | 27
-rw-r--r--  drivers/gpio/gpiolib.c | 16
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 54
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 5
-rw-r--r--  drivers/gpu/drm/ingenic/ingenic-drm.c | 5
-rw-r--r--  drivers/gpu/drm/lima/lima_gem.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c | 12
-rw-r--r--  drivers/gpu/drm/selftests/drm_cmdline_selftests.h | 7
-rw-r--r--  drivers/gpu/drm/selftests/test-drm_cmdline_parser.c | 130
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 8
-rw-r--r--  drivers/iommu/amd_iommu.c | 40
-rw-r--r--  drivers/iommu/intel-iommu.c | 55
-rw-r--r--  drivers/iommu/intel-svm.c | 36
-rw-r--r--  drivers/isdn/capi/capi.c | 10
-rw-r--r--  drivers/mmc/core/mmc_ops.c | 2
-rw-r--r--  drivers/mmc/host/bcm2835.c | 2
-rw-r--r--  drivers/mmc/host/renesas_sdhi_core.c | 6
-rw-r--r--  drivers/mmc/host/sdhci-pci-o2micro.c | 2
-rw-r--r--  drivers/mmc/host/tmio_mmc.c | 5
-rw-r--r--  drivers/mmc/host/tmio_mmc.h | 1
-rw-r--r--  drivers/mmc/host/tmio_mmc_core.c | 27
-rw-r--r--  drivers/mmc/host/uniphier-sd.c | 3
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c | 30
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.c | 6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.c | 10
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 143
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 7
-rw-r--r--  drivers/net/hamradio/6pack.c | 4
-rw-r--r--  drivers/net/phy/phylink.c | 6
-rw-r--r--  drivers/net/tun.c | 16
-rw-r--r--  drivers/net/usb/cdc_ether.c | 10
-rw-r--r--  drivers/net/virtio_net.c | 2
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 2
-rw-r--r--  drivers/net/wimax/i2400m/op-rfkill.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 24
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/ie.c | 3
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/uap_cmd.c | 9
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c | 5
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x0/pci.c | 15
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2800lib.c | 37
-rw-r--r--  drivers/net/wireless/rsi/rsi_91x_usb.c | 1
-rw-r--r--  drivers/nfc/st95hf/core.c | 2
-rw-r--r--  drivers/nvdimm/pfn_devs.c | 5
-rw-r--r--  drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c | 30
-rw-r--r--  drivers/pinctrl/aspeed/pinmux-aspeed.c | 7
-rw-r--r--  drivers/pinctrl/aspeed/pinmux-aspeed.h | 7
-rw-r--r--  drivers/regulator/act8945a-regulator.c | 8
-rw-r--r--  drivers/regulator/slg51000-regulator.c | 4
-rw-r--r--  drivers/regulator/twl-regulator.c | 23
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 2
-rw-r--r--  drivers/soc/qcom/qcom-geni-se.c | 6
-rw-r--r--  drivers/vhost/test.c | 13
-rw-r--r--  drivers/vhost/vhost.c | 524
-rw-r--r--  drivers/vhost/vhost.h | 41
-rw-r--r--  drivers/virtio/virtio_ring.c | 8
-rw-r--r--  fs/btrfs/extent_io.c | 35
-rw-r--r--  fs/btrfs/tree-log.c | 16
-rw-r--r--  fs/configfs/configfs_internal.h | 15
-rw-r--r--  fs/configfs/dir.c | 137
-rw-r--r--  fs/configfs/file.c | 280
-rw-r--r--  fs/nfs/inode.c | 18
-rw-r--r--  include/linux/compiler.h | 8
-rw-r--r--  include/linux/input/elan-i2c-ids.h | 2
-rw-r--r--  include/linux/intel-iommu.h | 3
-rw-r--r--  include/linux/phy_fixed.h | 1
-rw-r--r--  include/linux/syscalls.h | 19
-rw-r--r--  include/net/ip_fib.h | 4
-rw-r--r--  include/net/nexthop.h | 5
-rw-r--r--  include/net/xfrm.h | 2
-rw-r--r--  include/uapi/asm-generic/unistd.h | 2
-rw-r--r--  include/uapi/linux/isdn/capicmd.h | 1
-rw-r--r--  ipc/util.h | 25
-rw-r--r--  kernel/bpf/verifier.c | 23
-rw-r--r--  kernel/cgroup/cgroup.c | 10
-rw-r--r--  kernel/events/hw_breakpoint.c | 4
-rw-r--r--  kernel/fork.c | 10
-rw-r--r--  kernel/irq/resend.c | 2
-rw-r--r--  kernel/sched/core.c | 78
-rw-r--r--  kernel/sched/fair.c | 5
-rw-r--r--  lib/Kconfig | 6
-rw-r--r--  mm/balloon_compaction.c | 3
-rw-r--r--  net/bluetooth/hci_event.c | 5
-rw-r--r--  net/bluetooth/l2cap_core.c | 9
-rw-r--r--  net/bridge/br_mdb.c | 2
-rw-r--r--  net/bridge/br_netfilter_hooks.c | 4
-rw-r--r--  net/core/dev.c | 2
-rw-r--r--  net/core/skbuff.c | 19
-rw-r--r--  net/core/sock_map.c | 3
-rw-r--r--  net/ipv4/fib_semantics.c | 15
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv6/ping.c | 2
-rw-r--r--  net/ipv6/route.c | 21
-rw-r--r--  net/mac80211/cfg.c | 14
-rw-r--r--  net/mac80211/sta_info.c | 4
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 7
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 2
-rw-r--r--  net/netfilter/nft_fib_netdev.c | 3
-rw-r--r--  net/netfilter/nft_socket.c | 6
-rw-r--r--  net/qrtr/tun.c | 5
-rw-r--r--  net/rds/bind.c | 40
-rw-r--r--  net/rxrpc/input.c | 2
-rw-r--r--  net/sched/sch_api.c | 2
-rw-r--r--  net/sched/sch_generic.c | 9
-rw-r--r--  net/sched/sch_hhf.c | 2
-rw-r--r--  net/sctp/protocol.c | 2
-rw-r--r--  net/sctp/sm_sideeffect.c | 2
-rw-r--r--  net/sctp/socket.c | 24
-rw-r--r--  net/tipc/name_distr.c | 3
-rw-r--r--  net/xfrm/xfrm_interface.c | 56
-rw-r--r--  net/xfrm/xfrm_policy.c | 6
-rw-r--r--  security/keys/request_key_auth.c | 6
-rw-r--r--  sound/pci/hda/hda_auto_parser.c | 4
-rw-r--r--  sound/pci/hda/hda_generic.c | 3
-rw-r--r--  sound/pci/hda/hda_generic.h | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 17
-rw-r--r--  tools/testing/selftests/cgroup/test_freezer.c | 54
-rwxr-xr-x  tools/testing/selftests/net/fib_nexthops.sh | 24
-rwxr-xr-x  tools/testing/selftests/net/xfrm_policy.sh | 7
159 files changed, 1672 insertions, 1377 deletions
diff --git a/.clang-format b/.clang-format
index 2ffd69afc1a8..196ca317bd1f 100644
--- a/.clang-format
+++ b/.clang-format
@@ -107,10 +107,13 @@ ForEachMacros:
   - 'css_for_each_descendant_post'
   - 'css_for_each_descendant_pre'
   - 'device_for_each_child_node'
+  - 'dma_fence_chain_for_each'
   - 'drm_atomic_crtc_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane'
   - 'drm_atomic_crtc_state_for_each_plane_state'
   - 'drm_atomic_for_each_plane_damage'
+  - 'drm_client_for_each_connector_iter'
+  - 'drm_client_for_each_modeset'
   - 'drm_connector_for_each_possible_encoder'
   - 'drm_for_each_connector_iter'
   - 'drm_for_each_crtc'
@@ -126,6 +129,7 @@ ForEachMacros:
   - 'drm_mm_for_each_node_in_range'
   - 'drm_mm_for_each_node_safe'
   - 'flow_action_for_each'
+  - 'for_each_active_dev_scope'
   - 'for_each_active_drhd_unit'
   - 'for_each_active_iommu'
   - 'for_each_available_child_of_node'
@@ -153,6 +157,8 @@ ForEachMacros:
   - 'for_each_cpu_not'
   - 'for_each_cpu_wrap'
   - 'for_each_dev_addr'
+  - 'for_each_dev_scope'
+  - 'for_each_displayid_db'
   - 'for_each_dma_cap_mask'
   - 'for_each_dpcm_be'
   - 'for_each_dpcm_be_rollback'
@@ -169,6 +175,8 @@ ForEachMacros:
   - 'for_each_evictable_lru'
   - 'for_each_fib6_node_rt_rcu'
   - 'for_each_fib6_walker_rt'
+  - 'for_each_free_mem_pfn_range_in_zone'
+  - 'for_each_free_mem_pfn_range_in_zone_from'
   - 'for_each_free_mem_range'
   - 'for_each_free_mem_range_reverse'
   - 'for_each_func_rsrc'
@@ -178,6 +186,7 @@ ForEachMacros:
   - 'for_each_ip_tunnel_rcu'
   - 'for_each_irq_nr'
   - 'for_each_link_codecs'
+  - 'for_each_link_platforms'
   - 'for_each_lru'
   - 'for_each_matching_node'
   - 'for_each_matching_node_and_match'
@@ -302,7 +311,10 @@ ForEachMacros:
   - 'ide_port_for_each_present_dev'
   - 'idr_for_each_entry'
   - 'idr_for_each_entry_continue'
+  - 'idr_for_each_entry_continue_ul'
   - 'idr_for_each_entry_ul'
+  - 'in_dev_for_each_ifa_rcu'
+  - 'in_dev_for_each_ifa_rtnl'
   - 'inet_bind_bucket_for_each'
   - 'inet_lhash2_for_each_icsk_rcu'
   - 'key_for_each'
@@ -343,8 +355,6 @@ ForEachMacros:
   - 'media_device_for_each_intf'
   - 'media_device_for_each_link'
   - 'media_device_for_each_pad'
-  - 'mp_bvec_for_each_page'
-  - 'mp_bvec_for_each_segment'
   - 'nanddev_io_for_each_page'
   - 'netdev_for_each_lower_dev'
   - 'netdev_for_each_lower_private'
@@ -381,18 +391,19 @@ ForEachMacros:
   - 'radix_tree_for_each_slot'
   - 'radix_tree_for_each_tagged'
   - 'rbtree_postorder_for_each_entry_safe'
+  - 'rdma_for_each_block'
   - 'rdma_for_each_port'
   - 'resource_list_for_each_entry'
   - 'resource_list_for_each_entry_safe'
   - 'rhl_for_each_entry_rcu'
   - 'rhl_for_each_rcu'
   - 'rht_for_each'
-  - 'rht_for_each_from'
   - 'rht_for_each_entry'
   - 'rht_for_each_entry_from'
   - 'rht_for_each_entry_rcu'
   - 'rht_for_each_entry_rcu_from'
   - 'rht_for_each_entry_safe'
+  - 'rht_for_each_from'
   - 'rht_for_each_rcu'
   - 'rht_for_each_rcu_from'
   - '__rq_for_each_bio'
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index d37cbc502936..402636356fbe 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -5,7 +5,7 @@ Scope
 -----
 
 Hardware issues which result in security problems are a different category
 of security bugs than pure software bugs which only affect the Linux
 kernel.
 
 Hardware issues like Meltdown, Spectre, L1TF etc. must be treated
@@ -159,7 +159,7 @@ Mitigation development
 
 The initial response team sets up an encrypted mailing-list or repurposes
 an existing one if appropriate. The disclosing party should provide a list
-of contacts for all other parties who have already been, or should be
+of contacts for all other parties who have already been, or should be,
 informed about the issue. The response team contacts these parties so they
 can name experts who should be subscribed to the mailing-list.
 
@@ -217,11 +217,11 @@ an involved disclosed party. The current ambassadors list:
   AMD
   IBM
   Intel
-  Qualcomm
+  Qualcomm      Trilok Soni <tsoni@codeaurora.org>
 
-  Microsoft
+  Microsoft     Sasha Levin <sashal@kernel.org>
   VMware
-  XEN
+  Xen           Andrew Cooper <andrew.cooper3@citrix.com>
 
   Canonical     Tyler Hicks <tyhicks@canonical.com>
   Debian        Ben Hutchings <ben@decadent.org.uk>
@@ -230,8 +230,8 @@ an involved disclosed party. The current ambassadors list:
   SUSE          Jiri Kosina <jkosina@suse.cz>
 
   Amazon
-  Google
-  ============== ========================================================
+  Google        Kees Cook <keescook@chromium.org>
+  ============= ========================================================
 
 If you want your organization to be added to the ambassadors list, please
 contact the hardware security team. The nominated ambassador has to
diff --git a/Documentation/riscv/boot-image-header.txt b/Documentation/riscv/boot-image-header.txt
index 1b73fea23b39..14b1492f689b 100644
--- a/Documentation/riscv/boot-image-header.txt
+++ b/Documentation/riscv/boot-image-header.txt
@@ -18,7 +18,7 @@ The following 64-byte header is present in decompressed Linux kernel image.
 	u32 res1 = 0;             /* Reserved */
 	u64 res2 = 0;             /* Reserved */
 	u64 magic = 0x5643534952; /* Magic number, little endian, "RISCV" */
-	u32 res3;                 /* Reserved for additional RISC-V specific header */
+	u32 magic2 = 0x56534905;  /* Magic number 2, little endian, "RSC\x05" */
 	u32 res4;                 /* Reserved for PE COFF offset */
 
 This header format is compliant with PE/COFF header and largely inspired from
@@ -37,13 +37,14 @@ Notes:
 	Bits 16:31 - Major version
 
   This preserves compatibility across newer and older version of the header.
-  The current version is defined as 0.1.
+  The current version is defined as 0.2.
 
-- res3 is reserved for offset to any other additional fields. This makes the
-  header extendible in future. One example would be to accommodate ISA
-  extension for RISC-V in future. For current version, it is set to be zero.
+- The "magic" field is deprecated as of version 0.2.  In a future
+  release, it may be removed.  This originally should have matched up
+  with the ARM64 header "magic" field, but unfortunately does not.
+  The "magic2" field replaces it, matching up with the ARM64 header.
 
-- In current header, the flag field has only one field.
+- In current header, the flags field has only one field.
 	Bit 0: Kernel endianness. 1 if BE, 0 if LE.
 
 - Image size is mandatory for boot loader to load kernel image. Booting will
diff --git a/MAINTAINERS b/MAINTAINERS
index 84bb34727f81..16fc09defd39 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -17732,8 +17732,7 @@ F: include/uapi/linux/dqblk_xfs.h
 F:	include/uapi/linux/fsmap.h
 
 XILINX AXI ETHERNET DRIVER
-M:	Anirudha Sarangi <anirudh@xilinx.com>
-M:	John Linn <John.Linn@xilinx.com>
+M:	Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
 S:	Maintained
 F:	drivers/net/ethernet/xilinx/xilinx_axienet*
 
diff --git a/Makefile b/Makefile
index 0cbe8717bdb3..9b08f6383a52 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Bobtail Squid
 
 # *DOCUMENTATION*
diff --git a/arch/arm64/boot/dts/renesas/hihope-common.dtsi b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
index 3311a982fff8..23fd0224ca90 100644
--- a/arch/arm64/boot/dts/renesas/hihope-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/hihope-common.dtsi
@@ -279,6 +279,7 @@
 	mmc-hs200-1_8v;
 	non-removable;
 	fixed-emmc-driver-type = <1>;
+	status = "okay";
 };
 
 &usb_extal_clk {
diff --git a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
index 0711170b26b1..3aa2564dfdc2 100644
--- a/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
+++ b/arch/arm64/boot/dts/renesas/r8a77995-draak.dts
@@ -97,7 +97,7 @@
 		reg = <0x0 0x48000000 0x0 0x18000000>;
 	};
 
-	reg_1p8v: regulator0 {
+	reg_1p8v: regulator-1p8v {
 		compatible = "regulator-fixed";
 		regulator-name = "fixed-1.8V";
 		regulator-min-microvolt = <1800000>;
@@ -106,7 +106,7 @@
 		regulator-always-on;
 	};
 
-	reg_3p3v: regulator1 {
+	reg_3p3v: regulator-3p3v {
 		compatible = "regulator-fixed";
 		regulator-name = "fixed-3.3V";
 		regulator-min-microvolt = <3300000>;
@@ -115,7 +115,7 @@
 		regulator-always-on;
 	};
 
-	reg_12p0v: regulator1 {
+	reg_12p0v: regulator-12p0v {
 		compatible = "regulator-fixed";
 		regulator-name = "D12.0V";
 		regulator-min-microvolt = <12000000>;
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8fc4de0d22b4..7a84c9f1778e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -101,21 +101,8 @@ static void check_if_tm_restore_required(struct task_struct *tsk)
 	}
 }
 
-static bool tm_active_with_fp(struct task_struct *tsk)
-{
-	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-		(tsk->thread.ckpt_regs.msr & MSR_FP);
-}
-
-static bool tm_active_with_altivec(struct task_struct *tsk)
-{
-	return MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
-		(tsk->thread.ckpt_regs.msr & MSR_VEC);
-}
 #else
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
-static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
-static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 bool strict_msr_control;
@@ -252,7 +239,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
 
 static int restore_fp(struct task_struct *tsk)
 {
-	if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
+	if (tsk->thread.load_fp) {
 		load_fp_state(&current->thread.fp_state);
 		current->thread.load_fp++;
 		return 1;
@@ -334,8 +321,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 
 static int restore_altivec(struct task_struct *tsk)
 {
-	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
-	    (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
+	if (cpu_has_feature(CPU_FTR_ALTIVEC) && (tsk->thread.load_vec)) {
 		load_vr_state(&tsk->thread.vr_state);
 		tsk->thread.used_vr = 1;
 		tsk->thread.load_vec++;
@@ -497,13 +483,14 @@ void giveup_all(struct task_struct *tsk)
 	if (!tsk->thread.regs)
 		return;
 
+	check_if_tm_restore_required(tsk);
+
 	usermsr = tsk->thread.regs->msr;
 
 	if ((usermsr & msr_all_available) == 0)
 		return;
 
 	msr_check_and_set(msr_all_available);
-	check_if_tm_restore_required(tsk);
 
 	WARN_ON((usermsr & MSR_VSX) && !((usermsr & MSR_FP) && (usermsr & MSR_VEC)));
 
diff --git a/arch/powerpc/mm/nohash/tlb.c b/arch/powerpc/mm/nohash/tlb.c
index d4acf6fa0596..bf60983a58c7 100644
--- a/arch/powerpc/mm/nohash/tlb.c
+++ b/arch/powerpc/mm/nohash/tlb.c
@@ -630,7 +630,6 @@ static void early_init_this_mmu(void)
 #ifdef CONFIG_PPC_FSL_BOOK3E
 	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
 		unsigned int num_cams;
-		int __maybe_unused cpu = smp_processor_id();
 		bool map = true;
 
 		/* use a quarter of the TLBCAM for bolted linear map */
diff --git a/arch/riscv/include/asm/image.h b/arch/riscv/include/asm/image.h
index ef28e106f247..344db5244547 100644
--- a/arch/riscv/include/asm/image.h
+++ b/arch/riscv/include/asm/image.h
@@ -3,7 +3,8 @@
 #ifndef __ASM_IMAGE_H
 #define __ASM_IMAGE_H
 
-#define RISCV_IMAGE_MAGIC	"RISCV"
+#define RISCV_IMAGE_MAGIC	"RISCV\0\0\0"
+#define RISCV_IMAGE_MAGIC2	"RSC\x05"
 
 #define RISCV_IMAGE_FLAG_BE_SHIFT	0
 #define RISCV_IMAGE_FLAG_BE_MASK	0x1
@@ -23,7 +24,7 @@
 #define __HEAD_FLAGS		(__HEAD_FLAG(BE))
 
 #define RISCV_HEADER_VERSION_MAJOR 0
-#define RISCV_HEADER_VERSION_MINOR 1
+#define RISCV_HEADER_VERSION_MINOR 2
 
 #define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \
 			      RISCV_HEADER_VERSION_MINOR)
@@ -39,9 +40,8 @@
  * @version:		version
  * @res1:		reserved
  * @res2:		reserved
- * @magic:		Magic number
- * @res3:		reserved (will be used for additional RISC-V specific
- *			header)
+ * @magic:		Magic number (RISC-V specific; deprecated)
+ * @magic2:		Magic number 2 (to match the ARM64 'magic' field pos)
  * @res4:		reserved (will be used for PE COFF offset)
  *
  * The intention is for this header format to be shared between multiple
@@ -58,7 +58,7 @@ struct riscv_image_header {
 	u32 res1;
 	u64 res2;
 	u64 magic;
-	u32 res3;
+	u32 magic2;
 	u32 res4;
 };
 #endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 0f1ba17e476f..52eec0c1bf30 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -39,9 +39,9 @@ ENTRY(_start)
 	.word RISCV_HEADER_VERSION
 	.word 0
 	.dword 0
-	.asciz RISCV_IMAGE_MAGIC
-	.word 0
+	.ascii RISCV_IMAGE_MAGIC
 	.balign 4
+	.ascii RISCV_IMAGE_MAGIC2
 	.word 0
 
 .global _start_kernel
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index b5fd6e85657c..d1ccc168c071 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1961,6 +1961,16 @@ int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
 	case KVM_S390_MCHK:
 		irq->u.mchk.mcic = s390int->parm64;
 		break;
+	case KVM_S390_INT_PFAULT_INIT:
+		irq->u.ext.ext_params = s390int->parm;
+		irq->u.ext.ext_params2 = s390int->parm64;
+		break;
+	case KVM_S390_RESTART:
+	case KVM_S390_INT_CLOCK_COMP:
+	case KVM_S390_INT_CPU_TIMER:
+		break;
+	default:
+		return -EINVAL;
 	}
 	return 0;
 }
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index f329dcb3f44c..39cff07bf2eb 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1018,6 +1018,8 @@ static int kvm_s390_vm_start_migration(struct kvm *kvm)
 	/* mark all the pages in active slots as dirty */
 	for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
 		ms = slots->memslots + slotnr;
+		if (!ms->dirty_bitmap)
+			return -EINVAL;
 		/*
 		 * The second half of the bitmap is only used on x86,
 		 * and would be wasted otherwise, so we put it to good
@@ -4323,7 +4325,7 @@ long kvm_arch_vcpu_async_ioctl(struct file *filp,
 	}
 	case KVM_S390_INTERRUPT: {
 		struct kvm_s390_interrupt s390int;
-		struct kvm_s390_irq s390irq;
+		struct kvm_s390_irq s390irq = {};
 
 		if (copy_from_user(&s390int, argp, sizeof(s390int)))
 			return -EFAULT;
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index ccc88926bc00..9f41a6f5a032 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -336,25 +336,28 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
 {
 	long err;
 
+	if (!IS_ENABLED(CONFIG_SYSVIPC))
+		return -ENOSYS;
+
 	/* No need for backward compatibility. We can start fresh... */
 	if (call <= SEMTIMEDOP) {
 		switch (call) {
 		case SEMOP:
-			err = sys_semtimedop(first, ptr,
+			err = ksys_semtimedop(first, ptr,
 					     (unsigned int)second, NULL);
 			goto out;
 		case SEMTIMEDOP:
-			err = sys_semtimedop(first, ptr, (unsigned int)second,
+			err = ksys_semtimedop(first, ptr, (unsigned int)second,
 				(const struct __kernel_timespec __user *)
 					(unsigned long) fifth);
 			goto out;
 		case SEMGET:
-			err = sys_semget(first, (int)second, (int)third);
+			err = ksys_semget(first, (int)second, (int)third);
 			goto out;
 		case SEMCTL: {
-			err = sys_semctl(first, second,
+			err = ksys_old_semctl(first, second,
 					 (int)third | IPC_64,
 					 (unsigned long) ptr);
 			goto out;
 		}
 		default:
365 if (call <= MSGCTL) { 368 if (call <= MSGCTL) {
366 switch (call) { 369 switch (call) {
367 case MSGSND: 370 case MSGSND:
368 err = sys_msgsnd(first, ptr, (size_t)second, 371 err = ksys_msgsnd(first, ptr, (size_t)second,
369 (int)third); 372 (int)third);
370 goto out; 373 goto out;
371 case MSGRCV: 374 case MSGRCV:
372 err = sys_msgrcv(first, ptr, (size_t)second, fifth, 375 err = ksys_msgrcv(first, ptr, (size_t)second, fifth,
373 (int)third); 376 (int)third);
374 goto out; 377 goto out;
375 case MSGGET: 378 case MSGGET:
376 err = sys_msgget((key_t)first, (int)second); 379 err = ksys_msgget((key_t)first, (int)second);
377 goto out; 380 goto out;
378 case MSGCTL: 381 case MSGCTL:
379 err = sys_msgctl(first, (int)second | IPC_64, ptr); 382 err = ksys_old_msgctl(first, (int)second | IPC_64, ptr);
380 goto out; 383 goto out;
381 default: 384 default:
382 err = -ENOSYS; 385 err = -ENOSYS;
@@ -396,13 +399,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
 			goto out;
 		}
 		case SHMDT:
-			err = sys_shmdt(ptr);
+			err = ksys_shmdt(ptr);
 			goto out;
 		case SHMGET:
-			err = sys_shmget(first, (size_t)second, (int)third);
+			err = ksys_shmget(first, (size_t)second, (int)third);
 			goto out;
 		case SHMCTL:
-			err = sys_shmctl(first, (int)second | IPC_64, ptr);
+			err = ksys_old_shmctl(first, (int)second | IPC_64, ptr);
 			goto out;
 		default:
 			err = -ENOSYS;
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c
index e65d7fe6489f..5208ba49c89a 100644
--- a/arch/x86/hyperv/mmu.c
+++ b/arch/x86/hyperv/mmu.c
@@ -37,12 +37,14 @@ static inline int fill_gva_list(u64 gva_list[], int offset,
 		 * Lower 12 bits encode the number of additional
 		 * pages to flush (in addition to the 'cur' page).
 		 */
-		if (diff >= HV_TLB_FLUSH_UNIT)
+		if (diff >= HV_TLB_FLUSH_UNIT) {
 			gva_list[gva_n] |= ~PAGE_MASK;
-		else if (diff)
+			cur += HV_TLB_FLUSH_UNIT;
+		} else if (diff) {
 			gva_list[gva_n] |= (diff - 1) >> PAGE_SHIFT;
+			cur = end;
+		}
 
-		cur += HV_TLB_FLUSH_UNIT;
 		gva_n++;
 
 	} while (cur < end);
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h
index 9e5f3c722c33..981fe923a59f 100644
--- a/arch/x86/include/asm/bootparam_utils.h
+++ b/arch/x86/include/asm/bootparam_utils.h
@@ -70,6 +70,7 @@ static void sanitize_boot_params(struct boot_params *boot_params)
 	BOOT_PARAM_PRESERVE(eddbuf_entries),
 	BOOT_PARAM_PRESERVE(edd_mbr_sig_buf_entries),
 	BOOT_PARAM_PRESERVE(edd_mbr_sig_buffer),
+	BOOT_PARAM_PRESERVE(secure_boot),
 	BOOT_PARAM_PRESERVE(hdr),
 	BOOT_PARAM_PRESERVE(e820_table),
 	BOOT_PARAM_PRESERVE(eddbuf),
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 74e88e5edd9c..bdc16b0aa7c6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -335,6 +335,7 @@ struct kvm_mmu_page {
 	int root_count;          /* Currently serving as active root */
 	unsigned int unsync_children;
 	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
+	unsigned long mmu_valid_gen;
 	DECLARE_BITMAP(unsync_child_bitmap, 512);
 
 #ifdef CONFIG_X86_32
@@ -856,6 +857,7 @@ struct kvm_arch {
 	unsigned long n_requested_mmu_pages;
 	unsigned long n_max_mmu_pages;
 	unsigned int indirect_shadow_pages;
+	unsigned long mmu_valid_gen;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
 	 * Hash table of struct kvm_mmu_page.
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 9c4435307ff8..35c225ede0e4 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -444,8 +444,10 @@ __pu_label: \
 ({									\
 	int __gu_err;							\
 	__inttype(*(ptr)) __gu_val;					\
+	__typeof__(ptr) __gu_ptr = (ptr);				\
+	__typeof__(size) __gu_size = (size);				\
 	__uaccess_begin_nospec();					\
-	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
+	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
 	__uaccess_end();						\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 	__builtin_expect(__gu_err, 0);					\
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index dba2828b779a..08fb79f37793 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -834,6 +834,10 @@ bool __init apic_needs_pit(void)
 	if (!boot_cpu_has(X86_FEATURE_APIC))
 		return true;
 
+	/* Virt guests may lack ARAT, but still have DEADLINE */
+	if (!boot_cpu_has(X86_FEATURE_ARAT))
+		return true;
+
 	/* Deadline timer is based on TSC so no further PIT action required */
 	if (boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
 		return false;
@@ -1179,10 +1183,6 @@ void clear_local_APIC(void)
 	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
 	v = apic_read(APIC_LVT1);
 	apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
-	if (!x2apic_enabled()) {
-		v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-		apic_write(APIC_LDR, v);
-	}
 	if (maxlvt >= 4) {
 		v = apic_read(APIC_LVTPC);
 		apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 218b277bfda3..a63964e7cec7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2095,6 +2095,12 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct
 	if (!direct)
 		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache);
 	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+
+	/*
+	 * active_mmu_pages must be a FIFO list, as kvm_zap_obsolete_pages()
+	 * depends on valid pages being added to the head of the list.  See
+	 * comments in kvm_zap_obsolete_pages().
+	 */
 	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
 	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
 	return sp;
@@ -2244,7 +2250,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 #define for_each_valid_sp(_kvm, _sp, _gfn)				\
 	hlist_for_each_entry(_sp,					\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
-		if ((_sp)->role.invalid) {				\
+		if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) { \
 		} else
 
 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)			\
@@ -2301,6 +2307,11 @@ static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
 static void mmu_audit_disable(void) { }
 #endif
 
+static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+{
+	return unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
+}
+
 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			  struct list_head *invalid_list)
 {
@@ -2525,6 +2536,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (level > PT_PAGE_TABLE_LEVEL && need_sync)
 			flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
 	}
+	sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
 	clear_page(sp->spt);
 	trace_kvm_mmu_get_page(sp, true);
 
@@ -4233,6 +4245,13 @@ static bool fast_cr3_switch(struct kvm_vcpu *vcpu, gpa_t new_cr3,
 		return false;
 
 	if (cached_root_available(vcpu, new_cr3, new_role)) {
+		/*
+		 * It is possible that the cached previous root page is
+		 * obsolete because of a change in the MMU generation
+		 * number. However, changing the generation number is
+		 * accompanied by KVM_REQ_MMU_RELOAD, which will free
+		 * the root set here and allocate a new one.
+		 */
 		kvm_make_request(KVM_REQ_LOAD_CR3, vcpu);
 		if (!skip_tlb_flush) {
 			kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
@@ -5649,11 +5668,89 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 	return alloc_mmu_pages(vcpu);
 }
 
+
+static void kvm_zap_obsolete_pages(struct kvm *kvm)
+{
+	struct kvm_mmu_page *sp, *node;
+	LIST_HEAD(invalid_list);
+	int ign;
+
+restart:
+	list_for_each_entry_safe_reverse(sp, node,
+	      &kvm->arch.active_mmu_pages, link) {
+		/*
+		 * No obsolete valid page exists before a newly created page
+		 * since active_mmu_pages is a FIFO list.
+		 */
+		if (!is_obsolete_sp(kvm, sp))
+			break;
+
+		/*
+		 * Do not repeatedly zap a root page to avoid unnecessary
+		 * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
+		 * progress:
+		 *    vcpu 0                        vcpu 1
+		 *                         call vcpu_enter_guest():
+		 *                            1): handle KVM_REQ_MMU_RELOAD
+		 *                                and require mmu-lock to
+		 *                                load mmu
+		 * repeat:
+		 *    1): zap root page and
+		 *        send KVM_REQ_MMU_RELOAD
+		 *
+		 *    2): if (cond_resched_lock(mmu-lock))
+		 *
+		 *                            2): hold mmu-lock and load mmu
+		 *
+		 *                            3): see KVM_REQ_MMU_RELOAD bit
+		 *                                on vcpu->requests is set
+		 *                                then return 1 to call
+		 *                                vcpu_enter_guest() again.
+		 *            goto repeat;
+		 *
+		 * Since we are reversely walking the list and the invalid
+		 * list will be moved to the head, skip the invalid page
+		 * can help us to avoid the infinity list walking.
+		 */
+		if (sp->role.invalid)
+			continue;
+
+		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+			kvm_mmu_commit_zap_page(kvm, &invalid_list);
+			cond_resched_lock(&kvm->mmu_lock);
+			goto restart;
+		}
+
+		if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
+			goto restart;
+	}
+
+	kvm_mmu_commit_zap_page(kvm, &invalid_list);
+}
+
+/*
+ * Fast invalidate all shadow pages and use lock-break technique
+ * to zap obsolete pages.
+ *
+ * It's required when memslot is being deleted or VM is being
+ * destroyed, in these cases, we should ensure that KVM MMU does
+ * not use any resource of the being-deleted slot or all slots
+ * after calling the function.
+ */
+static void kvm_mmu_zap_all_fast(struct kvm *kvm)
+{
+	spin_lock(&kvm->mmu_lock);
+	kvm->arch.mmu_valid_gen++;
+
+	kvm_zap_obsolete_pages(kvm);
+	spin_unlock(&kvm->mmu_lock);
+}
+
 static void kvm_mmu_invalidate_zap_pages_in_memslot(struct kvm *kvm,
 			struct kvm_memory_slot *slot,
 			struct kvm_page_track_notifier_node *node)
 {
-	kvm_mmu_zap_all(kvm);
+	kvm_mmu_zap_all_fast(kvm);
 }
 
 void kvm_mmu_init_vm(struct kvm *kvm)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index ced9fba32598..a3cba321b5c5 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -4540,6 +4540,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 	int len;
 	gva_t gva = 0;
 	struct vmcs12 *vmcs12;
+	struct x86_exception e;
 	short offset;
 
 	if (!nested_vmx_check_permission(vcpu))
@@ -4588,7 +4589,8 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 				vmx_instruction_info, true, len, &gva))
 			return 1;
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
-		kvm_write_guest_virt_system(vcpu, gva, &field_value, len, NULL);
+		if (kvm_write_guest_virt_system(vcpu, gva, &field_value, len, &e))
+			kvm_inject_page_fault(vcpu, &e);
 	}
 
 	return nested_vmx_succeed(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 290c3c3efb87..91602d310a3f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5312,6 +5312,13 @@ int kvm_write_guest_virt_system(struct kvm_vcpu *vcpu, gva_t addr, void *val,
 	/* kvm_write_guest_virt_system can pull in tons of pages. */
 	vcpu->arch.l1tf_flush_l1d = true;
 
+	/*
+	 * FIXME: this should call handle_emulation_failure if X86EMUL_IO_NEEDED
+	 * is returned, but our callers are not ready for that and they blindly
+	 * call kvm_inject_page_fault.  Ensure that they at least do not leak
+	 * uninitialized kernel stack memory into cr2 and error code.
+	 */
+	memset(exception, 0, sizeof(*exception));
 	return kvm_write_guest_virt_helper(addr, val, bytes, vcpu,
 					   PFERR_WRITE_MASK, exception);
 }
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 8901a1f89cf5..10fb42da0007 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -18,37 +18,40 @@ targets += purgatory.ro
 KASAN_SANITIZE	:= n
 KCOV_INSTRUMENT := n
 
+# These are adjustments to the compiler flags used for objects that
+# make up the standalone purgatory.ro
+
+PURGATORY_CFLAGS_REMOVE := -mcmodel=kernel
+PURGATORY_CFLAGS := -mcmodel=large -ffreestanding -fno-zero-initialized-in-bss
+
 # Default KBUILD_CFLAGS can have -pg option set when FTRACE is enabled. That
 # in turn leaves some undefined symbols like __fentry__ in purgatory and not
 # sure how to relocate those.
 ifdef CONFIG_FUNCTION_TRACER
-CFLAGS_REMOVE_sha256.o		+= $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_purgatory.o	+= $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_string.o		+= $(CC_FLAGS_FTRACE)
-CFLAGS_REMOVE_kexec-purgatory.o	+= $(CC_FLAGS_FTRACE)
+PURGATORY_CFLAGS_REMOVE += $(CC_FLAGS_FTRACE)
 endif
 
 ifdef CONFIG_STACKPROTECTOR
-CFLAGS_REMOVE_sha256.o		+= -fstack-protector
-CFLAGS_REMOVE_purgatory.o	+= -fstack-protector
-CFLAGS_REMOVE_string.o		+= -fstack-protector
-CFLAGS_REMOVE_kexec-purgatory.o	+= -fstack-protector
+PURGATORY_CFLAGS_REMOVE += -fstack-protector
 endif
 
 ifdef CONFIG_STACKPROTECTOR_STRONG
-CFLAGS_REMOVE_sha256.o		+= -fstack-protector-strong
-CFLAGS_REMOVE_purgatory.o	+= -fstack-protector-strong
-CFLAGS_REMOVE_string.o		+= -fstack-protector-strong
-CFLAGS_REMOVE_kexec-purgatory.o	+= -fstack-protector-strong
+PURGATORY_CFLAGS_REMOVE += -fstack-protector-strong
 endif
 
 ifdef CONFIG_RETPOLINE
-CFLAGS_REMOVE_sha256.o		+= $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_purgatory.o	+= $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_string.o		+= $(RETPOLINE_CFLAGS)
-CFLAGS_REMOVE_kexec-purgatory.o	+= $(RETPOLINE_CFLAGS)
+PURGATORY_CFLAGS_REMOVE += $(RETPOLINE_CFLAGS)
 endif
 
+CFLAGS_REMOVE_purgatory.o	+= $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_purgatory.o		+= $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_sha256.o		+= $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_sha256.o			+= $(PURGATORY_CFLAGS)
+
+CFLAGS_REMOVE_string.o		+= $(PURGATORY_CFLAGS_REMOVE)
+CFLAGS_string.o			+= $(PURGATORY_CFLAGS)
+
 $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
 		$(call if_changed,ld)
 
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index a0e84538cec8..1fa58c059cbf 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -337,7 +337,7 @@ static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
 
 	usb_free_urb(urb);
 
-	return 0;
+	return err;
 }
 
 static int bpa10x_set_diag(struct hci_dev *hdev, bool enable)
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index ed455de598ea..a9c35ebb30f8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -384,6 +384,9 @@ static const struct usb_device_id blacklist_table[] = {
 	{ USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK },
 	{ USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
 
+	/* Additional Realtek 8822CE Bluetooth devices */
+	{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
+
 	/* Silicon Wave based devices */
 	{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
 
@@ -1201,10 +1204,6 @@ static int btusb_open(struct hci_dev *hdev)
 	}
 
 	data->intf->needs_remote_wakeup = 1;
-	/* device specific wakeup source enabled and required for USB
-	 * remote wakeup while host is suspended
-	 */
-	device_wakeup_enable(&data->udev->dev);
 
 	/* Disable device remote wakeup when host is suspended
 	 * For Realtek chips, global suspend without
@@ -1281,7 +1280,6 @@ static int btusb_close(struct hci_dev *hdev)
 	if (test_bit(BTUSB_WAKEUP_DISABLE, &data->flags))
 		data->intf->needs_remote_wakeup = 1;
 
-	device_wakeup_disable(&data->udev->dev);
 	usb_autopm_put_interface(data->intf);
 
 failed:
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index d33828fef89f..e3164c200eac 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -309,13 +309,14 @@ static void qca_wq_awake_device(struct work_struct *work)
 					       ws_awake_device);
 	struct hci_uart *hu = qca->hu;
 	unsigned long retrans_delay;
+	unsigned long flags;
 
 	BT_DBG("hu %p wq awake device", hu);
 
 	/* Vote for serial clock */
 	serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu);
 
-	spin_lock(&qca->hci_ibs_lock);
+	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
 
 	/* Send wake indication to device */
 	if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0)
@@ -327,7 +328,7 @@ static void qca_wq_awake_device(struct work_struct *work)
 	retrans_delay = msecs_to_jiffies(qca->wake_retrans);
 	mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay);
 
-	spin_unlock(&qca->hci_ibs_lock);
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
 
 	/* Actually send the packets */
 	hci_uart_tx_wakeup(hu);
@@ -338,12 +339,13 @@ static void qca_wq_awake_rx(struct work_struct *work)
 	struct qca_data *qca = container_of(work, struct qca_data,
 					    ws_awake_rx);
 	struct hci_uart *hu = qca->hu;
+	unsigned long flags;
 
 	BT_DBG("hu %p wq awake rx", hu);
 
 	serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu);
 
-	spin_lock(&qca->hci_ibs_lock);
+	spin_lock_irqsave(&qca->hci_ibs_lock, flags);
 	qca->rx_ibs_state = HCI_IBS_RX_AWAKE;
 
 	/* Always acknowledge device wake up,
@@ -354,7 +356,7 @@ static void qca_wq_awake_rx(struct work_struct *work)
 
 	qca->ibs_sent_wacks++;
 
-	spin_unlock(&qca->hci_ibs_lock);
+	spin_unlock_irqrestore(&qca->hci_ibs_lock, flags);
 
 	/* Actually send the packets */
 	hci_uart_tx_wakeup(hu);
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 9c41a4e42575..1072c450c37a 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -192,6 +192,7 @@ struct rcar_dmac_chan {
  * @iomem: remapped I/O memory base
  * @n_channels: number of available channels
  * @channels: array of DMAC channels
+ * @channels_mask: bitfield of which DMA channels are managed by this driver
  * @modules: bitmask of client modules in use
  */
 struct rcar_dmac {
@@ -202,6 +203,7 @@ struct rcar_dmac {
 
 	unsigned int n_channels;
 	struct rcar_dmac_chan *channels;
+	unsigned int channels_mask;
 
 	DECLARE_BITMAP(modules, 256);
 };
@@ -438,7 +440,7 @@ static int rcar_dmac_init(struct rcar_dmac *dmac)
 	u16 dmaor;
 
 	/* Clear all channels and enable the DMAC globally. */
-	rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
+	rcar_dmac_write(dmac, RCAR_DMACHCLR, dmac->channels_mask);
 	rcar_dmac_write(dmac, RCAR_DMAOR,
 			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);
 
@@ -814,6 +816,9 @@ static void rcar_dmac_stop_all_chan(struct rcar_dmac *dmac)
 	for (i = 0; i < dmac->n_channels; ++i) {
 		struct rcar_dmac_chan *chan = &dmac->channels[i];
 
+		if (!(dmac->channels_mask & BIT(i)))
+			continue;
+
 		/* Stop and reinitialize the channel. */
 		spin_lock_irq(&chan->lock);
 		rcar_dmac_chan_halt(chan);
@@ -1776,6 +1781,8 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
 	return 0;
 }
 
+#define RCAR_DMAC_MAX_CHANNELS	32
+
 static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
 {
 	struct device_node *np = dev->of_node;
@@ -1787,12 +1794,16 @@ static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
 		return ret;
 	}
 
-	if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
+	/* The hardware and driver don't support more than 32 bits in CHCLR */
+	if (dmac->n_channels <= 0 ||
+	    dmac->n_channels >= RCAR_DMAC_MAX_CHANNELS) {
 		dev_err(dev, "invalid number of channels %u\n",
 			dmac->n_channels);
 		return -EINVAL;
 	}
 
+	dmac->channels_mask = GENMASK(dmac->n_channels - 1, 0);
+
 	return 0;
 }
 
@@ -1802,7 +1813,6 @@ static int rcar_dmac_probe(struct platform_device *pdev)
 		DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
 		DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
 		DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
-	unsigned int channels_offset = 0;
 	struct dma_device *engine;
 	struct rcar_dmac *dmac;
 	struct resource *mem;
@@ -1831,10 +1841,8 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1831 * level we can't disable it selectively, so ignore channel 0 for now if 1841 * level we can't disable it selectively, so ignore channel 0 for now if
1832 * the device is part of an IOMMU group. 1842 * the device is part of an IOMMU group.
1833 */ 1843 */
1834 if (device_iommu_mapped(&pdev->dev)) { 1844 if (device_iommu_mapped(&pdev->dev))
1835 dmac->n_channels--; 1845 dmac->channels_mask &= ~BIT(0);
1836 channels_offset = 1;
1837 }
1838 1846
1839 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, 1847 dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
1840 sizeof(*dmac->channels), GFP_KERNEL); 1848 sizeof(*dmac->channels), GFP_KERNEL);
@@ -1892,8 +1900,10 @@ static int rcar_dmac_probe(struct platform_device *pdev)
1892 INIT_LIST_HEAD(&engine->channels); 1900 INIT_LIST_HEAD(&engine->channels);
1893 1901
1894 for (i = 0; i < dmac->n_channels; ++i) { 1902 for (i = 0; i < dmac->n_channels; ++i) {
1895 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], 1903 if (!(dmac->channels_mask & BIT(i)))
1896 i + channels_offset); 1904 continue;
1905
1906 ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i);
1897 if (ret < 0) 1907 if (ret < 0)
1898 goto error; 1908 goto error;
1899 } 1909 }
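The rcar-dmac change above swaps channel arithmetic (n_channels--, channels_offset) for an explicit channels_mask, so a single channel (channel 0 under an IOMMU) can be excluded while the remaining channels keep their hardware indices. A minimal userspace sketch of the same mask logic, with BIT()/GENMASK() redefined locally; the names and values here are illustrative, not the driver's:

#include <stdio.h>

/* Local stand-ins for the kernel's BIT()/GENMASK() helpers. */
#define BIT(n)        (1U << (n))
#define GENMASK(h, l) (((~0U) >> (31 - (h))) & ((~0U) << (l)))

int main(void)
{
	unsigned int n_channels = 8;
	unsigned int channels_mask = GENMASK(n_channels - 1, 0); /* 0xff */

	/* Drop channel 0, as the driver now does behind an IOMMU. */
	channels_mask &= ~BIT(0);

	for (unsigned int i = 0; i < n_channels; i++) {
		if (!(channels_mask & BIT(i)))
			continue; /* skipped, but hardware indices stay stable */
		printf("probing channel %u\n", i);
	}
	return 0;
}

This is also why the 100-channel sanity bound becomes RCAR_DMAC_MAX_CHANNELS (32): the mask has to fit the 32-bit CHCLR register.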
diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c
index baac476c8622..525dc7338fe3 100644
--- a/drivers/dma/sprd-dma.c
+++ b/drivers/dma/sprd-dma.c
@@ -908,6 +908,7 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
 	struct dma_slave_config *slave_cfg = &schan->slave_cfg;
 	dma_addr_t src = 0, dst = 0;
+	dma_addr_t start_src = 0, start_dst = 0;
 	struct sprd_dma_desc *sdesc;
 	struct scatterlist *sg;
 	u32 len = 0;
@@ -954,6 +955,11 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			dst = sg_dma_address(sg);
 		}
 
+		if (!i) {
+			start_src = src;
+			start_dst = dst;
+		}
+
 		/*
 		 * The link-list mode needs at least 2 link-list
 		 * configurations. If there is only one sg, it doesn't
@@ -970,8 +976,8 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		}
 	}
 
-	ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
-				 dir, flags, slave_cfg);
+	ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
+				 start_dst, len, dir, flags, slave_cfg);
 	if (ret) {
 		kfree(sdesc);
 		return NULL;
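The sprd-dma fix captures the first scatterlist entry's addresses before the loop advances src/dst, so the descriptor is programmed with the transfer's start rather than its last segment. A self-contained sketch of that capture (the struct and addresses are toy stand-ins for scatterlist entries):

#include <stdio.h>

struct sg { unsigned long src, dst; };

int main(void)
{
	struct sg sgl[] = { { 0x1000, 0x2000 }, { 0x3000, 0x4000 } };
	unsigned long src = 0, dst = 0, start_src = 0, start_dst = 0;

	for (int i = 0; i < 2; i++) {
		src = sgl[i].src;
		dst = sgl[i].dst;
		if (!i) {		/* remember the first entry */
			start_src = src;
			start_dst = dst;
		}
	}
	/* Program the descriptor with the start, not the last, addresses. */
	printf("fill_desc(src=0x%lx, dst=0x%lx)\n", start_src, start_dst);
	return 0;
}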
diff --git a/drivers/dma/ti/dma-crossbar.c b/drivers/dma/ti/dma-crossbar.c
index ad2f0a4cd6a4..f255056696ee 100644
--- a/drivers/dma/ti/dma-crossbar.c
+++ b/drivers/dma/ti/dma-crossbar.c
@@ -391,8 +391,10 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
 
 		ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
 						 nelm * 2);
-		if (ret)
+		if (ret) {
+			kfree(rsv_events);
 			return ret;
+		}
 
 		for (i = 0; i < nelm; i++) {
 			ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
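This hunk and the omap-dma hunk just below fix the same bug class: an early return on error that leaks a resource allocated earlier in the function. A compact sketch of the corrected pattern; parse() and the sizes are hypothetical stand-ins for of_property_read_u32_array() and friends:

#include <stdlib.h>
#include <string.h>

/* Hypothetical parse step; returns 0 on success, negative errno on failure. */
static int parse(unsigned int *buf, size_t n)
{
	memset(buf, 0, n * sizeof(*buf));
	return 0;
}

static int probe(void)
{
	unsigned int *rsv_events = calloc(16, sizeof(*rsv_events));
	int ret;

	if (!rsv_events)
		return -12; /* -ENOMEM */

	ret = parse(rsv_events, 16);
	if (ret) {
		free(rsv_events); /* the fix: release before bailing out */
		return ret;
	}

	/* ... use rsv_events ... */
	free(rsv_events);
	return 0;
}

int main(void) { return probe() ? 1 : 0; }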
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index ba27802efcd0..d07c0d5de7a2 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1540,8 +1540,10 @@ static int omap_dma_probe(struct platform_device *pdev)
 
 		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
 				      IRQF_SHARED, "omap-dma-engine", od);
-		if (rc)
+		if (rc) {
+			omap_dma_free(od);
 			return rc;
+		}
 	}
 
 	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
index f1a9c0544e3f..213aedc97dc2 100644
--- a/drivers/gpio/gpio-mockup.c
+++ b/drivers/gpio/gpio-mockup.c
@@ -309,6 +309,7 @@ static const struct file_operations gpio_mockup_debugfs_ops = {
 	.read = gpio_mockup_debugfs_read,
 	.write = gpio_mockup_debugfs_write,
 	.llseek = no_llseek,
+	.release = single_release,
 };
 
 static void gpio_mockup_debugfs_setup(struct device *dev,
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c
index 378b206d2dc9..48fea4c68e8d 100644
--- a/drivers/gpio/gpio-pca953x.c
+++ b/drivers/gpio/gpio-pca953x.c
@@ -604,10 +604,9 @@ static void pca953x_irq_bus_sync_unlock(struct irq_data *d)
 	u8 new_irqs;
 	int level, i;
 	u8 invert_irq_mask[MAX_BANK];
-	int reg_direction[MAX_BANK];
+	u8 reg_direction[MAX_BANK];
 
-	regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
-			 NBANK(chip));
+	pca953x_read_regs(chip, chip->regs->direction, reg_direction);
 
 	if (chip->driver_data & PCA_PCAL) {
 		/* Enable latch on interrupt-enabled inputs */
@@ -679,7 +678,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
 	bool pending_seen = false;
 	bool trigger_seen = false;
 	u8 trigger[MAX_BANK];
-	int reg_direction[MAX_BANK];
+	u8 reg_direction[MAX_BANK];
 	int ret, i;
 
 	if (chip->driver_data & PCA_PCAL) {
@@ -710,8 +709,7 @@ static bool pca953x_irq_pending(struct pca953x_chip *chip, u8 *pending)
 		return false;
 
 	/* Remove output pins from the equation */
-	regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
-			 NBANK(chip));
+	pca953x_read_regs(chip, chip->regs->direction, reg_direction);
 	for (i = 0; i < NBANK(chip); i++)
 		cur_stat[i] &= reg_direction[i];
 
@@ -768,7 +766,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 {
 	struct i2c_client *client = chip->client;
 	struct irq_chip *irq_chip = &chip->irq_chip;
-	int reg_direction[MAX_BANK];
+	u8 reg_direction[MAX_BANK];
 	int ret, i;
 
 	if (!client->irq)
@@ -789,8 +787,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
 	 * interrupt. We have to rely on the previous read for
 	 * this purpose.
 	 */
-	regmap_bulk_read(chip->regmap, chip->regs->direction, reg_direction,
-			 NBANK(chip));
+	pca953x_read_regs(chip, chip->regs->direction, reg_direction);
 	for (i = 0; i < NBANK(chip); i++)
 		chip->irq_stat[i] &= reg_direction[i];
 	mutex_init(&chip->irq_lock);
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index 39f2f9035c11..bda28eb82c3f 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -7,6 +7,7 @@
  *          Mika Westerberg <mika.westerberg@linux.intel.com>
  */
 
+#include <linux/dmi.h>
 #include <linux/errno.h>
 #include <linux/gpio/consumer.h>
 #include <linux/gpio/driver.h>
@@ -19,6 +20,11 @@
 
 #include "gpiolib.h"
 
+static int run_edge_events_on_boot = -1;
+module_param(run_edge_events_on_boot, int, 0444);
+MODULE_PARM_DESC(run_edge_events_on_boot,
+		 "Run edge _AEI event-handlers at boot: 0=no, 1=yes, -1=auto");
+
 /**
  * struct acpi_gpio_event - ACPI GPIO event handler data
  *
@@ -170,10 +176,13 @@ static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
 	event->irq_requested = true;
 
 	/* Make sure we trigger the initial state of edge-triggered IRQs */
-	value = gpiod_get_raw_value_cansleep(event->desc);
-	if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-	    ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
-		event->handler(event->irq, event);
+	if (run_edge_events_on_boot &&
+	    (event->irqflags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))) {
+		value = gpiod_get_raw_value_cansleep(event->desc);
+		if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+		    ((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+			event->handler(event->irq, event);
+	}
 }
 
 static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
@@ -1283,3 +1292,28 @@ static int acpi_gpio_handle_deferred_request_irqs(void)
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
 late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);
+
+static const struct dmi_system_id run_edge_events_on_boot_blacklist[] = {
+	{
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "MINIX"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Z83-4"),
+		}
+	},
+	{} /* Terminating entry */
+};
+
+static int acpi_gpio_setup_params(void)
+{
+	if (run_edge_events_on_boot < 0) {
+		if (dmi_check_system(run_edge_events_on_boot_blacklist))
+			run_edge_events_on_boot = 0;
+		else
+			run_edge_events_on_boot = 1;
+	}
+
+	return 0;
+}
+
+/* Directly after dmi_setup() which runs as core_initcall() */
+postcore_initcall(acpi_gpio_setup_params);
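The gpiolib-acpi change introduces a three-state module parameter (-1 auto, 0 off, 1 on) whose auto value is resolved from a DMI deny-list at postcore_initcall time, after the DMI tables are available. A userspace approximation of that decision logic; dmi_matches() and the identity strings stand in for dmi_check_system() and the real firmware tables:

#include <stdio.h>
#include <string.h>

/* -1 = auto-detect, 0 = off, 1 = on; mirrors the module parameter. */
static int run_edge_events_on_boot = -1;

struct dmi_id { const char *vendor, *product; };

static const struct dmi_id denylist[] = {
	{ "MINIX", "Z83-4" },
	{ NULL, NULL } /* terminator */
};

static int dmi_matches(const char *vendor, const char *product)
{
	for (const struct dmi_id *id = denylist; id->vendor; id++)
		if (!strcmp(id->vendor, vendor) && !strcmp(id->product, product))
			return 1;
	return 0;
}

int main(void)
{
	/* In the driver these come from the DMI tables at boot. */
	const char *vendor = "MINIX", *product = "Z83-4";

	if (run_edge_events_on_boot < 0) /* auto: deny-list decides */
		run_edge_events_on_boot = !dmi_matches(vendor, product);

	printf("run edge events on boot: %d\n", run_edge_events_on_boot);
	return 0;
}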
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 9762dd6d99fa..9b44c49a9227 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -343,36 +343,27 @@ struct gpio_desc *of_find_gpio(struct device *dev, const char *con_id,
 
 		desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx,
 						&of_flags);
-		/*
-		 * -EPROBE_DEFER in our case means that we found a
-		 * valid GPIO property, but no controller has been
-		 * registered so far.
-		 *
-		 * This means we don't need to look any further for
-		 * alternate name conventions, and we should really
-		 * preserve the return code for our user to be able to
-		 * retry probing later.
-		 */
-		if (IS_ERR(desc) && PTR_ERR(desc) == -EPROBE_DEFER)
-			return desc;
 
-		if (!IS_ERR(desc) || (PTR_ERR(desc) != -ENOENT))
+		if (!IS_ERR(desc) || PTR_ERR(desc) != -ENOENT)
 			break;
 	}
 
-	/* Special handling for SPI GPIOs if used */
-	if (IS_ERR(desc))
-		desc = of_find_spi_gpio(dev, con_id, &of_flags);
-	if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER) {
+	if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+		/* Special handling for SPI GPIOs if used */
+		desc = of_find_spi_gpio(dev, con_id, &of_flags);
+	}
+
+	if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
 		/* This quirk looks up flags and all */
 		desc = of_find_spi_cs_gpio(dev, con_id, idx, flags);
 		if (!IS_ERR(desc))
 			return desc;
 	}
 
-	/* Special handling for regulator GPIOs if used */
-	if (IS_ERR(desc) && PTR_ERR(desc) != -EPROBE_DEFER)
-		desc = of_find_regulator_gpio(dev, con_id, &of_flags);
+	if (IS_ERR(desc) && PTR_ERR(desc) == -ENOENT) {
+		/* Special handling for regulator GPIOs if used */
+		desc = of_find_regulator_gpio(dev, con_id, &of_flags);
+	}
 
 	if (IS_ERR(desc))
 		return desc;
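The of_find_gpio() rework keys every fallback quirk on -ENOENT ("no such property") only, so a real error such as -EPROBE_DEFER from the primary lookup is propagated untouched instead of being masked by the SPI and regulator quirks. A compact sketch of that chaining, with errno values inlined and the three lookups reduced to hypothetical stubs:

#include <stdio.h>

#define ENOENT        2
#define EPROBE_DEFER  517 /* kernel-internal; shown only for the comment below */

/* Hypothetical lookups; each returns 0 on success or -errno. */
static int find_regular(void)   { return -ENOENT; }
static int find_spi(void)       { return -ENOENT; }
static int find_regulator(void) { return 0; }

int main(void)
{
	int ret = find_regular();

	/* Only -ENOENT means "try the next naming quirk"; anything else,
	 * including -EPROBE_DEFER, is returned to the caller unchanged. */
	if (ret == -ENOENT)
		ret = find_spi();
	if (ret == -ENOENT)
		ret = find_regulator();

	printf("lookup result: %d\n", ret);
	return ret ? 1 : 0;
}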
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index cca749010cd0..d9074191edef 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -536,6 +536,14 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 		return -EINVAL;
 
 	/*
+	 * Do not allow both INPUT & OUTPUT flags to be set as they are
+	 * contradictory.
+	 */
+	if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
+	    (lflags & GPIOHANDLE_REQUEST_OUTPUT))
+		return -EINVAL;
+
+	/*
 	 * Do not allow OPEN_SOURCE & OPEN_DRAIN flags in a single request. If
 	 * the hardware actually supports enabling both at the same time the
 	 * electrical result would be disastrous.
@@ -926,7 +934,9 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 	}
 
 	/* This is just wrong: we don't look for events on output lines */
-	if (lflags & GPIOHANDLE_REQUEST_OUTPUT) {
+	if ((lflags & GPIOHANDLE_REQUEST_OUTPUT) ||
+	    (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN) ||
+	    (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)) {
 		ret = -EINVAL;
 		goto out_free_label;
 	}
@@ -940,10 +950,6 @@ static int lineevent_create(struct gpio_device *gdev, void __user *ip)
 
 	if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 		set_bit(FLAG_ACTIVE_LOW, &desc->flags);
-	if (lflags & GPIOHANDLE_REQUEST_OPEN_DRAIN)
-		set_bit(FLAG_OPEN_DRAIN, &desc->flags);
-	if (lflags & GPIOHANDLE_REQUEST_OPEN_SOURCE)
-		set_bit(FLAG_OPEN_SOURCE, &desc->flags);
 
 	ret = gpiod_direction_input(desc);
 	if (ret)
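Both gpiolib hunks tighten flag validation on the character-device ABI: a handle request may not be both input and output, and a line-event request may not carry output or open-drain/open-source modes. A sketch of the combined check, using the bit positions from the GPIO uapi header (values shown here for illustration):

#include <stdio.h>

#define GPIOHANDLE_REQUEST_INPUT       (1U << 0)
#define GPIOHANDLE_REQUEST_OUTPUT      (1U << 1)
#define GPIOHANDLE_REQUEST_OPEN_DRAIN  (1U << 3)
#define GPIOHANDLE_REQUEST_OPEN_SOURCE (1U << 4)

static int validate_flags(unsigned int lflags, int is_event_request)
{
	/* INPUT and OUTPUT in one request are contradictory. */
	if ((lflags & GPIOHANDLE_REQUEST_INPUT) &&
	    (lflags & GPIOHANDLE_REQUEST_OUTPUT))
		return -22; /* -EINVAL */

	/* Event requests watch inputs: output and the open-* modes
	 * make no sense there and are rejected as well. */
	if (is_event_request &&
	    (lflags & (GPIOHANDLE_REQUEST_OUTPUT |
		       GPIOHANDLE_REQUEST_OPEN_DRAIN |
		       GPIOHANDLE_REQUEST_OPEN_SOURCE)))
		return -22;

	return 0;
}

int main(void)
{
	printf("%d\n", validate_flags(GPIOHANDLE_REQUEST_INPUT |
				      GPIOHANDLE_REQUEST_OUTPUT, 0)); /* -22 */
	printf("%d\n", validate_flags(GPIOHANDLE_REQUEST_INPUT, 1));   /* 0 */
	return 0;
}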
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index b0369e690f36..c814bcef18a4 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1454,6 +1454,7 @@ static int drm_mode_parse_cmdline_refresh(const char *str, char **end_ptr,
 }
 
 static int drm_mode_parse_cmdline_extra(const char *str, int length,
+					bool freestanding,
 					const struct drm_connector *connector,
 					struct drm_cmdline_mode *mode)
 {
@@ -1462,9 +1463,15 @@ static int drm_mode_parse_cmdline_extra(const char *str, int length,
 	for (i = 0; i < length; i++) {
 		switch (str[i]) {
 		case 'i':
+			if (freestanding)
+				return -EINVAL;
+
 			mode->interlace = true;
 			break;
 		case 'm':
+			if (freestanding)
+				return -EINVAL;
+
 			mode->margins = true;
 			break;
 		case 'D':
@@ -1542,6 +1549,7 @@ static int drm_mode_parse_cmdline_res_mode(const char *str, unsigned int length,
 			if (extras) {
 				int ret = drm_mode_parse_cmdline_extra(end_ptr + i,
 								       1,
+								       false,
 								       connector,
 								       mode);
 				if (ret)
@@ -1669,6 +1677,22 @@ static int drm_mode_parse_cmdline_options(char *str, size_t len,
 	return 0;
 }
 
+static const char * const drm_named_modes_whitelist[] = {
+	"NTSC",
+	"PAL",
+};
+
+static bool drm_named_mode_is_in_whitelist(const char *mode, unsigned int size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(drm_named_modes_whitelist); i++)
+		if (!strncmp(mode, drm_named_modes_whitelist[i], size))
+			return true;
+
+	return false;
+}
+
 /**
  * drm_mode_parse_command_line_for_connector - parse command line modeline for connector
  * @mode_option: optional per connector mode option
@@ -1725,16 +1749,30 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 	 * bunch of things:
 	 *   - We need to make sure that the first character (which
 	 *     would be our resolution in X) is a digit.
-	 *   - However, if the X resolution is missing, then we end up
-	 *     with something like x<yres>, with our first character
-	 *     being an alpha-numerical character, which would be
-	 *     considered a named mode.
+	 *   - If not, then it's either a named mode or a force on/off.
+	 *     To distinguish between the two, we need to run the
+	 *     extra parsing function, and if not, then we consider it
+	 *     a named mode.
 	 *
 	 * If this isn't enough, we should add more heuristics here,
 	 * and matching unit-tests.
 	 */
-	if (!isdigit(name[0]) && name[0] != 'x')
+	if (!isdigit(name[0]) && name[0] != 'x') {
+		unsigned int namelen = strlen(name);
+
+		/*
+		 * Only the force on/off options can be in that case,
+		 * and they all take a single character.
+		 */
+		if (namelen == 1) {
+			ret = drm_mode_parse_cmdline_extra(name, namelen, true,
+							   connector, mode);
+			if (!ret)
+				return true;
+		}
+
 		named_mode = true;
+	}
 
 	/* Try to locate the bpp and refresh specifiers, if any */
 	bpp_ptr = strchr(name, '-');
@@ -1772,6 +1810,10 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 	if (named_mode) {
 		if (mode_end + 1 > DRM_DISPLAY_MODE_LEN)
 			return false;
+
+		if (!drm_named_mode_is_in_whitelist(name, mode_end))
+			return false;
+
 		strscpy(mode->name, name, mode_end + 1);
 	} else {
 		ret = drm_mode_parse_cmdline_res_mode(name, mode_end,
@@ -1811,7 +1853,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
 	    extra_ptr != options_ptr) {
 		int len = strlen(name) - (extra_ptr - name);
 
-		ret = drm_mode_parse_cmdline_extra(extra_ptr, len,
+		ret = drm_mode_parse_cmdline_extra(extra_ptr, len, false,
 						   connector, mode);
 		if (ret)
 			return false;
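The whitelist helper added above compares only size characters, where size is the length of the mode name up to any '-bpp' or '@refresh' specifier, so "NTSC" and "NTSC-8" both match the "NTSC" entry. A standalone sketch of that lookup; the whitelist contents mirror the patch, everything else is illustrative:

#include <stdio.h>
#include <string.h>

static const char * const whitelist[] = { "NTSC", "PAL" };

/* size covers the mode name only, i.e. up to any '-bpp' or '@refresh'. */
static int mode_in_whitelist(const char *mode, size_t size)
{
	for (size_t i = 0; i < sizeof(whitelist) / sizeof(whitelist[0]); i++)
		if (!strncmp(mode, whitelist[i], size))
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", mode_in_whitelist("NTSC", 4));   /* 1 */
	printf("%d\n", mode_in_whitelist("NTSC-8", 4)); /* 1: name part only */
	printf("%d\n", mode_in_whitelist("1080p", 5));  /* 0 */
	return 0;
}

Note the strncmp() semantics: a size shorter than a whitelist entry matches any prefix of it, which keeps the check simple at the cost of accepting, say, "NT" with size 2.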
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 18e4cba76720..8aa6a31e8ad0 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -128,7 +128,15 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
 	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
 
 	limits.min_bpp = intel_dp_min_bpp(pipe_config);
-	limits.max_bpp = pipe_config->pipe_bpp;
+	/*
+	 * FIXME: If all the streams can't fit into the link with
+	 * their current pipe_bpp we should reduce pipe_bpp across
+	 * the board until things start to fit. Until then we
+	 * limit to <= 8bpc since that's what was hardcoded for all
+	 * MST streams previously. This hack should be removed once
+	 * we have the proper retry logic in place.
+	 */
+	limits.max_bpp = min(pipe_config->pipe_bpp, 24);
 
 	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 2caa594322bc..528b61678334 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -664,15 +664,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 
 	for_each_sgt_page(page, sgt_iter, pages) {
 		if (obj->mm.dirty)
-			/*
-			 * As this may not be anonymous memory (e.g. shmem)
-			 * but exist on a real mapping, we have to lock
-			 * the page in order to dirty it -- holding
-			 * the page reference is not sufficient to
-			 * prevent the inode from being truncated.
-			 * Play safe and take the lock.
-			 */
-			set_page_dirty_lock(page);
+			set_page_dirty(page);
 
 		mark_page_accessed(page);
 		put_page(page);
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 98dfb086320f..99e8242194c0 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -308,11 +308,6 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
 			  FLOW_CONTROL_ENABLE |
 			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
-	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
-	if (!IS_COFFEELAKE(i915))
-		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
-				  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
-
 	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
 	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
 	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index e9f9e9fb9b17..6381652a8829 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -656,10 +656,9 @@ static int ingenic_drm_probe(struct platform_device *pdev)
 		return ret;
 	}
 
-	if (panel) {
+	if (panel)
 		bridge = devm_drm_panel_bridge_add(dev, panel,
-						   DRM_MODE_CONNECTOR_Unknown);
-	}
+						   DRM_MODE_CONNECTOR_DPI);
 
 	priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc),
 					      &priv->dma_hwdesc_phys,
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index 477c0f766663..b609dc030d6c 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -342,7 +342,7 @@ int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
 	timeout = drm_timeout_abs_to_jiffies(timeout_ns);
 
 	ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
-	if (ret == 0)
+	if (ret == -ETIME)
 		ret = timeout ? -ETIMEDOUT : -EBUSY;
 
 	return ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
index 84a2f243ed9b..4695f1c8e33f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gp102.c
@@ -190,6 +190,9 @@ MODULE_FIRMWARE("nvidia/gp102/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp102/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp102/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp102/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp102/sec2/sig-1.bin");
 MODULE_FIRMWARE("nvidia/gp104/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gp104/acr/unload_bl.bin");
 MODULE_FIRMWARE("nvidia/gp104/acr/ucode_load.bin");
@@ -210,6 +213,9 @@ MODULE_FIRMWARE("nvidia/gp104/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp104/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp104/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp104/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp104/sec2/sig-1.bin");
 MODULE_FIRMWARE("nvidia/gp106/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gp106/acr/unload_bl.bin");
 MODULE_FIRMWARE("nvidia/gp106/acr/ucode_load.bin");
@@ -230,6 +236,9 @@ MODULE_FIRMWARE("nvidia/gp106/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp106/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp106/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp106/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp106/sec2/sig-1.bin");
 MODULE_FIRMWARE("nvidia/gp107/acr/bl.bin");
 MODULE_FIRMWARE("nvidia/gp107/acr/unload_bl.bin");
 MODULE_FIRMWARE("nvidia/gp107/acr/ucode_load.bin");
@@ -250,3 +259,6 @@ MODULE_FIRMWARE("nvidia/gp107/nvdec/scrubber.bin");
 MODULE_FIRMWARE("nvidia/gp107/sec2/desc.bin");
 MODULE_FIRMWARE("nvidia/gp107/sec2/image.bin");
 MODULE_FIRMWARE("nvidia/gp107/sec2/sig.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/desc-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/image-1.bin");
+MODULE_FIRMWARE("nvidia/gp107/sec2/sig-1.bin");
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
index b45824ec7c8f..6d61a0eb5d64 100644
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
@@ -9,6 +9,13 @@
 
 #define cmdline_test(test)	selftest(test, test)
 
+cmdline_test(drm_cmdline_test_force_d_only)
+cmdline_test(drm_cmdline_test_force_D_only_dvi)
+cmdline_test(drm_cmdline_test_force_D_only_hdmi)
+cmdline_test(drm_cmdline_test_force_D_only_not_digital)
+cmdline_test(drm_cmdline_test_force_e_only)
+cmdline_test(drm_cmdline_test_margin_only)
+cmdline_test(drm_cmdline_test_interlace_only)
 cmdline_test(drm_cmdline_test_res)
 cmdline_test(drm_cmdline_test_res_missing_x)
 cmdline_test(drm_cmdline_test_res_missing_y)
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
index 14c96edb13df..013de9d27c35 100644
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
@@ -17,6 +17,136 @@
 
 static const struct drm_connector no_connector = {};
 
+static int drm_cmdline_test_force_e_only(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("e",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON);
+
+	return 0;
+}
+
+static int drm_cmdline_test_force_D_only_not_digital(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON);
+
+	return 0;
+}
+
+static const struct drm_connector connector_hdmi = {
+	.connector_type	= DRM_MODE_CONNECTOR_HDMIB,
+};
+
+static int drm_cmdline_test_force_D_only_hdmi(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+							   &connector_hdmi,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+	return 0;
+}
+
+static const struct drm_connector connector_dvi = {
+	.connector_type	= DRM_MODE_CONNECTOR_DVII,
+};
+
+static int drm_cmdline_test_force_D_only_dvi(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("D",
+							   &connector_dvi,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_ON_DIGITAL);
+
+	return 0;
+}
+
+static int drm_cmdline_test_force_d_only(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(!drm_mode_parse_command_line_for_connector("d",
+							   &no_connector,
+							   &mode));
+	FAIL_ON(mode.specified);
+	FAIL_ON(mode.refresh_specified);
+	FAIL_ON(mode.bpp_specified);
+
+	FAIL_ON(mode.rb);
+	FAIL_ON(mode.cvt);
+	FAIL_ON(mode.interlace);
+	FAIL_ON(mode.margins);
+	FAIL_ON(mode.force != DRM_FORCE_OFF);
+
+	return 0;
+}
+
+static int drm_cmdline_test_margin_only(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(drm_mode_parse_command_line_for_connector("m",
+							  &no_connector,
+							  &mode));
+
+	return 0;
+}
+
+static int drm_cmdline_test_interlace_only(void *ignored)
+{
+	struct drm_cmdline_mode mode = { };
+
+	FAIL_ON(drm_mode_parse_command_line_for_connector("i",
+							  &no_connector,
+							  &mode));
+
+	return 0;
+}
+
 static int drm_cmdline_test_res(void *ignored)
 {
 	struct drm_cmdline_mode mode = { };
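The new tests all follow one shape: run the parser on a short string, then FAIL_ON() every field that must not be set. A self-contained imitation of that macro style; parse_force_off() is a toy stand-in for the real parser:

#include <stdio.h>

/* Minimal stand-in for the selftest FAIL_ON() macro. */
#define FAIL_ON(expr) do { \
	if (expr) { \
		fprintf(stderr, "%s:%d: FAIL_ON(%s)\n", \
			__FILE__, __LINE__, #expr); \
		return -1; \
	} \
} while (0)

static int parse_force_off(const char *opt) { return opt[0] == 'd'; }

static int test_force_d_only(void)
{
	FAIL_ON(!parse_force_off("d")); /* "d" must parse as force-off */
	FAIL_ON(parse_force_off("e"));  /* "e" must not */
	return 0;
}

int main(void)
{
	return test_force_d_only() ? 1 : 0;
}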
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 59e9d05ab928..0af048d1a815 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -353,7 +353,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 			  !!(HIGH_WORD(ecx) & MESSAGE_STATUS_HB));
 	if ((HIGH_WORD(ebx) & MESSAGE_STATUS_SUCCESS) == 0) {
 		kfree(reply);
-
+		reply = NULL;
 		if ((HIGH_WORD(ebx) & MESSAGE_STATUS_CPT) != 0) {
 			/* A checkpoint occurred. Retry. */
 			continue;
@@ -377,7 +377,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 
 		if ((HIGH_WORD(ecx) & MESSAGE_STATUS_SUCCESS) == 0) {
 			kfree(reply);
-
+			reply = NULL;
 			if ((HIGH_WORD(ecx) & MESSAGE_STATUS_CPT) != 0) {
 				/* A checkpoint occurred. Retry. */
 				continue;
@@ -389,10 +389,8 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg,
 		break;
 	}
 
-	if (retries == RETRIES) {
-		kfree(reply);
+	if (!reply)
 		return -EINVAL;
-	}
 
 	*msg_len = reply_len;
 	*msg = reply;
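The vmwgfx fix replaces a loop-counter test with a NULL check: by clearing reply immediately after each kfree(), the code can tell "every retry failed" apart from "a buffer survived", and no stale pointer can be freed twice. A minimal sketch of the free-and-NULL retry pattern:

#include <stdlib.h>

#define RETRIES 3

int main(void)
{
	char *reply = NULL;

	for (int retries = 0; retries < RETRIES; retries++) {
		reply = malloc(64);
		if (!reply)
			break;
		/* Pretend the request failed: release and clear the pointer
		 * so no stale value survives into the next iteration. */
		free(reply);
		reply = NULL;
	}

	if (!reply)		/* replaces the old "retries == RETRIES" test */
		return 1;	/* -EINVAL in the driver */

	free(reply);
	return 0;
}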
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b607a92791d3..61de81965c44 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1143,6 +1143,17 @@ static void amd_iommu_flush_tlb_all(struct amd_iommu *iommu)
 	iommu_completion_wait(iommu);
 }
 
+static void amd_iommu_flush_tlb_domid(struct amd_iommu *iommu, u32 dom_id)
+{
+	struct iommu_cmd cmd;
+
+	build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+			      dom_id, 1);
+	iommu_queue_command(iommu, &cmd);
+
+	iommu_completion_wait(iommu);
+}
+
 static void amd_iommu_flush_all(struct amd_iommu *iommu)
 {
 	struct iommu_cmd cmd;
@@ -1424,18 +1435,21 @@ static void free_pagetable(struct protection_domain *domain)
  * another level increases the size of the address space by 9 bits to a size up
  * to 64 bits.
  */
-static bool increase_address_space(struct protection_domain *domain,
+static void increase_address_space(struct protection_domain *domain,
 				   gfp_t gfp)
 {
+	unsigned long flags;
 	u64 *pte;
 
-	if (domain->mode == PAGE_MODE_6_LEVEL)
+	spin_lock_irqsave(&domain->lock, flags);
+
+	if (WARN_ON_ONCE(domain->mode == PAGE_MODE_6_LEVEL))
 		/* address space already 64 bit large */
-		return false;
+		goto out;
 
 	pte = (void *)get_zeroed_page(gfp);
 	if (!pte)
-		return false;
+		goto out;
 
 	*pte = PM_LEVEL_PDE(domain->mode,
 			    iommu_virt_to_phys(domain->pt_root));
@@ -1443,7 +1457,10 @@ static bool increase_address_space(struct protection_domain *domain,
 	domain->mode    += 1;
 	domain->updated  = true;
 
-	return true;
+out:
+	spin_unlock_irqrestore(&domain->lock, flags);
+
+	return;
 }
 
 static u64 *alloc_pte(struct protection_domain *domain,
@@ -1873,6 +1890,7 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 {
 	u64 pte_root = 0;
 	u64 flags = 0;
+	u32 old_domid;
 
 	if (domain->mode != PAGE_MODE_NONE)
 		pte_root = iommu_virt_to_phys(domain->pt_root);
@@ -1922,8 +1940,20 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain,
 	flags &= ~DEV_DOMID_MASK;
 	flags |= domain->id;
 
+	old_domid = amd_iommu_dev_table[devid].data[1] & DEV_DOMID_MASK;
 	amd_iommu_dev_table[devid].data[1]  = flags;
 	amd_iommu_dev_table[devid].data[0]  = pte_root;
+
+	/*
+	 * A kdump kernel might be replacing a domain ID that was copied from
+	 * the previous kernel--if so, it needs to flush the translation cache
+	 * entries for the old domain ID that is being overwritten
+	 */
+	if (old_domid) {
+		struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];
+
+		amd_iommu_flush_tlb_domid(iommu, old_domid);
+	}
 }
 
 static void clear_dte_entry(u16 devid)
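The set_dte_entry() hunk snapshots the domain ID being overwritten before the device-table store, so stale IOTLB entries (for example, ones a kdump kernel inherited from its predecessor) can still be invalidated afterwards. The read-before-write ordering is the whole point; a sketch in which the mask value and table layout are simplified stand-ins:

#include <stdio.h>

#define DEV_DOMID_MASK 0xffffULL

static unsigned long long dev_table_data1 = 0x002a; /* stale domid 42 */

static void flush_tlb_domid(unsigned int domid)
{
	printf("flushing cached translations for domid %u\n", domid);
}

int main(void)
{
	unsigned long long new_flags = 0x0007; /* new domid 7 */

	/* Read the ID being overwritten *before* the store... */
	unsigned int old_domid = dev_table_data1 & DEV_DOMID_MASK;

	dev_table_data1 = new_flags;

	/* ...so translations cached under the old ID can be dropped. */
	if (old_domid)
		flush_tlb_domid(old_domid);
	return 0;
}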
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 12d094d08c0a..c4e0e4a9ee9e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -339,6 +339,8 @@ static void domain_exit(struct dmar_domain *domain);
 static void domain_remove_dev_info(struct dmar_domain *domain);
 static void dmar_remove_one_dev_info(struct device *dev);
 static void __dmar_remove_one_dev_info(struct device_domain_info *info);
+static void domain_context_clear(struct intel_iommu *iommu,
+				 struct device *dev);
 static int domain_detach_iommu(struct dmar_domain *domain,
 			       struct intel_iommu *iommu);
 static bool device_is_rmrr_locked(struct device *dev);
@@ -2105,9 +2107,26 @@ out_unlock:
 	return ret;
 }
 
+struct domain_context_mapping_data {
+	struct dmar_domain *domain;
+	struct intel_iommu *iommu;
+	struct pasid_table *table;
+};
+
+static int domain_context_mapping_cb(struct pci_dev *pdev,
+				     u16 alias, void *opaque)
+{
+	struct domain_context_mapping_data *data = opaque;
+
+	return domain_context_mapping_one(data->domain, data->iommu,
+					  data->table, PCI_BUS_NUM(alias),
+					  alias & 0xff);
+}
+
 static int
 domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 {
+	struct domain_context_mapping_data data;
 	struct pasid_table *table;
 	struct intel_iommu *iommu;
 	u8 bus, devfn;
@@ -2117,7 +2136,17 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
 		return -ENODEV;
 
 	table = intel_pasid_get_table(dev);
-	return domain_context_mapping_one(domain, iommu, table, bus, devfn);
+
+	if (!dev_is_pci(dev))
+		return domain_context_mapping_one(domain, iommu, table,
+						  bus, devfn);
+
+	data.domain = domain;
+	data.iommu = iommu;
+	data.table = table;
+
+	return pci_for_each_dma_alias(to_pci_dev(dev),
+				      &domain_context_mapping_cb, &data);
 }
 
 static int domain_context_mapped_cb(struct pci_dev *pdev,
@@ -4759,6 +4788,28 @@ out_free_dmar:
 	return ret;
 }
 
+static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque)
+{
+	struct intel_iommu *iommu = opaque;
+
+	domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff);
+	return 0;
+}
+
+/*
+ * NB - intel-iommu lacks any sort of reference counting for the users of
+ * dependent devices.  If multiple endpoints have intersecting dependent
+ * devices, unbinding the driver from any one of them will possibly leave
+ * the others unable to operate.
+ */
+static void domain_context_clear(struct intel_iommu *iommu, struct device *dev)
+{
+	if (!iommu || !dev || !dev_is_pci(dev))
+		return;
+
+	pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu);
+}
+
 static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 {
 	struct dmar_domain *domain;
@@ -4779,7 +4830,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
 					PASID_RID2PASID);
 
 		iommu_disable_dev_iotlb(info);
-		domain_context_clear_one(iommu, info->bus, info->devfn);
+		domain_context_clear(iommu, info->dev);
 		intel_pasid_free_table(info->dev);
 	}
 
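Both intel-iommu helpers above are built on pci_for_each_dma_alias(), which walks every requester ID a device can present on the bus and hands each one to a callback together with an opaque pointer. A userspace sketch of that callback-plus-opaque-data shape; the walker and alias list are stand-ins, since the real helper discovers aliases from PCI topology:

#include <stdio.h>

/* Stand-in for pci_for_each_dma_alias(): visits each alias of a device
 * and invokes the callback with a caller-supplied opaque pointer. */
typedef int (*alias_fn)(unsigned short alias, void *opaque);

static int for_each_dma_alias(const unsigned short *aliases, int n,
			      alias_fn fn, void *opaque)
{
	for (int i = 0; i < n; i++) {
		int ret = fn(aliases[i], opaque);
		if (ret)
			return ret;
	}
	return 0;
}

struct mapping_data { int domain_id; };

static int map_one(unsigned short alias, void *opaque)
{
	struct mapping_data *data = opaque;

	printf("mapping context %02x:%02x.%d into domain %d\n",
	       alias >> 8, (alias >> 3) & 0x1f, alias & 7, data->domain_id);
	return 0;
}

int main(void)
{
	/* e.g. a device behind a PCIe-to-PCI bridge also aliases as the bridge */
	const unsigned short aliases[] = { 0x0100, 0x00e0 };
	struct mapping_data data = { .domain_id = 3 };

	return for_each_dma_alias(aliases, 2, map_one, &data);
}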
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 780de0caafe8..9b159132405d 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -100,24 +100,19 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 }
 
 static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_dev *sdev,
-				unsigned long address, unsigned long pages, int ih, int gl)
+				unsigned long address, unsigned long pages, int ih)
 {
 	struct qi_desc desc;
 
-	if (pages == -1) {
-		/* For global kernel pages we have to flush them in *all* PASIDs
-		 * because that's the only option the hardware gives us. Despite
-		 * the fact that they are actually only accessible through one. */
-		if (gl)
+	/*
+	 * Do PASID granu IOTLB invalidation if page selective capability is
+	 * not available.
+	 */
+	if (pages == -1 || !cap_pgsel_inv(svm->iommu->cap)) {
 		desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
 			QI_EIOTLB_DID(sdev->did) |
-			QI_EIOTLB_GRAN(QI_GRAN_ALL_ALL) |
+			QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
 			QI_EIOTLB_TYPE;
-		else
-			desc.qw0 = QI_EIOTLB_PASID(svm->pasid) |
-					QI_EIOTLB_DID(sdev->did) |
-					QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
-					QI_EIOTLB_TYPE;
 		desc.qw1 = 0;
 	} else {
 		int mask = ilog2(__roundup_pow_of_two(pages));
@@ -127,7 +122,6 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 			QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
 			QI_EIOTLB_TYPE;
 		desc.qw1 = QI_EIOTLB_ADDR(address) |
-			QI_EIOTLB_GL(gl) |
 			QI_EIOTLB_IH(ih) |
 			QI_EIOTLB_AM(mask);
 	}
@@ -162,13 +156,13 @@ static void intel_flush_svm_range_dev (struct intel_svm *svm, struct intel_svm_d
 }
 
 static void intel_flush_svm_range(struct intel_svm *svm, unsigned long address,
-				unsigned long pages, int ih, int gl)
+				unsigned long pages, int ih)
 {
 	struct intel_svm_dev *sdev;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list)
-		intel_flush_svm_range_dev(svm, sdev, address, pages, ih, gl);
+		intel_flush_svm_range_dev(svm, sdev, address, pages, ih);
 	rcu_read_unlock();
 }
 
@@ -180,7 +174,7 @@ static void intel_invalidate_range(struct mmu_notifier *mn,
 	struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
 
 	intel_flush_svm_range(svm, start,
-			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0, 0);
+			      (end - start + PAGE_SIZE - 1) >> VTD_PAGE_SHIFT, 0);
 }
 
 static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -203,7 +197,7 @@ static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 	rcu_read_lock();
 	list_for_each_entry_rcu(sdev, &svm->devs, list) {
 		intel_pasid_tear_down_entry(svm->iommu, sdev->dev, svm->pasid);
-		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+		intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 	}
 	rcu_read_unlock();
 
@@ -425,7 +419,7 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
 			 * large and has to be physically contiguous. So it's
 			 * hard to be as defensive as we might like. */
 			intel_pasid_tear_down_entry(iommu, dev, svm->pasid);
-			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+			intel_flush_svm_range_dev(svm, sdev, 0, -1, 0);
 			kfree_rcu(sdev, rcu);
 
 			if (list_empty(&svm->devs)) {
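With the global ('gl') flush paths gone, the invalidation granularity now depends only on whether the range is bounded and on the page-selective capability bit. A sketch of that selection logic, with the capability bit and descriptor fields reduced to plain values:

#include <stdio.h>

/* 1 if the IOMMU supports page-selective invalidation, 0 otherwise. */
static int cap_pgsel_inv = 1;

static void flush_range(long pages)
{
	if (pages == -1 || !cap_pgsel_inv)
		printf("non-global, PASID-wide invalidation\n");
	else
		printf("page-selective invalidation of %ld page(s)\n", pages);
}

int main(void)
{
	flush_range(-1);  /* unbounded: whole PASID */
	flush_range(16);  /* bounded and supported: page-selective */
	return 0;
}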
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 3c3ad42f22bf..c92b405b7646 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -688,6 +688,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
 	if (!cdev->ap.applid)
 		return -ENODEV;
 
+	if (count < CAPIMSG_BASELEN)
+		return -EINVAL;
+
 	skb = alloc_skb(count, GFP_USER);
 	if (!skb)
 		return -ENOMEM;
@@ -698,7 +701,8 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
 	}
 	mlen = CAPIMSG_LEN(skb->data);
 	if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_REQ) {
-		if ((size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
+		if (count < CAPI_DATA_B3_REQ_LEN ||
+		    (size_t)(mlen + CAPIMSG_DATALEN(skb->data)) != count) {
 			kfree_skb(skb);
 			return -EINVAL;
 		}
@@ -711,6 +715,10 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
 	CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
 
 	if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
+		if (count < CAPI_DISCONNECT_B3_RESP_LEN) {
+			kfree_skb(skb);
+			return -EINVAL;
+		}
 		mutex_lock(&cdev->lock);
 		capincci_free(cdev, CAPIMSG_NCCI(skb->data));
 		mutex_unlock(&cdev->lock);
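The capi_write() fix validates count against the fixed part of each message before any field of that part is parsed, closing reads past the end of short writes. A sketch of the check order; the *_LEN constants mirror the CAPI headers but treat the exact values as assumptions:

#include <stdio.h>

#define CAPIMSG_BASELEN              8  /* fixed header length */
#define CAPI_DISCONNECT_B3_RESP_LEN 10  /* illustrative per-command minimum */

/* Reject a write that is too short for the fields about to be parsed. */
static int capi_check_count(size_t count, size_t cmd_min_len)
{
	if (count < CAPIMSG_BASELEN)
		return -22; /* -EINVAL: can't even read the base header */
	if (count < cmd_min_len)
		return -22; /* -EINVAL: command-specific part is truncated */
	return 0;
}

int main(void)
{
	printf("%d\n", capi_check_count(4, CAPI_DISCONNECT_B3_RESP_LEN));  /* -22 */
	printf("%d\n", capi_check_count(16, CAPI_DISCONNECT_B3_RESP_LEN)); /* 0 */
	return 0;
}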
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 74e4364bc9fb..09113b9ad679 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -564,7 +564,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 	if (index == EXT_CSD_SANITIZE_START)
 		cmd.sanitize_busy = true;
 
-	err = mmc_wait_for_cmd(host, &cmd, 0);
+	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);
 	if (err)
 		goto out;
 
diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c
index 7e0d3a49c06d..bb31e13648d6 100644
--- a/drivers/mmc/host/bcm2835.c
+++ b/drivers/mmc/host/bcm2835.c
@@ -597,7 +597,7 @@ static void bcm2835_finish_request(struct bcm2835_host *host)
 	struct dma_chan *terminate_chan = NULL;
 	struct mmc_request *mrq;
 
-	cancel_delayed_work_sync(&host->timeout_work);
+	cancel_delayed_work(&host->timeout_work);
 
 	mrq = host->mrq;
 
diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c
index 64d3b5fb7fe5..4a2872f49a60 100644
--- a/drivers/mmc/host/renesas_sdhi_core.c
+++ b/drivers/mmc/host/renesas_sdhi_core.c
@@ -774,8 +774,6 @@ int renesas_sdhi_probe(struct platform_device *pdev,
 	/* All SDHI have SDIO status bits which must be 1 */
 	mmc_data->flags |= TMIO_MMC_SDIO_STATUS_SETBITS;
 
-	pm_runtime_enable(&pdev->dev);
-
 	ret = renesas_sdhi_clk_enable(host);
 	if (ret)
 		goto efree;
@@ -856,8 +854,6 @@ edisclk:
 efree:
 	tmio_mmc_host_free(host);
 
-	pm_runtime_disable(&pdev->dev);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(renesas_sdhi_probe);
@@ -869,8 +865,6 @@ int renesas_sdhi_remove(struct platform_device *pdev)
 	tmio_mmc_host_remove(host);
 	renesas_sdhi_clk_disable(host);
 
-	pm_runtime_disable(&pdev->dev);
-
 	return 0;
 }
 EXPORT_SYMBOL_GPL(renesas_sdhi_remove);
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 9dc4548271b4..19944b0049db 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -432,7 +432,6 @@ int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot)
 			mmc_hostname(host->mmc));
 		host->flags &= ~SDHCI_SIGNALING_330;
 		host->flags |= SDHCI_SIGNALING_180;
-		host->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD;
 		host->mmc->caps2 |= MMC_CAP2_NO_SD;
 		host->mmc->caps2 |= MMC_CAP2_NO_SDIO;
 		pci_write_config_dword(chip->pdev,
@@ -682,6 +681,7 @@ static const struct sdhci_ops sdhci_pci_o2_ops = {
 const struct sdhci_pci_fixes sdhci_o2 = {
 	.probe = sdhci_pci_o2_probe,
 	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+	.quirks2 = SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD,
 	.probe_slot = sdhci_pci_o2_probe_slot,
 #ifdef CONFIG_PM_SLEEP
 	.resume = sdhci_pci_o2_resume,
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index 8539e10784b4..93e83ad25976 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -172,8 +172,6 @@ static int tmio_mmc_probe(struct platform_device *pdev)
 	host->mmc->f_max = pdata->hclk;
 	host->mmc->f_min = pdata->hclk / 512;
 
-	pm_runtime_enable(&pdev->dev);
-
 	ret = tmio_mmc_host_probe(host);
 	if (ret)
 		goto host_free;
@@ -193,7 +191,6 @@ host_remove:
 	tmio_mmc_host_remove(host);
host_free:
 	tmio_mmc_host_free(host);
-	pm_runtime_disable(&pdev->dev);
cell_disable:
 	if (cell->disable)
 		cell->disable(pdev);
@@ -210,8 +207,6 @@ static int tmio_mmc_remove(struct platform_device *pdev)
 	if (cell->disable)
 		cell->disable(pdev);
 
-	pm_runtime_disable(&pdev->dev);
-
 	return 0;
 }
 
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index c5ba13fae399..2f0b092d6dcc 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -163,6 +163,7 @@ struct tmio_mmc_host {
 	unsigned long		last_req_ts;
 	struct mutex		ios_lock;	/* protect set_ios() context */
 	bool			native_hotplug;
+	bool			runtime_synced;
 	bool			sdio_irq_enabled;
 
 	/* Mandatory callback */
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 2cb3f951c3e2..9b6e1001e77c 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -1153,15 +1153,6 @@ void tmio_mmc_host_free(struct tmio_mmc_host *host)
1153} 1153}
1154EXPORT_SYMBOL_GPL(tmio_mmc_host_free); 1154EXPORT_SYMBOL_GPL(tmio_mmc_host_free);
1155 1155
1156/**
1157 * tmio_mmc_host_probe() - Common probe for all implementations
1158 * @_host: Host to probe
1159 *
1160 * Perform tasks common to all implementations probe functions.
1161 *
1162 * The caller should have called pm_runtime_enable() prior to calling
1163 * the common probe function.
1164 */
1165int tmio_mmc_host_probe(struct tmio_mmc_host *_host) 1156int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
1166{ 1157{
1167 struct platform_device *pdev = _host->pdev; 1158 struct platform_device *pdev = _host->pdev;
@@ -1257,19 +1248,22 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
1257 /* See if we also get DMA */ 1248 /* See if we also get DMA */
1258 tmio_mmc_request_dma(_host, pdata); 1249 tmio_mmc_request_dma(_host, pdata);
1259 1250
1260 pm_runtime_set_active(&pdev->dev);
1261 pm_runtime_set_autosuspend_delay(&pdev->dev, 50); 1251 pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
1262 pm_runtime_use_autosuspend(&pdev->dev); 1252 pm_runtime_use_autosuspend(&pdev->dev);
1253 pm_runtime_enable(&pdev->dev);
1254 pm_runtime_get_sync(&pdev->dev);
1263 1255
1264 ret = mmc_add_host(mmc); 1256 ret = mmc_add_host(mmc);
1265 if (ret) 1257 if (ret)
1266 goto remove_host; 1258 goto remove_host;
1267 1259
1268 dev_pm_qos_expose_latency_limit(&pdev->dev, 100); 1260 dev_pm_qos_expose_latency_limit(&pdev->dev, 100);
1261 pm_runtime_put(&pdev->dev);
1269 1262
1270 return 0; 1263 return 0;
1271 1264
1272remove_host: 1265remove_host:
1266 pm_runtime_put_noidle(&pdev->dev);
1273 tmio_mmc_host_remove(_host); 1267 tmio_mmc_host_remove(_host);
1274 return ret; 1268 return ret;
1275} 1269}
@@ -1280,12 +1274,11 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
1280 struct platform_device *pdev = host->pdev; 1274 struct platform_device *pdev = host->pdev;
1281 struct mmc_host *mmc = host->mmc; 1275 struct mmc_host *mmc = host->mmc;
1282 1276
1277 pm_runtime_get_sync(&pdev->dev);
1278
1283 if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) 1279 if (host->pdata->flags & TMIO_MMC_SDIO_IRQ)
1284 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); 1280 sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000);
1285 1281
1286 if (!host->native_hotplug)
1287 pm_runtime_get_sync(&pdev->dev);
1288
1289 dev_pm_qos_hide_latency_limit(&pdev->dev); 1282 dev_pm_qos_hide_latency_limit(&pdev->dev);
1290 1283
1291 mmc_remove_host(mmc); 1284 mmc_remove_host(mmc);
@@ -1294,7 +1287,10 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
1294 tmio_mmc_release_dma(host); 1287 tmio_mmc_release_dma(host);
1295 1288
1296 pm_runtime_dont_use_autosuspend(&pdev->dev); 1289 pm_runtime_dont_use_autosuspend(&pdev->dev);
1290 if (host->native_hotplug)
1291 pm_runtime_put_noidle(&pdev->dev);
1297 pm_runtime_put_sync(&pdev->dev); 1292 pm_runtime_put_sync(&pdev->dev);
1293 pm_runtime_disable(&pdev->dev);
1298} 1294}
1299EXPORT_SYMBOL_GPL(tmio_mmc_host_remove); 1295EXPORT_SYMBOL_GPL(tmio_mmc_host_remove);
1300 1296
@@ -1337,6 +1333,11 @@ int tmio_mmc_host_runtime_resume(struct device *dev)
1337{ 1333{
1338 struct tmio_mmc_host *host = dev_get_drvdata(dev); 1334 struct tmio_mmc_host *host = dev_get_drvdata(dev);
1339 1335
1336 if (!host->runtime_synced) {
1337 host->runtime_synced = true;
1338 return 0;
1339 }
1340
1340 tmio_mmc_clk_enable(host); 1341 tmio_mmc_clk_enable(host);
1341 tmio_mmc_hw_reset(host->mmc); 1342 tmio_mmc_hw_reset(host->mmc);
1342 1343
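
The tmio hunks above centralize runtime PM: glue drivers (tmio_mmc.c here, uniphier-sd.c below) stop calling pm_runtime_enable()/pm_runtime_disable() themselves, and the common probe/remove paths own the whole sequence. A minimal sketch of the resulting flow, using only the calls visible in the diff (surrounding driver context assumed, error handling trimmed):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int common_probe(struct platform_device *pdev)
    {
            pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
            pm_runtime_use_autosuspend(&pdev->dev);
            pm_runtime_enable(&pdev->dev);       /* owned by common code now */
            pm_runtime_get_sync(&pdev->dev);     /* hold device during init */

            /* ... mmc_add_host() etc.; on failure:
             *         pm_runtime_put_noidle(&pdev->dev);
             */

            pm_runtime_put(&pdev->dev);          /* drop the init reference */
            return 0;
    }

The new runtime_synced flag appears to exist because pm_runtime_get_sync() right after pm_runtime_enable() triggers a first ->runtime_resume() on hardware that probe itself is already clocking and resetting; the flag turns that first call into a no-op.
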
diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c
index 49aad9a79c18..91a2be41edf6 100644
--- a/drivers/mmc/host/uniphier-sd.c
+++ b/drivers/mmc/host/uniphier-sd.c
@@ -631,7 +631,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
631 host->clk_disable = uniphier_sd_clk_disable; 631 host->clk_disable = uniphier_sd_clk_disable;
632 host->set_clock = uniphier_sd_set_clock; 632 host->set_clock = uniphier_sd_set_clock;
633 633
634 pm_runtime_enable(&pdev->dev);
635 ret = uniphier_sd_clk_enable(host); 634 ret = uniphier_sd_clk_enable(host);
636 if (ret) 635 if (ret)
637 goto free_host; 636 goto free_host;
@@ -653,7 +652,6 @@ static int uniphier_sd_probe(struct platform_device *pdev)
653 652
654free_host: 653free_host:
655 tmio_mmc_host_free(host); 654 tmio_mmc_host_free(host);
656 pm_runtime_disable(&pdev->dev);
657 655
658 return ret; 656 return ret;
659} 657}
@@ -664,7 +662,6 @@ static int uniphier_sd_remove(struct platform_device *pdev)
664 662
665 tmio_mmc_host_remove(host); 663 tmio_mmc_host_remove(host);
666 uniphier_sd_clk_disable(host); 664 uniphier_sd_clk_disable(host);
667 pm_runtime_disable(&pdev->dev);
668 665
669 return 0; 666 return 0;
670} 667}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index 58c6231aaa00..87dece0e745d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -98,7 +98,7 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
98 .reset_level = HNAE3_GLOBAL_RESET }, 98 .reset_level = HNAE3_GLOBAL_RESET },
99 { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", 99 { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
100 .reset_level = HNAE3_GLOBAL_RESET }, 100 .reset_level = HNAE3_GLOBAL_RESET },
101 { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow", 101 { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
102 .reset_level = HNAE3_GLOBAL_RESET }, 102 .reset_level = HNAE3_GLOBAL_RESET },
103 { .int_msk = BIT(3), .msg = "tx_buf_overflow", 103 { .int_msk = BIT(3), .msg = "tx_buf_overflow",
104 .reset_level = HNAE3_GLOBAL_RESET }, 104 .reset_level = HNAE3_GLOBAL_RESET },
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 4f83f97ffe8b..2e5172f61564 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1984,8 +1984,11 @@ static void __ibmvnic_reset(struct work_struct *work)
1984 rwi = get_next_rwi(adapter); 1984 rwi = get_next_rwi(adapter);
1985 while (rwi) { 1985 while (rwi) {
1986 if (adapter->state == VNIC_REMOVING || 1986 if (adapter->state == VNIC_REMOVING ||
1987 adapter->state == VNIC_REMOVED) 1987 adapter->state == VNIC_REMOVED) {
1988 goto out; 1988 kfree(rwi);
1989 rc = EBUSY;
1990 break;
1991 }
1989 1992
1990 if (adapter->force_reset_recovery) { 1993 if (adapter->force_reset_recovery) {
1991 adapter->force_reset_recovery = false; 1994 adapter->force_reset_recovery = false;
@@ -2011,7 +2014,7 @@ static void __ibmvnic_reset(struct work_struct *work)
2011 netdev_dbg(adapter->netdev, "Reset failed\n"); 2014 netdev_dbg(adapter->netdev, "Reset failed\n");
2012 free_all_rwi(adapter); 2015 free_all_rwi(adapter);
2013 } 2016 }
2014out: 2017
2015 adapter->resetting = false; 2018 adapter->resetting = false;
2016 if (we_lock_rtnl) 2019 if (we_lock_rtnl)
2017 rtnl_unlock(); 2020 rtnl_unlock();
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index dc034f4e8cf6..1ce2397306b9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -36,6 +36,7 @@
36#include <net/vxlan.h> 36#include <net/vxlan.h>
37#include <net/mpls.h> 37#include <net/mpls.h>
38#include <net/xdp_sock.h> 38#include <net/xdp_sock.h>
39#include <net/xfrm.h>
39 40
40#include "ixgbe.h" 41#include "ixgbe.h"
41#include "ixgbe_common.h" 42#include "ixgbe_common.h"
@@ -2623,7 +2624,7 @@ adjust_by_size:
2623 /* 16K ints/sec to 9.2K ints/sec */ 2624 /* 16K ints/sec to 9.2K ints/sec */
2624 avg_wire_size *= 15; 2625 avg_wire_size *= 15;
2625 avg_wire_size += 11452; 2626 avg_wire_size += 11452;
2626 } else if (avg_wire_size <= 1980) { 2627 } else if (avg_wire_size < 1968) {
2627 /* 9.2K ints/sec to 8K ints/sec */ 2628 /* 9.2K ints/sec to 8K ints/sec */
2628 avg_wire_size *= 5; 2629 avg_wire_size *= 5;
2629 avg_wire_size += 22420; 2630 avg_wire_size += 22420;
@@ -2656,6 +2657,8 @@ adjust_by_size:
2656 case IXGBE_LINK_SPEED_2_5GB_FULL: 2657 case IXGBE_LINK_SPEED_2_5GB_FULL:
2657 case IXGBE_LINK_SPEED_1GB_FULL: 2658 case IXGBE_LINK_SPEED_1GB_FULL:
2658 case IXGBE_LINK_SPEED_10_FULL: 2659 case IXGBE_LINK_SPEED_10_FULL:
2660 if (avg_wire_size > 8064)
2661 avg_wire_size = 8064;
2659 itr += DIV_ROUND_UP(avg_wire_size, 2662 itr += DIV_ROUND_UP(avg_wire_size,
2660 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) * 2663 IXGBE_ITR_ADAPTIVE_MIN_INC * 64) *
2661 IXGBE_ITR_ADAPTIVE_MIN_INC; 2664 IXGBE_ITR_ADAPTIVE_MIN_INC;
@@ -8698,7 +8701,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
8698#endif /* IXGBE_FCOE */ 8701#endif /* IXGBE_FCOE */
8699 8702
8700#ifdef CONFIG_IXGBE_IPSEC 8703#ifdef CONFIG_IXGBE_IPSEC
8701 if (secpath_exists(skb) && 8704 if (xfrm_offload(skb) &&
8702 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) 8705 !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx))
8703 goto out_drop; 8706 goto out_drop;
8704#endif 8707#endif
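
Here, and in the matching ixgbevf hunk further down, the TX path's IPsec test changes from secpath_exists() to xfrm_offload(). A secpath only says the skb passed through the xfrm layer; xfrm_offload() returns the skb's hardware-offload state, which is what the ipsec_tx routines actually consume. As a fragment (skb assumed in scope):

    #include <net/xfrm.h>

    struct sec_path *sp = skb_sec_path(skb);      /* xfrm metadata, SW or HW */
    struct xfrm_offload *xo = xfrm_offload(skb);  /* non-NULL only for HW offload */

    if (xo) {
            /* safe to hand the packet to the device's IPsec engine */
    }
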
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index ad802a8909e0..a37dcd140f63 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -642,19 +642,17 @@ static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
642bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, 642bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
643 struct ixgbe_ring *tx_ring, int napi_budget) 643 struct ixgbe_ring *tx_ring, int napi_budget)
644{ 644{
645 u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
645 unsigned int total_packets = 0, total_bytes = 0; 646 unsigned int total_packets = 0, total_bytes = 0;
646 u32 i = tx_ring->next_to_clean, xsk_frames = 0;
647 unsigned int budget = q_vector->tx.work_limit;
648 struct xdp_umem *umem = tx_ring->xsk_umem; 647 struct xdp_umem *umem = tx_ring->xsk_umem;
649 union ixgbe_adv_tx_desc *tx_desc; 648 union ixgbe_adv_tx_desc *tx_desc;
650 struct ixgbe_tx_buffer *tx_bi; 649 struct ixgbe_tx_buffer *tx_bi;
651 bool xmit_done; 650 u32 xsk_frames = 0;
652 651
653 tx_bi = &tx_ring->tx_buffer_info[i]; 652 tx_bi = &tx_ring->tx_buffer_info[ntc];
654 tx_desc = IXGBE_TX_DESC(tx_ring, i); 653 tx_desc = IXGBE_TX_DESC(tx_ring, ntc);
655 i -= tx_ring->count;
656 654
657 do { 655 while (ntc != ntu) {
658 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) 656 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
659 break; 657 break;
660 658
@@ -670,22 +668,18 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
670 668
671 tx_bi++; 669 tx_bi++;
672 tx_desc++; 670 tx_desc++;
673 i++; 671 ntc++;
674 if (unlikely(!i)) { 672 if (unlikely(ntc == tx_ring->count)) {
675 i -= tx_ring->count; 673 ntc = 0;
676 tx_bi = tx_ring->tx_buffer_info; 674 tx_bi = tx_ring->tx_buffer_info;
677 tx_desc = IXGBE_TX_DESC(tx_ring, 0); 675 tx_desc = IXGBE_TX_DESC(tx_ring, 0);
678 } 676 }
679 677
680 /* issue prefetch for next Tx descriptor */ 678 /* issue prefetch for next Tx descriptor */
681 prefetch(tx_desc); 679 prefetch(tx_desc);
680 }
682 681
683 /* update budget accounting */ 682 tx_ring->next_to_clean = ntc;
684 budget--;
685 } while (likely(budget));
686
687 i += tx_ring->count;
688 tx_ring->next_to_clean = i;
689 683
690 u64_stats_update_begin(&tx_ring->syncp); 684 u64_stats_update_begin(&tx_ring->syncp);
691 tx_ring->stats.bytes += total_bytes; 685 tx_ring->stats.bytes += total_bytes;
@@ -704,9 +698,7 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
704 xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); 698 xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
705 } 699 }
706 700
707 xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); 701 return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
708
709 return budget > 0 && xmit_done;
710} 702}
711 703
712int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) 704int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
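
The rewritten ixgbe_clean_xdp_tx_irq() drops the count-down budget in favor of a plain next_to_clean/next_to_use walk, so a completion pass always consumes every descriptor the hardware has written back. A self-contained sketch of that walk on a hypothetical ring (plain C, not the driver's types):

    #include <stdbool.h>

    struct ring {
            unsigned int count;             /* ring size */
            unsigned int next_to_clean;     /* ntc */
            unsigned int next_to_use;       /* ntu */
            bool *done;                     /* stand-in for the DD bit */
    };

    static void clean_ring(struct ring *r)
    {
            unsigned int ntc = r->next_to_clean, ntu = r->next_to_use;

            while (ntc != ntu) {
                    if (!r->done[ntc])
                            break;          /* hardware not finished yet */
                    /* ... unmap/account the buffer at ntc ... */
                    if (++ntc == r->count)
                            ntc = 0;        /* wrap around */
            }
            r->next_to_clean = ntc;
    }
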
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 75e93ce2ed99..076f2da36f27 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -30,6 +30,7 @@
30#include <linux/bpf.h> 30#include <linux/bpf.h>
31#include <linux/bpf_trace.h> 31#include <linux/bpf_trace.h>
32#include <linux/atomic.h> 32#include <linux/atomic.h>
33#include <net/xfrm.h>
33 34
34#include "ixgbevf.h" 35#include "ixgbevf.h"
35 36
@@ -4167,7 +4168,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
4167 first->protocol = vlan_get_protocol(skb); 4168 first->protocol = vlan_get_protocol(skb);
4168 4169
4169#ifdef CONFIG_IXGBEVF_IPSEC 4170#ifdef CONFIG_IXGBEVF_IPSEC
4170 if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) 4171 if (xfrm_offload(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx))
4171 goto out_drop; 4172 goto out_drop;
4172#endif 4173#endif
4173 tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); 4174 tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index ef3f3d06ff1e..fce9b3a24347 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2240,7 +2240,7 @@ static int mlx4_validate_optimized_steering(struct mlx4_dev *dev)
2240 for (i = 1; i <= dev->caps.num_ports; i++) { 2240 for (i = 1; i <= dev->caps.num_ports; i++) {
2241 if (mlx4_dev_port(dev, i, &port_cap)) { 2241 if (mlx4_dev_port(dev, i, &port_cap)) {
2242 mlx4_err(dev, 2242 mlx4_err(dev,
2243 "QUERY_DEV_CAP command failed, can't veify DMFS high rate steering.\n"); 2243 "QUERY_DEV_CAP command failed, can't verify DMFS high rate steering.\n");
2244 } else if ((dev->caps.dmfs_high_steer_mode != 2244 } else if ((dev->caps.dmfs_high_steer_mode !=
2245 MLX4_STEERING_DMFS_A0_DEFAULT) && 2245 MLX4_STEERING_DMFS_A0_DEFAULT) &&
2246 (port_cap.dmfs_optimized_state == 2246 (port_cap.dmfs_optimized_state ==
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index d0a01e8f000a..b339125b2f09 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -232,9 +232,9 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
232 232
233 laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE); 233 laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
234 if (!laddr) { 234 if (!laddr) {
235 printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name); 235 pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
236 dev_kfree_skb(skb); 236 dev_kfree_skb_any(skb);
237 return NETDEV_TX_BUSY; 237 return NETDEV_TX_OK;
238 } 238 }
239 239
240 sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ 240 sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */
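
The sonic change encodes a general ndo_start_xmit rule: once the driver has freed the skb it must return NETDEV_TX_OK, because NETDEV_TX_BUSY tells the core to requeue the very buffer that no longer exists; dev_kfree_skb_any() is the variant safe in both process and atomic context. A sketch of the canonical mapping-failure path, using dma_mapping_error() as the usual test (the driver above checks for a zero address instead):

    laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
    if (dma_mapping_error(lp->device, laddr)) {
            pr_err_ratelimited("%s: failed to map tx DMA buffer.\n", dev->name);
            dev_kfree_skb_any(skb);   /* we own the skb: free it ... */
            return NETDEV_TX_OK;      /* ... and never report BUSY afterwards */
    }
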
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
index d5bbe3d6048b..05981b54eaab 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.c
@@ -260,9 +260,6 @@ nfp_flower_cmsg_process_one_rx(struct nfp_app *app, struct sk_buff *skb)
260 260
261 type = cmsg_hdr->type; 261 type = cmsg_hdr->type;
262 switch (type) { 262 switch (type) {
263 case NFP_FLOWER_CMSG_TYPE_PORT_REIFY:
264 nfp_flower_cmsg_portreify_rx(app, skb);
265 break;
266 case NFP_FLOWER_CMSG_TYPE_PORT_MOD: 263 case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
267 nfp_flower_cmsg_portmod_rx(app, skb); 264 nfp_flower_cmsg_portmod_rx(app, skb);
268 break; 265 break;
@@ -328,8 +325,7 @@ nfp_flower_queue_ctl_msg(struct nfp_app *app, struct sk_buff *skb, int type)
328 struct nfp_flower_priv *priv = app->priv; 325 struct nfp_flower_priv *priv = app->priv;
329 struct sk_buff_head *skb_head; 326 struct sk_buff_head *skb_head;
330 327
331 if (type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY || 328 if (type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
332 type == NFP_FLOWER_CMSG_TYPE_PORT_MOD)
333 skb_head = &priv->cmsg_skbs_high; 329 skb_head = &priv->cmsg_skbs_high;
334 else 330 else
335 skb_head = &priv->cmsg_skbs_low; 331 skb_head = &priv->cmsg_skbs_low;
@@ -368,6 +364,10 @@ void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
368 } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) { 364 } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_TUN_NEIGH) {
369 /* Acks from the NFP that the route is added - ignore. */ 365 /* Acks from the NFP that the route is added - ignore. */
370 dev_consume_skb_any(skb); 366 dev_consume_skb_any(skb);
367 } else if (cmsg_hdr->type == NFP_FLOWER_CMSG_TYPE_PORT_REIFY) {
368 /* Handle REIFY acks outside wq to prevent RTNL conflict. */
369 nfp_flower_cmsg_portreify_rx(app, skb);
370 dev_consume_skb_any(skb);
371 } else { 371 } else {
372 nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type); 372 nfp_flower_queue_ctl_msg(app, skb, cmsg_hdr->type);
373 } 373 }
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ecca794c55e2..05d2b478c99b 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -713,6 +713,21 @@ struct nv_skb_map {
713 struct nv_skb_map *next_tx_ctx; 713 struct nv_skb_map *next_tx_ctx;
714}; 714};
715 715
716struct nv_txrx_stats {
717 u64 stat_rx_packets;
718 u64 stat_rx_bytes; /* not always available in HW */
719 u64 stat_rx_missed_errors;
720 u64 stat_rx_dropped;
721 u64 stat_tx_packets; /* not always available in HW */
722 u64 stat_tx_bytes;
723 u64 stat_tx_dropped;
724};
725
726#define nv_txrx_stats_inc(member) \
727 __this_cpu_inc(np->txrx_stats->member)
728#define nv_txrx_stats_add(member, count) \
729 __this_cpu_add(np->txrx_stats->member, (count))
730
716/* 731/*
717 * SMP locking: 732 * SMP locking:
718 * All hardware access under netdev_priv(dev)->lock, except the performance 733 * All hardware access under netdev_priv(dev)->lock, except the performance
@@ -797,10 +812,7 @@ struct fe_priv {
797 812
798 /* RX software stats */ 813 /* RX software stats */
799 struct u64_stats_sync swstats_rx_syncp; 814 struct u64_stats_sync swstats_rx_syncp;
800 u64 stat_rx_packets; 815 struct nv_txrx_stats __percpu *txrx_stats;
801 u64 stat_rx_bytes; /* not always available in HW */
802 u64 stat_rx_missed_errors;
803 u64 stat_rx_dropped;
804 816
805 /* media detection workaround. 817 /* media detection workaround.
806 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock); 818 * Locking: Within irq hander or disable_irq+spin_lock(&np->lock);
@@ -826,9 +838,6 @@ struct fe_priv {
826 838
827 /* TX software stats */ 839 /* TX software stats */
828 struct u64_stats_sync swstats_tx_syncp; 840 struct u64_stats_sync swstats_tx_syncp;
829 u64 stat_tx_packets; /* not always available in HW */
830 u64 stat_tx_bytes;
831 u64 stat_tx_dropped;
832 841
833 /* msi/msi-x fields */ 842 /* msi/msi-x fields */
834 u32 msi_flags; 843 u32 msi_flags;
@@ -1721,6 +1730,39 @@ static void nv_update_stats(struct net_device *dev)
1721 } 1730 }
1722} 1731}
1723 1732
1733static void nv_get_stats(int cpu, struct fe_priv *np,
1734 struct rtnl_link_stats64 *storage)
1735{
1736 struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);
1737 unsigned int syncp_start;
1738 u64 rx_packets, rx_bytes, rx_dropped, rx_missed_errors;
1739 u64 tx_packets, tx_bytes, tx_dropped;
1740
1741 do {
1742 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp);
1743 rx_packets = src->stat_rx_packets;
1744 rx_bytes = src->stat_rx_bytes;
1745 rx_dropped = src->stat_rx_dropped;
1746 rx_missed_errors = src->stat_rx_missed_errors;
1747 } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
1748
1749 storage->rx_packets += rx_packets;
1750 storage->rx_bytes += rx_bytes;
1751 storage->rx_dropped += rx_dropped;
1752 storage->rx_missed_errors += rx_missed_errors;
1753
1754 do {
1755 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
1756 tx_packets = src->stat_tx_packets;
1757 tx_bytes = src->stat_tx_bytes;
1758 tx_dropped = src->stat_tx_dropped;
1759 } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
1760
1761 storage->tx_packets += tx_packets;
1762 storage->tx_bytes += tx_bytes;
1763 storage->tx_dropped += tx_dropped;
1764}
1765
1724/* 1766/*
1725 * nv_get_stats64: dev->ndo_get_stats64 function 1767 * nv_get_stats64: dev->ndo_get_stats64 function
1726 * Get latest stats value from the nic. 1768 * Get latest stats value from the nic.
@@ -1733,7 +1775,7 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
1733 __releases(&netdev_priv(dev)->hwstats_lock) 1775 __releases(&netdev_priv(dev)->hwstats_lock)
1734{ 1776{
1735 struct fe_priv *np = netdev_priv(dev); 1777 struct fe_priv *np = netdev_priv(dev);
1736 unsigned int syncp_start; 1778 int cpu;
1737 1779
1738 /* 1780 /*
1739 * Note: because HW stats are not always available and for 1781 * Note: because HW stats are not always available and for
@@ -1746,20 +1788,8 @@ nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
1746 */ 1788 */
1747 1789
1748 /* software stats */ 1790 /* software stats */
1749 do { 1791 for_each_online_cpu(cpu)
1750 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_rx_syncp); 1792 nv_get_stats(cpu, np, storage);
1751 storage->rx_packets = np->stat_rx_packets;
1752 storage->rx_bytes = np->stat_rx_bytes;
1753 storage->rx_dropped = np->stat_rx_dropped;
1754 storage->rx_missed_errors = np->stat_rx_missed_errors;
1755 } while (u64_stats_fetch_retry_irq(&np->swstats_rx_syncp, syncp_start));
1756
1757 do {
1758 syncp_start = u64_stats_fetch_begin_irq(&np->swstats_tx_syncp);
1759 storage->tx_packets = np->stat_tx_packets;
1760 storage->tx_bytes = np->stat_tx_bytes;
1761 storage->tx_dropped = np->stat_tx_dropped;
1762 } while (u64_stats_fetch_retry_irq(&np->swstats_tx_syncp, syncp_start));
1763 1793
1764 /* If the nic supports hw counters then retrieve latest values */ 1794 /* If the nic supports hw counters then retrieve latest values */
1765 if (np->driver_data & DEV_HAS_STATISTICS_V123) { 1795 if (np->driver_data & DEV_HAS_STATISTICS_V123) {
@@ -1827,7 +1857,7 @@ static int nv_alloc_rx(struct net_device *dev)
1827 } else { 1857 } else {
1828packet_dropped: 1858packet_dropped:
1829 u64_stats_update_begin(&np->swstats_rx_syncp); 1859 u64_stats_update_begin(&np->swstats_rx_syncp);
1830 np->stat_rx_dropped++; 1860 nv_txrx_stats_inc(stat_rx_dropped);
1831 u64_stats_update_end(&np->swstats_rx_syncp); 1861 u64_stats_update_end(&np->swstats_rx_syncp);
1832 return 1; 1862 return 1;
1833 } 1863 }
@@ -1869,7 +1899,7 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
1869 } else { 1899 } else {
1870packet_dropped: 1900packet_dropped:
1871 u64_stats_update_begin(&np->swstats_rx_syncp); 1901 u64_stats_update_begin(&np->swstats_rx_syncp);
1872 np->stat_rx_dropped++; 1902 nv_txrx_stats_inc(stat_rx_dropped);
1873 u64_stats_update_end(&np->swstats_rx_syncp); 1903 u64_stats_update_end(&np->swstats_rx_syncp);
1874 return 1; 1904 return 1;
1875 } 1905 }
@@ -2013,7 +2043,7 @@ static void nv_drain_tx(struct net_device *dev)
2013 } 2043 }
2014 if (nv_release_txskb(np, &np->tx_skb[i])) { 2044 if (nv_release_txskb(np, &np->tx_skb[i])) {
2015 u64_stats_update_begin(&np->swstats_tx_syncp); 2045 u64_stats_update_begin(&np->swstats_tx_syncp);
2016 np->stat_tx_dropped++; 2046 nv_txrx_stats_inc(stat_tx_dropped);
2017 u64_stats_update_end(&np->swstats_tx_syncp); 2047 u64_stats_update_end(&np->swstats_tx_syncp);
2018 } 2048 }
2019 np->tx_skb[i].dma = 0; 2049 np->tx_skb[i].dma = 0;
@@ -2227,7 +2257,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2227 /* on DMA mapping error - drop the packet */ 2257 /* on DMA mapping error - drop the packet */
2228 dev_kfree_skb_any(skb); 2258 dev_kfree_skb_any(skb);
2229 u64_stats_update_begin(&np->swstats_tx_syncp); 2259 u64_stats_update_begin(&np->swstats_tx_syncp);
2230 np->stat_tx_dropped++; 2260 nv_txrx_stats_inc(stat_tx_dropped);
2231 u64_stats_update_end(&np->swstats_tx_syncp); 2261 u64_stats_update_end(&np->swstats_tx_syncp);
2232 return NETDEV_TX_OK; 2262 return NETDEV_TX_OK;
2233 } 2263 }
@@ -2273,7 +2303,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2273 dev_kfree_skb_any(skb); 2303 dev_kfree_skb_any(skb);
2274 np->put_tx_ctx = start_tx_ctx; 2304 np->put_tx_ctx = start_tx_ctx;
2275 u64_stats_update_begin(&np->swstats_tx_syncp); 2305 u64_stats_update_begin(&np->swstats_tx_syncp);
2276 np->stat_tx_dropped++; 2306 nv_txrx_stats_inc(stat_tx_dropped);
2277 u64_stats_update_end(&np->swstats_tx_syncp); 2307 u64_stats_update_end(&np->swstats_tx_syncp);
2278 return NETDEV_TX_OK; 2308 return NETDEV_TX_OK;
2279 } 2309 }
@@ -2384,7 +2414,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2384 /* on DMA mapping error - drop the packet */ 2414 /* on DMA mapping error - drop the packet */
2385 dev_kfree_skb_any(skb); 2415 dev_kfree_skb_any(skb);
2386 u64_stats_update_begin(&np->swstats_tx_syncp); 2416 u64_stats_update_begin(&np->swstats_tx_syncp);
2387 np->stat_tx_dropped++; 2417 nv_txrx_stats_inc(stat_tx_dropped);
2388 u64_stats_update_end(&np->swstats_tx_syncp); 2418 u64_stats_update_end(&np->swstats_tx_syncp);
2389 return NETDEV_TX_OK; 2419 return NETDEV_TX_OK;
2390 } 2420 }
@@ -2431,7 +2461,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2431 dev_kfree_skb_any(skb); 2461 dev_kfree_skb_any(skb);
2432 np->put_tx_ctx = start_tx_ctx; 2462 np->put_tx_ctx = start_tx_ctx;
2433 u64_stats_update_begin(&np->swstats_tx_syncp); 2463 u64_stats_update_begin(&np->swstats_tx_syncp);
2434 np->stat_tx_dropped++; 2464 nv_txrx_stats_inc(stat_tx_dropped);
2435 u64_stats_update_end(&np->swstats_tx_syncp); 2465 u64_stats_update_end(&np->swstats_tx_syncp);
2436 return NETDEV_TX_OK; 2466 return NETDEV_TX_OK;
2437 } 2467 }
@@ -2560,9 +2590,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
2560 && !(flags & NV_TX_RETRYCOUNT_MASK)) 2590 && !(flags & NV_TX_RETRYCOUNT_MASK))
2561 nv_legacybackoff_reseed(dev); 2591 nv_legacybackoff_reseed(dev);
2562 } else { 2592 } else {
2593 unsigned int len;
2594
2563 u64_stats_update_begin(&np->swstats_tx_syncp); 2595 u64_stats_update_begin(&np->swstats_tx_syncp);
2564 np->stat_tx_packets++; 2596 nv_txrx_stats_inc(stat_tx_packets);
2565 np->stat_tx_bytes += np->get_tx_ctx->skb->len; 2597 len = np->get_tx_ctx->skb->len;
2598 nv_txrx_stats_add(stat_tx_bytes, len);
2566 u64_stats_update_end(&np->swstats_tx_syncp); 2599 u64_stats_update_end(&np->swstats_tx_syncp);
2567 } 2600 }
2568 bytes_compl += np->get_tx_ctx->skb->len; 2601 bytes_compl += np->get_tx_ctx->skb->len;
@@ -2577,9 +2610,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
2577 && !(flags & NV_TX2_RETRYCOUNT_MASK)) 2610 && !(flags & NV_TX2_RETRYCOUNT_MASK))
2578 nv_legacybackoff_reseed(dev); 2611 nv_legacybackoff_reseed(dev);
2579 } else { 2612 } else {
2613 unsigned int len;
2614
2580 u64_stats_update_begin(&np->swstats_tx_syncp); 2615 u64_stats_update_begin(&np->swstats_tx_syncp);
2581 np->stat_tx_packets++; 2616 nv_txrx_stats_inc(stat_tx_packets);
2582 np->stat_tx_bytes += np->get_tx_ctx->skb->len; 2617 len = np->get_tx_ctx->skb->len;
2618 nv_txrx_stats_add(stat_tx_bytes, len);
2583 u64_stats_update_end(&np->swstats_tx_syncp); 2619 u64_stats_update_end(&np->swstats_tx_syncp);
2584 } 2620 }
2585 bytes_compl += np->get_tx_ctx->skb->len; 2621 bytes_compl += np->get_tx_ctx->skb->len;
@@ -2627,9 +2663,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2627 nv_legacybackoff_reseed(dev); 2663 nv_legacybackoff_reseed(dev);
2628 } 2664 }
2629 } else { 2665 } else {
2666 unsigned int len;
2667
2630 u64_stats_update_begin(&np->swstats_tx_syncp); 2668 u64_stats_update_begin(&np->swstats_tx_syncp);
2631 np->stat_tx_packets++; 2669 nv_txrx_stats_inc(stat_tx_packets);
2632 np->stat_tx_bytes += np->get_tx_ctx->skb->len; 2670 len = np->get_tx_ctx->skb->len;
2671 nv_txrx_stats_add(stat_tx_bytes, len);
2633 u64_stats_update_end(&np->swstats_tx_syncp); 2672 u64_stats_update_end(&np->swstats_tx_syncp);
2634 } 2673 }
2635 2674
@@ -2806,6 +2845,15 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2806 } 2845 }
2807} 2846}
2808 2847
2848static void rx_missing_handler(u32 flags, struct fe_priv *np)
2849{
2850 if (flags & NV_RX_MISSEDFRAME) {
2851 u64_stats_update_begin(&np->swstats_rx_syncp);
2852 nv_txrx_stats_inc(stat_rx_missed_errors);
2853 u64_stats_update_end(&np->swstats_rx_syncp);
2854 }
2855}
2856
2809static int nv_rx_process(struct net_device *dev, int limit) 2857static int nv_rx_process(struct net_device *dev, int limit)
2810{ 2858{
2811 struct fe_priv *np = netdev_priv(dev); 2859 struct fe_priv *np = netdev_priv(dev);
@@ -2848,11 +2896,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
2848 } 2896 }
2849 /* the rest are hard errors */ 2897 /* the rest are hard errors */
2850 else { 2898 else {
2851 if (flags & NV_RX_MISSEDFRAME) { 2899 rx_missing_handler(flags, np);
2852 u64_stats_update_begin(&np->swstats_rx_syncp);
2853 np->stat_rx_missed_errors++;
2854 u64_stats_update_end(&np->swstats_rx_syncp);
2855 }
2856 dev_kfree_skb(skb); 2900 dev_kfree_skb(skb);
2857 goto next_pkt; 2901 goto next_pkt;
2858 } 2902 }
@@ -2896,8 +2940,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
2896 skb->protocol = eth_type_trans(skb, dev); 2940 skb->protocol = eth_type_trans(skb, dev);
2897 napi_gro_receive(&np->napi, skb); 2941 napi_gro_receive(&np->napi, skb);
2898 u64_stats_update_begin(&np->swstats_rx_syncp); 2942 u64_stats_update_begin(&np->swstats_rx_syncp);
2899 np->stat_rx_packets++; 2943 nv_txrx_stats_inc(stat_rx_packets);
2900 np->stat_rx_bytes += len; 2944 nv_txrx_stats_add(stat_rx_bytes, len);
2901 u64_stats_update_end(&np->swstats_rx_syncp); 2945 u64_stats_update_end(&np->swstats_rx_syncp);
2902next_pkt: 2946next_pkt:
2903 if (unlikely(np->get_rx.orig++ == np->last_rx.orig)) 2947 if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
@@ -2982,8 +3026,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2982 } 3026 }
2983 napi_gro_receive(&np->napi, skb); 3027 napi_gro_receive(&np->napi, skb);
2984 u64_stats_update_begin(&np->swstats_rx_syncp); 3028 u64_stats_update_begin(&np->swstats_rx_syncp);
2985 np->stat_rx_packets++; 3029 nv_txrx_stats_inc(stat_rx_packets);
2986 np->stat_rx_bytes += len; 3030 nv_txrx_stats_add(stat_rx_bytes, len);
2987 u64_stats_update_end(&np->swstats_rx_syncp); 3031 u64_stats_update_end(&np->swstats_rx_syncp);
2988 } else { 3032 } else {
2989 dev_kfree_skb(skb); 3033 dev_kfree_skb(skb);
@@ -5651,6 +5695,12 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
5651 SET_NETDEV_DEV(dev, &pci_dev->dev); 5695 SET_NETDEV_DEV(dev, &pci_dev->dev);
5652 u64_stats_init(&np->swstats_rx_syncp); 5696 u64_stats_init(&np->swstats_rx_syncp);
5653 u64_stats_init(&np->swstats_tx_syncp); 5697 u64_stats_init(&np->swstats_tx_syncp);
5698 np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
5699 if (!np->txrx_stats) {
5699 np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
5700 pr_err("np->txrx_stats: failed to allocate memory.\n");
5701 err = -ENOMEM;
5702 goto out_alloc_percpu;
5703 }
5654 5704
5655 timer_setup(&np->oom_kick, nv_do_rx_refill, 0); 5705 timer_setup(&np->oom_kick, nv_do_rx_refill, 0);
5656 timer_setup(&np->nic_poll, nv_do_nic_poll, 0); 5706 timer_setup(&np->nic_poll, nv_do_nic_poll, 0);
@@ -6060,6 +6110,8 @@ out_relreg:
6060out_disable: 6110out_disable:
6061 pci_disable_device(pci_dev); 6111 pci_disable_device(pci_dev);
6062out_free: 6112out_free:
6113 free_percpu(np->txrx_stats);
6114out_alloc_percpu:
6063 free_netdev(dev); 6115 free_netdev(dev);
6064out: 6116out:
6065 return err; 6117 return err;
@@ -6105,6 +6157,9 @@ static void nv_restore_mac_addr(struct pci_dev *pci_dev)
6105static void nv_remove(struct pci_dev *pci_dev) 6157static void nv_remove(struct pci_dev *pci_dev)
6106{ 6158{
6107 struct net_device *dev = pci_get_drvdata(pci_dev); 6159 struct net_device *dev = pci_get_drvdata(pci_dev);
6160 struct fe_priv *np = netdev_priv(dev);
6161
6162 free_percpu(np->txrx_stats);
6108 6163
6109 unregister_netdev(dev); 6164 unregister_netdev(dev);
6110 6165
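
The forcedeth conversion moves all software counters into per-cpu storage: the hot paths bump their own CPU's copy (still inside the existing u64_stats sections), and ndo_get_stats64 folds every online CPU into the result. Trimmed to the pattern, as fragments using the APIs the diff adds (np, storage, cpu assumed in scope):

    #include <linux/percpu.h>
    #include <linux/u64_stats_sync.h>

    /* probe */
    np->txrx_stats = alloc_percpu(struct nv_txrx_stats);
    if (!np->txrx_stats)
            return -ENOMEM;

    /* hot path: lockless update of this CPU's instance */
    u64_stats_update_begin(&np->swstats_rx_syncp);
    __this_cpu_inc(np->txrx_stats->stat_rx_packets);
    u64_stats_update_end(&np->swstats_rx_syncp);

    /* reader: sum all CPUs (each read inside fetch_begin/retry, as above) */
    for_each_online_cpu(cpu) {
            struct nv_txrx_stats *src = per_cpu_ptr(np->txrx_stats, cpu);

            storage->rx_packets += src->stat_rx_packets;
    }

    /* remove */
    free_percpu(np->txrx_stats);

One caveat visible in the diff: summing only online CPUs silently drops counts accumulated on a CPU that has since gone offline; for_each_possible_cpu() would be the conservative alternative.
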
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4083019c547a..f97a4096f8fc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -873,7 +873,12 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv)
873 int ret; 873 int ret;
874 u32 reg, val; 874 u32 reg, val;
875 875
876 regmap_field_read(gmac->regmap_field, &val); 876 ret = regmap_field_read(gmac->regmap_field, &val);
877 if (ret) {
878 dev_err(priv->device, "Failed to read from regmap field.\n");
879 return ret;
880 }
881
877 reg = gmac->variant->default_syscon_value; 882 reg = gmac->variant->default_syscon_value;
878 if (reg != val) 883 if (reg != val)
879 dev_warn(priv->device, 884 dev_warn(priv->device,
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 331c16d30d5d..23281aeeb222 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -344,10 +344,10 @@ static void sp_bump(struct sixpack *sp, char cmd)
344 344
345 sp->dev->stats.rx_bytes += count; 345 sp->dev->stats.rx_bytes += count;
346 346
347 if ((skb = dev_alloc_skb(count)) == NULL) 347 if ((skb = dev_alloc_skb(count + 1)) == NULL)
348 goto out_mem; 348 goto out_mem;
349 349
350 ptr = skb_put(skb, count); 350 ptr = skb_put(skb, count + 1);
351 *ptr++ = cmd; /* KISS command */ 351 *ptr++ = cmd; /* KISS command */
352 352
353 memcpy(ptr, sp->cooked_buf + 1, count); 353 memcpy(ptr, sp->cooked_buf + 1, count);
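
The 6pack fix is a sizing bug: sp_bump() writes one KISS command byte plus count payload bytes, so both the allocation and the skb_put() reservation must cover count + 1. Laid out byte by byte:

    skb = dev_alloc_skb(count + 1);           /* byte 0 is the KISS command */
    ptr = skb_put(skb, count + 1);            /* reserve all count + 1 bytes */
    *ptr++ = cmd;                             /* byte 0: command */
    memcpy(ptr, sp->cooked_buf + 1, count);   /* bytes 1..count: payload */
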
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index a45c5de96ab1..a5a57ca94c1a 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -376,8 +376,8 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
376 * Local device Link partner 376 * Local device Link partner
377 * Pause AsymDir Pause AsymDir Result 377 * Pause AsymDir Pause AsymDir Result
378 * 1 X 1 X TX+RX 378 * 1 X 1 X TX+RX
379 * 0 1 1 1 RX 379 * 0 1 1 1 TX
380 * 1 1 0 1 TX 380 * 1 1 0 1 RX
381 */ 381 */
382static void phylink_resolve_flow(struct phylink *pl, 382static void phylink_resolve_flow(struct phylink *pl,
383 struct phylink_link_state *state) 383 struct phylink_link_state *state)
@@ -398,7 +398,7 @@ static void phylink_resolve_flow(struct phylink *pl,
398 new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX; 398 new_pause = MLO_PAUSE_TX | MLO_PAUSE_RX;
399 else if (pause & MLO_PAUSE_ASYM) 399 else if (pause & MLO_PAUSE_ASYM)
400 new_pause = state->pause & MLO_PAUSE_SYM ? 400 new_pause = state->pause & MLO_PAUSE_SYM ?
401 MLO_PAUSE_RX : MLO_PAUSE_TX; 401 MLO_PAUSE_TX : MLO_PAUSE_RX;
402 } else { 402 } else {
403 new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK; 403 new_pause = pl->link_config.pause & MLO_PAUSE_TXRX_MASK;
404 } 404 }
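
The corrected comment rows follow the 802.3 pause-resolution table from the local device's point of view: a device advertising only asymmetric pause (Pause=0, AsymDir=1) against a partner advertising symmetric ends up transmitting pause frames, and vice versa; the ternary below the table had the same two results swapped. A standalone sketch of the table as now documented (plain C, not the phylink API):

    #include <stdbool.h>

    enum pause_res { PAUSE_NONE, PAUSE_TX, PAUSE_RX, PAUSE_TXRX };

    /* result for the *local* device, per the fixed comment rows */
    static enum pause_res resolve_pause(bool l_pause, bool l_asym,
                                        bool p_pause, bool p_asym)
    {
            if (l_pause && p_pause)
                    return PAUSE_TXRX;        /* 1 X 1 X -> TX+RX */
            if (!l_pause && l_asym && p_pause && p_asym)
                    return PAUSE_TX;          /* 0 1 1 1 -> TX */
            if (l_pause && l_asym && !p_pause && p_asym)
                    return PAUSE_RX;          /* 1 1 0 1 -> RX */
            return PAUSE_NONE;
    }
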
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index db16d7a13e00..aab0be40d443 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -787,7 +787,8 @@ static void tun_detach_all(struct net_device *dev)
787} 787}
788 788
789static int tun_attach(struct tun_struct *tun, struct file *file, 789static int tun_attach(struct tun_struct *tun, struct file *file,
790 bool skip_filter, bool napi, bool napi_frags) 790 bool skip_filter, bool napi, bool napi_frags,
791 bool publish_tun)
791{ 792{
792 struct tun_file *tfile = file->private_data; 793 struct tun_file *tfile = file->private_data;
793 struct net_device *dev = tun->dev; 794 struct net_device *dev = tun->dev;
@@ -870,7 +871,8 @@ static int tun_attach(struct tun_struct *tun, struct file *file,
870 * initialized tfile; otherwise we risk using half-initialized 871 * initialized tfile; otherwise we risk using half-initialized
871 * object. 872 * object.
872 */ 873 */
873 rcu_assign_pointer(tfile->tun, tun); 874 if (publish_tun)
875 rcu_assign_pointer(tfile->tun, tun);
874 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); 876 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
875 tun->numqueues++; 877 tun->numqueues++;
876 tun_set_real_num_queues(tun); 878 tun_set_real_num_queues(tun);
@@ -2730,7 +2732,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2730 2732
2731 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER, 2733 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2732 ifr->ifr_flags & IFF_NAPI, 2734 ifr->ifr_flags & IFF_NAPI,
2733 ifr->ifr_flags & IFF_NAPI_FRAGS); 2735 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2734 if (err < 0) 2736 if (err < 0)
2735 return err; 2737 return err;
2736 2738
@@ -2829,13 +2831,17 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2829 2831
2830 INIT_LIST_HEAD(&tun->disabled); 2832 INIT_LIST_HEAD(&tun->disabled);
2831 err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI, 2833 err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
2832 ifr->ifr_flags & IFF_NAPI_FRAGS); 2834 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
2833 if (err < 0) 2835 if (err < 0)
2834 goto err_free_flow; 2836 goto err_free_flow;
2835 2837
2836 err = register_netdevice(tun->dev); 2838 err = register_netdevice(tun->dev);
2837 if (err < 0) 2839 if (err < 0)
2838 goto err_detach; 2840 goto err_detach;
2841 /* free_netdev() won't check refcnt, to avoid race
2842 * with dev_put() we need to publish tun after registration.
2843 */
2844 rcu_assign_pointer(tfile->tun, tun);
2839 } 2845 }
2840 2846
2841 netif_carrier_on(tun->dev); 2847 netif_carrier_on(tun->dev);
@@ -2978,7 +2984,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
2978 if (ret < 0) 2984 if (ret < 0)
2979 goto unlock; 2985 goto unlock;
2980 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI, 2986 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2981 tun->flags & IFF_NAPI_FRAGS); 2987 tun->flags & IFF_NAPI_FRAGS, true);
2982 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) { 2988 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2983 tun = rtnl_dereference(tfile->tun); 2989 tun = rtnl_dereference(tfile->tun);
2984 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached) 2990 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
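
The tun change is an RCU publication-ordering fix: tfile->tun may only be assigned once the device is fully registered, because a reader that finds the pointer early can take and drop references that free_netdev() on the error path will never see. The resulting shape, as a fragment:

    err = register_netdevice(tun->dev);
    if (err < 0)
            goto err_detach;               /* never published: safe to unwind */

    /* publication is the last step; rcu_assign_pointer() orders all
     * prior initialization before the pointer becomes visible */
    rcu_assign_pointer(tfile->tun, tun);
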
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 8458e88c18e9..32f53de5b1fe 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -206,7 +206,15 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
206 goto bad_desc; 206 goto bad_desc;
207 } 207 }
208skip: 208skip:
209 if (rndis && header.usb_cdc_acm_descriptor && 209 /* Communcation class functions with bmCapabilities are not
210 * RNDIS. But some Wireless class RNDIS functions use
211 * bmCapabilities for their own purpose. The failsafe is
212 * therefore applied only to Communication class RNDIS
213 * functions. The rndis test is redundant, but a cheap
214 * optimization.
215 */
216 if (rndis && is_rndis(&intf->cur_altsetting->desc) &&
217 header.usb_cdc_acm_descriptor &&
210 header.usb_cdc_acm_descriptor->bmCapabilities) { 218 header.usb_cdc_acm_descriptor->bmCapabilities) {
211 dev_dbg(&intf->dev, 219 dev_dbg(&intf->dev,
212 "ACM capabilities %02x, not really RNDIS?\n", 220 "ACM capabilities %02x, not really RNDIS?\n",
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4f3de0ac8b0b..ba98e0971b84 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1331,7 +1331,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
1331 } 1331 }
1332 } 1332 }
1333 1333
1334 if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) { 1334 if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) {
1335 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) 1335 if (!try_fill_recv(vi, rq, GFP_ATOMIC))
1336 schedule_delayed_work(&vi->refill, 0); 1336 schedule_delayed_work(&vi->refill, 0);
1337 } 1337 }
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index d74349628db2..0e6a51525d91 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -1115,7 +1115,7 @@ static void lmc_running_reset (struct net_device *dev) /*fold00*/
1115 sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN); 1115 sc->lmc_cmdmode |= (TULIP_CMD_TXRUN | TULIP_CMD_RXRUN);
1116 LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode); 1116 LMC_CSR_WRITE (sc, csr_command, sc->lmc_cmdmode);
1117 1117
1118 lmc_trace(dev, "lmc_runnin_reset_out"); 1118 lmc_trace(dev, "lmc_running_reset_out");
1119} 1119}
1120 1120
1121 1121
diff --git a/drivers/net/wimax/i2400m/op-rfkill.c b/drivers/net/wimax/i2400m/op-rfkill.c
index 6642bcb27761..8efb493ceec2 100644
--- a/drivers/net/wimax/i2400m/op-rfkill.c
+++ b/drivers/net/wimax/i2400m/op-rfkill.c
@@ -127,6 +127,7 @@ int i2400m_op_rfkill_sw_toggle(struct wimax_dev *wimax_dev,
127 "%d\n", result); 127 "%d\n", result);
128 result = 0; 128 result = 0;
129error_cmd: 129error_cmd:
130 kfree(cmd);
130 kfree_skb(ack_skb); 131 kfree_skb(ack_skb);
131error_msg_to_dev: 132error_msg_to_dev:
132error_alloc: 133error_alloc:
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e5ca1f2685b6..e29c47744ef5 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -1114,18 +1114,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1114 1114
1115 /* same thing for QuZ... */ 1115 /* same thing for QuZ... */
1116 if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) { 1116 if (iwl_trans->hw_rev == CSR_HW_REV_TYPE_QUZ) {
1117 if (cfg == &iwl_ax101_cfg_qu_hr) 1117 if (iwl_trans->cfg == &iwl_ax101_cfg_qu_hr)
1118 cfg = &iwl_ax101_cfg_quz_hr; 1118 iwl_trans->cfg = &iwl_ax101_cfg_quz_hr;
1119 else if (cfg == &iwl_ax201_cfg_qu_hr) 1119 else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr)
1120 cfg = &iwl_ax201_cfg_quz_hr; 1120 iwl_trans->cfg = &iwl_ax201_cfg_quz_hr;
1121 else if (cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0) 1121 else if (iwl_trans->cfg == &iwl9461_2ac_cfg_qu_b0_jf_b0)
1122 cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc; 1122 iwl_trans->cfg = &iwl9461_2ac_cfg_quz_a0_jf_b0_soc;
1123 else if (cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0) 1123 else if (iwl_trans->cfg == &iwl9462_2ac_cfg_qu_b0_jf_b0)
1124 cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc; 1124 iwl_trans->cfg = &iwl9462_2ac_cfg_quz_a0_jf_b0_soc;
1125 else if (cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0) 1125 else if (iwl_trans->cfg == &iwl9560_2ac_cfg_qu_b0_jf_b0)
1126 cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc; 1126 iwl_trans->cfg = &iwl9560_2ac_cfg_quz_a0_jf_b0_soc;
1127 else if (cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) 1127 else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0)
1128 cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc; 1128 iwl_trans->cfg = &iwl9560_2ac_160_cfg_quz_a0_jf_b0_soc;
1129 } 1129 }
1130 1130
1131#endif 1131#endif
diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c
index 653d347a9a19..580387f9f12a 100644
--- a/drivers/net/wireless/marvell/mwifiex/ie.c
+++ b/drivers/net/wireless/marvell/mwifiex/ie.c
@@ -241,6 +241,9 @@ static int mwifiex_update_vs_ie(const u8 *ies, int ies_len,
241 } 241 }
242 242
243 vs_ie = (struct ieee_types_header *)vendor_ie; 243 vs_ie = (struct ieee_types_header *)vendor_ie;
244 if (le16_to_cpu(ie->ie_length) + vs_ie->len + 2 >
245 IEEE_MAX_IE_SIZE)
246 return -EINVAL;
244 memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length), 247 memcpy(ie->ie_buffer + le16_to_cpu(ie->ie_length),
245 vs_ie, vs_ie->len + 2); 248 vs_ie, vs_ie->len + 2);
246 le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2); 249 le16_unaligned_add_cpu(&ie->ie_length, vs_ie->len + 2);
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
index 18f7d9bf30b2..0939a8c8f3ab 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_cmd.c
@@ -265,6 +265,8 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
265 265
266 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len); 266 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_SUPP_RATES, var_pos, len);
267 if (rate_ie) { 267 if (rate_ie) {
268 if (rate_ie->len > MWIFIEX_SUPPORTED_RATES)
269 return;
268 memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len); 270 memcpy(bss_cfg->rates, rate_ie + 1, rate_ie->len);
269 rate_len = rate_ie->len; 271 rate_len = rate_ie->len;
270 } 272 }
@@ -272,8 +274,11 @@ mwifiex_set_uap_rates(struct mwifiex_uap_bss_param *bss_cfg,
272 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, 274 rate_ie = (void *)cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES,
273 params->beacon.tail, 275 params->beacon.tail,
274 params->beacon.tail_len); 276 params->beacon.tail_len);
275 if (rate_ie) 277 if (rate_ie) {
278 if (rate_ie->len > MWIFIEX_SUPPORTED_RATES - rate_len)
279 return;
276 memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len); 280 memcpy(bss_cfg->rates + rate_len, rate_ie + 1, rate_ie->len);
281 }
277 282
278 return; 283 return;
279} 284}
@@ -391,6 +396,8 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
391 params->beacon.tail_len); 396 params->beacon.tail_len);
392 if (vendor_ie) { 397 if (vendor_ie) {
393 wmm_ie = vendor_ie; 398 wmm_ie = vendor_ie;
399 if (*(wmm_ie + 1) > sizeof(struct mwifiex_types_wmm_info))
400 return;
394 memcpy(&bss_cfg->wmm_info, wmm_ie + 401 memcpy(&bss_cfg->wmm_info, wmm_ie +
395 sizeof(struct ieee_types_header), *(wmm_ie + 1)); 402 sizeof(struct ieee_types_header), *(wmm_ie + 1));
396 priv->wmm_enabled = 1; 403 priv->wmm_enabled = 1;
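
All three mwifiex hunks add the same defence: the length field of an IE taken from a (possibly hostile) beacon is checked against the destination buffer before memcpy(). Generalized (plain C; the helper and its names are illustrative, not driver API):

    #include <string.h>

    /* copy a length-prefixed element into dst only if it fits;
     * dst_used tracks bytes already consumed (cf. rate_len above) */
    static int copy_ie_checked(unsigned char *dst, size_t dst_size,
                               size_t dst_used, const unsigned char *src,
                               size_t len)
    {
            if (dst_used > dst_size || len > dst_size - dst_used)
                    return -1;      /* reject: would overflow dst */
            memcpy(dst + dst_used, src, len);
            return 0;
    }
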
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index 40c0d536e20d..9d4426f6905f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -59,6 +59,11 @@ static void mt76x0_set_chip_cap(struct mt76x02_dev *dev)
59 dev_dbg(dev->mt76.dev, "mask out 2GHz support\n"); 59 dev_dbg(dev->mt76.dev, "mask out 2GHz support\n");
60 } 60 }
61 61
62 if (is_mt7630(dev)) {
63 dev->mt76.cap.has_5ghz = false;
64 dev_dbg(dev->mt76.dev, "mask out 5GHz support\n");
65 }
66
62 if (!mt76x02_field_valid(nic_conf1 & 0xff)) 67 if (!mt76x02_field_valid(nic_conf1 & 0xff))
63 nic_conf1 &= 0xff00; 68 nic_conf1 &= 0xff00;
64 69
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index f84a7df296ea..7705e55aa3d1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -51,6 +51,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw)
51 mt76x0e_stop_hw(dev); 51 mt76x0e_stop_hw(dev);
52} 52}
53 53
54static int
55mt76x0e_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
56 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
57 struct ieee80211_key_conf *key)
58{
59 struct mt76x02_dev *dev = hw->priv;
60
61 if (is_mt7630(dev))
62 return -EOPNOTSUPP;
63
64 return mt76x02_set_key(hw, cmd, vif, sta, key);
65}
66
54static void 67static void
55mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 68mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
56 u32 queues, bool drop) 69 u32 queues, bool drop)
@@ -67,7 +80,7 @@ static const struct ieee80211_ops mt76x0e_ops = {
67 .configure_filter = mt76x02_configure_filter, 80 .configure_filter = mt76x02_configure_filter,
68 .bss_info_changed = mt76x02_bss_info_changed, 81 .bss_info_changed = mt76x02_bss_info_changed,
69 .sta_state = mt76_sta_state, 82 .sta_state = mt76_sta_state,
70 .set_key = mt76x02_set_key, 83 .set_key = mt76x0e_set_key,
71 .conf_tx = mt76x02_conf_tx, 84 .conf_tx = mt76x02_conf_tx,
72 .sw_scan_start = mt76_sw_scan, 85 .sw_scan_start = mt76_sw_scan,
73 .sw_scan_complete = mt76x02_sw_scan_complete, 86 .sw_scan_complete = mt76x02_sw_scan_complete,
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
index ecbe78b8027b..f1cdcd61c54a 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
@@ -1654,13 +1654,18 @@ static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev,
1654 1654
1655 offset = MAC_IVEIV_ENTRY(key->hw_key_idx); 1655 offset = MAC_IVEIV_ENTRY(key->hw_key_idx);
1656 1656
1657 rt2800_register_multiread(rt2x00dev, offset, 1657 if (crypto->cmd == SET_KEY) {
1658 &iveiv_entry, sizeof(iveiv_entry)); 1658 rt2800_register_multiread(rt2x00dev, offset,
1659 if ((crypto->cipher == CIPHER_TKIP) || 1659 &iveiv_entry, sizeof(iveiv_entry));
1660 (crypto->cipher == CIPHER_TKIP_NO_MIC) || 1660 if ((crypto->cipher == CIPHER_TKIP) ||
1661 (crypto->cipher == CIPHER_AES)) 1661 (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
1662 iveiv_entry.iv[3] |= 0x20; 1662 (crypto->cipher == CIPHER_AES))
1663 iveiv_entry.iv[3] |= key->keyidx << 6; 1663 iveiv_entry.iv[3] |= 0x20;
1664 iveiv_entry.iv[3] |= key->keyidx << 6;
1665 } else {
1666 memset(&iveiv_entry, 0, sizeof(iveiv_entry));
1667 }
1668
1664 rt2800_register_multiwrite(rt2x00dev, offset, 1669 rt2800_register_multiwrite(rt2x00dev, offset,
1665 &iveiv_entry, sizeof(iveiv_entry)); 1670 &iveiv_entry, sizeof(iveiv_entry));
1666} 1671}
@@ -4237,24 +4242,18 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
4237 switch (rt2x00dev->default_ant.rx_chain_num) { 4242 switch (rt2x00dev->default_ant.rx_chain_num) {
4238 case 3: 4243 case 3:
4239 /* Turn on tertiary LNAs */ 4244 /* Turn on tertiary LNAs */
4240 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 4245 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A2_EN, 1);
4241 rf->channel > 14); 4246 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1);
4242 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN,
4243 rf->channel <= 14);
4244 /* fall-through */ 4247 /* fall-through */
4245 case 2: 4248 case 2:
4246 /* Turn on secondary LNAs */ 4249 /* Turn on secondary LNAs */
4247 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 4250 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
4248 rf->channel > 14); 4251 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
4249 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN,
4250 rf->channel <= 14);
4251 /* fall-through */ 4252 /* fall-through */
4252 case 1: 4253 case 1:
4253 /* Turn on primary LNAs */ 4254 /* Turn on primary LNAs */
4254 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 4255 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
4255 rf->channel > 14); 4256 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
4256 rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN,
4257 rf->channel <= 14);
4258 break; 4257 break;
4259 } 4258 }
4260 4259
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index f5048d4b8cb6..760eaffeebd6 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -645,7 +645,6 @@ fail_rx:
645 kfree(rsi_dev->tx_buffer); 645 kfree(rsi_dev->tx_buffer);
646 646
647fail_eps: 647fail_eps:
648 kfree(rsi_dev);
649 648
650 return status; 649 return status;
651} 650}
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index e42850095892..7eda62a9e0df 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -316,7 +316,7 @@ static int st95hf_echo_command(struct st95hf_context *st95context)
316 &echo_response); 316 &echo_response);
317 if (result) { 317 if (result) {
318 dev_err(&st95context->spicontext.spidev->dev, 318 dev_err(&st95context->spicontext.spidev->dev,
319 "err: echo response receieve error = 0x%x\n", result); 319 "err: echo response receive error = 0x%x\n", result);
320 return result; 320 return result;
321 } 321 }
322 322
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 3e7b11cf1aae..cb98b8fe786e 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -655,6 +655,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
655 resource_size_t start, size; 655 resource_size_t start, size;
656 struct nd_region *nd_region; 656 struct nd_region *nd_region;
657 unsigned long npfns, align; 657 unsigned long npfns, align;
658 u32 end_trunc;
658 struct nd_pfn_sb *pfn_sb; 659 struct nd_pfn_sb *pfn_sb;
659 phys_addr_t offset; 660 phys_addr_t offset;
660 const char *sig; 661 const char *sig;
@@ -696,6 +697,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
696 size = resource_size(&nsio->res); 697 size = resource_size(&nsio->res);
697 npfns = PHYS_PFN(size - SZ_8K); 698 npfns = PHYS_PFN(size - SZ_8K);
698 align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT)); 699 align = max(nd_pfn->align, (1UL << SUBSECTION_SHIFT));
700 end_trunc = start + size - ALIGN_DOWN(start + size, align);
699 if (nd_pfn->mode == PFN_MODE_PMEM) { 701 if (nd_pfn->mode == PFN_MODE_PMEM) {
700 /* 702 /*
701 * The altmap should be padded out to the block size used 703 * The altmap should be padded out to the block size used
@@ -714,7 +716,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
714 return -ENXIO; 716 return -ENXIO;
715 } 717 }
716 718
717 npfns = PHYS_PFN(size - offset); 719 npfns = PHYS_PFN(size - offset - end_trunc);
718 pfn_sb->mode = cpu_to_le32(nd_pfn->mode); 720 pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
719 pfn_sb->dataoff = cpu_to_le64(offset); 721 pfn_sb->dataoff = cpu_to_le64(offset);
720 pfn_sb->npfns = cpu_to_le64(npfns); 722 pfn_sb->npfns = cpu_to_le64(npfns);
@@ -723,6 +725,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
723 memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16); 725 memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
724 pfn_sb->version_major = cpu_to_le16(1); 726 pfn_sb->version_major = cpu_to_le16(1);
725 pfn_sb->version_minor = cpu_to_le16(3); 727 pfn_sb->version_minor = cpu_to_le16(3);
728 pfn_sb->end_trunc = cpu_to_le32(end_trunc);
726 pfn_sb->align = cpu_to_le32(nd_pfn->align); 729 pfn_sb->align = cpu_to_le32(nd_pfn->align);
727 checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb); 730 checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
728 pfn_sb->checksum = cpu_to_le64(checksum); 731 pfn_sb->checksum = cpu_to_le64(checksum);
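
end_trunc is the tail of the namespace that lies past its last align-sized boundary; truncating it keeps npfns (and hence the page array) from describing partially covered memory. With assumed example numbers:

    /* ALIGN_DOWN(x, a) == x & ~(a - 1) for power-of-two a, e.g. with
     * start + size = 0x123450000 and align = 2 MiB (0x200000):
     *   ALIGN_DOWN(0x123450000, 0x200000) = 0x123400000
     *   end_trunc = 0x123450000 - 0x123400000 = 0x50000 (320 KiB)
     */
    end_trunc = start + size - ALIGN_DOWN(start + size, align);
    npfns = PHYS_PFN(size - offset - end_trunc);   /* exclude head and tail */
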
diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
index ba6438ac4d72..ff84d1afd229 100644
--- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
+++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g5.c
@@ -2552,7 +2552,7 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
2552 if (IS_ERR(map)) 2552 if (IS_ERR(map))
2553 return map; 2553 return map;
2554 } else 2554 } else
2555 map = ERR_PTR(-ENODEV); 2555 return ERR_PTR(-ENODEV);
2556 2556
2557 ctx->maps[ASPEED_IP_LPC] = map; 2557 ctx->maps[ASPEED_IP_LPC] = map;
2558 dev_dbg(ctx->dev, "Acquired LPC regmap"); 2558 dev_dbg(ctx->dev, "Acquired LPC regmap");
@@ -2562,6 +2562,33 @@ static struct regmap *aspeed_g5_acquire_regmap(struct aspeed_pinmux_data *ctx,
2562 return ERR_PTR(-EINVAL); 2562 return ERR_PTR(-EINVAL);
2563} 2563}
2564 2564
2565static int aspeed_g5_sig_expr_eval(struct aspeed_pinmux_data *ctx,
2566 const struct aspeed_sig_expr *expr,
2567 bool enabled)
2568{
2569 int ret;
2570 int i;
2571
2572 for (i = 0; i < expr->ndescs; i++) {
2573 const struct aspeed_sig_desc *desc = &expr->descs[i];
2574 struct regmap *map;
2575
2576 map = aspeed_g5_acquire_regmap(ctx, desc->ip);
2577 if (IS_ERR(map)) {
2578 dev_err(ctx->dev,
2579 "Failed to acquire regmap for IP block %d\n",
2580 desc->ip);
2581 return PTR_ERR(map);
2582 }
2583
2584 ret = aspeed_sig_desc_eval(desc, enabled, ctx->maps[desc->ip]);
2585 if (ret <= 0)
2586 return ret;
2587 }
2588
2589 return 1;
2590}
2591
2565/** 2592/**
2566 * Configure a pin's signal by applying an expression's descriptor state for 2593 * Configure a pin's signal by applying an expression's descriptor state for
2567 * all descriptors in the expression. 2594 * all descriptors in the expression.
@@ -2647,6 +2674,7 @@ static int aspeed_g5_sig_expr_set(struct aspeed_pinmux_data *ctx,
2647} 2674}
2648 2675
2649static const struct aspeed_pinmux_ops aspeed_g5_ops = { 2676static const struct aspeed_pinmux_ops aspeed_g5_ops = {
2677 .eval = aspeed_g5_sig_expr_eval,
2650 .set = aspeed_g5_sig_expr_set, 2678 .set = aspeed_g5_sig_expr_set,
2651}; 2679};
2652 2680
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.c b/drivers/pinctrl/aspeed/pinmux-aspeed.c
index 839c01b7953f..57305ca838a7 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.c
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.c
@@ -78,11 +78,14 @@ int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc,
78 * neither the enabled nor disabled state. Thus we must explicitly test for 78 * neither the enabled nor disabled state. Thus we must explicitly test for
79 * either condition as required. 79 * either condition as required.
80 */ 80 */
81int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx, 81int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
82 const struct aspeed_sig_expr *expr, bool enabled) 82 const struct aspeed_sig_expr *expr, bool enabled)
83{ 83{
84 int i;
85 int ret; 84 int ret;
85 int i;
86
87 if (ctx->ops->eval)
88 return ctx->ops->eval(ctx, expr, enabled);
86 89
87 for (i = 0; i < expr->ndescs; i++) { 90 for (i = 0; i < expr->ndescs; i++) {
88 const struct aspeed_sig_desc *desc = &expr->descs[i]; 91 const struct aspeed_sig_desc *desc = &expr->descs[i];
diff --git a/drivers/pinctrl/aspeed/pinmux-aspeed.h b/drivers/pinctrl/aspeed/pinmux-aspeed.h
index 52d299b59ce2..db3457c86f48 100644
--- a/drivers/pinctrl/aspeed/pinmux-aspeed.h
+++ b/drivers/pinctrl/aspeed/pinmux-aspeed.h
@@ -702,6 +702,8 @@ struct aspeed_pin_function {
702struct aspeed_pinmux_data; 702struct aspeed_pinmux_data;
703 703
704struct aspeed_pinmux_ops { 704struct aspeed_pinmux_ops {
705 int (*eval)(struct aspeed_pinmux_data *ctx,
706 const struct aspeed_sig_expr *expr, bool enabled);
705 int (*set)(struct aspeed_pinmux_data *ctx, 707 int (*set)(struct aspeed_pinmux_data *ctx,
706 const struct aspeed_sig_expr *expr, bool enabled); 708 const struct aspeed_sig_expr *expr, bool enabled);
707}; 709};
@@ -722,9 +724,8 @@ struct aspeed_pinmux_data {
722int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled, 724int aspeed_sig_desc_eval(const struct aspeed_sig_desc *desc, bool enabled,
723 struct regmap *map); 725 struct regmap *map);
724 726
725int aspeed_sig_expr_eval(const struct aspeed_pinmux_data *ctx, 727int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
726 const struct aspeed_sig_expr *expr, 728 const struct aspeed_sig_expr *expr, bool enabled);
727 bool enabled);
728 729
729static inline int aspeed_sig_expr_set(struct aspeed_pinmux_data *ctx, 730static inline int aspeed_sig_expr_set(struct aspeed_pinmux_data *ctx,
730 const struct aspeed_sig_expr *expr, 731 const struct aspeed_sig_expr *expr,
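Editor's note: taken together, the three aspeed hunks above turn signal-expression evaluation into an optional per-generation hook. The generic aspeed_sig_expr_eval() now dispatches through ctx->ops->eval when a SoC generation provides one (the AST2500 "g5" driver does, because some of its pin-control registers sit behind an LPC regmap that must be acquired first) and otherwise runs the shared descriptor walk. The dispatch pattern, condensed from the hunks (the fallback call name is a placeholder for the inline loop):

static const struct aspeed_pinmux_ops aspeed_g5_ops = {
        .eval = aspeed_g5_sig_expr_eval,        /* generation-specific path */
        .set  = aspeed_g5_sig_expr_set,
};

int aspeed_sig_expr_eval(struct aspeed_pinmux_data *ctx,
                         const struct aspeed_sig_expr *expr, bool enabled)
{
        if (ctx->ops->eval)                     /* override present: use it */
                return ctx->ops->eval(ctx, expr, enabled);
        /* ... otherwise the common per-descriptor loop runs here ... */
}

The const qualifier had to come off the ctx parameter, apparently so that the g5 override can cache acquired regmaps in ctx->maps[].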
diff --git a/drivers/regulator/act8945a-regulator.c b/drivers/regulator/act8945a-regulator.c
index 584284938ac9..d2f804dbc785 100644
--- a/drivers/regulator/act8945a-regulator.c
+++ b/drivers/regulator/act8945a-regulator.c
@@ -169,16 +169,16 @@ static int act8945a_set_mode(struct regulator_dev *rdev, unsigned int mode)
169 reg = ACT8945A_DCDC3_CTRL; 169 reg = ACT8945A_DCDC3_CTRL;
170 break; 170 break;
171 case ACT8945A_ID_LDO1: 171 case ACT8945A_ID_LDO1:
172 reg = ACT8945A_LDO1_SUS; 172 reg = ACT8945A_LDO1_CTRL;
173 break; 173 break;
174 case ACT8945A_ID_LDO2: 174 case ACT8945A_ID_LDO2:
175 reg = ACT8945A_LDO2_SUS; 175 reg = ACT8945A_LDO2_CTRL;
176 break; 176 break;
177 case ACT8945A_ID_LDO3: 177 case ACT8945A_ID_LDO3:
178 reg = ACT8945A_LDO3_SUS; 178 reg = ACT8945A_LDO3_CTRL;
179 break; 179 break;
180 case ACT8945A_ID_LDO4: 180 case ACT8945A_ID_LDO4:
181 reg = ACT8945A_LDO4_SUS; 181 reg = ACT8945A_LDO4_CTRL;
182 break; 182 break;
183 default: 183 default:
184 return -EINVAL; 184 return -EINVAL;
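Editor's note: the act8945a fix above makes set_mode write the ACT8945A_*_CTRL registers rather than the *_SUS (suspend) ones, so a mode change affects the regulator's normal operating state instead of silently editing its suspend configuration. After the switch statement picks `reg`, the driver would typically apply the mode with a masked register write along these lines (the mask and value names are illustrative placeholders, not taken from the driver):

        /* hypothetical mask/value names; only the reg selection is from the hunk */
        return regmap_update_bits(act8945a->regmap, reg,
                                  ACT8945A_MODE_MASK, mode_val);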
diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c
index 04b732991d69..4d859fef55e6 100644
--- a/drivers/regulator/slg51000-regulator.c
+++ b/drivers/regulator/slg51000-regulator.c
@@ -205,7 +205,7 @@ static int slg51000_of_parse_cb(struct device_node *np,
205 ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np, 205 ena_gpiod = devm_gpiod_get_from_of_node(chip->dev, np,
206 "enable-gpios", 0, 206 "enable-gpios", 0,
207 gflags, "gpio-en-ldo"); 207 gflags, "gpio-en-ldo");
208 if (ena_gpiod) { 208 if (!IS_ERR(ena_gpiod)) {
209 config->ena_gpiod = ena_gpiod; 209 config->ena_gpiod = ena_gpiod;
210 devm_gpiod_unhinge(chip->dev, config->ena_gpiod); 210 devm_gpiod_unhinge(chip->dev, config->ena_gpiod);
211 } 211 }
@@ -459,7 +459,7 @@ static int slg51000_i2c_probe(struct i2c_client *client,
459 GPIOD_OUT_HIGH 459 GPIOD_OUT_HIGH
460 | GPIOD_FLAGS_BIT_NONEXCLUSIVE, 460 | GPIOD_FLAGS_BIT_NONEXCLUSIVE,
461 "slg51000-cs"); 461 "slg51000-cs");
462 if (cs_gpiod) { 462 if (!IS_ERR(cs_gpiod)) {
463 dev_info(dev, "Found chip selector property\n"); 463 dev_info(dev, "Found chip selector property\n");
464 chip->cs_gpiod = cs_gpiod; 464 chip->cs_gpiod = cs_gpiod;
465 } 465 }
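Editor's note: both slg51000 fixes above correct the same mistake: GPIO descriptor getters such as devm_gpiod_get_from_of_node() return an ERR_PTR-encoded error on failure, never NULL, so a plain `if (ptr)` truth test always passes and an error cookie gets treated as a valid descriptor. The usual idiom looks like this (a sketch, using the generic devm_gpiod_get_optional() for illustration):

        struct gpio_desc *gpiod;

        gpiod = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
        if (IS_ERR(gpiod))
                return PTR_ERR(gpiod);          /* a real error: propagate it */
        if (gpiod)                              /* only the _optional variant may yield NULL */
                gpiod_set_value(gpiod, 1);

The slg51000 driver deliberately keeps going when the GPIO is merely unusable, hence the bare IS_ERR() tests in the hunks rather than error returns.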
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 6fa15b2d6fb3..866b4dd01da9 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -359,6 +359,17 @@ static const u16 VINTANA2_VSEL_table[] = {
359 2500, 2750, 359 2500, 2750,
360}; 360};
361 361
362/* 600mV to 1450mV in 12.5 mV steps */
363static const struct regulator_linear_range VDD1_ranges[] = {
364 REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500)
365};
366
367/* 600mV to 1450mV in 12.5 mV steps, everything above = 1500mV */
368static const struct regulator_linear_range VDD2_ranges[] = {
369 REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500),
370 REGULATOR_LINEAR_RANGE(1500000, 69, 69, 12500)
371};
372
362static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) 373static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
363{ 374{
364 struct twlreg_info *info = rdev_get_drvdata(rdev); 375 struct twlreg_info *info = rdev_get_drvdata(rdev);
@@ -427,6 +438,8 @@ static int twl4030smps_get_voltage(struct regulator_dev *rdev)
427} 438}
428 439
429static const struct regulator_ops twl4030smps_ops = { 440static const struct regulator_ops twl4030smps_ops = {
441 .list_voltage = regulator_list_voltage_linear_range,
442
430 .set_voltage = twl4030smps_set_voltage, 443 .set_voltage = twl4030smps_set_voltage,
431 .get_voltage = twl4030smps_get_voltage, 444 .get_voltage = twl4030smps_get_voltage,
432}; 445};
@@ -466,7 +479,8 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
466 }, \ 479 }, \
467 } 480 }
468 481
469#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \ 482#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf, \
483 n_volt) \
470static const struct twlreg_info TWL4030_INFO_##label = { \ 484static const struct twlreg_info TWL4030_INFO_##label = { \
471 .base = offset, \ 485 .base = offset, \
472 .id = num, \ 486 .id = num, \
@@ -479,6 +493,9 @@ static const struct twlreg_info TWL4030_INFO_##label = { \
479 .owner = THIS_MODULE, \ 493 .owner = THIS_MODULE, \
480 .enable_time = turnon_delay, \ 494 .enable_time = turnon_delay, \
481 .of_map_mode = twl4030reg_map_mode, \ 495 .of_map_mode = twl4030reg_map_mode, \
496 .n_voltages = n_volt, \
497 .n_linear_ranges = ARRAY_SIZE(label ## _ranges), \
498 .linear_ranges = label ## _ranges, \
482 }, \ 499 }, \
483 } 500 }
484 501
@@ -518,8 +535,8 @@ TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00);
518TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08); 535TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08);
519TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08); 536TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08);
520TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08); 537TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08);
521TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08); 538TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08, 68);
522TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08); 539TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08, 69);
523/* VUSBCP is managed *only* by the USB subchip */ 540/* VUSBCP is managed *only* by the USB subchip */
524TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08); 541TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
525TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08); 542TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
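Editor's note: with the ranges above in place, regulator_list_voltage_linear_range() derives every selectable voltage arithmetically as uV = min_uV + sel * uV_step within each range. For VDD1, REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500) means selector 68 yields 600000 + 68 * 12500 = 1450000 uV (1.45 V), matching the comment; VDD2's extra one-entry range pins selector 69 at 1500000 uV. A worked sketch of the decode:

/* sketch: decoding implied by REGULATOR_LINEAR_RANGE(600000, 0, 68, 12500) */
static unsigned int vdd1_sel_to_uV(unsigned int sel)
{
        return 600000 + sel * 12500;    /* sel 0 -> 600000 uV, sel 68 -> 1450000 uV */
}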
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 8d8c495b5b60..d65558619ab0 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5715,7 +5715,7 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2,
5715 * 0 = Set nr_hw_queues by the number of CPUs or HW queues. 5715 * 0 = Set nr_hw_queues by the number of CPUs or HW queues.
5716 * 1,256 = Manually specify the maximum nr_hw_queue value to be set, 5716 * 1,256 = Manually specify the maximum nr_hw_queue value to be set,
5717 * 5717 *
5718 * Value range is [0,128]. Default value is 8. 5718 * Value range is [0,256]. Default value is 8.
5719 */ 5719 */
5720LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, 5720LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF,
5721 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, 5721 LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX,
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 329f7aa7e169..a81ef0293696 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -46,7 +46,7 @@
46 46
47/* FCP MQ queue count limiting */ 47/* FCP MQ queue count limiting */
48#define LPFC_FCP_MQ_THRESHOLD_MIN 0 48#define LPFC_FCP_MQ_THRESHOLD_MIN 0
49#define LPFC_FCP_MQ_THRESHOLD_MAX 128 49#define LPFC_FCP_MQ_THRESHOLD_MAX 256
50#define LPFC_FCP_MQ_THRESHOLD_DEF 8 50#define LPFC_FCP_MQ_THRESHOLD_DEF 8
51 51
52/* Common buffer size to accommodate SCSI and NVME IO buffers */ 52/* Common buffer size to accommodate SCSI and NVME IO buffers */
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index d5cf953b4337..7d622ea1274e 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -630,6 +630,9 @@ int geni_se_tx_dma_prep(struct geni_se *se, void *buf, size_t len,
630 struct geni_wrapper *wrapper = se->wrapper; 630 struct geni_wrapper *wrapper = se->wrapper;
631 u32 val; 631 u32 val;
632 632
633 if (!wrapper)
634 return -EINVAL;
635
633 *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE); 636 *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);
634 if (dma_mapping_error(wrapper->dev, *iova)) 637 if (dma_mapping_error(wrapper->dev, *iova))
635 return -EIO; 638 return -EIO;
@@ -663,6 +666,9 @@ int geni_se_rx_dma_prep(struct geni_se *se, void *buf, size_t len,
663 struct geni_wrapper *wrapper = se->wrapper; 666 struct geni_wrapper *wrapper = se->wrapper;
664 u32 val; 667 u32 val;
665 668
669 if (!wrapper)
670 return -EINVAL;
671
666 *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE); 672 *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);
667 if (dma_mapping_error(wrapper->dev, *iova)) 673 if (dma_mapping_error(wrapper->dev, *iova))
668 return -EIO; 674 return -EIO;
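Editor's note: both geni DMA-prep helpers above gain the same guard. se->wrapper can legitimately be NULL when the wrapper device has not probed (or was unbound), and the very next statement dereferences wrapper->dev inside dma_map_single(); the check turns a NULL-pointer oops into a clean -EINVAL. Callers already handle an errno from these helpers, for example (sketch):

        /* sketch: the serial-engine driver just propagates the errno */
        ret = geni_se_tx_dma_prep(se, buf, len, &tx_dma);
        if (ret)
                return ret;     /* -EINVAL: no wrapper; -EIO: mapping failed */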
diff --git a/drivers/vhost/test.c b/drivers/vhost/test.c
index 9e90e969af55..7804869c6a31 100644
--- a/drivers/vhost/test.c
+++ b/drivers/vhost/test.c
@@ -22,6 +22,12 @@
22 * Using this limit prevents one virtqueue from starving others. */ 22 * Using this limit prevents one virtqueue from starving others. */
23#define VHOST_TEST_WEIGHT 0x80000 23#define VHOST_TEST_WEIGHT 0x80000
24 24
25/* Max number of packets transferred before requeueing the job.
26 * Using this limit prevents one virtqueue from starving others with
27 * packets.
28 */
29#define VHOST_TEST_PKT_WEIGHT 256
30
25enum { 31enum {
26 VHOST_TEST_VQ = 0, 32 VHOST_TEST_VQ = 0,
27 VHOST_TEST_VQ_MAX = 1, 33 VHOST_TEST_VQ_MAX = 1,
@@ -80,10 +86,8 @@ static void handle_vq(struct vhost_test *n)
80 } 86 }
81 vhost_add_used_and_signal(&n->dev, vq, head, 0); 87 vhost_add_used_and_signal(&n->dev, vq, head, 0);
82 total_len += len; 88 total_len += len;
83 if (unlikely(total_len >= VHOST_TEST_WEIGHT)) { 89 if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
84 vhost_poll_queue(&vq->poll);
85 break; 90 break;
86 }
87 } 91 }
88 92
89 mutex_unlock(&vq->mutex); 93 mutex_unlock(&vq->mutex);
@@ -115,7 +119,8 @@ static int vhost_test_open(struct inode *inode, struct file *f)
115 dev = &n->dev; 119 dev = &n->dev;
116 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ]; 120 vqs[VHOST_TEST_VQ] = &n->vqs[VHOST_TEST_VQ];
117 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick; 121 n->vqs[VHOST_TEST_VQ].handle_kick = handle_vq_kick;
118 vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX); 122 vhost_dev_init(dev, vqs, VHOST_TEST_VQ_MAX, UIO_MAXIOV,
123 VHOST_TEST_PKT_WEIGHT, VHOST_TEST_WEIGHT);
119 124
120 f->private_data = n; 125 f->private_data = n;
121 126
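Editor's note: the vhost/test conversion above swaps the open-coded byte-budget check for vhost_exceeds_weight(), which compares both the packet and byte totals against the weights registered at init time and requeues the poll work itself when a budget is spent, which is why the explicit vhost_poll_queue() call disappears. The weights now travel through the extended vhost_dev_init() signature. The handler-side pattern (test.c passes 0 for the packet count since it only meters bytes):

        /* sketch: cooperative requeue inside a vq handler loop */
        total_len += len;
        if (unlikely(vhost_exceeds_weight(vq, 0, total_len)))
                break;  /* budget spent; the helper already queued the poll */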
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 0536f8526359..36ca2cf419bf 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -203,7 +203,6 @@ EXPORT_SYMBOL_GPL(vhost_poll_init);
203int vhost_poll_start(struct vhost_poll *poll, struct file *file) 203int vhost_poll_start(struct vhost_poll *poll, struct file *file)
204{ 204{
205 __poll_t mask; 205 __poll_t mask;
206 int ret = 0;
207 206
208 if (poll->wqh) 207 if (poll->wqh)
209 return 0; 208 return 0;
@@ -213,10 +212,10 @@ int vhost_poll_start(struct vhost_poll *poll, struct file *file)
213 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask)); 212 vhost_poll_wakeup(&poll->wait, 0, 0, poll_to_key(mask));
214 if (mask & EPOLLERR) { 213 if (mask & EPOLLERR) {
215 vhost_poll_stop(poll); 214 vhost_poll_stop(poll);
216 ret = -EINVAL; 215 return -EINVAL;
217 } 216 }
218 217
219 return ret; 218 return 0;
220} 219}
221EXPORT_SYMBOL_GPL(vhost_poll_start); 220EXPORT_SYMBOL_GPL(vhost_poll_start);
222 221
@@ -298,160 +297,6 @@ static void vhost_vq_meta_reset(struct vhost_dev *d)
298 __vhost_vq_meta_reset(d->vqs[i]); 297 __vhost_vq_meta_reset(d->vqs[i]);
299} 298}
300 299
301#if VHOST_ARCH_CAN_ACCEL_UACCESS
302static void vhost_map_unprefetch(struct vhost_map *map)
303{
304 kfree(map->pages);
305 map->pages = NULL;
306 map->npages = 0;
307 map->addr = NULL;
308}
309
310static void vhost_uninit_vq_maps(struct vhost_virtqueue *vq)
311{
312 struct vhost_map *map[VHOST_NUM_ADDRS];
313 int i;
314
315 spin_lock(&vq->mmu_lock);
316 for (i = 0; i < VHOST_NUM_ADDRS; i++) {
317 map[i] = rcu_dereference_protected(vq->maps[i],
318 lockdep_is_held(&vq->mmu_lock));
319 if (map[i])
320 rcu_assign_pointer(vq->maps[i], NULL);
321 }
322 spin_unlock(&vq->mmu_lock);
323
324 synchronize_rcu();
325
326 for (i = 0; i < VHOST_NUM_ADDRS; i++)
327 if (map[i])
328 vhost_map_unprefetch(map[i]);
329
330}
331
332static void vhost_reset_vq_maps(struct vhost_virtqueue *vq)
333{
334 int i;
335
336 vhost_uninit_vq_maps(vq);
337 for (i = 0; i < VHOST_NUM_ADDRS; i++)
338 vq->uaddrs[i].size = 0;
339}
340
341static bool vhost_map_range_overlap(struct vhost_uaddr *uaddr,
342 unsigned long start,
343 unsigned long end)
344{
345 if (unlikely(!uaddr->size))
346 return false;
347
348 return !(end < uaddr->uaddr || start > uaddr->uaddr - 1 + uaddr->size);
349}
350
351static void vhost_invalidate_vq_start(struct vhost_virtqueue *vq,
352 int index,
353 unsigned long start,
354 unsigned long end)
355{
356 struct vhost_uaddr *uaddr = &vq->uaddrs[index];
357 struct vhost_map *map;
358 int i;
359
360 if (!vhost_map_range_overlap(uaddr, start, end))
361 return;
362
363 spin_lock(&vq->mmu_lock);
364 ++vq->invalidate_count;
365
366 map = rcu_dereference_protected(vq->maps[index],
367 lockdep_is_held(&vq->mmu_lock));
368 if (map) {
369 if (uaddr->write) {
370 for (i = 0; i < map->npages; i++)
371 set_page_dirty(map->pages[i]);
372 }
373 rcu_assign_pointer(vq->maps[index], NULL);
374 }
375 spin_unlock(&vq->mmu_lock);
376
377 if (map) {
378 synchronize_rcu();
379 vhost_map_unprefetch(map);
380 }
381}
382
383static void vhost_invalidate_vq_end(struct vhost_virtqueue *vq,
384 int index,
385 unsigned long start,
386 unsigned long end)
387{
388 if (!vhost_map_range_overlap(&vq->uaddrs[index], start, end))
389 return;
390
391 spin_lock(&vq->mmu_lock);
392 --vq->invalidate_count;
393 spin_unlock(&vq->mmu_lock);
394}
395
396static int vhost_invalidate_range_start(struct mmu_notifier *mn,
397 const struct mmu_notifier_range *range)
398{
399 struct vhost_dev *dev = container_of(mn, struct vhost_dev,
400 mmu_notifier);
401 int i, j;
402
403 if (!mmu_notifier_range_blockable(range))
404 return -EAGAIN;
405
406 for (i = 0; i < dev->nvqs; i++) {
407 struct vhost_virtqueue *vq = dev->vqs[i];
408
409 for (j = 0; j < VHOST_NUM_ADDRS; j++)
410 vhost_invalidate_vq_start(vq, j,
411 range->start,
412 range->end);
413 }
414
415 return 0;
416}
417
418static void vhost_invalidate_range_end(struct mmu_notifier *mn,
419 const struct mmu_notifier_range *range)
420{
421 struct vhost_dev *dev = container_of(mn, struct vhost_dev,
422 mmu_notifier);
423 int i, j;
424
425 for (i = 0; i < dev->nvqs; i++) {
426 struct vhost_virtqueue *vq = dev->vqs[i];
427
428 for (j = 0; j < VHOST_NUM_ADDRS; j++)
429 vhost_invalidate_vq_end(vq, j,
430 range->start,
431 range->end);
432 }
433}
434
435static const struct mmu_notifier_ops vhost_mmu_notifier_ops = {
436 .invalidate_range_start = vhost_invalidate_range_start,
437 .invalidate_range_end = vhost_invalidate_range_end,
438};
439
440static void vhost_init_maps(struct vhost_dev *dev)
441{
442 struct vhost_virtqueue *vq;
443 int i, j;
444
445 dev->mmu_notifier.ops = &vhost_mmu_notifier_ops;
446
447 for (i = 0; i < dev->nvqs; ++i) {
448 vq = dev->vqs[i];
449 for (j = 0; j < VHOST_NUM_ADDRS; j++)
450 RCU_INIT_POINTER(vq->maps[j], NULL);
451 }
452}
453#endif
454
455static void vhost_vq_reset(struct vhost_dev *dev, 300static void vhost_vq_reset(struct vhost_dev *dev,
456 struct vhost_virtqueue *vq) 301 struct vhost_virtqueue *vq)
457{ 302{
@@ -480,11 +325,7 @@ static void vhost_vq_reset(struct vhost_dev *dev,
480 vq->busyloop_timeout = 0; 325 vq->busyloop_timeout = 0;
481 vq->umem = NULL; 326 vq->umem = NULL;
482 vq->iotlb = NULL; 327 vq->iotlb = NULL;
483 vq->invalidate_count = 0;
484 __vhost_vq_meta_reset(vq); 328 __vhost_vq_meta_reset(vq);
485#if VHOST_ARCH_CAN_ACCEL_UACCESS
486 vhost_reset_vq_maps(vq);
487#endif
488} 329}
489 330
490static int vhost_worker(void *data) 331static int vhost_worker(void *data)
@@ -634,9 +475,7 @@ void vhost_dev_init(struct vhost_dev *dev,
634 INIT_LIST_HEAD(&dev->read_list); 475 INIT_LIST_HEAD(&dev->read_list);
635 INIT_LIST_HEAD(&dev->pending_list); 476 INIT_LIST_HEAD(&dev->pending_list);
636 spin_lock_init(&dev->iotlb_lock); 477 spin_lock_init(&dev->iotlb_lock);
637#if VHOST_ARCH_CAN_ACCEL_UACCESS 478
638 vhost_init_maps(dev);
639#endif
640 479
641 for (i = 0; i < dev->nvqs; ++i) { 480 for (i = 0; i < dev->nvqs; ++i) {
642 vq = dev->vqs[i]; 481 vq = dev->vqs[i];
@@ -645,7 +484,6 @@ void vhost_dev_init(struct vhost_dev *dev,
645 vq->heads = NULL; 484 vq->heads = NULL;
646 vq->dev = dev; 485 vq->dev = dev;
647 mutex_init(&vq->mutex); 486 mutex_init(&vq->mutex);
648 spin_lock_init(&vq->mmu_lock);
649 vhost_vq_reset(dev, vq); 487 vhost_vq_reset(dev, vq);
650 if (vq->handle_kick) 488 if (vq->handle_kick)
651 vhost_poll_init(&vq->poll, vq->handle_kick, 489 vhost_poll_init(&vq->poll, vq->handle_kick,
@@ -725,18 +563,7 @@ long vhost_dev_set_owner(struct vhost_dev *dev)
725 if (err) 563 if (err)
726 goto err_cgroup; 564 goto err_cgroup;
727 565
728#if VHOST_ARCH_CAN_ACCEL_UACCESS
729 err = mmu_notifier_register(&dev->mmu_notifier, dev->mm);
730 if (err)
731 goto err_mmu_notifier;
732#endif
733
734 return 0; 566 return 0;
735
736#if VHOST_ARCH_CAN_ACCEL_UACCESS
737err_mmu_notifier:
738 vhost_dev_free_iovecs(dev);
739#endif
740err_cgroup: 567err_cgroup:
741 kthread_stop(worker); 568 kthread_stop(worker);
742 dev->worker = NULL; 569 dev->worker = NULL;
@@ -827,107 +654,6 @@ static void vhost_clear_msg(struct vhost_dev *dev)
827 spin_unlock(&dev->iotlb_lock); 654 spin_unlock(&dev->iotlb_lock);
828} 655}
829 656
830#if VHOST_ARCH_CAN_ACCEL_UACCESS
831static void vhost_setup_uaddr(struct vhost_virtqueue *vq,
832 int index, unsigned long uaddr,
833 size_t size, bool write)
834{
835 struct vhost_uaddr *addr = &vq->uaddrs[index];
836
837 addr->uaddr = uaddr;
838 addr->size = size;
839 addr->write = write;
840}
841
842static void vhost_setup_vq_uaddr(struct vhost_virtqueue *vq)
843{
844 vhost_setup_uaddr(vq, VHOST_ADDR_DESC,
845 (unsigned long)vq->desc,
846 vhost_get_desc_size(vq, vq->num),
847 false);
848 vhost_setup_uaddr(vq, VHOST_ADDR_AVAIL,
849 (unsigned long)vq->avail,
850 vhost_get_avail_size(vq, vq->num),
851 false);
852 vhost_setup_uaddr(vq, VHOST_ADDR_USED,
853 (unsigned long)vq->used,
854 vhost_get_used_size(vq, vq->num),
855 true);
856}
857
858static int vhost_map_prefetch(struct vhost_virtqueue *vq,
859 int index)
860{
861 struct vhost_map *map;
862 struct vhost_uaddr *uaddr = &vq->uaddrs[index];
863 struct page **pages;
864 int npages = DIV_ROUND_UP(uaddr->size, PAGE_SIZE);
865 int npinned;
866 void *vaddr, *v;
867 int err;
868 int i;
869
870 spin_lock(&vq->mmu_lock);
871
872 err = -EFAULT;
873 if (vq->invalidate_count)
874 goto err;
875
876 err = -ENOMEM;
877 map = kmalloc(sizeof(*map), GFP_ATOMIC);
878 if (!map)
879 goto err;
880
881 pages = kmalloc_array(npages, sizeof(struct page *), GFP_ATOMIC);
882 if (!pages)
883 goto err_pages;
884
885 err = -EFAULT;
886 npinned = __get_user_pages_fast(uaddr->uaddr, npages,
887 uaddr->write, pages);
888 if (npinned > 0)
889 release_pages(pages, npinned);
890 if (npinned != npages)
891 goto err_gup;
892
893 for (i = 0; i < npinned; i++)
894 if (PageHighMem(pages[i]))
895 goto err_gup;
896
897 vaddr = v = page_address(pages[0]);
898
899 /* For simplicity, fall back to the userspace address if the VA is not
900 * contiguous.
901 */
902 for (i = 1; i < npinned; i++) {
903 v += PAGE_SIZE;
904 if (v != page_address(pages[i]))
905 goto err_gup;
906 }
907
908 map->addr = vaddr + (uaddr->uaddr & (PAGE_SIZE - 1));
909 map->npages = npages;
910 map->pages = pages;
911
912 rcu_assign_pointer(vq->maps[index], map);
913 /* No need for a synchronize_rcu(). This function should be
914 * called by dev->worker so we are serialized with all
915 * readers.
916 */
917 spin_unlock(&vq->mmu_lock);
918
919 return 0;
920
921err_gup:
922 kfree(pages);
923err_pages:
924 kfree(map);
925err:
926 spin_unlock(&vq->mmu_lock);
927 return err;
928}
929#endif
930
931void vhost_dev_cleanup(struct vhost_dev *dev) 657void vhost_dev_cleanup(struct vhost_dev *dev)
932{ 658{
933 int i; 659 int i;
@@ -957,16 +683,8 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
957 kthread_stop(dev->worker); 683 kthread_stop(dev->worker);
958 dev->worker = NULL; 684 dev->worker = NULL;
959 } 685 }
960 if (dev->mm) { 686 if (dev->mm)
961#if VHOST_ARCH_CAN_ACCEL_UACCESS
962 mmu_notifier_unregister(&dev->mmu_notifier, dev->mm);
963#endif
964 mmput(dev->mm); 687 mmput(dev->mm);
965 }
966#if VHOST_ARCH_CAN_ACCEL_UACCESS
967 for (i = 0; i < dev->nvqs; i++)
968 vhost_uninit_vq_maps(dev->vqs[i]);
969#endif
970 dev->mm = NULL; 688 dev->mm = NULL;
971} 689}
972EXPORT_SYMBOL_GPL(vhost_dev_cleanup); 690EXPORT_SYMBOL_GPL(vhost_dev_cleanup);
@@ -1195,26 +913,6 @@ static inline void __user *__vhost_get_user(struct vhost_virtqueue *vq,
1195 913
1196static inline int vhost_put_avail_event(struct vhost_virtqueue *vq) 914static inline int vhost_put_avail_event(struct vhost_virtqueue *vq)
1197{ 915{
1198#if VHOST_ARCH_CAN_ACCEL_UACCESS
1199 struct vhost_map *map;
1200 struct vring_used *used;
1201
1202 if (!vq->iotlb) {
1203 rcu_read_lock();
1204
1205 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1206 if (likely(map)) {
1207 used = map->addr;
1208 *((__virtio16 *)&used->ring[vq->num]) =
1209 cpu_to_vhost16(vq, vq->avail_idx);
1210 rcu_read_unlock();
1211 return 0;
1212 }
1213
1214 rcu_read_unlock();
1215 }
1216#endif
1217
1218 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx), 916 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->avail_idx),
1219 vhost_avail_event(vq)); 917 vhost_avail_event(vq));
1220} 918}
@@ -1223,27 +921,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
1223 struct vring_used_elem *head, int idx, 921 struct vring_used_elem *head, int idx,
1224 int count) 922 int count)
1225{ 923{
1226#if VHOST_ARCH_CAN_ACCEL_UACCESS
1227 struct vhost_map *map;
1228 struct vring_used *used;
1229 size_t size;
1230
1231 if (!vq->iotlb) {
1232 rcu_read_lock();
1233
1234 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1235 if (likely(map)) {
1236 used = map->addr;
1237 size = count * sizeof(*head);
1238 memcpy(used->ring + idx, head, size);
1239 rcu_read_unlock();
1240 return 0;
1241 }
1242
1243 rcu_read_unlock();
1244 }
1245#endif
1246
1247 return vhost_copy_to_user(vq, vq->used->ring + idx, head, 924 return vhost_copy_to_user(vq, vq->used->ring + idx, head,
1248 count * sizeof(*head)); 925 count * sizeof(*head));
1249} 926}
@@ -1251,25 +928,6 @@ static inline int vhost_put_used(struct vhost_virtqueue *vq,
1251static inline int vhost_put_used_flags(struct vhost_virtqueue *vq) 928static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
1252 929
1253{ 930{
1254#if VHOST_ARCH_CAN_ACCEL_UACCESS
1255 struct vhost_map *map;
1256 struct vring_used *used;
1257
1258 if (!vq->iotlb) {
1259 rcu_read_lock();
1260
1261 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1262 if (likely(map)) {
1263 used = map->addr;
1264 used->flags = cpu_to_vhost16(vq, vq->used_flags);
1265 rcu_read_unlock();
1266 return 0;
1267 }
1268
1269 rcu_read_unlock();
1270 }
1271#endif
1272
1273 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags), 931 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->used_flags),
1274 &vq->used->flags); 932 &vq->used->flags);
1275} 933}
@@ -1277,25 +935,6 @@ static inline int vhost_put_used_flags(struct vhost_virtqueue *vq)
1277static inline int vhost_put_used_idx(struct vhost_virtqueue *vq) 935static inline int vhost_put_used_idx(struct vhost_virtqueue *vq)
1278 936
1279{ 937{
1280#if VHOST_ARCH_CAN_ACCEL_UACCESS
1281 struct vhost_map *map;
1282 struct vring_used *used;
1283
1284 if (!vq->iotlb) {
1285 rcu_read_lock();
1286
1287 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1288 if (likely(map)) {
1289 used = map->addr;
1290 used->idx = cpu_to_vhost16(vq, vq->last_used_idx);
1291 rcu_read_unlock();
1292 return 0;
1293 }
1294
1295 rcu_read_unlock();
1296 }
1297#endif
1298
1299 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx), 938 return vhost_put_user(vq, cpu_to_vhost16(vq, vq->last_used_idx),
1300 &vq->used->idx); 939 &vq->used->idx);
1301} 940}
@@ -1341,50 +980,12 @@ static void vhost_dev_unlock_vqs(struct vhost_dev *d)
1341static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq, 980static inline int vhost_get_avail_idx(struct vhost_virtqueue *vq,
1342 __virtio16 *idx) 981 __virtio16 *idx)
1343{ 982{
1344#if VHOST_ARCH_CAN_ACCEL_UACCESS
1345 struct vhost_map *map;
1346 struct vring_avail *avail;
1347
1348 if (!vq->iotlb) {
1349 rcu_read_lock();
1350
1351 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1352 if (likely(map)) {
1353 avail = map->addr;
1354 *idx = avail->idx;
1355 rcu_read_unlock();
1356 return 0;
1357 }
1358
1359 rcu_read_unlock();
1360 }
1361#endif
1362
1363 return vhost_get_avail(vq, *idx, &vq->avail->idx); 983 return vhost_get_avail(vq, *idx, &vq->avail->idx);
1364} 984}
1365 985
1366static inline int vhost_get_avail_head(struct vhost_virtqueue *vq, 986static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
1367 __virtio16 *head, int idx) 987 __virtio16 *head, int idx)
1368{ 988{
1369#if VHOST_ARCH_CAN_ACCEL_UACCESS
1370 struct vhost_map *map;
1371 struct vring_avail *avail;
1372
1373 if (!vq->iotlb) {
1374 rcu_read_lock();
1375
1376 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1377 if (likely(map)) {
1378 avail = map->addr;
1379 *head = avail->ring[idx & (vq->num - 1)];
1380 rcu_read_unlock();
1381 return 0;
1382 }
1383
1384 rcu_read_unlock();
1385 }
1386#endif
1387
1388 return vhost_get_avail(vq, *head, 989 return vhost_get_avail(vq, *head,
1389 &vq->avail->ring[idx & (vq->num - 1)]); 990 &vq->avail->ring[idx & (vq->num - 1)]);
1390} 991}
@@ -1392,98 +993,24 @@ static inline int vhost_get_avail_head(struct vhost_virtqueue *vq,
1392static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq, 993static inline int vhost_get_avail_flags(struct vhost_virtqueue *vq,
1393 __virtio16 *flags) 994 __virtio16 *flags)
1394{ 995{
1395#if VHOST_ARCH_CAN_ACCEL_UACCESS
1396 struct vhost_map *map;
1397 struct vring_avail *avail;
1398
1399 if (!vq->iotlb) {
1400 rcu_read_lock();
1401
1402 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1403 if (likely(map)) {
1404 avail = map->addr;
1405 *flags = avail->flags;
1406 rcu_read_unlock();
1407 return 0;
1408 }
1409
1410 rcu_read_unlock();
1411 }
1412#endif
1413
1414 return vhost_get_avail(vq, *flags, &vq->avail->flags); 996 return vhost_get_avail(vq, *flags, &vq->avail->flags);
1415} 997}
1416 998
1417static inline int vhost_get_used_event(struct vhost_virtqueue *vq, 999static inline int vhost_get_used_event(struct vhost_virtqueue *vq,
1418 __virtio16 *event) 1000 __virtio16 *event)
1419{ 1001{
1420#if VHOST_ARCH_CAN_ACCEL_UACCESS
1421 struct vhost_map *map;
1422 struct vring_avail *avail;
1423
1424 if (!vq->iotlb) {
1425 rcu_read_lock();
1426 map = rcu_dereference(vq->maps[VHOST_ADDR_AVAIL]);
1427 if (likely(map)) {
1428 avail = map->addr;
1429 *event = (__virtio16)avail->ring[vq->num];
1430 rcu_read_unlock();
1431 return 0;
1432 }
1433 rcu_read_unlock();
1434 }
1435#endif
1436
1437 return vhost_get_avail(vq, *event, vhost_used_event(vq)); 1002 return vhost_get_avail(vq, *event, vhost_used_event(vq));
1438} 1003}
1439 1004
1440static inline int vhost_get_used_idx(struct vhost_virtqueue *vq, 1005static inline int vhost_get_used_idx(struct vhost_virtqueue *vq,
1441 __virtio16 *idx) 1006 __virtio16 *idx)
1442{ 1007{
1443#if VHOST_ARCH_CAN_ACCEL_UACCESS
1444 struct vhost_map *map;
1445 struct vring_used *used;
1446
1447 if (!vq->iotlb) {
1448 rcu_read_lock();
1449
1450 map = rcu_dereference(vq->maps[VHOST_ADDR_USED]);
1451 if (likely(map)) {
1452 used = map->addr;
1453 *idx = used->idx;
1454 rcu_read_unlock();
1455 return 0;
1456 }
1457
1458 rcu_read_unlock();
1459 }
1460#endif
1461
1462 return vhost_get_used(vq, *idx, &vq->used->idx); 1008 return vhost_get_used(vq, *idx, &vq->used->idx);
1463} 1009}
1464 1010
1465static inline int vhost_get_desc(struct vhost_virtqueue *vq, 1011static inline int vhost_get_desc(struct vhost_virtqueue *vq,
1466 struct vring_desc *desc, int idx) 1012 struct vring_desc *desc, int idx)
1467{ 1013{
1468#if VHOST_ARCH_CAN_ACCEL_UACCESS
1469 struct vhost_map *map;
1470 struct vring_desc *d;
1471
1472 if (!vq->iotlb) {
1473 rcu_read_lock();
1474
1475 map = rcu_dereference(vq->maps[VHOST_ADDR_DESC]);
1476 if (likely(map)) {
1477 d = map->addr;
1478 *desc = *(d + idx);
1479 rcu_read_unlock();
1480 return 0;
1481 }
1482
1483 rcu_read_unlock();
1484 }
1485#endif
1486
1487 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc)); 1014 return vhost_copy_from_user(vq, desc, vq->desc + idx, sizeof(*desc));
1488} 1015}
1489 1016
@@ -1824,32 +1351,12 @@ static bool iotlb_access_ok(struct vhost_virtqueue *vq,
1824 return true; 1351 return true;
1825} 1352}
1826 1353
1827#if VHOST_ARCH_CAN_ACCEL_UACCESS
1828static void vhost_vq_map_prefetch(struct vhost_virtqueue *vq)
1829{
1830 struct vhost_map __rcu *map;
1831 int i;
1832
1833 for (i = 0; i < VHOST_NUM_ADDRS; i++) {
1834 rcu_read_lock();
1835 map = rcu_dereference(vq->maps[i]);
1836 rcu_read_unlock();
1837 if (unlikely(!map))
1838 vhost_map_prefetch(vq, i);
1839 }
1840}
1841#endif
1842
1843int vq_meta_prefetch(struct vhost_virtqueue *vq) 1354int vq_meta_prefetch(struct vhost_virtqueue *vq)
1844{ 1355{
1845 unsigned int num = vq->num; 1356 unsigned int num = vq->num;
1846 1357
1847 if (!vq->iotlb) { 1358 if (!vq->iotlb)
1848#if VHOST_ARCH_CAN_ACCEL_UACCESS
1849 vhost_vq_map_prefetch(vq);
1850#endif
1851 return 1; 1359 return 1;
1852 }
1853 1360
1854 return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc, 1361 return iotlb_access_ok(vq, VHOST_ACCESS_RO, (u64)(uintptr_t)vq->desc,
1855 vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) && 1362 vhost_get_desc_size(vq, num), VHOST_ADDR_DESC) &&
@@ -2060,16 +1567,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
2060 1567
2061 mutex_lock(&vq->mutex); 1568 mutex_lock(&vq->mutex);
2062 1569
2063#if VHOST_ARCH_CAN_ACCEL_UACCESS
2064 /* Unregister the MMU notifier so the invalidation callback
2065 * can access vq->uaddrs[] without holding a lock.
2066 */
2067 if (d->mm)
2068 mmu_notifier_unregister(&d->mmu_notifier, d->mm);
2069
2070 vhost_uninit_vq_maps(vq);
2071#endif
2072
2073 switch (ioctl) { 1570 switch (ioctl) {
2074 case VHOST_SET_VRING_NUM: 1571 case VHOST_SET_VRING_NUM:
2075 r = vhost_vring_set_num(d, vq, argp); 1572 r = vhost_vring_set_num(d, vq, argp);
@@ -2081,13 +1578,6 @@ static long vhost_vring_set_num_addr(struct vhost_dev *d,
2081 BUG(); 1578 BUG();
2082 } 1579 }
2083 1580
2084#if VHOST_ARCH_CAN_ACCEL_UACCESS
2085 vhost_setup_vq_uaddr(vq);
2086
2087 if (d->mm)
2088 mmu_notifier_register(&d->mmu_notifier, d->mm);
2089#endif
2090
2091 mutex_unlock(&vq->mutex); 1581 mutex_unlock(&vq->mutex);
2092 1582
2093 return r; 1583 return r;
@@ -2688,7 +2178,7 @@ static int get_indirect(struct vhost_virtqueue *vq,
2688 /* If this is an input descriptor, increment that count. */ 2178 /* If this is an input descriptor, increment that count. */
2689 if (access == VHOST_ACCESS_WO) { 2179 if (access == VHOST_ACCESS_WO) {
2690 *in_num += ret; 2180 *in_num += ret;
2691 if (unlikely(log)) { 2181 if (unlikely(log && ret)) {
2692 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); 2182 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2693 log[*log_num].len = vhost32_to_cpu(vq, desc.len); 2183 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2694 ++*log_num; 2184 ++*log_num;
@@ -2829,7 +2319,7 @@ int vhost_get_vq_desc(struct vhost_virtqueue *vq,
2829 /* If this is an input descriptor, 2319 /* If this is an input descriptor,
2830 * increment that count. */ 2320 * increment that count. */
2831 *in_num += ret; 2321 *in_num += ret;
2832 if (unlikely(log)) { 2322 if (unlikely(log && ret)) {
2833 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr); 2323 log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
2834 log[*log_num].len = vhost32_to_cpu(vq, desc.len); 2324 log[*log_num].len = vhost32_to_cpu(vq, desc.len);
2835 ++*log_num; 2325 ++*log_num;
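Editor's note: the two `log && ret` changes at the tail of the vhost.c diff are a hardening fix. For a write descriptor, translate_desc() can return ret == 0 when a zero-length descriptor consumes no iovecs; the old code still appended a dirty-log entry in that case, letting log_num creep past in_num and overrun the log array with a crafted descriptor chain. Requiring a non-zero ret keeps the two counters in lockstep:

        /* sketch: account a log slot only when the descriptor consumed iovecs */
        if (access == VHOST_ACCESS_WO) {
                *in_num += ret;
                if (unlikely(log && ret)) {
                        log[*log_num].addr = vhost64_to_cpu(vq, desc.addr);
                        log[*log_num].len  = vhost32_to_cpu(vq, desc.len);
                        ++*log_num;
                }
        }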
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 42a8c2a13ab1..e9ed2722b633 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -12,9 +12,6 @@
12#include <linux/virtio_config.h> 12#include <linux/virtio_config.h>
13#include <linux/virtio_ring.h> 13#include <linux/virtio_ring.h>
14#include <linux/atomic.h> 14#include <linux/atomic.h>
15#include <linux/pagemap.h>
16#include <linux/mmu_notifier.h>
17#include <asm/cacheflush.h>
18 15
19struct vhost_work; 16struct vhost_work;
20typedef void (*vhost_work_fn_t)(struct vhost_work *work); 17typedef void (*vhost_work_fn_t)(struct vhost_work *work);
@@ -83,24 +80,6 @@ enum vhost_uaddr_type {
83 VHOST_NUM_ADDRS = 3, 80 VHOST_NUM_ADDRS = 3,
84}; 81};
85 82
86struct vhost_map {
87 int npages;
88 void *addr;
89 struct page **pages;
90};
91
92struct vhost_uaddr {
93 unsigned long uaddr;
94 size_t size;
95 bool write;
96};
97
98#if defined(CONFIG_MMU_NOTIFIER) && ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 0
99#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
100#else
101#define VHOST_ARCH_CAN_ACCEL_UACCESS 0
102#endif
103
104/* The virtqueue structure describes a queue attached to a device. */ 83/* The virtqueue structure describes a queue attached to a device. */
105struct vhost_virtqueue { 84struct vhost_virtqueue {
106 struct vhost_dev *dev; 85 struct vhost_dev *dev;
@@ -111,22 +90,7 @@ struct vhost_virtqueue {
111 struct vring_desc __user *desc; 90 struct vring_desc __user *desc;
112 struct vring_avail __user *avail; 91 struct vring_avail __user *avail;
113 struct vring_used __user *used; 92 struct vring_used __user *used;
114
115#if VHOST_ARCH_CAN_ACCEL_UACCESS
116 /* Read by memory accessors, modified by meta data
117 * prefetching, MMU notifier and vring ioctl().
118 * Synchronized through mmu_lock (writers) and RCU (writers
119 * and readers).
120 */
121 struct vhost_map __rcu *maps[VHOST_NUM_ADDRS];
122 /* Read by MMU notifier, modified by vring ioctl(),
123 * synchronized through MMU notifier
124 * registering/unregistering.
125 */
126 struct vhost_uaddr uaddrs[VHOST_NUM_ADDRS];
127#endif
128 const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS]; 93 const struct vhost_umem_node *meta_iotlb[VHOST_NUM_ADDRS];
129
130 struct file *kick; 94 struct file *kick;
131 struct eventfd_ctx *call_ctx; 95 struct eventfd_ctx *call_ctx;
132 struct eventfd_ctx *error_ctx; 96 struct eventfd_ctx *error_ctx;
@@ -181,8 +145,6 @@ struct vhost_virtqueue {
181 bool user_be; 145 bool user_be;
182#endif 146#endif
183 u32 busyloop_timeout; 147 u32 busyloop_timeout;
184 spinlock_t mmu_lock;
185 int invalidate_count;
186}; 148};
187 149
188struct vhost_msg_node { 150struct vhost_msg_node {
@@ -196,9 +158,6 @@ struct vhost_msg_node {
196 158
197struct vhost_dev { 159struct vhost_dev {
198 struct mm_struct *mm; 160 struct mm_struct *mm;
199#ifdef CONFIG_MMU_NOTIFIER
200 struct mmu_notifier mmu_notifier;
201#endif
202 struct mutex mutex; 161 struct mutex mutex;
203 struct vhost_virtqueue **vqs; 162 struct vhost_virtqueue **vqs;
204 int nvqs; 163 int nvqs;
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c8be1c4f5b55..bdc08244a648 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -566,13 +566,17 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
566 566
567unmap_release: 567unmap_release:
568 err_idx = i; 568 err_idx = i;
569 i = head; 569
570 if (indirect)
571 i = 0;
572 else
573 i = head;
570 574
571 for (n = 0; n < total_sg; n++) { 575 for (n = 0; n < total_sg; n++) {
572 if (i == err_idx) 576 if (i == err_idx)
573 break; 577 break;
574 vring_unmap_one_split(vq, &desc[i]); 578 vring_unmap_one_split(vq, &desc[i]);
575 i = virtio16_to_cpu(_vq->vdev, vq->split.vring.desc[i].next); 579 i = virtio16_to_cpu(_vq->vdev, desc[i].next);
576 } 580 }
577 581
578 if (indirect) 582 if (indirect)
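Editor's note: the virtio_ring fix corrects the unmap walk on the error path. An indirect chain lives in its own scratch descriptor table, indexed from 0, and its `next` links point within that table; the old code started at `head` and chased links through the ring's vring.desc table instead, unmapping the wrong entries. The repaired walk, in isolation:

        /* sketch: unwind in whichever table actually holds the chain */
        i = indirect ? 0 : head;
        for (n = 0; n < total_sg; n++) {
                if (i == err_idx)
                        break;
                vring_unmap_one_split(vq, &desc[i]);            /* desc already points at the right table */
                i = virtio16_to_cpu(_vq->vdev, desc[i].next);   /* follow links within that table */
        }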
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1ff438fd5bc2..eeb75281894e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3628,6 +3628,13 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
3628 TASK_UNINTERRUPTIBLE); 3628 TASK_UNINTERRUPTIBLE);
3629} 3629}
3630 3630
3631static void end_extent_buffer_writeback(struct extent_buffer *eb)
3632{
3633 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3634 smp_mb__after_atomic();
3635 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3636}
3637
3631/* 3638/*
3632 * Lock eb pages and flush the bio if we can't get the locks 3639 * Lock eb pages and flush the bio if we can't get the locks
3633 * 3640 *
@@ -3699,8 +3706,11 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
3699 3706
3700 if (!trylock_page(p)) { 3707 if (!trylock_page(p)) {
3701 if (!flush) { 3708 if (!flush) {
3702 ret = flush_write_bio(epd); 3709 int err;
3703 if (ret < 0) { 3710
3711 err = flush_write_bio(epd);
3712 if (err < 0) {
3713 ret = err;
3704 failed_page_nr = i; 3714 failed_page_nr = i;
3705 goto err_unlock; 3715 goto err_unlock;
3706 } 3716 }
@@ -3715,16 +3725,23 @@ err_unlock:
3715 /* Unlock already locked pages */ 3725 /* Unlock already locked pages */
3716 for (i = 0; i < failed_page_nr; i++) 3726 for (i = 0; i < failed_page_nr; i++)
3717 unlock_page(eb->pages[i]); 3727 unlock_page(eb->pages[i]);
3728 /*
3729 * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
3730 * Also set EXTENT_BUFFER_DIRTY back so that future write attempts on
3731 * this eb can succeed, undoing everything done above.
3732 */
3733 btrfs_tree_lock(eb);
3734 spin_lock(&eb->refs_lock);
3735 set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
3736 end_extent_buffer_writeback(eb);
3737 spin_unlock(&eb->refs_lock);
3738 percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
3739 fs_info->dirty_metadata_batch);
3740 btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
3741 btrfs_tree_unlock(eb);
3718 return ret; 3742 return ret;
3719} 3743}
3720 3744
3721static void end_extent_buffer_writeback(struct extent_buffer *eb)
3722{
3723 clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
3724 smp_mb__after_atomic();
3725 wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
3726}
3727
3728static void set_btree_ioerr(struct page *page) 3745static void set_btree_ioerr(struct page *page)
3729{ 3746{
3730 struct extent_buffer *eb = (struct extent_buffer *)page->private; 3747 struct extent_buffer *eb = (struct extent_buffer *)page->private;
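Editor's note: end_extent_buffer_writeback() is moved above lock_extent_buffer_for_io() so the new error path can use it: if flushing the bio fails after some pages were already processed, the rollback re-takes the tree lock, sets EXTENT_BUFFER_DIRTY again, clears EXTENT_BUFFER_WRITEBACK with a wakeup, restores the dirty-metadata accounting, and drops the WRITTEN header flag, undoing everything done so far. The clear/barrier/wake triple is the standard bit-wait waker and pairs with the waiter in wait_on_extent_buffer_writeback():

        /* sketch: waker side of a wait_on_bit-style flag */
        clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
        smp_mb__after_atomic();         /* order the clear before the wakeup */
        wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);

        /* waiter side, blocking until the bit is cleared */
        wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_WRITEBACK, TASK_UNINTERRUPTIBLE);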
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 6c8297bcfeb7..1bfd7e34f31e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4985,7 +4985,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
4985 BTRFS_I(inode), 4985 BTRFS_I(inode),
4986 LOG_OTHER_INODE_ALL, 4986 LOG_OTHER_INODE_ALL,
4987 0, LLONG_MAX, ctx); 4987 0, LLONG_MAX, ctx);
4988 iput(inode); 4988 btrfs_add_delayed_iput(inode);
4989 } 4989 }
4990 } 4990 }
4991 continue; 4991 continue;
@@ -5000,7 +5000,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
5000 ret = btrfs_log_inode(trans, root, BTRFS_I(inode), 5000 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5001 LOG_OTHER_INODE, 0, LLONG_MAX, ctx); 5001 LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
5002 if (ret) { 5002 if (ret) {
5003 iput(inode); 5003 btrfs_add_delayed_iput(inode);
5004 continue; 5004 continue;
5005 } 5005 }
5006 5006
@@ -5009,7 +5009,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
5009 key.offset = 0; 5009 key.offset = 0;
5010 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5010 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5011 if (ret < 0) { 5011 if (ret < 0) {
5012 iput(inode); 5012 btrfs_add_delayed_iput(inode);
5013 continue; 5013 continue;
5014 } 5014 }
5015 5015
@@ -5056,7 +5056,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
5056 } 5056 }
5057 path->slots[0]++; 5057 path->slots[0]++;
5058 } 5058 }
5059 iput(inode); 5059 btrfs_add_delayed_iput(inode);
5060 } 5060 }
5061 5061
5062 return ret; 5062 return ret;
@@ -5689,7 +5689,7 @@ process_leaf:
5689 } 5689 }
5690 5690
5691 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) { 5691 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
5692 iput(di_inode); 5692 btrfs_add_delayed_iput(di_inode);
5693 break; 5693 break;
5694 } 5694 }
5695 5695
@@ -5701,7 +5701,7 @@ process_leaf:
5701 if (!ret && 5701 if (!ret &&
5702 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode))) 5702 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
5703 ret = 1; 5703 ret = 1;
5704 iput(di_inode); 5704 btrfs_add_delayed_iput(di_inode);
5705 if (ret) 5705 if (ret)
5706 goto next_dir_inode; 5706 goto next_dir_inode;
5707 if (ctx->log_new_dentries) { 5707 if (ctx->log_new_dentries) {
@@ -5848,7 +5848,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
5848 if (!ret && ctx && ctx->log_new_dentries) 5848 if (!ret && ctx && ctx->log_new_dentries)
5849 ret = log_new_dir_dentries(trans, root, 5849 ret = log_new_dir_dentries(trans, root,
5850 BTRFS_I(dir_inode), ctx); 5850 BTRFS_I(dir_inode), ctx);
5851 iput(dir_inode); 5851 btrfs_add_delayed_iput(dir_inode);
5852 if (ret) 5852 if (ret)
5853 goto out; 5853 goto out;
5854 } 5854 }
@@ -5891,7 +5891,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
5891 ret = btrfs_log_inode(trans, root, BTRFS_I(inode), 5891 ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
5892 LOG_INODE_EXISTS, 5892 LOG_INODE_EXISTS,
5893 0, LLONG_MAX, ctx); 5893 0, LLONG_MAX, ctx);
5894 iput(inode); 5894 btrfs_add_delayed_iput(inode);
5895 if (ret) 5895 if (ret)
5896 return ret; 5896 return ret;
5897 5897
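Editor's note: every iput() in the tree-log paths above becomes btrfs_add_delayed_iput(). Dropping the last inode reference synchronously can trigger eviction, and eviction may itself need to start a transaction or flush delalloc, which risks deadlocking against the log-commit context these functions run in. The delayed variant simply queues the final drop for a safe context (hedged sketch):

        /* sketch: defer the potentially-evicting final reference drop */
        btrfs_add_delayed_iput(inode);          /* queued on fs_info->delayed_iputs */
        /* ... later, from a safe context, the queue is drained via: */
        btrfs_run_delayed_iputs(fs_info);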
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index f752d83a9c44..520f1813e789 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -20,6 +20,15 @@
20#include <linux/list.h> 20#include <linux/list.h>
21#include <linux/spinlock.h> 21#include <linux/spinlock.h>
22 22
23struct configfs_fragment {
24 atomic_t frag_count;
25 struct rw_semaphore frag_sem;
26 bool frag_dead;
27};
28
29void put_fragment(struct configfs_fragment *);
30struct configfs_fragment *get_fragment(struct configfs_fragment *);
31
23struct configfs_dirent { 32struct configfs_dirent {
24 atomic_t s_count; 33 atomic_t s_count;
25 int s_dependent_count; 34 int s_dependent_count;
@@ -34,6 +43,7 @@ struct configfs_dirent {
34#ifdef CONFIG_LOCKDEP 43#ifdef CONFIG_LOCKDEP
35 int s_depth; 44 int s_depth;
36#endif 45#endif
46 struct configfs_fragment *s_frag;
37}; 47};
38 48
39#define CONFIGFS_ROOT 0x0001 49#define CONFIGFS_ROOT 0x0001
@@ -61,8 +71,8 @@ extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct in
61extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); 71extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
62extern int configfs_create_bin_file(struct config_item *, 72extern int configfs_create_bin_file(struct config_item *,
63 const struct configfs_bin_attribute *); 73 const struct configfs_bin_attribute *);
64extern int configfs_make_dirent(struct configfs_dirent *, 74extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
65 struct dentry *, void *, umode_t, int); 75 void *, umode_t, int, struct configfs_fragment *);
66extern int configfs_dirent_is_ready(struct configfs_dirent *); 76extern int configfs_dirent_is_ready(struct configfs_dirent *);
67 77
68extern void configfs_hash_and_remove(struct dentry * dir, const char * name); 78extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
@@ -137,6 +147,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd)
137{ 147{
138 if (!(sd->s_type & CONFIGFS_ROOT)) { 148 if (!(sd->s_type & CONFIGFS_ROOT)) {
139 kfree(sd->s_iattr); 149 kfree(sd->s_iattr);
150 put_fragment(sd->s_frag);
140 kmem_cache_free(configfs_dir_cachep, sd); 151 kmem_cache_free(configfs_dir_cachep, sd);
141 } 152 }
142} 153}
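Editor's note: the configfs_fragment added above is a small refcounted guard that ties the lifetime of a created subtree to rmdir/unregister. Every dirent minted during one mkdir or group registration shares a fragment, one reference per dirent via get_fragment(); attribute code can take frag_sem as a reader and check frag_dead, while teardown takes it as a writer and marks the fragment dead before detaching anything. A lifecycle sketch, using put_fragment()/get_fragment() declared above and new_fragment() from the dir.c hunk that follows:

        struct configfs_fragment *frag = new_fragment();        /* frag_count = 1 */
        if (!frag)
                return -ENOMEM;
        sd->s_frag = get_fragment(frag);        /* each dirent takes a reference */
        /* ... on rmdir/unregister, writers fence off late openers: */
        down_write(&frag->frag_sem);
        frag->frag_dead = true;
        up_write(&frag->frag_sem);
        put_fragment(frag);     /* creator's ref; freed when the last dirent drops its own */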
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 92112915de8e..79fc25aaa8cd 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -151,11 +151,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
151 151
152#endif /* CONFIG_LOCKDEP */ 152#endif /* CONFIG_LOCKDEP */
153 153
154static struct configfs_fragment *new_fragment(void)
155{
156 struct configfs_fragment *p;
157
158 p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
159 if (p) {
160 atomic_set(&p->frag_count, 1);
161 init_rwsem(&p->frag_sem);
162 p->frag_dead = false;
163 }
164 return p;
165}
166
167void put_fragment(struct configfs_fragment *frag)
168{
169 if (frag && atomic_dec_and_test(&frag->frag_count))
170 kfree(frag);
171}
172
173struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
174{
175 if (likely(frag))
176 atomic_inc(&frag->frag_count);
177 return frag;
178}
179
154/* 180/*
155 * Allocates a new configfs_dirent and links it to the parent configfs_dirent 181 * Allocates a new configfs_dirent and links it to the parent configfs_dirent
156 */ 182 */
157static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd, 183static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
158 void *element, int type) 184 void *element, int type,
185 struct configfs_fragment *frag)
159{ 186{
160 struct configfs_dirent * sd; 187 struct configfs_dirent * sd;
161 188
@@ -175,6 +202,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
175 kmem_cache_free(configfs_dir_cachep, sd); 202 kmem_cache_free(configfs_dir_cachep, sd);
176 return ERR_PTR(-ENOENT); 203 return ERR_PTR(-ENOENT);
177 } 204 }
205 sd->s_frag = get_fragment(frag);
178 list_add(&sd->s_sibling, &parent_sd->s_children); 206 list_add(&sd->s_sibling, &parent_sd->s_children);
179 spin_unlock(&configfs_dirent_lock); 207 spin_unlock(&configfs_dirent_lock);
180 208
@@ -209,11 +237,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
209 237
210int configfs_make_dirent(struct configfs_dirent * parent_sd, 238int configfs_make_dirent(struct configfs_dirent * parent_sd,
211 struct dentry * dentry, void * element, 239 struct dentry * dentry, void * element,
212 umode_t mode, int type) 240 umode_t mode, int type, struct configfs_fragment *frag)
213{ 241{
214 struct configfs_dirent * sd; 242 struct configfs_dirent * sd;
215 243
216 sd = configfs_new_dirent(parent_sd, element, type); 244 sd = configfs_new_dirent(parent_sd, element, type, frag);
217 if (IS_ERR(sd)) 245 if (IS_ERR(sd))
218 return PTR_ERR(sd); 246 return PTR_ERR(sd);
219 247
@@ -260,7 +288,8 @@ static void init_symlink(struct inode * inode)
260 * until it is validated by configfs_dir_set_ready() 288 * until it is validated by configfs_dir_set_ready()
261 */ 289 */
262 290
263static int configfs_create_dir(struct config_item *item, struct dentry *dentry) 291static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
292 struct configfs_fragment *frag)
264{ 293{
265 int error; 294 int error;
266 umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; 295 umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
@@ -273,7 +302,8 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
273 return error; 302 return error;
274 303
275 error = configfs_make_dirent(p->d_fsdata, dentry, item, mode, 304 error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
276 CONFIGFS_DIR | CONFIGFS_USET_CREATING); 305 CONFIGFS_DIR | CONFIGFS_USET_CREATING,
306 frag);
277 if (unlikely(error)) 307 if (unlikely(error))
278 return error; 308 return error;
279 309
@@ -338,9 +368,10 @@ int configfs_create_link(struct configfs_symlink *sl,
338{ 368{
339 int err = 0; 369 int err = 0;
340 umode_t mode = S_IFLNK | S_IRWXUGO; 370 umode_t mode = S_IFLNK | S_IRWXUGO;
371 struct configfs_dirent *p = parent->d_fsdata;
341 372
342 err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode, 373 err = configfs_make_dirent(p, dentry, sl, mode,
343 CONFIGFS_ITEM_LINK); 374 CONFIGFS_ITEM_LINK, p->s_frag);
344 if (!err) { 375 if (!err) {
345 err = configfs_create(dentry, mode, init_symlink); 376 err = configfs_create(dentry, mode, init_symlink);
346 if (err) { 377 if (err) {
@@ -599,7 +630,8 @@ static int populate_attrs(struct config_item *item)
599 630
600static int configfs_attach_group(struct config_item *parent_item, 631static int configfs_attach_group(struct config_item *parent_item,
601 struct config_item *item, 632 struct config_item *item,
602 struct dentry *dentry); 633 struct dentry *dentry,
634 struct configfs_fragment *frag);
603static void configfs_detach_group(struct config_item *item); 635static void configfs_detach_group(struct config_item *item);
604 636
605static void detach_groups(struct config_group *group) 637static void detach_groups(struct config_group *group)
@@ -647,7 +679,8 @@ static void detach_groups(struct config_group *group)
647 * try using vfs_mkdir. Just a thought. 679 * try using vfs_mkdir. Just a thought.
648 */ 680 */
649static int create_default_group(struct config_group *parent_group, 681static int create_default_group(struct config_group *parent_group,
650 struct config_group *group) 682 struct config_group *group,
683 struct configfs_fragment *frag)
651{ 684{
652 int ret; 685 int ret;
653 struct configfs_dirent *sd; 686 struct configfs_dirent *sd;
@@ -663,7 +696,7 @@ static int create_default_group(struct config_group *parent_group,
663 d_add(child, NULL); 696 d_add(child, NULL);
664 697
665 ret = configfs_attach_group(&parent_group->cg_item, 698 ret = configfs_attach_group(&parent_group->cg_item,
666 &group->cg_item, child); 699 &group->cg_item, child, frag);
667 if (!ret) { 700 if (!ret) {
668 sd = child->d_fsdata; 701 sd = child->d_fsdata;
669 sd->s_type |= CONFIGFS_USET_DEFAULT; 702 sd->s_type |= CONFIGFS_USET_DEFAULT;
@@ -677,13 +710,14 @@ static int create_default_group(struct config_group *parent_group,
677 return ret; 710 return ret;
678} 711}
679 712
680static int populate_groups(struct config_group *group) 713static int populate_groups(struct config_group *group,
714 struct configfs_fragment *frag)
681{ 715{
682 struct config_group *new_group; 716 struct config_group *new_group;
683 int ret = 0; 717 int ret = 0;
684 718
685 list_for_each_entry(new_group, &group->default_groups, group_entry) { 719 list_for_each_entry(new_group, &group->default_groups, group_entry) {
686 ret = create_default_group(group, new_group); 720 ret = create_default_group(group, new_group, frag);
687 if (ret) { 721 if (ret) {
688 detach_groups(group); 722 detach_groups(group);
689 break; 723 break;
@@ -797,11 +831,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g
797 */ 831 */
798static int configfs_attach_item(struct config_item *parent_item, 832static int configfs_attach_item(struct config_item *parent_item,
799 struct config_item *item, 833 struct config_item *item,
800 struct dentry *dentry) 834 struct dentry *dentry,
835 struct configfs_fragment *frag)
801{ 836{
802 int ret; 837 int ret;
803 838
804 ret = configfs_create_dir(item, dentry); 839 ret = configfs_create_dir(item, dentry, frag);
805 if (!ret) { 840 if (!ret) {
806 ret = populate_attrs(item); 841 ret = populate_attrs(item);
807 if (ret) { 842 if (ret) {
@@ -831,12 +866,13 @@ static void configfs_detach_item(struct config_item *item)
831 866
832static int configfs_attach_group(struct config_item *parent_item, 867static int configfs_attach_group(struct config_item *parent_item,
833 struct config_item *item, 868 struct config_item *item,
834 struct dentry *dentry) 869 struct dentry *dentry,
870 struct configfs_fragment *frag)
835{ 871{
836 int ret; 872 int ret;
837 struct configfs_dirent *sd; 873 struct configfs_dirent *sd;
838 874
839 ret = configfs_attach_item(parent_item, item, dentry); 875 ret = configfs_attach_item(parent_item, item, dentry, frag);
840 if (!ret) { 876 if (!ret) {
841 sd = dentry->d_fsdata; 877 sd = dentry->d_fsdata;
842 sd->s_type |= CONFIGFS_USET_DIR; 878 sd->s_type |= CONFIGFS_USET_DIR;
@@ -852,7 +888,7 @@ static int configfs_attach_group(struct config_item *parent_item,
852 */ 888 */
853 inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD); 889 inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
854 configfs_adjust_dir_dirent_depth_before_populate(sd); 890 configfs_adjust_dir_dirent_depth_before_populate(sd);
855 ret = populate_groups(to_config_group(item)); 891 ret = populate_groups(to_config_group(item), frag);
856 if (ret) { 892 if (ret) {
857 configfs_detach_item(item); 893 configfs_detach_item(item);
858 d_inode(dentry)->i_flags |= S_DEAD; 894 d_inode(dentry)->i_flags |= S_DEAD;
@@ -1247,6 +1283,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
1247 struct configfs_dirent *sd; 1283 struct configfs_dirent *sd;
1248 const struct config_item_type *type; 1284 const struct config_item_type *type;
 	struct module *subsys_owner = NULL, *new_item_owner = NULL;
+	struct configfs_fragment *frag;
 	char *name;
 
 	sd = dentry->d_parent->d_fsdata;
@@ -1265,6 +1302,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
 		goto out;
 	}
 
+	frag = new_fragment();
+	if (!frag) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
 	/* Get a working ref for the duration of this function */
 	parent_item = configfs_get_config_item(dentry->d_parent);
 	type = parent_item->ci_type;
@@ -1367,9 +1410,9 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
 	spin_unlock(&configfs_dirent_lock);
 
 	if (group)
-		ret = configfs_attach_group(parent_item, item, dentry);
+		ret = configfs_attach_group(parent_item, item, dentry, frag);
 	else
-		ret = configfs_attach_item(parent_item, item, dentry);
+		ret = configfs_attach_item(parent_item, item, dentry, frag);
 
 	spin_lock(&configfs_dirent_lock);
 	sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
@@ -1406,6 +1449,7 @@ out_put:
 	 * reference.
 	 */
 	config_item_put(parent_item);
+	put_fragment(frag);
 
 out:
 	return ret;
@@ -1417,6 +1461,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
 	struct config_item *item;
 	struct configfs_subsystem *subsys;
 	struct configfs_dirent *sd;
+	struct configfs_fragment *frag;
 	struct module *subsys_owner = NULL, *dead_item_owner = NULL;
 	int ret;
 
@@ -1474,6 +1519,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
 		}
 	} while (ret == -EAGAIN);
 
+	frag = sd->s_frag;
+	if (down_write_killable(&frag->frag_sem)) {
+		spin_lock(&configfs_dirent_lock);
+		configfs_detach_rollback(dentry);
+		spin_unlock(&configfs_dirent_lock);
+		return -EINTR;
+	}
+	frag->frag_dead = true;
+	up_write(&frag->frag_sem);
+
 	/* Get a working ref for the duration of this function */
 	item = configfs_get_config_item(dentry);
 
@@ -1574,7 +1629,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
 	 */
 	err = -ENOENT;
 	if (configfs_dirent_is_ready(parent_sd)) {
-		file->private_data = configfs_new_dirent(parent_sd, NULL, 0);
+		file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
 		if (IS_ERR(file->private_data))
 			err = PTR_ERR(file->private_data);
 		else
@@ -1732,8 +1787,13 @@ int configfs_register_group(struct config_group *parent_group,
 {
 	struct configfs_subsystem *subsys = parent_group->cg_subsys;
 	struct dentry *parent;
+	struct configfs_fragment *frag;
 	int ret;
 
+	frag = new_fragment();
+	if (!frag)
+		return -ENOMEM;
+
 	mutex_lock(&subsys->su_mutex);
 	link_group(parent_group, group);
 	mutex_unlock(&subsys->su_mutex);
@@ -1741,7 +1801,7 @@ int configfs_register_group(struct config_group *parent_group,
 	parent = parent_group->cg_item.ci_dentry;
 
 	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
-	ret = create_default_group(parent_group, group);
+	ret = create_default_group(parent_group, group, frag);
 	if (ret)
 		goto err_out;
 
@@ -1749,12 +1809,14 @@ int configfs_register_group(struct config_group *parent_group,
 	configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
 	spin_unlock(&configfs_dirent_lock);
 	inode_unlock(d_inode(parent));
+	put_fragment(frag);
 	return 0;
 err_out:
 	inode_unlock(d_inode(parent));
 	mutex_lock(&subsys->su_mutex);
 	unlink_group(group);
 	mutex_unlock(&subsys->su_mutex);
+	put_fragment(frag);
 	return ret;
 }
 EXPORT_SYMBOL(configfs_register_group);
@@ -1770,16 +1832,12 @@ void configfs_unregister_group(struct config_group *group)
 	struct configfs_subsystem *subsys = group->cg_subsys;
 	struct dentry *dentry = group->cg_item.ci_dentry;
 	struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+	struct configfs_dirent *sd = dentry->d_fsdata;
+	struct configfs_fragment *frag = sd->s_frag;
 
-	mutex_lock(&subsys->su_mutex);
-	if (!group->cg_item.ci_parent->ci_group) {
-		/*
-		 * The parent has already been unlinked and detached
-		 * due to a rmdir.
-		 */
-		goto unlink_group;
-	}
-	mutex_unlock(&subsys->su_mutex);
+	down_write(&frag->frag_sem);
+	frag->frag_dead = true;
+	up_write(&frag->frag_sem);
 
 	inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
 	spin_lock(&configfs_dirent_lock);
@@ -1796,7 +1854,6 @@ void configfs_unregister_group(struct config_group *group)
 	dput(dentry);
 
 	mutex_lock(&subsys->su_mutex);
-unlink_group:
 	unlink_group(group);
 	mutex_unlock(&subsys->su_mutex);
 }
@@ -1853,10 +1910,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
 	struct dentry *dentry;
 	struct dentry *root;
 	struct configfs_dirent *sd;
+	struct configfs_fragment *frag;
+
+	frag = new_fragment();
+	if (!frag)
+		return -ENOMEM;
 
 	root = configfs_pin_fs();
-	if (IS_ERR(root))
+	if (IS_ERR(root)) {
+		put_fragment(frag);
 		return PTR_ERR(root);
+	}
 
 	if (!group->cg_item.ci_name)
 		group->cg_item.ci_name = group->cg_item.ci_namebuf;
@@ -1872,7 +1936,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
 	d_add(dentry, NULL);
 
 	err = configfs_attach_group(sd->s_element, &group->cg_item,
-				    dentry);
+				    dentry, frag);
 	if (err) {
 		BUG_ON(d_inode(dentry));
 		d_drop(dentry);
@@ -1890,6 +1954,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
 		unlink_group(group);
 		configfs_release_fs();
 	}
+	put_fragment(frag);
 
 	return err;
 }
@@ -1899,12 +1964,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
 	struct config_group *group = &subsys->su_group;
 	struct dentry *dentry = group->cg_item.ci_dentry;
 	struct dentry *root = dentry->d_sb->s_root;
+	struct configfs_dirent *sd = dentry->d_fsdata;
+	struct configfs_fragment *frag = sd->s_frag;
 
 	if (dentry->d_parent != root) {
 		pr_err("Tried to unregister non-subsystem!\n");
 		return;
 	}
 
+	down_write(&frag->frag_sem);
+	frag->frag_dead = true;
+	up_write(&frag->frag_sem);
+
 	inode_lock_nested(d_inode(root),
 			  I_MUTEX_PARENT);
 	inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
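The dir.c side of this series is easier to follow with the fragment type in front of you. A minimal sketch of what new_fragment()/put_fragment() plausibly look like — simplified, and not a verbatim copy of the fs/configfs definitions:

#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/atomic.h>

struct configfs_fragment {
	atomic_t frag_count;		/* shared by every dirent created under one mkdir/register */
	struct rw_semaphore frag_sem;	/* readers: attribute methods; writer: teardown */
	bool frag_dead;			/* set exactly once, with frag_sem held for write */
};

static struct configfs_fragment *new_fragment(void)
{
	struct configfs_fragment *p;

	p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
	if (p) {
		atomic_set(&p->frag_count, 1);
		init_rwsem(&p->frag_sem);
		p->frag_dead = false;
	}
	return p;
}

static void put_fragment(struct configfs_fragment *frag)
{
	if (frag && atomic_dec_and_test(&frag->frag_count))
		kfree(frag);
}

Teardown paths (rmdir, unregister) take frag_sem for write and set frag_dead, so any attribute method that later takes the semaphore for read sees the flag and returns -ENOENT instead of calling into a half-destroyed item.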
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 61e4db4390a1..fb65b706cc0d 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -39,40 +39,44 @@ struct configfs_buffer {
 	bool write_in_progress;
 	char *bin_buffer;
 	int bin_buffer_size;
+	int cb_max_size;
+	struct config_item *item;
+	struct module *owner;
+	union {
+		struct configfs_attribute *attr;
+		struct configfs_bin_attribute *bin_attr;
+	};
 };
 
+static inline struct configfs_fragment *to_frag(struct file *file)
+{
+	struct configfs_dirent *sd = file->f_path.dentry->d_fsdata;
 
-/**
- * fill_read_buffer - allocate and fill buffer from item.
- * @dentry: dentry pointer.
- * @buffer: data buffer for file.
- *
- * Allocate @buffer->page, if it hasn't been already, then call the
- * config_item's show() method to fill the buffer with this attribute's
- * data.
- * This is called only once, on the file's first read.
- */
-static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
+	return sd->s_frag;
+}
+
+static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
 {
-	struct configfs_attribute * attr = to_attr(dentry);
-	struct config_item * item = to_item(dentry->d_parent);
-	int ret = 0;
-	ssize_t count;
+	struct configfs_fragment *frag = to_frag(file);
+	ssize_t count = -ENOENT;
 
 	if (!buffer->page)
 		buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
 	if (!buffer->page)
 		return -ENOMEM;
 
-	count = attr->show(item, buffer->page);
-
-	BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE);
-	if (count >= 0) {
-		buffer->needs_read_fill = 0;
-		buffer->count = count;
-	} else
-		ret = count;
-	return ret;
+	down_read(&frag->frag_sem);
+	if (!frag->frag_dead)
+		count = buffer->attr->show(buffer->item, buffer->page);
+	up_read(&frag->frag_sem);
+
+	if (count < 0)
+		return count;
+	if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE))
+		return -EIO;
+	buffer->needs_read_fill = 0;
+	buffer->count = count;
+	return 0;
 }
 
 /**
@@ -97,12 +101,13 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf
 static ssize_t
 configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
-	struct configfs_buffer * buffer = file->private_data;
+	struct configfs_buffer *buffer = file->private_data;
 	ssize_t retval = 0;
 
 	mutex_lock(&buffer->mutex);
 	if (buffer->needs_read_fill) {
-		if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
+		retval = fill_read_buffer(file, buffer);
+		if (retval)
 			goto out;
 	}
 	pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
@@ -138,10 +143,8 @@ static ssize_t
 configfs_read_bin_file(struct file *file, char __user *buf,
 		       size_t count, loff_t *ppos)
 {
+	struct configfs_fragment *frag = to_frag(file);
 	struct configfs_buffer *buffer = file->private_data;
-	struct dentry *dentry = file->f_path.dentry;
-	struct config_item *item = to_item(dentry->d_parent);
-	struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
 	ssize_t retval = 0;
 	ssize_t len = min_t(size_t, count, PAGE_SIZE);
 
@@ -156,14 +159,19 @@ configfs_read_bin_file(struct file *file, char __user *buf,
 
 	if (buffer->needs_read_fill) {
 		/* perform first read with buf == NULL to get extent */
-		len = bin_attr->read(item, NULL, 0);
+		down_read(&frag->frag_sem);
+		if (!frag->frag_dead)
+			len = buffer->bin_attr->read(buffer->item, NULL, 0);
+		else
+			len = -ENOENT;
+		up_read(&frag->frag_sem);
 		if (len <= 0) {
 			retval = len;
 			goto out;
 		}
 
 		/* do not exceed the maximum value */
-		if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) {
+		if (buffer->cb_max_size && len > buffer->cb_max_size) {
 			retval = -EFBIG;
 			goto out;
 		}
@@ -176,7 +184,13 @@ configfs_read_bin_file(struct file *file, char __user *buf,
 		buffer->bin_buffer_size = len;
 
 		/* perform second read to fill buffer */
-		len = bin_attr->read(item, buffer->bin_buffer, len);
+		down_read(&frag->frag_sem);
+		if (!frag->frag_dead)
+			len = buffer->bin_attr->read(buffer->item,
+						     buffer->bin_buffer, len);
+		else
+			len = -ENOENT;
+		up_read(&frag->frag_sem);
 		if (len < 0) {
 			retval = len;
 			vfree(buffer->bin_buffer);
@@ -226,25 +240,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
 	return error ? -EFAULT : count;
 }
 
-
-/**
- * flush_write_buffer - push buffer to config_item.
- * @dentry: dentry to the attribute
- * @buffer: data buffer for file.
- * @count: number of bytes
- *
- * Get the correct pointers for the config_item and the attribute we're
- * dealing with, then call the store() method for the attribute,
- * passing the buffer that we acquired in fill_write_buffer().
- */
-
 static int
-flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
+flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count)
 {
-	struct configfs_attribute * attr = to_attr(dentry);
-	struct config_item * item = to_item(dentry->d_parent);
+	struct configfs_fragment *frag = to_frag(file);
+	int res = -ENOENT;
 
-	return attr->store(item, buffer->page, count);
+	down_read(&frag->frag_sem);
+	if (!frag->frag_dead)
+		res = buffer->attr->store(buffer->item, buffer->page, count);
+	up_read(&frag->frag_sem);
+	return res;
 }
 
 
@@ -268,13 +274,13 @@ flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size
 static ssize_t
 configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
 {
-	struct configfs_buffer * buffer = file->private_data;
+	struct configfs_buffer *buffer = file->private_data;
 	ssize_t len;
 
 	mutex_lock(&buffer->mutex);
 	len = fill_write_buffer(buffer, buf, count);
 	if (len > 0)
-		len = flush_write_buffer(file->f_path.dentry, buffer, len);
+		len = flush_write_buffer(file, buffer, len);
 	if (len > 0)
 		*ppos += len;
 	mutex_unlock(&buffer->mutex);
@@ -299,8 +305,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
 			size_t count, loff_t *ppos)
 {
 	struct configfs_buffer *buffer = file->private_data;
-	struct dentry *dentry = file->f_path.dentry;
-	struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
 	void *tbuf = NULL;
 	ssize_t len;
 
@@ -316,8 +320,8 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
 	/* buffer grows? */
 	if (*ppos + count > buffer->bin_buffer_size) {
 
-		if (bin_attr->cb_max_size &&
-		    *ppos + count > bin_attr->cb_max_size) {
+		if (buffer->cb_max_size &&
+		    *ppos + count > buffer->cb_max_size) {
 			len = -EFBIG;
 			goto out;
 		}
@@ -349,31 +353,51 @@ out:
 	return len;
 }
 
-static int check_perm(struct inode * inode, struct file * file, int type)
+static int __configfs_open_file(struct inode *inode, struct file *file, int type)
 {
-	struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
-	struct configfs_attribute * attr = to_attr(file->f_path.dentry);
-	struct configfs_bin_attribute *bin_attr = NULL;
-	struct configfs_buffer * buffer;
-	struct configfs_item_operations * ops = NULL;
-	int error = 0;
+	struct dentry *dentry = file->f_path.dentry;
+	struct configfs_fragment *frag = to_frag(file);
+	struct configfs_attribute *attr;
+	struct configfs_buffer *buffer;
+	int error;
 
-	if (!item || !attr)
-		goto Einval;
+	error = -ENOMEM;
+	buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
+	if (!buffer)
+		goto out;
 
-	if (type & CONFIGFS_ITEM_BIN_ATTR)
-		bin_attr = to_bin_attr(file->f_path.dentry);
+	error = -ENOENT;
+	down_read(&frag->frag_sem);
+	if (unlikely(frag->frag_dead))
+		goto out_free_buffer;
 
-	/* Grab the module reference for this attribute if we have one */
-	if (!try_module_get(attr->ca_owner)) {
-		error = -ENODEV;
-		goto Done;
+	error = -EINVAL;
+	buffer->item = to_item(dentry->d_parent);
+	if (!buffer->item)
+		goto out_free_buffer;
+
+	attr = to_attr(dentry);
+	if (!attr)
+		goto out_put_item;
+
+	if (type & CONFIGFS_ITEM_BIN_ATTR) {
+		buffer->bin_attr = to_bin_attr(dentry);
+		buffer->cb_max_size = buffer->bin_attr->cb_max_size;
+	} else {
+		buffer->attr = attr;
 	}
 
-	if (item->ci_type)
-		ops = item->ci_type->ct_item_ops;
-	else
-		goto Eaccess;
+	buffer->owner = attr->ca_owner;
+	/* Grab the module reference for this attribute if we have one */
+	error = -ENODEV;
+	if (!try_module_get(buffer->owner))
+		goto out_put_item;
+
+	error = -EACCES;
+	if (!buffer->item->ci_type)
+		goto out_put_module;
+
+	buffer->ops = buffer->item->ci_type->ct_item_ops;
 
 	/* File needs write support.
 	 * The inode's perms must say it's ok,
@@ -381,13 +405,11 @@ static int check_perm(struct inode * inode, struct file * file, int type)
 	 */
 	if (file->f_mode & FMODE_WRITE) {
 		if (!(inode->i_mode & S_IWUGO))
-			goto Eaccess;
-
+			goto out_put_module;
 		if ((type & CONFIGFS_ITEM_ATTR) && !attr->store)
-			goto Eaccess;
-
-		if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write)
-			goto Eaccess;
+			goto out_put_module;
+		if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write)
+			goto out_put_module;
 	}
 
 	/* File needs read support.
@@ -396,92 +418,72 @@ static int check_perm(struct inode * inode, struct file * file, int type)
 	 */
 	if (file->f_mode & FMODE_READ) {
 		if (!(inode->i_mode & S_IRUGO))
-			goto Eaccess;
-
+			goto out_put_module;
 		if ((type & CONFIGFS_ITEM_ATTR) && !attr->show)
-			goto Eaccess;
-
-		if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read)
-			goto Eaccess;
+			goto out_put_module;
+		if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read)
+			goto out_put_module;
 	}
 
-	/* No error? Great, allocate a buffer for the file, and store it
-	 * it in file->private_data for easy access.
-	 */
-	buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
-	if (!buffer) {
-		error = -ENOMEM;
-		goto Enomem;
-	}
 	mutex_init(&buffer->mutex);
 	buffer->needs_read_fill = 1;
 	buffer->read_in_progress = false;
 	buffer->write_in_progress = false;
-	buffer->ops = ops;
 	file->private_data = buffer;
-	goto Done;
+	up_read(&frag->frag_sem);
+	return 0;
 
-Einval:
-	error = -EINVAL;
-	goto Done;
-Eaccess:
-	error = -EACCES;
-Enomem:
-	module_put(attr->ca_owner);
-Done:
-	if (error && item)
-		config_item_put(item);
+out_put_module:
+	module_put(buffer->owner);
+out_put_item:
+	config_item_put(buffer->item);
out_free_buffer:
+	up_read(&frag->frag_sem);
+	kfree(buffer);
+out:
 	return error;
 }
 
 static int configfs_release(struct inode *inode, struct file *filp)
 {
-	struct config_item * item = to_item(filp->f_path.dentry->d_parent);
-	struct configfs_attribute * attr = to_attr(filp->f_path.dentry);
-	struct module * owner = attr->ca_owner;
-	struct configfs_buffer * buffer = filp->private_data;
+	struct configfs_buffer *buffer = filp->private_data;
 
-	if (item)
-		config_item_put(item);
-	/* After this point, attr should not be accessed. */
-	module_put(owner);
-
-	if (buffer) {
-		if (buffer->page)
-			free_page((unsigned long)buffer->page);
-		mutex_destroy(&buffer->mutex);
-		kfree(buffer);
-	}
+	module_put(buffer->owner);
+	if (buffer->page)
+		free_page((unsigned long)buffer->page);
+	mutex_destroy(&buffer->mutex);
+	kfree(buffer);
 	return 0;
 }
 
 static int configfs_open_file(struct inode *inode, struct file *filp)
 {
-	return check_perm(inode, filp, CONFIGFS_ITEM_ATTR);
+	return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR);
 }
 
 static int configfs_open_bin_file(struct inode *inode, struct file *filp)
 {
-	return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
+	return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
 }
 
-static int configfs_release_bin_file(struct inode *inode, struct file *filp)
+static int configfs_release_bin_file(struct inode *inode, struct file *file)
 {
-	struct configfs_buffer *buffer = filp->private_data;
-	struct dentry *dentry = filp->f_path.dentry;
-	struct config_item *item = to_item(dentry->d_parent);
-	struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
-	ssize_t len = 0;
-	int ret;
+	struct configfs_buffer *buffer = file->private_data;
 
 	buffer->read_in_progress = false;
 
 	if (buffer->write_in_progress) {
+		struct configfs_fragment *frag = to_frag(file);
 		buffer->write_in_progress = false;
 
-		len = bin_attr->write(item, buffer->bin_buffer,
-				buffer->bin_buffer_size);
-
+		down_read(&frag->frag_sem);
+		if (!frag->frag_dead) {
+			/* result of ->release() is ignored */
+			buffer->bin_attr->write(buffer->item,
+					buffer->bin_buffer,
+					buffer->bin_buffer_size);
+		}
+		up_read(&frag->frag_sem);
 		/* vfree on NULL is safe */
 		vfree(buffer->bin_buffer);
 		buffer->bin_buffer = NULL;
@@ -489,10 +491,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp)
 		buffer->needs_read_fill = 1;
 	}
 
-	ret = configfs_release(inode, filp);
-	if (len < 0)
-		return len;
-	return ret;
+	configfs_release(inode, file);
+	return 0;
 }
 
 
@@ -527,7 +527,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib
 
 	inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
 	error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
-				     CONFIGFS_ITEM_ATTR);
+				     CONFIGFS_ITEM_ATTR, parent_sd->s_frag);
 	inode_unlock(d_inode(dir));
 
 	return error;
@@ -549,7 +549,7 @@ int configfs_create_bin_file(struct config_item *item,
 
 	inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
 	error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
-				     CONFIGFS_ITEM_BIN_ATTR);
+				     CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag);
 	inode_unlock(dir->d_inode);
 
 	return error;
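Every ->show()/->store() and binary read/write call site in file.c now repeats the same guard. A hypothetical helper (the patch itself open-codes the guard at each site) makes the pattern explicit: take the fragment rwsem shared, and only call into the item if teardown has not marked the fragment dead.

static ssize_t configfs_call_show(struct file *file,
				  struct configfs_buffer *buffer)
{
	struct configfs_fragment *frag = to_frag(file);
	ssize_t count = -ENOENT;

	down_read(&frag->frag_sem);
	if (!frag->frag_dead)
		count = buffer->attr->show(buffer->item, buffer->page);
	up_read(&frag->frag_sem);
	return count;
}

Because buffer->item and buffer->attr are cached at open time, the callbacks no longer re-derive them from the dentry on every read or write.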
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c764cfe456e5..2a03bfeec10a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1403,11 +1403,12 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
 	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
 		return 0;
 
-	/* No fileid? Just exit */
-	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
-		return 0;
+	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+		/* Only a mounted-on-fileid? Just exit */
+		if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+			return 0;
 	/* Has the inode gone and changed behind our back? */
-	if (nfsi->fileid != fattr->fileid) {
+	} else if (nfsi->fileid != fattr->fileid) {
 		/* Is this perhaps the mounted-on fileid? */
 		if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
 		    nfsi->fileid == fattr->mounted_on_fileid)
@@ -1807,11 +1808,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 		nfs_display_fhandle_hash(NFS_FH(inode)),
 		atomic_read(&inode->i_count), fattr->valid);
 
-	/* No fileid? Just exit */
-	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
-		return 0;
+	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+		/* Only a mounted-on-fileid? Just exit */
+		if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+			return 0;
 	/* Has the inode gone and changed behind our back? */
-	if (nfsi->fileid != fattr->fileid) {
+	} else if (nfsi->fileid != fattr->fileid) {
 		/* Is this perhaps the mounted-on fileid? */
 		if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
 		    nfsi->fileid == fattr->mounted_on_fileid)
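Both hunks encode the same rule: only compare fileids when the server actually sent one, since a reply carrying just a mounted-on-fileid would otherwise falsely flag the inode as changed. A rough standalone restatement (simplified; the delegation check and the surrounding update logic are omitted):

#include <linux/nfs_fs.h>	/* struct nfs_inode */
#include <linux/nfs_xdr.h>	/* struct nfs_fattr, NFS_ATTR_FATTR_* */

static bool nfs_fileid_unchanged(const struct nfs_inode *nfsi,
				 const struct nfs_fattr *fattr)
{
	if (!(fattr->valid & NFS_ATTR_FATTR_FILEID))
		/* at most a mounted-on-fileid: nothing to compare against */
		return true;
	if (nfsi->fileid == fattr->fileid)
		return true;
	/* perhaps the mounted-on fileid of a covered mount point */
	return (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
	       nfsi->fileid == fattr->mounted_on_fileid;
}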
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index f0fd5636fddb..5e88e7e33abe 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -24,7 +24,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 			long ______r;					\
 			static struct ftrace_likely_data		\
 				__aligned(4)				\
-				__section("_ftrace_annotated_branch")	\
+				__section(_ftrace_annotated_branch)	\
 				______f = {				\
 				.data.func = __func__,			\
 				.data.file = __FILE__,			\
@@ -60,7 +60,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 #define __trace_if_value(cond) ({			\
 	static struct ftrace_branch_data		\
 		__aligned(4)				\
-		__section("_ftrace_branch")		\
+		__section(_ftrace_branch)		\
 		__if_trace = {				\
 			.func = __func__,		\
 			.file = __FILE__,		\
@@ -118,7 +118,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 	".popsection\n\t"
 
 /* Annotate a C jump table to allow objtool to follow the code flow */
-#define __annotate_jump_table __section(".rodata..c_jump_table")
+#define __annotate_jump_table __section(.rodata..c_jump_table)
 
 #else
 #define annotate_reachable()
@@ -298,7 +298,7 @@ unsigned long read_word_at_a_time(const void *addr)
 * visible to the compiler.
 */
 #define __ADDRESSABLE(sym) \
-	static void * __section(".discard.addressable") __used \
+	static void * __section(.discard.addressable) __used \
		__PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
 
 /**
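All four hunks drop the quotes for the same reason: in this tree __section() stringifies its argument itself, so a quoted argument would come out double-quoted. The assumed shape of the macro (from compiler_attributes.h of the same era — check the tree you build against):

#define __section(S) __attribute__((__section__(#S)))

/*
 * __section(.rodata..c_jump_table) therefore expands to
 * __attribute__((__section__(".rodata..c_jump_table"))), whereas
 * passing ".rodata..c_jump_table" would stringify to
 * "\".rodata..c_jump_table\"" and name the wrong section.
 */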
diff --git a/include/linux/input/elan-i2c-ids.h b/include/linux/input/elan-i2c-ids.h
index ceabb01a6a7d..1ecb6b45812c 100644
--- a/include/linux/input/elan-i2c-ids.h
+++ b/include/linux/input/elan-i2c-ids.h
@@ -48,7 +48,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
 	{ "ELAN0618", 0 },
 	{ "ELAN0619", 0 },
 	{ "ELAN061A", 0 },
-	{ "ELAN061B", 0 },
+/*	{ "ELAN061B", 0 }, not working on the Lenovo Legion Y7000 */
 	{ "ELAN061C", 0 },
 	{ "ELAN061D", 0 },
 	{ "ELAN061E", 0 },
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index f2ae8a006ff8..4fc6454f7ebb 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -346,7 +346,6 @@ enum {
 #define QI_PC_PASID_SEL		(QI_PC_TYPE | QI_PC_GRAN(1))
 
 #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
-#define QI_EIOTLB_GL(gl)	(((u64)gl) << 7)
 #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
 #define QI_EIOTLB_AM(am)	(((u64)am))
 #define QI_EIOTLB_PASID(pasid)	(((u64)pasid) << 32)
@@ -378,8 +377,6 @@ enum {
 #define QI_RESP_INVALID		0x1
 #define QI_RESP_FAILURE		0xf
 
-#define QI_GRAN_ALL_ALL		0
-#define QI_GRAN_NONG_ALL	1
 #define QI_GRAN_NONG_PASID	2
 #define QI_GRAN_PSI_PASID	3
 
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h
index 1e5d86ebdaeb..52bc8e487ef7 100644
--- a/include/linux/phy_fixed.h
+++ b/include/linux/phy_fixed.h
@@ -11,6 +11,7 @@ struct fixed_phy_status {
 };
 
 struct device_node;
+struct gpio_desc;
 
 #if IS_ENABLED(CONFIG_FIXED_PHY)
 extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 88145da7d140..f7c561c4dcdd 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -1402,4 +1402,23 @@ static inline unsigned int ksys_personality(unsigned int personality)
 	return old;
 }
 
+/* for __ARCH_WANT_SYS_IPC */
+long ksys_semtimedop(int semid, struct sembuf __user *tsops,
+		     unsigned int nsops,
+		     const struct __kernel_timespec __user *timeout);
+long ksys_semget(key_t key, int nsems, int semflg);
+long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
+long ksys_msgget(key_t key, int msgflg);
+long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
+long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+		 long msgtyp, int msgflg);
+long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
+		 int msgflg);
+long ksys_shmget(key_t key, size_t size, int shmflg);
+long ksys_shmdt(char __user *shmaddr);
+long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
+long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
+			    unsigned int nsops,
+			    const struct old_timespec32 __user *timeout);
+
 #endif
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 4c81846ccce8..ab1ca9e238d2 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -513,7 +513,7 @@ int ip_valid_fib_dump_req(struct net *net, const struct nlmsghdr *nlh,
 			  struct netlink_callback *cb);
 
 int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nh,
-		     unsigned char *flags, bool skip_oif);
+		     u8 rt_family, unsigned char *flags, bool skip_oif);
 int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nh,
-		    int nh_weight);
+		    int nh_weight, u8 rt_family);
 #endif  /* _NET_FIB_H */
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 95f766c31c90..331ebbc94fe7 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -161,7 +161,8 @@ struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel)
 }
 
 static inline
-int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh)
+int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh,
+			    u8 rt_family)
 {
 	struct nh_group *nhg = rtnl_dereference(nh->nh_grp);
 	int i;
@@ -172,7 +173,7 @@ int nexthop_mpath_fill_node(struct sk_buff *skb, struct nexthop *nh)
 		struct fib_nh_common *nhc = &nhi->fib_nhc;
 		int weight = nhg->nh_entries[i].weight;
 
-		if (fib_add_nexthop(skb, nhc, weight) < 0)
+		if (fib_add_nexthop(skb, nhc, weight, rt_family) < 0)
 			return -EMSGSIZE;
 	}
 
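The new rt_family argument threads the *route's* family down to the gateway-encoding code in fib_semantics.c (see that file's hunks further below): with shared nexthop objects, one nexthop can serve both IPv4 and IPv6 routes, so the old comparison against the nexthop's own family could pick the wrong netlink attribute. A hypothetical standalone restatement of the decision:

#include <stdint.h>

enum gw_attr { GW_RTA_GATEWAY, GW_RTA_VIA };

static enum gw_attr gateway_attr(uint8_t rt_family, uint8_t gw_family)
{
	/* a cross-family gateway (e.g. an IPv4 route via an IPv6
	 * neighbour) must carry its family explicitly, hence RTA_VIA */
	return rt_family != gw_family ? GW_RTA_VIA : GW_RTA_GATEWAY;
}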
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index b22db30c3d88..aa08a7a5f6ac 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -983,7 +983,6 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
 
 struct xfrm_if_parms {
-	char name[IFNAMSIZ];	/* name of XFRM device */
 	int link;		/* ifindex of underlying L2 interface */
 	u32 if_id;		/* interface identifyer */
 };
@@ -991,7 +990,6 @@ struct xfrm_if_parms {
 struct xfrm_if {
 	struct xfrm_if __rcu *next;	/* next interface in list */
 	struct net_device *dev;		/* virtual device associated with interface */
-	struct net_device *phydev;	/* physical device */
 	struct net *net;		/* netns for packet i/o */
 	struct xfrm_if_parms p;		/* interface parms */
 
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 1be0e798e362..1fc8faa6e973 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -569,7 +569,7 @@ __SYSCALL(__NR_semget, sys_semget)
 __SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
 #if defined(__ARCH_WANT_TIME32_SYSCALLS) || __BITS_PER_LONG != 32
 #define __NR_semtimedop 192
-__SC_COMP(__NR_semtimedop, sys_semtimedop, sys_semtimedop_time32)
+__SC_3264(__NR_semtimedop, sys_semtimedop_time32, sys_semtimedop)
 #endif
 #define __NR_semop 193
 __SYSCALL(__NR_semop, sys_semop)
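__SC_COMP wires one native entry plus a compat entry for compat tasks; __SC_3264 instead picks between two native implementations by word size, which is what semtimedop needs once the 64-bit-time variant is the native one on 64-bit kernels. The selector in this header is, approximately (the exact definition should be checked in the tree):

#if __BITS_PER_LONG == 32 || defined(__SYSCALL_COMPAT)
#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32)
#else
#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
#endif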
diff --git a/include/uapi/linux/isdn/capicmd.h b/include/uapi/linux/isdn/capicmd.h
index 4941628a4fb9..5ec88e7548a9 100644
--- a/include/uapi/linux/isdn/capicmd.h
+++ b/include/uapi/linux/isdn/capicmd.h
@@ -16,6 +16,7 @@
 #define CAPI_MSG_BASELEN		8
 #define CAPI_DATA_B3_REQ_LEN		(CAPI_MSG_BASELEN+4+4+2+2+2)
 #define CAPI_DATA_B3_RESP_LEN		(CAPI_MSG_BASELEN+4+2)
+#define CAPI_DISCONNECT_B3_RESP_LEN	(CAPI_MSG_BASELEN+4)
 
 /*----- CAPI commands -----*/
 #define CAPI_ALERT		0x01
diff --git a/ipc/util.h b/ipc/util.h
index 0fcf8e719b76..5766c61aed0e 100644
--- a/ipc/util.h
+++ b/ipc/util.h
@@ -276,29 +276,7 @@ static inline int compat_ipc_parse_version(int *cmd)
 	*cmd &= ~IPC_64;
 	return version;
 }
-#endif
 
-/* for __ARCH_WANT_SYS_IPC */
-long ksys_semtimedop(int semid, struct sembuf __user *tsops,
-		     unsigned int nsops,
-		     const struct __kernel_timespec __user *timeout);
-long ksys_semget(key_t key, int nsems, int semflg);
-long ksys_old_semctl(int semid, int semnum, int cmd, unsigned long arg);
-long ksys_msgget(key_t key, int msgflg);
-long ksys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf);
-long ksys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
-		 long msgtyp, int msgflg);
-long ksys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz,
-		 int msgflg);
-long ksys_shmget(key_t key, size_t size, int shmflg);
-long ksys_shmdt(char __user *shmaddr);
-long ksys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf);
-
-/* for CONFIG_ARCH_WANT_OLD_COMPAT_IPC */
-long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
-			    unsigned int nsops,
-			    const struct old_timespec32 __user *timeout);
-#ifdef CONFIG_COMPAT
 long compat_ksys_old_semctl(int semid, int semnum, int cmd, int arg);
 long compat_ksys_old_msgctl(int msqid, int cmd, void __user *uptr);
 long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
@@ -306,6 +284,7 @@ long compat_ksys_msgrcv(int msqid, compat_uptr_t msgp, compat_ssize_t msgsz,
 long compat_ksys_msgsnd(int msqid, compat_uptr_t msgp,
 			compat_ssize_t msgsz, int msgflg);
 long compat_ksys_old_shmctl(int shmid, int cmd, void __user *uptr);
-#endif /* CONFIG_COMPAT */
+
+#endif
 
 #endif
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 3fb50757e812..315798037d6c 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1772,16 +1772,21 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno,
 		bitmap_from_u64(mask, stack_mask);
 		for_each_set_bit(i, mask, 64) {
 			if (i >= func->allocated_stack / BPF_REG_SIZE) {
-				/* This can happen if backtracking
-				 * is propagating stack precision where
-				 * caller has larger stack frame
-				 * than callee, but backtrack_insn() should
-				 * have returned -ENOTSUPP.
+				/* the sequence of instructions:
+				 * 2: (bf) r3 = r10
+				 * 3: (7b) *(u64 *)(r3 -8) = r0
+				 * 4: (79) r4 = *(u64 *)(r10 -8)
+				 * doesn't contain jmps. It's backtracked
+				 * as a single block.
+				 * During backtracking insn 3 is not recognized as
+				 * stack access, so at the end of backtracking
+				 * stack slot fp-8 is still marked in stack_mask.
+				 * However the parent state may not have accessed
+				 * fp-8 and it's "unallocated" stack space.
+				 * In such case fallback to conservative.
 				 */
-				verbose(env, "BUG spi %d stack_size %d\n",
-					i, func->allocated_stack);
-				WARN_ONCE(1, "verifier backtracking bug");
-				return -EFAULT;
+				mark_all_scalars_precise(env, st);
+				return 0;
 			}
 
 			if (func->stack[i].slot_type[0] != STACK_SPILL) {
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 753afbca549f..8be1da1ebd9a 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5255,8 +5255,16 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 	 * if the parent has to be frozen, the child has too.
 	 */
 	cgrp->freezer.e_freeze = parent->freezer.e_freeze;
-	if (cgrp->freezer.e_freeze)
+	if (cgrp->freezer.e_freeze) {
+		/*
+		 * Set the CGRP_FREEZE flag, so when a process will be
+		 * attached to the child cgroup, it will become frozen.
+		 * At this point the new cgroup is unpopulated, so we can
+		 * consider it frozen immediately.
+		 */
+		set_bit(CGRP_FREEZE, &cgrp->flags);
 		set_bit(CGRP_FROZEN, &cgrp->flags);
+	}
 
 	spin_lock_irq(&css_set_lock);
 	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index c5cd852fe86b..3cc8416ec844 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -413,7 +413,7 @@ static int hw_breakpoint_parse(struct perf_event *bp,
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
-	struct arch_hw_breakpoint hw;
+	struct arch_hw_breakpoint hw = { };
 	int err;
 
 	err = reserve_bp_slot(bp);
@@ -461,7 +461,7 @@ int
 modify_user_hw_breakpoint_check(struct perf_event *bp, struct perf_event_attr *attr,
 				bool check)
 {
-	struct arch_hw_breakpoint hw;
+	struct arch_hw_breakpoint hw = { };
 	int err;
 
 	err = hw_breakpoint_parse(bp, attr, &hw);
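Both hunks replace an uninitialized stack struct with an empty initializer. The point of `= { }` is that every member starts at zero before hw_breakpoint_parse() fills in only the fields a given architecture knows about; a tiny illustration (not kernel code, and note that zeroing of *padding* for automatic storage is only a common-compiler behaviour, not a C-standard guarantee):

struct parsed {
	unsigned long address;
	unsigned int len;
	unsigned int type;
};

void demo(void)
{
	struct parsed a;		/* members hold indeterminate stack garbage */
	struct parsed b = { };		/* all members start at zero */

	/* copying or comparing 'a' before every field is assigned reads
	 * garbage; 'b' is safe even if a parser fills only some fields. */
	(void)a;
	(void)b;
}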
diff --git a/kernel/fork.c b/kernel/fork.c
index 2852d0e76ea3..541fd805fb88 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -2338,6 +2338,8 @@ struct mm_struct *copy_init_mm(void)
 *
 * It copies the process, and if successful kick-starts
 * it and waits for it to finish using the VM if required.
+ *
+ * args->exit_signal is expected to be checked for sanity by the caller.
 */
 long _do_fork(struct kernel_clone_args *args)
 {
@@ -2562,6 +2564,14 @@ noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
 	if (copy_from_user(&args, uargs, size))
 		return -EFAULT;
 
+	/*
+	 * Verify that higher 32bits of exit_signal are unset and that
+	 * it is a valid signal
+	 */
+	if (unlikely((args.exit_signal & ~((u64)CSIGNAL)) ||
+		     !valid_signal(args.exit_signal)))
+		return -EINVAL;
+
 	*kargs = (struct kernel_clone_args){
 		.flags = args.flags,
 		.pidfd = u64_to_user_ptr(args.pidfd),
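From user space, the new check means a clone3() exit_signal with any bit above CSIGNAL set (or a value that is not a valid signal) now fails early with EINVAL rather than being silently truncated. A hypothetical test, assuming kernel headers new enough to provide struct clone_args and __NR_clone3:

#define _GNU_SOURCE
#include <linux/sched.h>	/* struct clone_args */
#include <sys/syscall.h>
#include <signal.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>

int main(void)
{
	struct clone_args args;
	long pid;

	memset(&args, 0, sizeof(args));
	args.exit_signal = (1ULL << 32) | SIGCHLD;	/* high bits set: invalid */

	pid = syscall(__NR_clone3, &args, sizeof(args));
	if (pid < 0)
		printf("clone3: errno=%d (EINVAL expected with this fix)\n", errno);
	return 0;
}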
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 95414ad3506a..98c04ca5fa43 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -36,6 +36,8 @@ static void resend_irqs(unsigned long arg)
 		irq = find_first_bit(irqs_resend, nr_irqs);
 		clear_bit(irq, irqs_resend);
 		desc = irq_to_desc(irq);
+		if (!desc)
+			continue;
 		local_irq_disable();
 		desc->handle_irq(desc);
 		local_irq_enable();
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 010d578118d6..df9f1fe5689b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5105,37 +5105,40 @@ out_unlock:
 	return retval;
 }
 
-static int sched_read_attr(struct sched_attr __user *uattr,
-			   struct sched_attr *attr,
-			   unsigned int usize)
+/*
+ * Copy the kernel size attribute structure (which might be larger
+ * than what user-space knows about) to user-space.
+ *
+ * Note that all cases are valid: user-space buffer can be larger or
+ * smaller than the kernel-space buffer. The usual case is that both
+ * have the same size.
+ */
+static int
+sched_attr_copy_to_user(struct sched_attr __user *uattr,
+			struct sched_attr *kattr,
+			unsigned int usize)
 {
-	int ret;
+	unsigned int ksize = sizeof(*kattr);
 
 	if (!access_ok(uattr, usize))
 		return -EFAULT;
 
 	/*
-	 * If we're handed a smaller struct than we know of,
-	 * ensure all the unknown bits are 0 - i.e. old
-	 * user-space does not get uncomplete information.
+	 * sched_getattr() ABI forwards and backwards compatibility:
+	 *
+	 * If usize == ksize then we just copy everything to user-space and all is good.
+	 *
+	 * If usize < ksize then we only copy as much as user-space has space for,
+	 * this keeps ABI compatibility as well. We skip the rest.
+	 *
+	 * If usize > ksize then user-space is using a newer version of the ABI,
+	 * which part the kernel doesn't know about. Just ignore it - tooling can
+	 * detect the kernel's knowledge of attributes from the attr->size value
+	 * which is set to ksize in this case.
 	 */
-	if (usize < sizeof(*attr)) {
-		unsigned char *addr;
-		unsigned char *end;
+	kattr->size = min(usize, ksize);
 
-		addr = (void *)attr + usize;
-		end = (void *)attr + sizeof(*attr);
-
-		for (; addr < end; addr++) {
-			if (*addr)
-				return -EFBIG;
-		}
-
-		attr->size = usize;
-	}
-
-	ret = copy_to_user(uattr, attr, attr->size);
-	if (ret)
+	if (copy_to_user(uattr, kattr, kattr->size))
 		return -EFAULT;
 
 	return 0;
@@ -5145,20 +5148,18 @@ static int sched_read_attr(struct sched_attr __user *uattr,
 * sys_sched_getattr - similar to sched_getparam, but with sched_attr
 * @pid: the pid in question.
 * @uattr: structure containing the extended parameters.
- * @size: sizeof(attr) for fwd/bwd comp.
+ * @usize: sizeof(attr) that user-space knows about, for forwards and backwards compatibility.
 * @flags: for future extension.
 */
 SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
-		unsigned int, size, unsigned int, flags)
+		unsigned int, usize, unsigned int, flags)
 {
-	struct sched_attr attr = {
-		.size = sizeof(struct sched_attr),
-	};
+	struct sched_attr kattr = { };
 	struct task_struct *p;
 	int retval;
 
-	if (!uattr || pid < 0 || size > PAGE_SIZE ||
-	    size < SCHED_ATTR_SIZE_VER0 || flags)
+	if (!uattr || pid < 0 || usize > PAGE_SIZE ||
+	    usize < SCHED_ATTR_SIZE_VER0 || flags)
 		return -EINVAL;
 
 	rcu_read_lock();
@@ -5171,25 +5172,24 @@ SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
 	if (retval)
 		goto out_unlock;
 
-	attr.sched_policy = p->policy;
+	kattr.sched_policy = p->policy;
 	if (p->sched_reset_on_fork)
-		attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
+		kattr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
 	if (task_has_dl_policy(p))
-		__getparam_dl(p, &attr);
+		__getparam_dl(p, &kattr);
 	else if (task_has_rt_policy(p))
-		attr.sched_priority = p->rt_priority;
+		kattr.sched_priority = p->rt_priority;
 	else
-		attr.sched_nice = task_nice(p);
+		kattr.sched_nice = task_nice(p);
 
 #ifdef CONFIG_UCLAMP_TASK
-	attr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
-	attr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
+	kattr.sched_util_min = p->uclamp_req[UCLAMP_MIN].value;
+	kattr.sched_util_max = p->uclamp_req[UCLAMP_MAX].value;
 #endif
 
 	rcu_read_unlock();
 
-	retval = sched_read_attr(uattr, &attr, size);
-	return retval;
+	return sched_attr_copy_to_user(uattr, &kattr, usize);
 
 out_unlock:
 	rcu_read_unlock();
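The user-space contract after this change: pass sizeof(your struct sched_attr); the kernel copies min(usize, ksize) bytes and writes that value into attr.size, so newer tooling running on an older kernel can detect which fields the kernel actually filled. A standalone restatement of the size logic (hypothetical types; the real function also validates the user pointer and uses copy_to_user()):

#include <string.h>

struct sched_attr_like {
	unsigned int size;	/* set by the kernel to the copied size */
	char payload[120];
};

static int attr_copy_to_user_like(void *ubuf, unsigned int usize,
				  struct sched_attr_like *kattr)
{
	unsigned int ksize = sizeof(*kattr);

	/* usize == ksize: plain copy; usize < ksize: truncate for old
	 * user-space; usize > ksize: newer user-space, copy what the
	 * kernel knows and let attr.size report it. */
	kattr->size = usize < ksize ? usize : ksize;
	memcpy(ubuf, kattr, kattr->size);
	return 0;
}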
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc9cfeaac8bd..500f5db0de0b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4470,6 +4470,8 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
 	if (likely(cfs_rq->runtime_remaining > 0))
 		return;
 
+	if (cfs_rq->throttled)
+		return;
 	/*
 	 * if we're unable to extend our runtime we resched so that the active
 	 * hierarchy can be throttled
@@ -4673,6 +4675,9 @@ static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
 		if (!cfs_rq_throttled(cfs_rq))
 			goto next;
 
+		/* By the above check, this should never be true */
+		SCHED_WARN_ON(cfs_rq->runtime_remaining > 0);
+
 		runtime = -cfs_rq->runtime_remaining + 1;
 		if (runtime > remaining)
 			runtime = remaining;
diff --git a/lib/Kconfig b/lib/Kconfig
index f33d66fc0e86..4e6b1c3e4c98 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -631,6 +631,9 @@ config SBITMAP
 config PARMAN
 	tristate "parman" if COMPILE_TEST
 
+config OBJAGG
+	tristate "objagg" if COMPILE_TEST
+
 config STRING_SELFTEST
 	tristate "Test string functions"
 
@@ -653,6 +656,3 @@ config GENERIC_LIB_CMPDI2
 
 config GENERIC_LIB_UCMPDI2
 	bool
-
-config OBJAGG
-	tristate "objagg" if COMPILE_TEST
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 798275a51887..26de020aae7b 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -124,7 +124,8 @@ EXPORT_SYMBOL_GPL(balloon_page_list_dequeue);
 struct page *balloon_page_alloc(void)
 {
 	struct page *page = alloc_page(balloon_mapping_gfp_mask() |
-				       __GFP_NOMEMALLOC | __GFP_NORETRY);
+				       __GFP_NOMEMALLOC | __GFP_NORETRY |
+				       __GFP_NOWARN);
 	return page;
 }
 EXPORT_SYMBOL_GPL(balloon_page_alloc);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index cdb00c2ef242..c1d3a303d97f 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -5660,11 +5660,6 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
 		return send_conn_param_neg_reply(hdev, handle,
 						 HCI_ERROR_UNKNOWN_CONN_ID);
 
-	if (min < hcon->le_conn_min_interval ||
-	    max > hcon->le_conn_max_interval)
-		return send_conn_param_neg_reply(hdev, handle,
-						 HCI_ERROR_INVALID_LL_PARAMS);
-
 	if (hci_check_conn_params(min, max, latency, timeout))
 		return send_conn_param_neg_reply(hdev, handle,
 						 HCI_ERROR_INVALID_LL_PARAMS);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index dfc1edb168b7..da7fdbdf9c41 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -5305,14 +5305,7 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
 
 	memset(&rsp, 0, sizeof(rsp));
 
-	if (min < hcon->le_conn_min_interval ||
-	    max > hcon->le_conn_max_interval) {
-		BT_DBG("requested connection interval exceeds current bounds.");
-		err = -EINVAL;
-	} else {
-		err = hci_check_conn_params(min, max, latency, to_multiplier);
-	}
-
+	err = hci_check_conn_params(min, max, latency, to_multiplier);
 	if (err)
 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
 	else
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 44594635a972..da5ed4cf9233 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -466,7 +466,7 @@ static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
 	struct nlmsghdr *nlh;
 	struct nlattr *nest;
 
-	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
 	if (!nlh)
 		return -EMSGSIZE;
 
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index d3f9592f4ff8..af7800103e51 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -496,6 +496,10 @@ static unsigned int br_nf_pre_routing(void *priv,
 		if (!brnet->call_ip6tables &&
 		    !br_opt_get(br, BROPT_NF_CALL_IP6TABLES))
 			return NF_ACCEPT;
+		if (!ipv6_mod_enabled()) {
+			pr_warn_once("Module ipv6 is disabled, so call_ip6tables is not supported.");
+			return NF_DROP;
+		}
 
 		nf_bridge_pull_encap_header_rcsum(skb);
 		return br_nf_pre_routing_ipv6(priv, skb, state);
diff --git a/net/core/dev.c b/net/core/dev.c
index b1afafee3e2a..a9775d676285 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8807,6 +8807,8 @@ int register_netdevice(struct net_device *dev)
 	ret = notifier_to_errno(ret);
 	if (ret) {
 		rollback_registered(dev);
+		rcu_barrier();
+
 		dev->reg_state = NETREG_UNREGISTERED;
 	}
 	/*
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2b40b5a9425b..f12e8a050edb 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3670,6 +3670,25 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 	int pos;
 	int dummy;
 
+	if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
+	    (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
+		/* gso_size is untrusted, and we have a frag_list with a linear
+		 * non head_frag head.
+		 *
+		 * (we assume checking the first list_skb member suffices;
+		 * i.e if either of the list_skb members have non head_frag
+		 * head, then the first one has too).
+		 *
+		 * If head_skb's headlen does not fit requested gso_size, it
+		 * means that the frag_list members do NOT terminate on exact
+		 * gso_size boundaries. Hence we cannot perform skb_frag_t page
+		 * sharing. Therefore we must fallback to copying the frag_list
+		 * skbs; we do so by disabling SG.
+		 */
+		if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
+			features &= ~NETIF_F_SG;
+	}
+
 	__skb_push(head_skb, doffset);
 	proto = skb_network_protocol(head_skb, &dummy);
 	if (unlikely(!proto))
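The long comment above boils down to one predicate; a hypothetical standalone restatement (the kernel's GSO_BY_FRAGS special case is noted in a comment rather than modelled):

#include <stdbool.h>

/* For untrusted (SKB_GSO_DODGY) packets whose frag_list head is linear,
 * page sharing is only safe when gso_size lines up exactly with the
 * head skb's linear length; otherwise fall back to copying by clearing
 * the scatter-gather feature. */
static bool must_copy_frag_list(bool gso_dodgy, bool linear_list_head,
				unsigned int mss, unsigned int head_headlen)
{
	if (!gso_dodgy || !linear_list_head)
		return false;
	return mss != head_headlen;	/* kernel also exempts GSO_BY_FRAGS */
}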
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 01998860afaa..eb114ee419b6 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -656,6 +656,7 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
656 struct sock *sk, u64 flags) 656 struct sock *sk, u64 flags)
657{ 657{
658 struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 658 struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
659 struct inet_connection_sock *icsk = inet_csk(sk);
659 u32 key_size = map->key_size, hash; 660 u32 key_size = map->key_size, hash;
660 struct bpf_htab_elem *elem, *elem_new; 661 struct bpf_htab_elem *elem, *elem_new;
661 struct bpf_htab_bucket *bucket; 662 struct bpf_htab_bucket *bucket;
@@ -666,6 +667,8 @@ static int sock_hash_update_common(struct bpf_map *map, void *key,
666 WARN_ON_ONCE(!rcu_read_lock_held()); 667 WARN_ON_ONCE(!rcu_read_lock_held());
667 if (unlikely(flags > BPF_EXIST)) 668 if (unlikely(flags > BPF_EXIST))
668 return -EINVAL; 669 return -EINVAL;
670 if (unlikely(icsk->icsk_ulp_data))
671 return -EINVAL;
669 672
670 link = sk_psock_init_link(); 673 link = sk_psock_init_link();
671 if (!link) 674 if (!link)
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 2db089e10ba0..0913a090b2bf 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1582,7 +1582,7 @@ failure:
1582} 1582}
1583 1583
1584int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc, 1584int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
1585 unsigned char *flags, bool skip_oif) 1585 u8 rt_family, unsigned char *flags, bool skip_oif)
1586{ 1586{
1587 if (nhc->nhc_flags & RTNH_F_DEAD) 1587 if (nhc->nhc_flags & RTNH_F_DEAD)
1588 *flags |= RTNH_F_DEAD; 1588 *flags |= RTNH_F_DEAD;
@@ -1613,7 +1613,7 @@ int fib_nexthop_info(struct sk_buff *skb, const struct fib_nh_common *nhc,
1613 /* if gateway family does not match nexthop family 1613 /* if gateway family does not match nexthop family
1614 * gateway is encoded as RTA_VIA 1614 * gateway is encoded as RTA_VIA
1615 */ 1615 */
1616 if (nhc->nhc_gw_family != nhc->nhc_family) { 1616 if (rt_family != nhc->nhc_gw_family) {
1617 int alen = sizeof(struct in6_addr); 1617 int alen = sizeof(struct in6_addr);
1618 struct nlattr *nla; 1618 struct nlattr *nla;
1619 struct rtvia *via; 1619 struct rtvia *via;
@@ -1654,7 +1654,7 @@ EXPORT_SYMBOL_GPL(fib_nexthop_info);
1654 1654
1655#if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6) 1655#if IS_ENABLED(CONFIG_IP_ROUTE_MULTIPATH) || IS_ENABLED(CONFIG_IPV6)
1656int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc, 1656int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
1657 int nh_weight) 1657 int nh_weight, u8 rt_family)
1658{ 1658{
1659 const struct net_device *dev = nhc->nhc_dev; 1659 const struct net_device *dev = nhc->nhc_dev;
1660 struct rtnexthop *rtnh; 1660 struct rtnexthop *rtnh;
@@ -1667,7 +1667,7 @@ int fib_add_nexthop(struct sk_buff *skb, const struct fib_nh_common *nhc,
1667 rtnh->rtnh_hops = nh_weight - 1; 1667 rtnh->rtnh_hops = nh_weight - 1;
1668 rtnh->rtnh_ifindex = dev ? dev->ifindex : 0; 1668 rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
1669 1669
1670 if (fib_nexthop_info(skb, nhc, &flags, true) < 0) 1670 if (fib_nexthop_info(skb, nhc, rt_family, &flags, true) < 0)
1671 goto nla_put_failure; 1671 goto nla_put_failure;
1672 1672
1673 rtnh->rtnh_flags = flags; 1673 rtnh->rtnh_flags = flags;
@@ -1693,13 +1693,14 @@ static int fib_add_multipath(struct sk_buff *skb, struct fib_info *fi)
1693 goto nla_put_failure; 1693 goto nla_put_failure;
1694 1694
1695 if (unlikely(fi->nh)) { 1695 if (unlikely(fi->nh)) {
1696 if (nexthop_mpath_fill_node(skb, fi->nh) < 0) 1696 if (nexthop_mpath_fill_node(skb, fi->nh, AF_INET) < 0)
1697 goto nla_put_failure; 1697 goto nla_put_failure;
1698 goto mp_end; 1698 goto mp_end;
1699 } 1699 }
1700 1700
1701 for_nexthops(fi) { 1701 for_nexthops(fi) {
1702 if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight) < 0) 1702 if (fib_add_nexthop(skb, &nh->nh_common, nh->fib_nh_weight,
1703 AF_INET) < 0)
1703 goto nla_put_failure; 1704 goto nla_put_failure;
1704#ifdef CONFIG_IP_ROUTE_CLASSID 1705#ifdef CONFIG_IP_ROUTE_CLASSID
1705 if (nh->nh_tclassid && 1706 if (nh->nh_tclassid &&
@@ -1775,7 +1776,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1775 const struct fib_nh_common *nhc = fib_info_nhc(fi, 0); 1776 const struct fib_nh_common *nhc = fib_info_nhc(fi, 0);
1776 unsigned char flags = 0; 1777 unsigned char flags = 0;
1777 1778
1778 if (fib_nexthop_info(skb, nhc, &flags, false) < 0) 1779 if (fib_nexthop_info(skb, nhc, AF_INET, &flags, false) < 0)
1779 goto nla_put_failure; 1780 goto nla_put_failure;
1780 1781
1781 rtm->rtm_flags = flags; 1782 rtm->rtm_flags = flags;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 706cbb3b2986..7e94223fdb2b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -266,7 +266,7 @@ static void tcp_ecn_accept_cwr(struct sock *sk, const struct sk_buff *skb)
266 266
267static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) 267static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp)
268{ 268{
269 tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; 269 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR;
270} 270}
271 271
272static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb) 272static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 87d2d8c1db7c..98ac32b49d8c 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -223,7 +223,7 @@ static int __net_init ping_v6_proc_init_net(struct net *net)
223 return 0; 223 return 0;
224} 224}
225 225
226static void __net_init ping_v6_proc_exit_net(struct net *net) 226static void __net_exit ping_v6_proc_exit_net(struct net *net)
227{ 227{
228 remove_proc_entry("icmp6", net->proc_net); 228 remove_proc_entry("icmp6", net->proc_net);
229} 229}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 874641d4d2a1..a63ff85fe141 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4386,13 +4386,14 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
4386 struct fib6_config cfg = { 4386 struct fib6_config cfg = {
4387 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL, 4387 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4388 .fc_ifindex = idev->dev->ifindex, 4388 .fc_ifindex = idev->dev->ifindex,
4389 .fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP, 4389 .fc_flags = RTF_UP | RTF_NONEXTHOP,
4390 .fc_dst = *addr, 4390 .fc_dst = *addr,
4391 .fc_dst_len = 128, 4391 .fc_dst_len = 128,
4392 .fc_protocol = RTPROT_KERNEL, 4392 .fc_protocol = RTPROT_KERNEL,
4393 .fc_nlinfo.nl_net = net, 4393 .fc_nlinfo.nl_net = net,
4394 .fc_ignore_dev_down = true, 4394 .fc_ignore_dev_down = true,
4395 }; 4395 };
4396 struct fib6_info *f6i;
4396 4397
4397 if (anycast) { 4398 if (anycast) {
4398 cfg.fc_type = RTN_ANYCAST; 4399 cfg.fc_type = RTN_ANYCAST;
@@ -4402,7 +4403,10 @@ struct fib6_info *addrconf_f6i_alloc(struct net *net,
4402 cfg.fc_flags |= RTF_LOCAL; 4403 cfg.fc_flags |= RTF_LOCAL;
4403 } 4404 }
4404 4405
4405 return ip6_route_info_create(&cfg, gfp_flags, NULL); 4406 f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
4407 if (!IS_ERR(f6i))
4408 f6i->dst_nocount = true;
4409 return f6i;
4406} 4410}
4407 4411
4408/* remove deleted ip from prefsrc entries */ 4412/* remove deleted ip from prefsrc entries */
@@ -5323,11 +5327,11 @@ static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5323 if (nexthop_is_multipath(nh)) { 5327 if (nexthop_is_multipath(nh)) {
5324 struct nlattr *mp; 5328 struct nlattr *mp;
5325 5329
5326 mp = nla_nest_start(skb, RTA_MULTIPATH); 5330 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5327 if (!mp) 5331 if (!mp)
5328 goto nla_put_failure; 5332 goto nla_put_failure;
5329 5333
5330 if (nexthop_mpath_fill_node(skb, nh)) 5334 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5331 goto nla_put_failure; 5335 goto nla_put_failure;
5332 5336
5333 nla_nest_end(skb, mp); 5337 nla_nest_end(skb, mp);
@@ -5335,7 +5339,7 @@ static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5335 struct fib6_nh *fib6_nh; 5339 struct fib6_nh *fib6_nh;
5336 5340
5337 fib6_nh = nexthop_fib6_nh(nh); 5341 fib6_nh = nexthop_fib6_nh(nh);
5338 if (fib_nexthop_info(skb, &fib6_nh->nh_common, 5342 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5339 flags, false) < 0) 5343 flags, false) < 0)
5340 goto nla_put_failure; 5344 goto nla_put_failure;
5341 } 5345 }
@@ -5464,13 +5468,14 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5464 goto nla_put_failure; 5468 goto nla_put_failure;
5465 5469
5466 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common, 5470 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5467 rt->fib6_nh->fib_nh_weight) < 0) 5471 rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
5468 goto nla_put_failure; 5472 goto nla_put_failure;
5469 5473
5470 list_for_each_entry_safe(sibling, next_sibling, 5474 list_for_each_entry_safe(sibling, next_sibling,
5471 &rt->fib6_siblings, fib6_siblings) { 5475 &rt->fib6_siblings, fib6_siblings) {
5472 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common, 5476 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5473 sibling->fib6_nh->fib_nh_weight) < 0) 5477 sibling->fib6_nh->fib_nh_weight,
5478 AF_INET6) < 0)
5474 goto nla_put_failure; 5479 goto nla_put_failure;
5475 } 5480 }
5476 5481
@@ -5487,7 +5492,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5487 5492
5488 rtm->rtm_flags |= nh_flags; 5493 rtm->rtm_flags |= nh_flags;
5489 } else { 5494 } else {
5490 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, 5495 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5491 &nh_flags, false) < 0) 5496 &nh_flags, false) < 0)
5492 goto nla_put_failure; 5497 goto nla_put_failure;
5493 5498
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 7c6edb7c5f10..70739e746c13 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1532,7 +1532,6 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1532 struct sta_info *sta; 1532 struct sta_info *sta;
1533 struct ieee80211_sub_if_data *sdata; 1533 struct ieee80211_sub_if_data *sdata;
1534 int err; 1534 int err;
1535 int layer2_update;
1536 1535
1537 if (params->vlan) { 1536 if (params->vlan) {
1538 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan); 1537 sdata = IEEE80211_DEV_TO_SUB_IF(params->vlan);
@@ -1576,18 +1575,12 @@ static int ieee80211_add_station(struct wiphy *wiphy, struct net_device *dev,
1576 test_sta_flag(sta, WLAN_STA_ASSOC)) 1575 test_sta_flag(sta, WLAN_STA_ASSOC))
1577 rate_control_rate_init(sta); 1576 rate_control_rate_init(sta);
1578 1577
1579 layer2_update = sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1580 sdata->vif.type == NL80211_IFTYPE_AP;
1581
1582 err = sta_info_insert_rcu(sta); 1578 err = sta_info_insert_rcu(sta);
1583 if (err) { 1579 if (err) {
1584 rcu_read_unlock(); 1580 rcu_read_unlock();
1585 return err; 1581 return err;
1586 } 1582 }
1587 1583
1588 if (layer2_update)
1589 cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr);
1590
1591 rcu_read_unlock(); 1584 rcu_read_unlock();
1592 1585
1593 return 0; 1586 return 0;
@@ -1685,10 +1678,11 @@ static int ieee80211_change_station(struct wiphy *wiphy,
1685 sta->sdata = vlansdata; 1678 sta->sdata = vlansdata;
1686 ieee80211_check_fast_xmit(sta); 1679 ieee80211_check_fast_xmit(sta);
1687 1680
1688 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) 1681 if (test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
1689 ieee80211_vif_inc_num_mcast(sta->sdata); 1682 ieee80211_vif_inc_num_mcast(sta->sdata);
1690 1683 cfg80211_send_layer2_update(sta->sdata->dev,
1691 cfg80211_send_layer2_update(sta->sdata->dev, sta->sta.addr); 1684 sta->sta.addr);
1685 }
1692 } 1686 }
1693 1687
1694 err = sta_apply_parameters(local, sta, params); 1688 err = sta_apply_parameters(local, sta, params);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index df553070206c..bd11fef2139f 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -1979,6 +1979,10 @@ int sta_info_move_state(struct sta_info *sta,
1979 ieee80211_check_fast_xmit(sta); 1979 ieee80211_check_fast_xmit(sta);
1980 ieee80211_check_fast_rx(sta); 1980 ieee80211_check_fast_rx(sta);
1981 } 1981 }
1982 if (sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN ||
1983 sta->sdata->vif.type == NL80211_IFTYPE_AP)
1984 cfg80211_send_layer2_update(sta->sdata->dev,
1985 sta->sta.addr);
1982 break; 1986 break;
1983 default: 1987 default:
1984 break; 1988 break;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6aa01eb6fe99..e2d13cd18875 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -553,10 +553,8 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
553 goto nla_put_failure; 553 goto nla_put_failure;
554 554
555 if (ctnetlink_dump_status(skb, ct) < 0 || 555 if (ctnetlink_dump_status(skb, ct) < 0 ||
556 ctnetlink_dump_timeout(skb, ct) < 0 ||
557 ctnetlink_dump_acct(skb, ct, type) < 0 || 556 ctnetlink_dump_acct(skb, ct, type) < 0 ||
558 ctnetlink_dump_timestamp(skb, ct) < 0 || 557 ctnetlink_dump_timestamp(skb, ct) < 0 ||
559 ctnetlink_dump_protoinfo(skb, ct) < 0 ||
560 ctnetlink_dump_helpinfo(skb, ct) < 0 || 558 ctnetlink_dump_helpinfo(skb, ct) < 0 ||
561 ctnetlink_dump_mark(skb, ct) < 0 || 559 ctnetlink_dump_mark(skb, ct) < 0 ||
562 ctnetlink_dump_secctx(skb, ct) < 0 || 560 ctnetlink_dump_secctx(skb, ct) < 0 ||
@@ -568,6 +566,11 @@ ctnetlink_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
568 ctnetlink_dump_ct_synproxy(skb, ct) < 0) 566 ctnetlink_dump_ct_synproxy(skb, ct) < 0)
569 goto nla_put_failure; 567 goto nla_put_failure;
570 568
569 if (!test_bit(IPS_OFFLOAD_BIT, &ct->status) &&
570 (ctnetlink_dump_timeout(skb, ct) < 0 ||
571 ctnetlink_dump_protoinfo(skb, ct) < 0))
572 goto nla_put_failure;
573
571 nlmsg_end(skb, nlh); 574 nlmsg_end(skb, nlh);
572 return skb->len; 575 return skb->len;
573 576
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 09310a1bd91f..132f5228b431 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -218,7 +218,7 @@ int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
218 return err; 218 return err;
219 } 219 }
220 220
221 flow->timeout = (u32)jiffies; 221 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
222 return 0; 222 return 0;
223} 223}
224EXPORT_SYMBOL_GPL(flow_offload_add); 224EXPORT_SYMBOL_GPL(flow_offload_add);
diff --git a/net/netfilter/nft_fib_netdev.c b/net/netfilter/nft_fib_netdev.c
index 2cf3f32fe6d2..a2e726ae7f07 100644
--- a/net/netfilter/nft_fib_netdev.c
+++ b/net/netfilter/nft_fib_netdev.c
@@ -14,6 +14,7 @@
14#include <linux/netfilter/nf_tables.h> 14#include <linux/netfilter/nf_tables.h>
15#include <net/netfilter/nf_tables_core.h> 15#include <net/netfilter/nf_tables_core.h>
16#include <net/netfilter/nf_tables.h> 16#include <net/netfilter/nf_tables.h>
17#include <net/ipv6.h>
17 18
18#include <net/netfilter/nft_fib.h> 19#include <net/netfilter/nft_fib.h>
19 20
@@ -34,6 +35,8 @@ static void nft_fib_netdev_eval(const struct nft_expr *expr,
34 } 35 }
35 break; 36 break;
36 case ETH_P_IPV6: 37 case ETH_P_IPV6:
38 if (!ipv6_mod_enabled())
39 break;
37 switch (priv->result) { 40 switch (priv->result) {
38 case NFT_FIB_RESULT_OIF: 41 case NFT_FIB_RESULT_OIF:
39 case NFT_FIB_RESULT_OIFNAME: 42 case NFT_FIB_RESULT_OIFNAME:
diff --git a/net/netfilter/nft_socket.c b/net/netfilter/nft_socket.c
index d7f3776dfd71..637ce3e8c575 100644
--- a/net/netfilter/nft_socket.c
+++ b/net/netfilter/nft_socket.c
@@ -47,9 +47,6 @@ static void nft_socket_eval(const struct nft_expr *expr,
47 return; 47 return;
48 } 48 }
49 49
50 /* So that subsequent socket matching not to require other lookups. */
51 skb->sk = sk;
52
53 switch(priv->key) { 50 switch(priv->key) {
54 case NFT_SOCKET_TRANSPARENT: 51 case NFT_SOCKET_TRANSPARENT:
55 nft_reg_store8(dest, inet_sk_transparent(sk)); 52 nft_reg_store8(dest, inet_sk_transparent(sk));
@@ -66,6 +63,9 @@ static void nft_socket_eval(const struct nft_expr *expr,
66 WARN_ON(1); 63 WARN_ON(1);
67 regs->verdict.code = NFT_BREAK; 64 regs->verdict.code = NFT_BREAK;
68 } 65 }
66
67 if (sk != skb->sk)
68 sock_gen_put(sk);
69} 69}
70 70
71static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = { 71static const struct nla_policy nft_socket_policy[NFTA_SOCKET_MAX + 1] = {
diff --git a/net/qrtr/tun.c b/net/qrtr/tun.c
index ccff1e544c21..e35869e81766 100644
--- a/net/qrtr/tun.c
+++ b/net/qrtr/tun.c
@@ -84,11 +84,14 @@ static ssize_t qrtr_tun_write_iter(struct kiocb *iocb, struct iov_iter *from)
84 if (!kbuf) 84 if (!kbuf)
85 return -ENOMEM; 85 return -ENOMEM;
86 86
87 if (!copy_from_iter_full(kbuf, len, from)) 87 if (!copy_from_iter_full(kbuf, len, from)) {
88 kfree(kbuf);
88 return -EFAULT; 89 return -EFAULT;
90 }
89 91
90 ret = qrtr_endpoint_post(&tun->ep, kbuf, len); 92 ret = qrtr_endpoint_post(&tun->ep, kbuf, len);
91 93
94 kfree(kbuf);
92 return ret < 0 ? ret : len; 95 return ret < 0 ? ret : len;
93} 96}
94 97
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 6dbb763bc1fd..20c156a73e73 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. 2 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved.
3 * 3 *
4 * This software is available to you under a choice of one of two 4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU 5 * licenses. You may choose to be licensed under the terms of the GNU
@@ -239,34 +239,30 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
239 goto out; 239 goto out;
240 } 240 }
241 241
242 sock_set_flag(sk, SOCK_RCU_FREE); 242 /* The transport can be set using SO_RDS_TRANSPORT option before the
243 ret = rds_add_bound(rs, binding_addr, &port, scope_id); 243 * socket is bound.
244 if (ret) 244 */
245 goto out; 245 if (rs->rs_transport) {
246
247 if (rs->rs_transport) { /* previously bound */
248 trans = rs->rs_transport; 246 trans = rs->rs_transport;
249 if (trans->laddr_check(sock_net(sock->sk), 247 if (trans->laddr_check(sock_net(sock->sk),
250 binding_addr, scope_id) != 0) { 248 binding_addr, scope_id) != 0) {
251 ret = -ENOPROTOOPT; 249 ret = -ENOPROTOOPT;
252 rds_remove_bound(rs); 250 goto out;
253 } else {
254 ret = 0;
255 } 251 }
256 goto out; 252 } else {
257 } 253 trans = rds_trans_get_preferred(sock_net(sock->sk),
258 trans = rds_trans_get_preferred(sock_net(sock->sk), binding_addr, 254 binding_addr, scope_id);
259 scope_id); 255 if (!trans) {
260 if (!trans) { 256 ret = -EADDRNOTAVAIL;
261 ret = -EADDRNOTAVAIL; 257 pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n",
262 rds_remove_bound(rs); 258 __func__, binding_addr);
263 pr_info_ratelimited("RDS: %s could not find a transport for %pI6c, load rds_tcp or rds_rdma?\n", 259 goto out;
264 __func__, binding_addr); 260 }
265 goto out; 261 rs->rs_transport = trans;
266 } 262 }
267 263
268 rs->rs_transport = trans; 264 sock_set_flag(sk, SOCK_RCU_FREE);
269 ret = 0; 265 ret = rds_add_bound(rs, binding_addr, &port, scope_id);
270 266
271out: 267out:
272 release_sock(sk); 268 release_sock(sk);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index d122c53c8697..157be1ff8697 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -1262,8 +1262,8 @@ int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
1262 1262
1263 if (nskb != skb) { 1263 if (nskb != skb) {
1264 rxrpc_eaten_skb(skb, rxrpc_skb_received); 1264 rxrpc_eaten_skb(skb, rxrpc_skb_received);
1265 rxrpc_new_skb(skb, rxrpc_skb_unshared);
1266 skb = nskb; 1265 skb = nskb;
1266 rxrpc_new_skb(skb, rxrpc_skb_unshared);
1267 sp = rxrpc_skb(skb); 1267 sp = rxrpc_skb(skb);
1268 } 1268 }
1269 } 1269 }
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 04faee7ccbce..1047825d9f48 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1920,6 +1920,8 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
1920 cl = cops->find(q, portid); 1920 cl = cops->find(q, portid);
1921 if (!cl) 1921 if (!cl)
1922 return; 1922 return;
1923 if (!cops->tcf_block)
1924 return;
1923 block = cops->tcf_block(q, cl, NULL); 1925 block = cops->tcf_block(q, cl, NULL);
1924 if (!block) 1926 if (!block)
1925 return; 1927 return;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 137db1cbde85..ac28f6a5d70e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -46,6 +46,8 @@ EXPORT_SYMBOL(default_qdisc_ops);
46 * - updates to tree and tree walking are only done under the rtnl mutex. 46 * - updates to tree and tree walking are only done under the rtnl mutex.
47 */ 47 */
48 48
49#define SKB_XOFF_MAGIC ((struct sk_buff *)1UL)
50
49static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q) 51static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
50{ 52{
51 const struct netdev_queue *txq = q->dev_queue; 53 const struct netdev_queue *txq = q->dev_queue;
@@ -71,7 +73,7 @@ static inline struct sk_buff *__skb_dequeue_bad_txq(struct Qdisc *q)
71 q->q.qlen--; 73 q->q.qlen--;
72 } 74 }
73 } else { 75 } else {
74 skb = NULL; 76 skb = SKB_XOFF_MAGIC;
75 } 77 }
76 } 78 }
77 79
@@ -253,8 +255,11 @@ validate:
253 return skb; 255 return skb;
254 256
255 skb = qdisc_dequeue_skb_bad_txq(q); 257 skb = qdisc_dequeue_skb_bad_txq(q);
256 if (unlikely(skb)) 258 if (unlikely(skb)) {
259 if (skb == SKB_XOFF_MAGIC)
260 return NULL;
257 goto bulk; 261 goto bulk;
262 }
258 skb = q->dequeue(q); 263 skb = q->dequeue(q);
259 if (skb) { 264 if (skb) {
260bulk: 265bulk:
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index cee6971c1c82..23cd1c873a2c 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -531,7 +531,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
531 new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]); 531 new_hhf_non_hh_weight = nla_get_u32(tb[TCA_HHF_NON_HH_WEIGHT]);
532 532
533 non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight; 533 non_hh_quantum = (u64)new_quantum * new_hhf_non_hh_weight;
534 if (non_hh_quantum > INT_MAX) 534 if (non_hh_quantum == 0 || non_hh_quantum > INT_MAX)
535 return -EINVAL; 535 return -EINVAL;
536 536
537 sch_tree_lock(sch); 537 sch_tree_lock(sch);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index b48ffe845c31..08d14d86ecfb 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1339,7 +1339,7 @@ static int __net_init sctp_ctrlsock_init(struct net *net)
1339 return status; 1339 return status;
1340} 1340}
1341 1341
1342static void __net_init sctp_ctrlsock_exit(struct net *net) 1342static void __net_exit sctp_ctrlsock_exit(struct net *net)
1343{ 1343{
1344 /* Free the control endpoint. */ 1344 /* Free the control endpoint. */
1345 inet_ctl_sock_destroy(net->sctp.ctl_sock); 1345 inet_ctl_sock_destroy(net->sctp.ctl_sock);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 1cf5bb5b73c4..e52b2128e43b 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -547,7 +547,7 @@ static void sctp_do_8_2_transport_strike(struct sctp_cmd_seq *commands,
547 if (net->sctp.pf_enable && 547 if (net->sctp.pf_enable &&
548 (transport->state == SCTP_ACTIVE) && 548 (transport->state == SCTP_ACTIVE) &&
549 (transport->error_count < transport->pathmaxrxt) && 549 (transport->error_count < transport->pathmaxrxt) &&
550 (transport->error_count > asoc->pf_retrans)) { 550 (transport->error_count > transport->pf_retrans)) {
551 551
552 sctp_assoc_control_transport(asoc, transport, 552 sctp_assoc_control_transport(asoc, transport,
553 SCTP_TRANSPORT_PF, 553 SCTP_TRANSPORT_PF,
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3e50a9712fb1..939b8d2595bc 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -309,7 +309,7 @@ static int sctp_bind(struct sock *sk, struct sockaddr *addr, int addr_len)
309 return retval; 309 return retval;
310} 310}
311 311
312static long sctp_get_port_local(struct sock *, union sctp_addr *); 312static int sctp_get_port_local(struct sock *, union sctp_addr *);
313 313
314/* Verify this is a valid sockaddr. */ 314/* Verify this is a valid sockaddr. */
315static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, 315static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
@@ -399,9 +399,8 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
399 * detection. 399 * detection.
400 */ 400 */
401 addr->v4.sin_port = htons(snum); 401 addr->v4.sin_port = htons(snum);
402 if ((ret = sctp_get_port_local(sk, addr))) { 402 if (sctp_get_port_local(sk, addr))
403 return -EADDRINUSE; 403 return -EADDRINUSE;
404 }
405 404
406 /* Refresh ephemeral port. */ 405 /* Refresh ephemeral port. */
407 if (!bp->port) 406 if (!bp->port)
@@ -413,11 +412,13 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
413 ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len, 412 ret = sctp_add_bind_addr(bp, addr, af->sockaddr_len,
414 SCTP_ADDR_SRC, GFP_ATOMIC); 413 SCTP_ADDR_SRC, GFP_ATOMIC);
415 414
416 /* Copy back into socket for getsockname() use. */ 415 if (ret) {
417 if (!ret) { 416 sctp_put_port(sk);
418 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num); 417 return ret;
419 sp->pf->to_sk_saddr(addr, sk);
420 } 418 }
419 /* Copy back into socket for getsockname() use. */
420 inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
421 sp->pf->to_sk_saddr(addr, sk);
421 422
422 return ret; 423 return ret;
423} 424}
@@ -7192,7 +7193,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
7192 val.spt_pathmaxrxt = trans->pathmaxrxt; 7193 val.spt_pathmaxrxt = trans->pathmaxrxt;
7193 val.spt_pathpfthld = trans->pf_retrans; 7194 val.spt_pathpfthld = trans->pf_retrans;
7194 7195
7195 return 0; 7196 goto out;
7196 } 7197 }
7197 7198
7198 asoc = sctp_id2assoc(sk, val.spt_assoc_id); 7199 asoc = sctp_id2assoc(sk, val.spt_assoc_id);
@@ -7210,6 +7211,7 @@ static int sctp_getsockopt_paddr_thresholds(struct sock *sk,
7210 val.spt_pathmaxrxt = sp->pathmaxrxt; 7211 val.spt_pathmaxrxt = sp->pathmaxrxt;
7211 } 7212 }
7212 7213
7214out:
7213 if (put_user(len, optlen) || copy_to_user(optval, &val, len)) 7215 if (put_user(len, optlen) || copy_to_user(optval, &val, len))
7214 return -EFAULT; 7216 return -EFAULT;
7215 7217
@@ -8145,7 +8147,7 @@ static void sctp_unhash(struct sock *sk)
8145static struct sctp_bind_bucket *sctp_bucket_create( 8147static struct sctp_bind_bucket *sctp_bucket_create(
8146 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum); 8148 struct sctp_bind_hashbucket *head, struct net *, unsigned short snum);
8147 8149
8148static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) 8150static int sctp_get_port_local(struct sock *sk, union sctp_addr *addr)
8149{ 8151{
8150 struct sctp_sock *sp = sctp_sk(sk); 8152 struct sctp_sock *sp = sctp_sk(sk);
8151 bool reuse = (sk->sk_reuse || sp->reuse); 8153 bool reuse = (sk->sk_reuse || sp->reuse);
@@ -8255,7 +8257,7 @@ pp_found:
8255 8257
8256 if (sctp_bind_addr_conflict(&ep2->base.bind_addr, 8258 if (sctp_bind_addr_conflict(&ep2->base.bind_addr,
8257 addr, sp2, sp)) { 8259 addr, sp2, sp)) {
8258 ret = (long)sk2; 8260 ret = 1;
8259 goto fail_unlock; 8261 goto fail_unlock;
8260 } 8262 }
8261 } 8263 }
@@ -8327,7 +8329,7 @@ static int sctp_get_port(struct sock *sk, unsigned short snum)
8327 addr.v4.sin_port = htons(snum); 8329 addr.v4.sin_port = htons(snum);
8328 8330
8329 /* Note: sk->sk_num gets filled in if ephemeral port request. */ 8331 /* Note: sk->sk_num gets filled in if ephemeral port request. */
8330 return !!sctp_get_port_local(sk, &addr); 8332 return sctp_get_port_local(sk, &addr);
8331} 8333}
8332 8334
8333/* 8335/*
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 61219f0b9677..836e629e8f4a 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -223,7 +223,8 @@ static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
223 publ->key); 223 publ->key);
224 } 224 }
225 225
226 kfree_rcu(p, rcu); 226 if (p)
227 kfree_rcu(p, rcu);
227} 228}
228 229
229/** 230/**
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index 74868f9d81fb..2ab4859df55a 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -145,8 +145,6 @@ static int xfrmi_create(struct net_device *dev)
145 if (err < 0) 145 if (err < 0)
146 goto out; 146 goto out;
147 147
148 strcpy(xi->p.name, dev->name);
149
150 dev_hold(dev); 148 dev_hold(dev);
151 xfrmi_link(xfrmn, xi); 149 xfrmi_link(xfrmn, xi);
152 150
@@ -177,7 +175,6 @@ static void xfrmi_dev_uninit(struct net_device *dev)
177 struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id); 175 struct xfrmi_net *xfrmn = net_generic(xi->net, xfrmi_net_id);
178 176
179 xfrmi_unlink(xfrmn, xi); 177 xfrmi_unlink(xfrmn, xi);
180 dev_put(xi->phydev);
181 dev_put(dev); 178 dev_put(dev);
182} 179}
183 180
@@ -294,7 +291,7 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
294 if (tdev == dev) { 291 if (tdev == dev) {
295 stats->collisions++; 292 stats->collisions++;
296 net_warn_ratelimited("%s: Local routing loop detected!\n", 293 net_warn_ratelimited("%s: Local routing loop detected!\n",
297 xi->p.name); 294 dev->name);
298 goto tx_err_dst_release; 295 goto tx_err_dst_release;
299 } 296 }
300 297
@@ -364,7 +361,7 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev)
364 goto tx_err; 361 goto tx_err;
365 } 362 }
366 363
367 fl.flowi_oif = xi->phydev->ifindex; 364 fl.flowi_oif = xi->p.link;
368 365
369 ret = xfrmi_xmit2(skb, dev, &fl); 366 ret = xfrmi_xmit2(skb, dev, &fl);
370 if (ret < 0) 367 if (ret < 0)
@@ -505,7 +502,7 @@ static int xfrmi_change(struct xfrm_if *xi, const struct xfrm_if_parms *p)
505 502
506static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p) 503static int xfrmi_update(struct xfrm_if *xi, struct xfrm_if_parms *p)
507{ 504{
508 struct net *net = dev_net(xi->dev); 505 struct net *net = xi->net;
509 struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); 506 struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id);
510 int err; 507 int err;
511 508
@@ -550,7 +547,7 @@ static int xfrmi_get_iflink(const struct net_device *dev)
550{ 547{
551 struct xfrm_if *xi = netdev_priv(dev); 548 struct xfrm_if *xi = netdev_priv(dev);
552 549
553 return xi->phydev->ifindex; 550 return xi->p.link;
554} 551}
555 552
556 553
@@ -576,12 +573,14 @@ static void xfrmi_dev_setup(struct net_device *dev)
576 dev->needs_free_netdev = true; 573 dev->needs_free_netdev = true;
577 dev->priv_destructor = xfrmi_dev_free; 574 dev->priv_destructor = xfrmi_dev_free;
578 netif_keep_dst(dev); 575 netif_keep_dst(dev);
576
577 eth_broadcast_addr(dev->broadcast);
579} 578}
580 579
581static int xfrmi_dev_init(struct net_device *dev) 580static int xfrmi_dev_init(struct net_device *dev)
582{ 581{
583 struct xfrm_if *xi = netdev_priv(dev); 582 struct xfrm_if *xi = netdev_priv(dev);
584 struct net_device *phydev = xi->phydev; 583 struct net_device *phydev = __dev_get_by_index(xi->net, xi->p.link);
585 int err; 584 int err;
586 585
587 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 586 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -596,13 +595,19 @@ static int xfrmi_dev_init(struct net_device *dev)
596 595
597 dev->features |= NETIF_F_LLTX; 596 dev->features |= NETIF_F_LLTX;
598 597
599 dev->needed_headroom = phydev->needed_headroom; 598 if (phydev) {
600 dev->needed_tailroom = phydev->needed_tailroom; 599 dev->needed_headroom = phydev->needed_headroom;
600 dev->needed_tailroom = phydev->needed_tailroom;
601 601
602 if (is_zero_ether_addr(dev->dev_addr)) 602 if (is_zero_ether_addr(dev->dev_addr))
603 eth_hw_addr_inherit(dev, phydev); 603 eth_hw_addr_inherit(dev, phydev);
604 if (is_zero_ether_addr(dev->broadcast)) 604 if (is_zero_ether_addr(dev->broadcast))
605 memcpy(dev->broadcast, phydev->broadcast, dev->addr_len); 605 memcpy(dev->broadcast, phydev->broadcast,
606 dev->addr_len);
607 } else {
608 eth_hw_addr_random(dev);
609 eth_broadcast_addr(dev->broadcast);
610 }
606 611
607 return 0; 612 return 0;
608} 613}
@@ -638,12 +643,6 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
638 int err; 643 int err;
639 644
640 xfrmi_netlink_parms(data, &p); 645 xfrmi_netlink_parms(data, &p);
641
642 if (!tb[IFLA_IFNAME])
643 return -EINVAL;
644
645 nla_strlcpy(p.name, tb[IFLA_IFNAME], IFNAMSIZ);
646
647 xi = xfrmi_locate(net, &p); 646 xi = xfrmi_locate(net, &p);
648 if (xi) 647 if (xi)
649 return -EEXIST; 648 return -EEXIST;
@@ -652,13 +651,8 @@ static int xfrmi_newlink(struct net *src_net, struct net_device *dev,
652 xi->p = p; 651 xi->p = p;
653 xi->net = net; 652 xi->net = net;
654 xi->dev = dev; 653 xi->dev = dev;
655 xi->phydev = dev_get_by_index(net, p.link);
656 if (!xi->phydev)
657 return -ENODEV;
658 654
659 err = xfrmi_create(dev); 655 err = xfrmi_create(dev);
660 if (err < 0)
661 dev_put(xi->phydev);
662 return err; 656 return err;
663} 657}
664 658
@@ -672,11 +666,11 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
672 struct netlink_ext_ack *extack) 666 struct netlink_ext_ack *extack)
673{ 667{
674 struct xfrm_if *xi = netdev_priv(dev); 668 struct xfrm_if *xi = netdev_priv(dev);
675 struct net *net = dev_net(dev); 669 struct net *net = xi->net;
676 670 struct xfrm_if_parms p;
677 xfrmi_netlink_parms(data, &xi->p);
678 671
679 xi = xfrmi_locate(net, &xi->p); 672 xfrmi_netlink_parms(data, &p);
673 xi = xfrmi_locate(net, &p);
680 if (!xi) { 674 if (!xi) {
681 xi = netdev_priv(dev); 675 xi = netdev_priv(dev);
682 } else { 676 } else {
@@ -684,7 +678,7 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
684 return -EEXIST; 678 return -EEXIST;
685 } 679 }
686 680
687 return xfrmi_update(xi, &xi->p); 681 return xfrmi_update(xi, &p);
688} 682}
689 683
690static size_t xfrmi_get_size(const struct net_device *dev) 684static size_t xfrmi_get_size(const struct net_device *dev)
@@ -715,7 +709,7 @@ static struct net *xfrmi_get_link_net(const struct net_device *dev)
715{ 709{
716 struct xfrm_if *xi = netdev_priv(dev); 710 struct xfrm_if *xi = netdev_priv(dev);
717 711
718 return dev_net(xi->phydev); 712 return xi->net;
719} 713}
720 714
721static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = { 715static const struct nla_policy xfrmi_policy[IFLA_XFRM_MAX + 1] = {
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ec94f5795ea4..21e939235b39 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -912,6 +912,7 @@ restart:
912 } else if (delta > 0) { 912 } else if (delta > 0) {
913 p = &parent->rb_right; 913 p = &parent->rb_right;
914 } else { 914 } else {
915 bool same_prefixlen = node->prefixlen == n->prefixlen;
915 struct xfrm_policy *tmp; 916 struct xfrm_policy *tmp;
916 917
917 hlist_for_each_entry(tmp, &n->hhead, bydst) { 918 hlist_for_each_entry(tmp, &n->hhead, bydst) {
@@ -919,9 +920,11 @@ restart:
919 hlist_del_rcu(&tmp->bydst); 920 hlist_del_rcu(&tmp->bydst);
920 } 921 }
921 922
923 node->prefixlen = prefixlen;
924
922 xfrm_policy_inexact_list_reinsert(net, node, family); 925 xfrm_policy_inexact_list_reinsert(net, node, family);
923 926
924 if (node->prefixlen == n->prefixlen) { 927 if (same_prefixlen) {
925 kfree_rcu(n, rcu); 928 kfree_rcu(n, rcu);
926 return; 929 return;
927 } 930 }
@@ -929,7 +932,6 @@ restart:
929 rb_erase(*p, new); 932 rb_erase(*p, new);
930 kfree_rcu(n, rcu); 933 kfree_rcu(n, rcu);
931 n = node; 934 n = node;
932 n->prefixlen = prefixlen;
933 goto restart; 935 goto restart;
934 } 936 }
935 } 937 }
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index e73ec040e250..ecba39c93fd9 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -66,6 +66,9 @@ static void request_key_auth_describe(const struct key *key,
66{ 66{
67 struct request_key_auth *rka = dereference_key_rcu(key); 67 struct request_key_auth *rka = dereference_key_rcu(key);
68 68
69 if (!rka)
70 return;
71
69 seq_puts(m, "key:"); 72 seq_puts(m, "key:");
70 seq_puts(m, key->description); 73 seq_puts(m, key->description);
71 if (key_is_positive(key)) 74 if (key_is_positive(key))
@@ -83,6 +86,9 @@ static long request_key_auth_read(const struct key *key,
83 size_t datalen; 86 size_t datalen;
84 long ret; 87 long ret;
85 88
89 if (!rka)
90 return -EKEYREVOKED;
91
86 datalen = rka->callout_len; 92 datalen = rka->callout_len;
87 ret = datalen; 93 ret = datalen;
88 94
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index 92390d457567..18e6546b4467 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -824,6 +824,8 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
824 while (id >= 0) { 824 while (id >= 0) {
825 const struct hda_fixup *fix = codec->fixup_list + id; 825 const struct hda_fixup *fix = codec->fixup_list + id;
826 826
827 if (++depth > 10)
828 break;
827 if (fix->chained_before) 829 if (fix->chained_before)
828 apply_fixup(codec, fix->chain_id, action, depth + 1); 830 apply_fixup(codec, fix->chain_id, action, depth + 1);
829 831
@@ -863,8 +865,6 @@ static void apply_fixup(struct hda_codec *codec, int id, int action, int depth)
863 } 865 }
864 if (!fix->chained || fix->chained_before) 866 if (!fix->chained || fix->chained_before)
865 break; 867 break;
866 if (++depth > 10)
867 break;
868 id = fix->chain_id; 868 id = fix->chain_id;
869 } 869 }
870} 870}
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 5bf24fb819d2..10d502328b76 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -6009,7 +6009,8 @@ int snd_hda_gen_init(struct hda_codec *codec)
6009 if (spec->init_hook) 6009 if (spec->init_hook)
6010 spec->init_hook(codec); 6010 spec->init_hook(codec);
6011 6011
6012 snd_hda_apply_verbs(codec); 6012 if (!spec->skip_verbs)
6013 snd_hda_apply_verbs(codec);
6013 6014
6014 init_multi_out(codec); 6015 init_multi_out(codec);
6015 init_extra_out(codec); 6016 init_extra_out(codec);
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 5f199dcb0d18..fb9f1a90238b 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -243,6 +243,7 @@ struct hda_gen_spec {
243 unsigned int indep_hp_enabled:1; /* independent HP enabled */ 243 unsigned int indep_hp_enabled:1; /* independent HP enabled */
244 unsigned int have_aamix_ctl:1; 244 unsigned int have_aamix_ctl:1;
245 unsigned int hp_mic_jack_modes:1; 245 unsigned int hp_mic_jack_modes:1;
246 unsigned int skip_verbs:1; /* don't apply verbs at snd_hda_gen_init() */
246 247
247 /* additional mute flags (only effective with auto_mute_via_amp=1) */ 248 /* additional mute flags (only effective with auto_mute_via_amp=1) */
248 u64 mute_bits; 249 u64 mute_bits;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e333b3e30e31..c1ddfd2fac52 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -837,9 +837,11 @@ static int alc_init(struct hda_codec *codec)
837 if (spec->init_hook) 837 if (spec->init_hook)
838 spec->init_hook(codec); 838 spec->init_hook(codec);
839 839
840 spec->gen.skip_verbs = 1; /* applied in below */
840 snd_hda_gen_init(codec); 841 snd_hda_gen_init(codec);
841 alc_fix_pll(codec); 842 alc_fix_pll(codec);
842 alc_auto_init_amp(codec, spec->init_amp); 843 alc_auto_init_amp(codec, spec->init_amp);
844 snd_hda_apply_verbs(codec); /* apply verbs here after own init */
843 845
844 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT); 846 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_INIT);
845 847
@@ -5797,6 +5799,7 @@ enum {
5797 ALC286_FIXUP_ACER_AIO_HEADSET_MIC, 5799 ALC286_FIXUP_ACER_AIO_HEADSET_MIC,
5798 ALC256_FIXUP_ASUS_MIC_NO_PRESENCE, 5800 ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
5799 ALC299_FIXUP_PREDATOR_SPK, 5801 ALC299_FIXUP_PREDATOR_SPK,
5802 ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
5800}; 5803};
5801 5804
5802static const struct hda_fixup alc269_fixups[] = { 5805static const struct hda_fixup alc269_fixups[] = {
@@ -6837,6 +6840,16 @@ static const struct hda_fixup alc269_fixups[] = {
6837 { } 6840 { }
6838 } 6841 }
6839 }, 6842 },
6843 [ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC] = {
6844 .type = HDA_FIXUP_PINS,
6845 .v.pins = (const struct hda_pintbl[]) {
6846 { 0x14, 0x411111f0 }, /* disable confusing internal speaker */
6847 { 0x19, 0x04a11150 }, /* use as headset mic, without its own jack detect */
6848 { }
6849 },
6850 .chained = true,
6851 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6852 },
6840}; 6853};
6841 6854
6842static const struct snd_pci_quirk alc269_fixup_tbl[] = { 6855static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6979,6 +6992,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6979 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE), 6992 SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
6980 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), 6993 SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6981 SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3), 6994 SND_PCI_QUIRK(0x103c, 0x8497, "HP Envy x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6995 SND_PCI_QUIRK(0x103c, 0x84e7, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
6982 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), 6996 SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
6983 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300), 6997 SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
6984 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 6998 SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -6995,6 +7009,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6995 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK), 7009 SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
6996 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A), 7010 SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
6997 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC), 7011 SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
7012 SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC),
6998 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW), 7013 SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
6999 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC), 7014 SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
7000 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC), 7015 SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -7072,6 +7087,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7072 SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 7087 SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7073 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 7088 SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7074 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), 7089 SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
7090 SND_PCI_QUIRK(0x17aa, 0x3151, "ThinkCentre Station", ALC283_FIXUP_HEADSET_MIC),
7075 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 7091 SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
7076 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), 7092 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
7077 SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI), 7093 SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo B50-70", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
@@ -8946,6 +8962,7 @@ static int patch_alc680(struct hda_codec *codec)
8946static const struct hda_device_id snd_hda_id_realtek[] = { 8962static const struct hda_device_id snd_hda_id_realtek[] = {
8947 HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269), 8963 HDA_CODEC_ENTRY(0x10ec0215, "ALC215", patch_alc269),
8948 HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269), 8964 HDA_CODEC_ENTRY(0x10ec0221, "ALC221", patch_alc269),
8965 HDA_CODEC_ENTRY(0x10ec0222, "ALC222", patch_alc269),
8949 HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269), 8966 HDA_CODEC_ENTRY(0x10ec0225, "ALC225", patch_alc269),
8950 HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269), 8967 HDA_CODEC_ENTRY(0x10ec0231, "ALC231", patch_alc269),
8951 HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269), 8968 HDA_CODEC_ENTRY(0x10ec0233, "ALC233", patch_alc269),
diff --git a/tools/testing/selftests/cgroup/test_freezer.c b/tools/testing/selftests/cgroup/test_freezer.c
index 8219a30853d2..0fc1b6d4b0f9 100644
--- a/tools/testing/selftests/cgroup/test_freezer.c
+++ b/tools/testing/selftests/cgroup/test_freezer.c
@@ -448,6 +448,59 @@ cleanup:
448} 448}
449 449
450/* 450/*
451 * The test creates a cgroups and freezes it. Then it creates a child cgroup
452 * and populates it with a task. After that it checks that the child cgroup
453 * is frozen and the parent cgroup remains frozen too.
454 */
455static int test_cgfreezer_mkdir(const char *root)
456{
457 int ret = KSFT_FAIL;
458 char *parent, *child = NULL;
459 int pid;
460
461 parent = cg_name(root, "cg_test_mkdir_A");
462 if (!parent)
463 goto cleanup;
464
465 child = cg_name(parent, "cg_test_mkdir_B");
466 if (!child)
467 goto cleanup;
468
469 if (cg_create(parent))
470 goto cleanup;
471
472 if (cg_freeze_wait(parent, true))
473 goto cleanup;
474
475 if (cg_create(child))
476 goto cleanup;
477
478 pid = cg_run_nowait(child, child_fn, NULL);
479 if (pid < 0)
480 goto cleanup;
481
482 if (cg_wait_for_proc_count(child, 1))
483 goto cleanup;
484
485 if (cg_check_frozen(child, true))
486 goto cleanup;
487
488 if (cg_check_frozen(parent, true))
489 goto cleanup;
490
491 ret = KSFT_PASS;
492
493cleanup:
494 if (child)
495 cg_destroy(child);
496 free(child);
497 if (parent)
498 cg_destroy(parent);
499 free(parent);
500 return ret;
501}
502
503/*
451 * The test creates two nested cgroups, freezes the parent 504 * The test creates two nested cgroups, freezes the parent
452 * and removes the child. Then it checks that the parent cgroup 505 * and removes the child. Then it checks that the parent cgroup
453 * remains frozen and it's possible to create a new child 506 * remains frozen and it's possible to create a new child
@@ -815,6 +868,7 @@ struct cgfreezer_test {
815 T(test_cgfreezer_simple), 868 T(test_cgfreezer_simple),
816 T(test_cgfreezer_tree), 869 T(test_cgfreezer_tree),
817 T(test_cgfreezer_forkbomb), 870 T(test_cgfreezer_forkbomb),
871 T(test_cgfreezer_mkdir),
818 T(test_cgfreezer_rmdir), 872 T(test_cgfreezer_rmdir),
819 T(test_cgfreezer_migrate), 873 T(test_cgfreezer_migrate),
820 T(test_cgfreezer_ptrace), 874 T(test_cgfreezer_ptrace),
diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh
index c5c93d5fb3ad..f9ebeac1e6f2 100755
--- a/tools/testing/selftests/net/fib_nexthops.sh
+++ b/tools/testing/selftests/net/fib_nexthops.sh
@@ -212,6 +212,8 @@ check_output()
212 printf " ${out}\n" 212 printf " ${out}\n"
213 printf " Expected:\n" 213 printf " Expected:\n"
214 printf " ${expected}\n\n" 214 printf " ${expected}\n\n"
215 else
216 echo " WARNING: Unexpected route entry"
215 fi 217 fi
216 fi 218 fi
217 219
@@ -274,7 +276,7 @@ ipv6_fcnal()
274 276
275 run_cmd "$IP nexthop get id 52" 277 run_cmd "$IP nexthop get id 52"
276 log_test $? 0 "Get nexthop by id" 278 log_test $? 0 "Get nexthop by id"
277 check_nexthop "id 52" "id 52 via 2001:db8:91::2 dev veth1" 279 check_nexthop "id 52" "id 52 via 2001:db8:91::2 dev veth1 scope link"
278 280
279 run_cmd "$IP nexthop del id 52" 281 run_cmd "$IP nexthop del id 52"
280 log_test $? 0 "Delete nexthop by id" 282 log_test $? 0 "Delete nexthop by id"
@@ -479,12 +481,12 @@ ipv6_fcnal_runtime()
479 run_cmd "$IP -6 nexthop add id 85 dev veth1" 481 run_cmd "$IP -6 nexthop add id 85 dev veth1"
480 run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 85" 482 run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 85"
481 log_test $? 0 "IPv6 route with device only nexthop" 483 log_test $? 0 "IPv6 route with device only nexthop"
482 check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 dev veth1" 484 check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 dev veth1 metric 1024 pref medium"
483 485
484 run_cmd "$IP nexthop add id 123 group 81/85" 486 run_cmd "$IP nexthop add id 123 group 81/85"
485 run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 123" 487 run_cmd "$IP ro replace 2001:db8:101::1/128 nhid 123"
486 log_test $? 0 "IPv6 multipath route with nexthop mix - dev only + gw" 488 log_test $? 0 "IPv6 multipath route with nexthop mix - dev only + gw"
487 check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 85 nexthop via 2001:db8:91::2 dev veth1 nexthop dev veth1" 489 check_route6 "2001:db8:101::1" "2001:db8:101::1 nhid 123 metric 1024 nexthop via 2001:db8:91::2 dev veth1 weight 1 nexthop dev veth1 weight 1 pref medium"
488 490
489 # 491 #
490 # IPv6 route with v4 nexthop - not allowed 492 # IPv6 route with v4 nexthop - not allowed
@@ -538,7 +540,7 @@ ipv4_fcnal()
538 540
539 run_cmd "$IP nexthop get id 12" 541 run_cmd "$IP nexthop get id 12"
540 log_test $? 0 "Get nexthop by id" 542 log_test $? 0 "Get nexthop by id"
541 check_nexthop "id 12" "id 12 via 172.16.1.2 src 172.16.1.1 dev veth1 scope link" 543 check_nexthop "id 12" "id 12 via 172.16.1.2 dev veth1 scope link"
542 544
543 run_cmd "$IP nexthop del id 12" 545 run_cmd "$IP nexthop del id 12"
544 log_test $? 0 "Delete nexthop by id" 546 log_test $? 0 "Delete nexthop by id"
@@ -685,7 +687,7 @@ ipv4_withv6_fcnal()
685 set +e 687 set +e
686 run_cmd "$IP ro add 172.16.101.1/32 nhid 11" 688 run_cmd "$IP ro add 172.16.101.1/32 nhid 11"
687 log_test $? 0 "IPv6 nexthop with IPv4 route" 689 log_test $? 0 "IPv6 nexthop with IPv4 route"
688 check_route "172.16.101.1" "172.16.101.1 nhid 11 via ${lladdr} dev veth1" 690 check_route "172.16.101.1" "172.16.101.1 nhid 11 via inet6 ${lladdr} dev veth1"
689 691
690 set -e 692 set -e
691 run_cmd "$IP nexthop add id 12 via 172.16.1.2 dev veth1" 693 run_cmd "$IP nexthop add id 12 via 172.16.1.2 dev veth1"
@@ -694,11 +696,11 @@ ipv4_withv6_fcnal()
694 run_cmd "$IP ro replace 172.16.101.1/32 nhid 101" 696 run_cmd "$IP ro replace 172.16.101.1/32 nhid 101"
695 log_test $? 0 "IPv6 nexthop with IPv4 route" 697 log_test $? 0 "IPv6 nexthop with IPv4 route"
696 698
697 check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1" 699 check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
698 700
699 run_cmd "$IP ro replace 172.16.101.1/32 via inet6 ${lladdr} dev veth1" 701 run_cmd "$IP ro replace 172.16.101.1/32 via inet6 ${lladdr} dev veth1"
700 log_test $? 0 "IPv4 route with IPv6 gateway" 702 log_test $? 0 "IPv4 route with IPv6 gateway"
701 check_route "172.16.101.1" "172.16.101.1 via ${lladdr} dev veth1" 703 check_route "172.16.101.1" "172.16.101.1 via inet6 ${lladdr} dev veth1"
702 704
703 run_cmd "$IP ro replace 172.16.101.1/32 via inet6 2001:db8:50::1 dev veth1" 705 run_cmd "$IP ro replace 172.16.101.1/32 via inet6 2001:db8:50::1 dev veth1"
704 log_test $? 2 "IPv4 route with invalid IPv6 gateway" 706 log_test $? 2 "IPv4 route with invalid IPv6 gateway"
@@ -785,10 +787,10 @@ ipv4_fcnal_runtime()
785 log_test $? 0 "IPv4 route with device only nexthop" 787 log_test $? 0 "IPv4 route with device only nexthop"
786 check_route "172.16.101.1" "172.16.101.1 nhid 85 dev veth1" 788 check_route "172.16.101.1" "172.16.101.1 nhid 85 dev veth1"
787 789
788 run_cmd "$IP nexthop add id 122 group 21/85" 790 run_cmd "$IP nexthop add id 123 group 21/85"
789 run_cmd "$IP ro replace 172.16.101.1/32 nhid 122" 791 run_cmd "$IP ro replace 172.16.101.1/32 nhid 123"
790 log_test $? 0 "IPv4 multipath route with nexthop mix - dev only + gw" 792 log_test $? 0 "IPv4 multipath route with nexthop mix - dev only + gw"
791 check_route "172.16.101.1" "172.16.101.1 nhid 85 nexthop via 172.16.1.2 dev veth1 nexthop dev veth1" 793 check_route "172.16.101.1" "172.16.101.1 nhid 123 nexthop via 172.16.1.2 dev veth1 weight 1 nexthop dev veth1 weight 1"
792 794
793 # 795 #
794 # IPv4 with IPv6 796 # IPv4 with IPv6
@@ -820,7 +822,7 @@ ipv4_fcnal_runtime()
820 run_cmd "$IP ro replace 172.16.101.1/32 nhid 101" 822 run_cmd "$IP ro replace 172.16.101.1/32 nhid 101"
821 log_test $? 0 "IPv4 route with mixed v4-v6 multipath route" 823 log_test $? 0 "IPv4 route with mixed v4-v6 multipath route"
822 824
823 check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1" 825 check_route "172.16.101.1" "172.16.101.1 nhid 101 nexthop via inet6 ${lladdr} dev veth1 weight 1 nexthop via 172.16.1.2 dev veth1 weight 1"
824 826
825 run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1" 827 run_cmd "ip netns exec me ping -c1 -w1 172.16.101.1"
826 log_test $? 0 "IPv6 nexthop with IPv4 route" 828 log_test $? 0 "IPv6 nexthop with IPv4 route"
diff --git a/tools/testing/selftests/net/xfrm_policy.sh b/tools/testing/selftests/net/xfrm_policy.sh
index 5445943bf07f..7a1bf94c5bd3 100755
--- a/tools/testing/selftests/net/xfrm_policy.sh
+++ b/tools/testing/selftests/net/xfrm_policy.sh
@@ -106,6 +106,13 @@ do_overlap()
106 # 106 #
107 # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23. 107 # 10.0.0.0/24 and 10.0.1.0/24 nodes have been merged as 10.0.0.0/23.
108 ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block 108 ip -net $ns xfrm policy add src 10.1.0.0/24 dst 10.0.0.0/23 dir fwd priority 200 action block
109
110 # similar to above: add policies (with partially random address), with shrinking prefixes.
111 for p in 29 28 27;do
112 for k in $(seq 1 32); do
113 ip -net $ns xfrm policy add src 10.253.1.$((RANDOM%255))/$p dst 10.254.1.$((RANDOM%255))/$p dir fwd priority $((200+k)) action block 2>/dev/null
114 done
115 done
109} 116}
110 117
111do_esp_policy_get_check() { 118do_esp_policy_get_check() {