-rw-r--r--  .mailmap | 2
-rw-r--r--  Documentation/bpf/btf.rst | 8
-rw-r--r--  Documentation/devicetree/bindings/hwmon/adc128d818.txt | 4
-rw-r--r--  Documentation/networking/bpf_flow_dissector.rst | 126
-rw-r--r--  Documentation/networking/index.rst | 1
-rw-r--r--  MAINTAINERS | 6
-rw-r--r--  arch/arc/include/asm/syscall.h | 7
-rw-r--r--  arch/arm/include/asm/syscall.h | 47
-rw-r--r--  arch/arm64/include/asm/syscall.h | 46
-rw-r--r--  arch/arm64/kernel/sdei.c | 6
-rw-r--r--  arch/c6x/include/asm/syscall.h | 79
-rw-r--r--  arch/csky/include/asm/syscall.h | 26
-rw-r--r--  arch/h8300/include/asm/syscall.h | 34
-rw-r--r--  arch/hexagon/include/asm/syscall.h | 4
-rw-r--r--  arch/ia64/include/asm/syscall.h | 13
-rw-r--r--  arch/ia64/kernel/ptrace.c | 7
-rw-r--r--  arch/microblaze/include/asm/syscall.h | 8
-rw-r--r--  arch/mips/include/asm/syscall.h | 3
-rw-r--r--  arch/mips/kernel/ptrace.c | 2
-rw-r--r--  arch/nds32/include/asm/syscall.h | 62
-rw-r--r--  arch/nios2/include/asm/syscall.h | 84
-rw-r--r--  arch/openrisc/include/asm/syscall.h | 12
-rw-r--r--  arch/parisc/include/asm/syscall.h | 30
-rw-r--r--  arch/powerpc/include/asm/syscall.h | 15
-rw-r--r--  arch/riscv/include/asm/fixmap.h | 2
-rw-r--r--  arch/riscv/include/asm/syscall.h | 24
-rw-r--r--  arch/riscv/include/asm/uaccess.h | 2
-rw-r--r--  arch/riscv/kernel/Makefile | 3
-rw-r--r--  arch/riscv/kernel/module.c | 2
-rw-r--r--  arch/riscv/kernel/setup.c | 8
-rw-r--r--  arch/riscv/mm/Makefile | 6
-rw-r--r--  arch/riscv/mm/init.c | 28
-rw-r--r--  arch/s390/include/asm/syscall.h | 28
-rw-r--r--  arch/sh/include/asm/syscall_32.h | 47
-rw-r--r--  arch/sh/include/asm/syscall_64.h | 8
-rw-r--r--  arch/sparc/include/asm/syscall.h | 11
-rw-r--r--  arch/um/include/asm/syscall-generic.h | 78
-rw-r--r--  arch/x86/include/asm/syscall.h | 142
-rw-r--r--  arch/x86/kvm/svm.c | 22
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 74
-rw-r--r--  arch/xtensa/include/asm/syscall.h | 33
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 6
-rw-r--r--  drivers/char/Kconfig | 2
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 20
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h | 1
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h | 5
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c | 5
-rw-r--r--  drivers/hid/Kconfig | 1
-rw-r--r--  drivers/hid/hid-core.c | 6
-rw-r--r--  drivers/hid/hid-debug.c | 5
-rw-r--r--  drivers/hid/hid-ids.h | 1
-rw-r--r--  drivers/hid/hid-input.c | 1
-rw-r--r--  drivers/hid/hid-logitech-hidpp.c | 13
-rw-r--r--  drivers/hid/hid-quirks.c | 11
-rw-r--r--  drivers/hid/hid-steam.c | 26
-rw-r--r--  drivers/hid/hid-uclogic-params.c | 4
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid-core.c | 2
-rw-r--r--  drivers/hwmon/Kconfig | 1
-rw-r--r--  drivers/hwmon/ntc_thermistor.c | 2
-rw-r--r--  drivers/hwmon/occ/common.c | 6
-rw-r--r--  drivers/mfd/Kconfig | 2
-rw-r--r--  drivers/mfd/sprd-sc27xx-spi.c | 42
-rw-r--r--  drivers/mfd/twl-core.c | 23
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 24
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 20
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 30
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 9
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 53
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 18
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 57
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 4
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 34
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 6
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 32
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/vrf.c | 1
-rw-r--r--  drivers/vfio/pci/vfio_pci.c | 4
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c | 2
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c | 14
-rw-r--r--  fs/aio.c | 338
-rw-r--r--  fs/cifs/cifsfs.c | 2
-rw-r--r--  fs/cifs/cifsglob.h | 8
-rw-r--r--  fs/cifs/connect.c | 30
-rw-r--r--  fs/cifs/smb2file.c | 6
-rw-r--r--  fs/cifs/smb2ops.c | 68
-rw-r--r--  fs/cifs/smb2pdu.c | 49
-rw-r--r--  fs/cifs/smb2proto.h | 5
-rw-r--r--  fs/debugfs/inode.c | 13
-rw-r--r--  fs/jffs2/readinode.c | 5
-rw-r--r--  fs/jffs2/super.c | 5
-rw-r--r--  fs/proc/base.c | 17
-rw-r--r--  fs/ubifs/super.c | 4
-rw-r--r--  include/asm-generic/syscall.h | 21
-rw-r--r--  include/linux/mii.h | 2
-rw-r--r--  include/linux/mlx5/driver.h | 2
-rw-r--r--  include/linux/ptrace.h | 11
-rw-r--r--  include/net/ip.h | 2
-rw-r--r--  include/net/net_namespace.h | 1
-rw-r--r--  include/net/netns/hash.h | 10
-rw-r--r--  include/net/sch_generic.h | 44
-rw-r--r--  include/trace/events/syscalls.h | 2
-rw-r--r--  kernel/bpf/cpumap.c | 13
-rw-r--r--  kernel/bpf/inode.c | 32
-rw-r--r--  kernel/bpf/verifier.c | 5
-rw-r--r--  kernel/seccomp.c | 2
-rw-r--r--  kernel/signal.c | 13
-rw-r--r--  kernel/trace/trace_syscalls.c | 9
-rw-r--r--  lib/syscall.c | 57
-rw-r--r--  mm/compaction.c | 29
-rw-r--r--  net/8021q/vlan_dev.c | 26
-rw-r--r--  net/batman-adv/bat_v_elp.c | 6
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c | 16
-rw-r--r--  net/batman-adv/sysfs.c | 7
-rw-r--r--  net/batman-adv/translation-table.c | 32
-rw-r--r--  net/bridge/br_multicast.c | 3
-rw-r--r--  net/core/datagram.c | 2
-rw-r--r--  net/core/dev.c | 4
-rw-r--r--  net/core/ethtool.c | 46
-rw-r--r--  net/core/filter.c | 16
-rw-r--r--  net/core/flow_dissector.c | 4
-rw-r--r--  net/core/net_namespace.c | 1
-rw-r--r--  net/core/skbuff.c | 2
-rw-r--r--  net/dccp/feat.c | 7
-rw-r--r--  net/dsa/tag_qca.c | 10
-rw-r--r--  net/ipv4/ip_input.c | 7
-rw-r--r--  net/ipv4/ip_options.c | 4
-rw-r--r--  net/ipv4/tcp_dctcp.c | 36
-rw-r--r--  net/ipv4/tcp_ipv4.c | 3
-rw-r--r--  net/ipv6/ila/ila_xlat.c | 1
-rw-r--r--  net/ipv6/ip6_output.c | 4
-rw-r--r--  net/ipv6/ip6_tunnel.c | 4
-rw-r--r--  net/ipv6/sit.c | 4
-rw-r--r--  net/kcm/kcmsock.c | 16
-rw-r--r--  net/openvswitch/flow_netlink.c | 4
-rw-r--r--  net/rds/tcp.c | 2
-rw-r--r--  net/sched/act_sample.c | 10
-rw-r--r--  net/sched/cls_matchall.c | 5
-rw-r--r--  net/sched/sch_cake.c | 13
-rw-r--r--  net/sched/sch_cbq.c | 10
-rw-r--r--  net/sched/sch_drr.c | 16
-rw-r--r--  net/sched/sch_hfsc.c | 19
-rw-r--r--  net/sched/sch_htb.c | 22
-rw-r--r--  net/sched/sch_mq.c | 2
-rw-r--r--  net/sched/sch_mqprio.c | 3
-rw-r--r--  net/sched/sch_multiq.c | 10
-rw-r--r--  net/sched/sch_prio.c | 10
-rw-r--r--  net/sched/sch_qfq.c | 14
-rw-r--r--  net/sched/sch_red.c | 3
-rw-r--r--  net/sched/sch_sfb.c | 3
-rw-r--r--  net/sched/sch_taprio.c | 2
-rw-r--r--  net/sched/sch_tbf.c | 3
-rw-r--r--  net/sctp/protocol.c | 1
-rw-r--r--  net/tipc/netlink_compat.c | 24
-rw-r--r--  net/tls/tls_sw.c | 2
-rw-r--r--  tools/lib/bpf/Makefile | 7
-rw-r--r--  tools/lib/bpf/btf.c | 3
-rw-r--r--  tools/power/x86/turbostat/turbostat.c | 277
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/flow_dissector.c | 68
-rw-r--r--  tools/testing/selftests/bpf/progs/bpf_flow.c | 19
-rw-r--r--  tools/testing/selftests/bpf/test_btf.c | 47
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c | 38
-rw-r--r--  tools/testing/selftests/tc-testing/tc-tests/actions/sample.json | 24
214 files changed, 2147 insertions, 1734 deletions
diff --git a/.mailmap b/.mailmap
index b2cde8668dcc..ae2bcad06f4b 100644
--- a/.mailmap
+++ b/.mailmap
@@ -156,6 +156,8 @@ Morten Welinder <welinder@darter.rentec.com>
156Morten Welinder <welinder@troll.com> 156Morten Welinder <welinder@troll.com>
157Mythri P K <mythripk@ti.com> 157Mythri P K <mythripk@ti.com>
158Nguyen Anh Quynh <aquynh@gmail.com> 158Nguyen Anh Quynh <aquynh@gmail.com>
159Nicolas Pitre <nico@fluxnic.net> <nicolas.pitre@linaro.org>
160Nicolas Pitre <nico@fluxnic.net> <nico@linaro.org>
159Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it> 161Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
160Patrick Mochel <mochel@digitalimplant.org> 162Patrick Mochel <mochel@digitalimplant.org>
161Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com> 163Paul Burton <paul.burton@mips.com> <paul.burton@imgtec.com>
diff --git a/Documentation/bpf/btf.rst b/Documentation/bpf/btf.rst
index 9a60a5d60e38..7313d354f20e 100644
--- a/Documentation/bpf/btf.rst
+++ b/Documentation/bpf/btf.rst
@@ -148,16 +148,16 @@ The ``btf_type.size * 8`` must be equal to or greater than ``BTF_INT_BITS()``
 for the type. The maximum value of ``BTF_INT_BITS()`` is 128.
 
 The ``BTF_INT_OFFSET()`` specifies the starting bit offset to calculate values
-for this int. For example, a bitfield struct member has: * btf member bit
-offset 100 from the start of the structure, * btf member pointing to an int
-type, * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
+for this int. For example, a bitfield struct member has:
+ * btf member bit offset 100 from the start of the structure,
+ * btf member pointing to an int type,
+ * the int type has ``BTF_INT_OFFSET() = 2`` and ``BTF_INT_BITS() = 4``
 
 Then in the struct memory layout, this member will occupy ``4`` bits starting
 from bits ``100 + 2 = 102``.
 
 Alternatively, the bitfield struct member can be the following to access the
 same bits as the above:
-
  * btf member bit offset 102,
  * btf member pointing to an int type,
  * the int type has ``BTF_INT_OFFSET() = 0`` and ``BTF_INT_BITS() = 4``
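
Illustration only, not part of the patch: on a typical little-endian ABI, a
struct whose bitfield member starts at bit offset 102 could look like the
sketch below (the names and padding are made up for the example):

    struct s {
            unsigned int pad[3];    /* bits   0..95                       */
            unsigned int x:6;       /* bits  96..101                      */
            unsigned int y:4;       /* bits 102..105: the member that both
                                     * BTF encodings above describe       */
    };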
diff --git a/Documentation/devicetree/bindings/hwmon/adc128d818.txt b/Documentation/devicetree/bindings/hwmon/adc128d818.txt
index 08bab0e94d25..d0ae46d7bac3 100644
--- a/Documentation/devicetree/bindings/hwmon/adc128d818.txt
+++ b/Documentation/devicetree/bindings/hwmon/adc128d818.txt
@@ -26,7 +26,7 @@ Required node properties:
26 26
27Optional node properties: 27Optional node properties:
28 28
29 - ti,mode: Operation mode (see above). 29 - ti,mode: Operation mode (u8) (see above).
30 30
31 31
32Example (operation mode 2): 32Example (operation mode 2):
@@ -34,5 +34,5 @@ Example (operation mode 2):
34 adc128d818@1d { 34 adc128d818@1d {
35 compatible = "ti,adc128d818"; 35 compatible = "ti,adc128d818";
36 reg = <0x1d>; 36 reg = <0x1d>;
37 ti,mode = <2>; 37 ti,mode = /bits/ 8 <2>;
38 }; 38 };
diff --git a/Documentation/networking/bpf_flow_dissector.rst b/Documentation/networking/bpf_flow_dissector.rst
new file mode 100644
index 000000000000..b375ae2ec2c4
--- /dev/null
+++ b/Documentation/networking/bpf_flow_dissector.rst
@@ -0,0 +1,126 @@
.. SPDX-License-Identifier: GPL-2.0

==================
BPF Flow Dissector
==================

Overview
========

The flow dissector is a routine that parses metadata out of packets. It is
used in various places in the networking subsystem (RFS, flow hash, etc).

The BPF flow dissector is an attempt to reimplement the C-based flow dissector
logic in BPF to gain all the benefits of the BPF verifier (namely, limits on
the number of instructions and tail calls).

API
===

BPF flow dissector programs operate on an ``__sk_buff``. However, only a
limited set of fields is allowed: ``data``, ``data_end`` and ``flow_keys``.
``flow_keys`` is ``struct bpf_flow_keys`` and contains flow dissector input
and output arguments.

The inputs are:
 * ``nhoff`` - initial offset of the networking header
 * ``thoff`` - initial offset of the transport header, initialized to nhoff
 * ``n_proto`` - L3 protocol type, parsed out of L2 header

The flow dissector BPF program should fill out the rest of the ``struct
bpf_flow_keys`` fields. The input arguments ``nhoff/thoff/n_proto`` should
also be adjusted accordingly.

The return code of the BPF program is either BPF_OK to indicate successful
dissection, or BPF_DROP to indicate a parsing error.
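
Illustration only, not part of the patch: a minimal sketch of such a program,
assuming the selftests-style ``bpf_helpers.h``/``bpf_endian.h`` headers and
handling only plain IPv4 (the program and variable names are illustrative):

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/ip.h>
    #include "bpf_helpers.h"
    #include "bpf_endian.h"

    SEC("flow_dissector")
    int ipv4_only_dissect(struct __sk_buff *skb)
    {
            struct bpf_flow_keys *keys = skb->flow_keys;
            void *data_end = (void *)(long)skb->data_end;
            void *data = (void *)(long)skb->data;
            struct iphdr *iph = data + keys->nhoff;

            if (keys->n_proto != bpf_htons(ETH_P_IP))
                    return BPF_DROP;
            if ((void *)(iph + 1) > data_end)
                    return BPF_DROP;

            /* Fill the output fields and advance thoff past the IPv4 header. */
            keys->addr_proto = ETH_P_IP;
            keys->ip_proto = iph->protocol;
            keys->ipv4_src = iph->saddr;
            keys->ipv4_dst = iph->daddr;
            keys->thoff = keys->nhoff + iph->ihl * 4;

            return BPF_OK;
    }

    char _license[] SEC("license") = "GPL";
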
__sk_buff->data
===============

In the VLAN-less case, this is what the initial state of the BPF flow
dissector looks like::

  +------+------+------------+-----------+
  | DMAC | SMAC | ETHER_TYPE | L3_HEADER |
  +------+------+------------+-----------+
                              ^
                              |
                              +-- flow dissector starts here


.. code:: c

  skb->data + flow_keys->nhoff points to the first byte of L3_HEADER
  flow_keys->thoff = nhoff
  flow_keys->n_proto = ETHER_TYPE

In the VLAN case, the flow dissector can be called with two different states.

Pre-VLAN parsing::

  +------+------+------+-----+-----------+-----------+
  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
  +------+------+------+-----+-----------+-----------+
                        ^
                        |
                        +-- flow dissector starts here

.. code:: c

  skb->data + flow_keys->nhoff points to the first byte of TCI
  flow_keys->thoff = nhoff
  flow_keys->n_proto = TPID

Please note that the TPID can be 802.1AD and, hence, the BPF program would
have to parse VLAN information twice for double-tagged packets.


Post-VLAN parsing::

  +------+------+------+-----+-----------+-----------+
  | DMAC | SMAC | TPID | TCI |ETHER_TYPE | L3_HEADER |
  +------+------+------+-----+-----------+-----------+
                                          ^
                                          |
                                          +-- flow dissector starts here

.. code:: c

  skb->data + flow_keys->nhoff points to the first byte of L3_HEADER
  flow_keys->thoff = nhoff
  flow_keys->n_proto = ETHER_TYPE

In this case the VLAN information has been processed before the flow dissector
and the BPF flow dissector is not required to handle it.


The takeaway here is as follows: the BPF flow dissector program can be called
with an optional VLAN header and should gracefully handle both cases: when a
single or double VLAN tag is present and when it is not. The same program is
called for both cases and has to be written carefully to handle both.


Reference Implementation
========================

See ``tools/testing/selftests/bpf/progs/bpf_flow.c`` for the reference
implementation and ``tools/testing/selftests/bpf/flow_dissector_load.[hc]``
for the loader. bpftool can be used to load the BPF flow dissector program as
well.

The reference implementation is organized as follows:
 * ``jmp_table`` map that contains sub-programs for each supported L3 protocol
 * ``_dissect`` routine - entry point; it does input ``n_proto`` parsing and
   does ``bpf_tail_call`` to the appropriate L3 handler

Since BPF at this point doesn't support looping (or any jumping back),
jmp_table is used instead to handle multiple levels of encapsulation (and
IPv6 options).
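
Illustration only, not part of the patch: a sketch of that tail-call dispatch
pattern; the map definition style, indices and program names are illustrative
rather than the exact reference code:

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include "bpf_helpers.h"
    #include "bpf_endian.h"

    #define PROG_IP   0
    #define PROG_IPV6 1

    struct bpf_map_def SEC("maps") jmp_table = {
            .type = BPF_MAP_TYPE_PROG_ARRAY,
            .key_size = sizeof(__u32),
            .value_size = sizeof(__u32),
            .max_entries = 8,
    };

    SEC("flow_dissector")
    int _dissect(struct __sk_buff *skb)
    {
            struct bpf_flow_keys *keys = skb->flow_keys;

            /* Dispatch to the per-protocol sub-program; on success the
             * sub-program returns BPF_OK itself and we never come back. */
            switch (keys->n_proto) {
            case bpf_htons(ETH_P_IP):
                    bpf_tail_call(skb, &jmp_table, PROG_IP);
                    break;
            case bpf_htons(ETH_P_IPV6):
                    bpf_tail_call(skb, &jmp_table, PROG_IPV6);
                    break;
            }

            /* Reached only if the tail call failed or the protocol is
             * unsupported. */
            return BPF_DROP;
    }
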

Current Limitations
===================
The BPF flow dissector doesn't support exporting all the metadata that the
in-kernel C-based implementation can export. A notable example is single VLAN
(802.1Q) and double VLAN (802.1AD) tags. Please refer to
``struct bpf_flow_keys`` for the set of information that can currently be
exported from the BPF context.
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 5449149be496..984e68f9e026 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -9,6 +9,7 @@ Contents:
9 netdev-FAQ 9 netdev-FAQ
10 af_xdp 10 af_xdp
11 batman-adv 11 batman-adv
12 bpf_flow_dissector
12 can 13 can
13 can_ucan_protocol 14 can_ucan_protocol
14 device_drivers/freescale/dpaa2/index 15 device_drivers/freescale/dpaa2/index
diff --git a/MAINTAINERS b/MAINTAINERS
index 43b36dbed48e..6771bd784f5f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4129,7 +4129,7 @@ F: drivers/cpuidle/*
4129F: include/linux/cpuidle.h 4129F: include/linux/cpuidle.h
4130 4130
4131CRAMFS FILESYSTEM 4131CRAMFS FILESYSTEM
4132M: Nicolas Pitre <nico@linaro.org> 4132M: Nicolas Pitre <nico@fluxnic.net>
4133S: Maintained 4133S: Maintained
4134F: Documentation/filesystems/cramfs.txt 4134F: Documentation/filesystems/cramfs.txt
4135F: fs/cramfs/ 4135F: fs/cramfs/
@@ -5833,7 +5833,7 @@ L: netdev@vger.kernel.org
5833S: Maintained 5833S: Maintained
5834F: Documentation/ABI/testing/sysfs-bus-mdio 5834F: Documentation/ABI/testing/sysfs-bus-mdio
5835F: Documentation/devicetree/bindings/net/mdio* 5835F: Documentation/devicetree/bindings/net/mdio*
5836F: Documentation/networking/phy.txt 5836F: Documentation/networking/phy.rst
5837F: drivers/net/phy/ 5837F: drivers/net/phy/
5838F: drivers/of/of_mdio.c 5838F: drivers/of/of_mdio.c
5839F: drivers/of/of_net.c 5839F: drivers/of/of_net.c
@@ -13981,7 +13981,7 @@ F: drivers/media/rc/serial_ir.c
13981SFC NETWORK DRIVER 13981SFC NETWORK DRIVER
13982M: Solarflare linux maintainers <linux-net-drivers@solarflare.com> 13982M: Solarflare linux maintainers <linux-net-drivers@solarflare.com>
13983M: Edward Cree <ecree@solarflare.com> 13983M: Edward Cree <ecree@solarflare.com>
13984M: Bert Kenward <bkenward@solarflare.com> 13984M: Martin Habets <mhabets@solarflare.com>
13985L: netdev@vger.kernel.org 13985L: netdev@vger.kernel.org
13986S: Supported 13986S: Supported
13987F: drivers/net/ethernet/sfc/ 13987F: drivers/net/ethernet/sfc/
diff --git a/arch/arc/include/asm/syscall.h b/arch/arc/include/asm/syscall.h
index 29de09804306..c7a4201ed62b 100644
--- a/arch/arc/include/asm/syscall.h
+++ b/arch/arc/include/asm/syscall.h
@@ -55,12 +55,11 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
55 */ 55 */
56static inline void 56static inline void
57syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, 57syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
58 unsigned int i, unsigned int n, unsigned long *args) 58 unsigned long *args)
59{ 59{
60 unsigned long *inside_ptregs = &(regs->r0); 60 unsigned long *inside_ptregs = &(regs->r0);
61 inside_ptregs -= i; 61 unsigned int n = 6;
62 62 unsigned int i = 0;
63 BUG_ON((i + n) > 6);
64 63
65 while (n--) { 64 while (n--) {
66 args[i++] = (*inside_ptregs); 65 args[i++] = (*inside_ptregs);
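
Editorial note: throughout this series the i/n parameters are dropped and the
helpers always copy all six arguments, so updated callers (e.g.
arch/mips/kernel/ptrace.c later in this diff) reduce to a pattern like:

    unsigned long args[6];

    /* regs is the traced task's pt_regs; all six syscall arguments are
     * fetched in a single call under the new convention. */
    syscall_get_arguments(current, regs, args);
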
diff --git a/arch/arm/include/asm/syscall.h b/arch/arm/include/asm/syscall.h
index 06dea6bce293..080ce70cab12 100644
--- a/arch/arm/include/asm/syscall.h
+++ b/arch/arm/include/asm/syscall.h
@@ -55,53 +55,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
55 55
56static inline void syscall_get_arguments(struct task_struct *task, 56static inline void syscall_get_arguments(struct task_struct *task,
57 struct pt_regs *regs, 57 struct pt_regs *regs,
58 unsigned int i, unsigned int n,
59 unsigned long *args) 58 unsigned long *args)
60{ 59{
61 if (n == 0) 60 args[0] = regs->ARM_ORIG_r0;
62 return; 61 args++;
63 62
64 if (i + n > SYSCALL_MAX_ARGS) { 63 memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
65 unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
66 unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
67 pr_warn("%s called with max args %d, handling only %d\n",
68 __func__, i + n, SYSCALL_MAX_ARGS);
69 memset(args_bad, 0, n_bad * sizeof(args[0]));
70 n = SYSCALL_MAX_ARGS - i;
71 }
72
73 if (i == 0) {
74 args[0] = regs->ARM_ORIG_r0;
75 args++;
76 i++;
77 n--;
78 }
79
80 memcpy(args, &regs->ARM_r0 + i, n * sizeof(args[0]));
81} 64}
82 65
83static inline void syscall_set_arguments(struct task_struct *task, 66static inline void syscall_set_arguments(struct task_struct *task,
84 struct pt_regs *regs, 67 struct pt_regs *regs,
85 unsigned int i, unsigned int n,
86 const unsigned long *args) 68 const unsigned long *args)
87{ 69{
88 if (n == 0) 70 regs->ARM_ORIG_r0 = args[0];
89 return; 71 args++;
90 72
91 if (i + n > SYSCALL_MAX_ARGS) { 73 memcpy(&regs->ARM_r0 + 1, args, 5 * sizeof(args[0]));
92 pr_warn("%s called with max args %d, handling only %d\n",
93 __func__, i + n, SYSCALL_MAX_ARGS);
94 n = SYSCALL_MAX_ARGS - i;
95 }
96
97 if (i == 0) {
98 regs->ARM_ORIG_r0 = args[0];
99 args++;
100 i++;
101 n--;
102 }
103
104 memcpy(&regs->ARM_r0 + i, args, n * sizeof(args[0]));
105} 74}
106 75
107static inline int syscall_get_arch(void) 76static inline int syscall_get_arch(void)
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index ad8be16a39c9..a179df3674a1 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -65,52 +65,22 @@ static inline void syscall_set_return_value(struct task_struct *task,
65 65
66static inline void syscall_get_arguments(struct task_struct *task, 66static inline void syscall_get_arguments(struct task_struct *task,
67 struct pt_regs *regs, 67 struct pt_regs *regs,
68 unsigned int i, unsigned int n,
69 unsigned long *args) 68 unsigned long *args)
70{ 69{
71 if (n == 0) 70 args[0] = regs->orig_x0;
72 return; 71 args++;
73 72
74 if (i + n > SYSCALL_MAX_ARGS) { 73 memcpy(args, &regs->regs[1], 5 * sizeof(args[0]));
75 unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
76 unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
77 pr_warning("%s called with max args %d, handling only %d\n",
78 __func__, i + n, SYSCALL_MAX_ARGS);
79 memset(args_bad, 0, n_bad * sizeof(args[0]));
80 }
81
82 if (i == 0) {
83 args[0] = regs->orig_x0;
84 args++;
85 i++;
86 n--;
87 }
88
89 memcpy(args, &regs->regs[i], n * sizeof(args[0]));
90} 74}
91 75
92static inline void syscall_set_arguments(struct task_struct *task, 76static inline void syscall_set_arguments(struct task_struct *task,
93 struct pt_regs *regs, 77 struct pt_regs *regs,
94 unsigned int i, unsigned int n,
95 const unsigned long *args) 78 const unsigned long *args)
96{ 79{
97 if (n == 0) 80 regs->orig_x0 = args[0];
98 return; 81 args++;
99 82
100 if (i + n > SYSCALL_MAX_ARGS) { 83 memcpy(&regs->regs[1], args, 5 * sizeof(args[0]));
101 pr_warning("%s called with max args %d, handling only %d\n",
102 __func__, i + n, SYSCALL_MAX_ARGS);
103 n = SYSCALL_MAX_ARGS - i;
104 }
105
106 if (i == 0) {
107 regs->orig_x0 = args[0];
108 args++;
109 i++;
110 n--;
111 }
112
113 memcpy(&regs->regs[i], args, n * sizeof(args[0]));
114} 84}
115 85
116/* 86/*
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 5ba4465e44f0..ea94cf8f9dc6 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -94,6 +94,9 @@ static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
94 unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr); 94 unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
95 unsigned long high = low + SDEI_STACK_SIZE; 95 unsigned long high = low + SDEI_STACK_SIZE;
96 96
97 if (!low)
98 return false;
99
97 if (sp < low || sp >= high) 100 if (sp < low || sp >= high)
98 return false; 101 return false;
99 102
@@ -111,6 +114,9 @@ static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
111 unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr); 114 unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
112 unsigned long high = low + SDEI_STACK_SIZE; 115 unsigned long high = low + SDEI_STACK_SIZE;
113 116
117 if (!low)
118 return false;
119
114 if (sp < low || sp >= high) 120 if (sp < low || sp >= high)
115 return false; 121 return false;
116 122
diff --git a/arch/c6x/include/asm/syscall.h b/arch/c6x/include/asm/syscall.h
index ae2be315ee9c..15ba8599858e 100644
--- a/arch/c6x/include/asm/syscall.h
+++ b/arch/c6x/include/asm/syscall.h
@@ -46,78 +46,27 @@ static inline void syscall_set_return_value(struct task_struct *task,
46} 46}
47 47
48static inline void syscall_get_arguments(struct task_struct *task, 48static inline void syscall_get_arguments(struct task_struct *task,
49 struct pt_regs *regs, unsigned int i, 49 struct pt_regs *regs,
50 unsigned int n, unsigned long *args) 50 unsigned long *args)
51{ 51{
52 switch (i) { 52 *args++ = regs->a4;
53 case 0: 53 *args++ = regs->b4;
54 if (!n--) 54 *args++ = regs->a6;
55 break; 55 *args++ = regs->b6;
56 *args++ = regs->a4; 56 *args++ = regs->a8;
57 case 1: 57 *args = regs->b8;
58 if (!n--)
59 break;
60 *args++ = regs->b4;
61 case 2:
62 if (!n--)
63 break;
64 *args++ = regs->a6;
65 case 3:
66 if (!n--)
67 break;
68 *args++ = regs->b6;
69 case 4:
70 if (!n--)
71 break;
72 *args++ = regs->a8;
73 case 5:
74 if (!n--)
75 break;
76 *args++ = regs->b8;
77 case 6:
78 if (!n--)
79 break;
80 default:
81 BUG();
82 }
83} 58}
84 59
85static inline void syscall_set_arguments(struct task_struct *task, 60static inline void syscall_set_arguments(struct task_struct *task,
86 struct pt_regs *regs, 61 struct pt_regs *regs,
87 unsigned int i, unsigned int n,
88 const unsigned long *args) 62 const unsigned long *args)
89{ 63{
90 switch (i) { 64 regs->a4 = *args++;
91 case 0: 65 regs->b4 = *args++;
92 if (!n--) 66 regs->a6 = *args++;
93 break; 67 regs->b6 = *args++;
94 regs->a4 = *args++; 68 regs->a8 = *args++;
95 case 1: 69 regs->a9 = *args;
96 if (!n--)
97 break;
98 regs->b4 = *args++;
99 case 2:
100 if (!n--)
101 break;
102 regs->a6 = *args++;
103 case 3:
104 if (!n--)
105 break;
106 regs->b6 = *args++;
107 case 4:
108 if (!n--)
109 break;
110 regs->a8 = *args++;
111 case 5:
112 if (!n--)
113 break;
114 regs->a9 = *args++;
115 case 6:
116 if (!n)
117 break;
118 default:
119 BUG();
120 }
121} 70}
122 71
123#endif /* __ASM_C6X_SYSCALLS_H */ 72#endif /* __ASM_C6X_SYSCALLS_H */
diff --git a/arch/csky/include/asm/syscall.h b/arch/csky/include/asm/syscall.h
index d637445737b7..bda0a446c63e 100644
--- a/arch/csky/include/asm/syscall.h
+++ b/arch/csky/include/asm/syscall.h
@@ -43,30 +43,20 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
43 43
44static inline void 44static inline void
45syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, 45syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
46 unsigned int i, unsigned int n, unsigned long *args) 46 unsigned long *args)
47{ 47{
48 BUG_ON(i + n > 6); 48 args[0] = regs->orig_a0;
49 if (i == 0) { 49 args++;
50 args[0] = regs->orig_a0; 50 memcpy(args, &regs->a1, 5 * sizeof(args[0]));
51 args++;
52 i++;
53 n--;
54 }
55 memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
56} 51}
57 52
58static inline void 53static inline void
59syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, 54syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
60 unsigned int i, unsigned int n, const unsigned long *args) 55 const unsigned long *args)
61{ 56{
62 BUG_ON(i + n > 6); 57 regs->orig_a0 = args[0];
63 if (i == 0) { 58 args++;
64 regs->orig_a0 = args[0]; 59 memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
65 args++;
66 i++;
67 n--;
68 }
69 memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
70} 60}
71 61
72static inline int 62static inline int
diff --git a/arch/h8300/include/asm/syscall.h b/arch/h8300/include/asm/syscall.h
index 924990401237..ddd483c6ca95 100644
--- a/arch/h8300/include/asm/syscall.h
+++ b/arch/h8300/include/asm/syscall.h
@@ -17,34 +17,14 @@ syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
17 17
18static inline void 18static inline void
19syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, 19syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
20 unsigned int i, unsigned int n, unsigned long *args) 20 unsigned long *args)
21{ 21{
22 BUG_ON(i + n > 6); 22 *args++ = regs->er1;
23 23 *args++ = regs->er2;
24 while (n > 0) { 24 *args++ = regs->er3;
25 switch (i) { 25 *args++ = regs->er4;
26 case 0: 26 *args++ = regs->er5;
27 *args++ = regs->er1; 27 *args = regs->er6;
28 break;
29 case 1:
30 *args++ = regs->er2;
31 break;
32 case 2:
33 *args++ = regs->er3;
34 break;
35 case 3:
36 *args++ = regs->er4;
37 break;
38 case 4:
39 *args++ = regs->er5;
40 break;
41 case 5:
42 *args++ = regs->er6;
43 break;
44 }
45 i++;
46 n--;
47 }
48} 28}
49 29
50 30
diff --git a/arch/hexagon/include/asm/syscall.h b/arch/hexagon/include/asm/syscall.h
index 4af9c7b6f13a..ae3a1e24fabd 100644
--- a/arch/hexagon/include/asm/syscall.h
+++ b/arch/hexagon/include/asm/syscall.h
@@ -37,10 +37,8 @@ static inline long syscall_get_nr(struct task_struct *task,
37 37
38static inline void syscall_get_arguments(struct task_struct *task, 38static inline void syscall_get_arguments(struct task_struct *task,
39 struct pt_regs *regs, 39 struct pt_regs *regs,
40 unsigned int i, unsigned int n,
41 unsigned long *args) 40 unsigned long *args)
42{ 41{
43 BUG_ON(i + n > 6); 42 memcpy(args, &(&regs->r00)[0], 6 * sizeof(args[0]));
44 memcpy(args, &(&regs->r00)[i], n * sizeof(args[0]));
45} 43}
46#endif 44#endif
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
index 1d0b875fec44..0d9e7fab4a79 100644
--- a/arch/ia64/include/asm/syscall.h
+++ b/arch/ia64/include/asm/syscall.h
@@ -59,26 +59,19 @@ static inline void syscall_set_return_value(struct task_struct *task,
59} 59}
60 60
61extern void ia64_syscall_get_set_arguments(struct task_struct *task, 61extern void ia64_syscall_get_set_arguments(struct task_struct *task,
62 struct pt_regs *regs, unsigned int i, unsigned int n, 62 struct pt_regs *regs, unsigned long *args, int rw);
63 unsigned long *args, int rw);
64static inline void syscall_get_arguments(struct task_struct *task, 63static inline void syscall_get_arguments(struct task_struct *task,
65 struct pt_regs *regs, 64 struct pt_regs *regs,
66 unsigned int i, unsigned int n,
67 unsigned long *args) 65 unsigned long *args)
68{ 66{
69 BUG_ON(i + n > 6); 67 ia64_syscall_get_set_arguments(task, regs, args, 0);
70
71 ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
72} 68}
73 69
74static inline void syscall_set_arguments(struct task_struct *task, 70static inline void syscall_set_arguments(struct task_struct *task,
75 struct pt_regs *regs, 71 struct pt_regs *regs,
76 unsigned int i, unsigned int n,
77 unsigned long *args) 72 unsigned long *args)
78{ 73{
79 BUG_ON(i + n > 6); 74 ia64_syscall_get_set_arguments(task, regs, args, 1);
80
81 ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
82} 75}
83 76
84static inline int syscall_get_arch(void) 77static inline int syscall_get_arch(void)
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 6d50ede0ed69..bf9c24d9ce84 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -2179,12 +2179,11 @@ static void syscall_get_set_args_cb(struct unw_frame_info *info, void *data)
2179} 2179}
2180 2180
2181void ia64_syscall_get_set_arguments(struct task_struct *task, 2181void ia64_syscall_get_set_arguments(struct task_struct *task,
2182 struct pt_regs *regs, unsigned int i, unsigned int n, 2182 struct pt_regs *regs, unsigned long *args, int rw)
2183 unsigned long *args, int rw)
2184{ 2183{
2185 struct syscall_get_set_args data = { 2184 struct syscall_get_set_args data = {
2186 .i = i, 2185 .i = 0,
2187 .n = n, 2186 .n = 6,
2188 .args = args, 2187 .args = args,
2189 .regs = regs, 2188 .regs = regs,
2190 .rw = rw, 2189 .rw = rw,
diff --git a/arch/microblaze/include/asm/syscall.h b/arch/microblaze/include/asm/syscall.h
index 220decd605a4..833d3a53dab3 100644
--- a/arch/microblaze/include/asm/syscall.h
+++ b/arch/microblaze/include/asm/syscall.h
@@ -82,18 +82,22 @@ static inline void microblaze_set_syscall_arg(struct pt_regs *regs,
82 82
83static inline void syscall_get_arguments(struct task_struct *task, 83static inline void syscall_get_arguments(struct task_struct *task,
84 struct pt_regs *regs, 84 struct pt_regs *regs,
85 unsigned int i, unsigned int n,
86 unsigned long *args) 85 unsigned long *args)
87{ 86{
87 unsigned int i = 0;
88 unsigned int n = 6;
89
88 while (n--) 90 while (n--)
89 *args++ = microblaze_get_syscall_arg(regs, i++); 91 *args++ = microblaze_get_syscall_arg(regs, i++);
90} 92}
91 93
92static inline void syscall_set_arguments(struct task_struct *task, 94static inline void syscall_set_arguments(struct task_struct *task,
93 struct pt_regs *regs, 95 struct pt_regs *regs,
94 unsigned int i, unsigned int n,
95 const unsigned long *args) 96 const unsigned long *args)
96{ 97{
98 unsigned int i = 0;
99 unsigned int n = 6;
100
97 while (n--) 101 while (n--)
98 microblaze_set_syscall_arg(regs, i++, *args++); 102 microblaze_set_syscall_arg(regs, i++, *args++);
99} 103}
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 6cf8ffb5367e..a2b4748655df 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -116,9 +116,10 @@ static inline void syscall_set_return_value(struct task_struct *task,
116 116
117static inline void syscall_get_arguments(struct task_struct *task, 117static inline void syscall_get_arguments(struct task_struct *task,
118 struct pt_regs *regs, 118 struct pt_regs *regs,
119 unsigned int i, unsigned int n,
120 unsigned long *args) 119 unsigned long *args)
121{ 120{
121 unsigned int i = 0;
122 unsigned int n = 6;
122 int ret; 123 int ret;
123 124
124 /* O32 ABI syscall() */ 125 /* O32 ABI syscall() */
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 0057c910bc2f..3a62f80958e1 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -1419,7 +1419,7 @@ asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
1419 1419
1420 sd.nr = syscall; 1420 sd.nr = syscall;
1421 sd.arch = syscall_get_arch(); 1421 sd.arch = syscall_get_arch();
1422 syscall_get_arguments(current, regs, 0, 6, args); 1422 syscall_get_arguments(current, regs, args);
1423 for (i = 0; i < 6; i++) 1423 for (i = 0; i < 6; i++)
1424 sd.args[i] = args[i]; 1424 sd.args[i] = args[i];
1425 sd.instruction_pointer = KSTK_EIP(current); 1425 sd.instruction_pointer = KSTK_EIP(current);
diff --git a/arch/nds32/include/asm/syscall.h b/arch/nds32/include/asm/syscall.h
index f7e5e86765fe..671ebd357496 100644
--- a/arch/nds32/include/asm/syscall.h
+++ b/arch/nds32/include/asm/syscall.h
@@ -108,81 +108,41 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
108 * syscall_get_arguments - extract system call parameter values 108 * syscall_get_arguments - extract system call parameter values
109 * @task: task of interest, must be blocked 109 * @task: task of interest, must be blocked
110 * @regs: task_pt_regs() of @task 110 * @regs: task_pt_regs() of @task
111 * @i: argument index [0,5]
112 * @n: number of arguments; n+i must be [1,6].
113 * @args: array filled with argument values 111 * @args: array filled with argument values
114 * 112 *
115 * Fetches @n arguments to the system call starting with the @i'th argument 113 * Fetches 6 arguments to the system call (from 0 through 5). The first
116 * (from 0 through 5). Argument @i is stored in @args[0], and so on. 114 * argument is stored in @args[0], and so on.
117 * An arch inline version is probably optimal when @i and @n are constants.
118 * 115 *
119 * It's only valid to call this when @task is stopped for tracing on 116 * It's only valid to call this when @task is stopped for tracing on
120 * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. 117 * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
121 * It's invalid to call this with @i + @n > 6; we only support system calls
122 * taking up to 6 arguments.
123 */ 118 */
124#define SYSCALL_MAX_ARGS 6 119#define SYSCALL_MAX_ARGS 6
125void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, 120void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
126 unsigned int i, unsigned int n, unsigned long *args) 121 unsigned long *args)
127{ 122{
128 if (n == 0) 123 args[0] = regs->orig_r0;
129 return; 124 args++;
130 if (i + n > SYSCALL_MAX_ARGS) { 125 memcpy(args, &regs->uregs[0] + 1, 5 * sizeof(args[0]));
131 unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
132 unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
133 pr_warning("%s called with max args %d, handling only %d\n",
134 __func__, i + n, SYSCALL_MAX_ARGS);
135 memset(args_bad, 0, n_bad * sizeof(args[0]));
136 memset(args_bad, 0, n_bad * sizeof(args[0]));
137 }
138
139 if (i == 0) {
140 args[0] = regs->orig_r0;
141 args++;
142 i++;
143 n--;
144 }
145
146 memcpy(args, &regs->uregs[0] + i, n * sizeof(args[0]));
147} 126}
148 127
149/** 128/**
150 * syscall_set_arguments - change system call parameter value 129 * syscall_set_arguments - change system call parameter value
151 * @task: task of interest, must be in system call entry tracing 130 * @task: task of interest, must be in system call entry tracing
152 * @regs: task_pt_regs() of @task 131 * @regs: task_pt_regs() of @task
153 * @i: argument index [0,5]
154 * @n: number of arguments; n+i must be [1,6].
155 * @args: array of argument values to store 132 * @args: array of argument values to store
156 * 133 *
157 * Changes @n arguments to the system call starting with the @i'th argument. 134 * Changes 6 arguments to the system call. The first argument gets value
158 * Argument @i gets value @args[0], and so on. 135 * @args[0], and so on.
159 * An arch inline version is probably optimal when @i and @n are constants.
160 * 136 *
161 * It's only valid to call this when @task is stopped for tracing on 137 * It's only valid to call this when @task is stopped for tracing on
162 * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. 138 * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
163 * It's invalid to call this with @i + @n > 6; we only support system calls
164 * taking up to 6 arguments.
165 */ 139 */
166void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, 140void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
167 unsigned int i, unsigned int n,
168 const unsigned long *args) 141 const unsigned long *args)
169{ 142{
170 if (n == 0) 143 regs->orig_r0 = args[0];
171 return; 144 args++;
172
173 if (i + n > SYSCALL_MAX_ARGS) {
174 pr_warn("%s called with max args %d, handling only %d\n",
175 __func__, i + n, SYSCALL_MAX_ARGS);
176 n = SYSCALL_MAX_ARGS - i;
177 }
178
179 if (i == 0) {
180 regs->orig_r0 = args[0];
181 args++;
182 i++;
183 n--;
184 }
185 145
186 memcpy(&regs->uregs[0] + i, args, n * sizeof(args[0])); 146 memcpy(&regs->uregs[0] + 1, args, 5 * sizeof(args[0]));
187} 147}
188#endif /* _ASM_NDS32_SYSCALL_H */ 148#endif /* _ASM_NDS32_SYSCALL_H */
diff --git a/arch/nios2/include/asm/syscall.h b/arch/nios2/include/asm/syscall.h
index 9de220854c4a..d7624ed06efb 100644
--- a/arch/nios2/include/asm/syscall.h
+++ b/arch/nios2/include/asm/syscall.h
@@ -58,81 +58,25 @@ static inline void syscall_set_return_value(struct task_struct *task,
58} 58}
59 59
60static inline void syscall_get_arguments(struct task_struct *task, 60static inline void syscall_get_arguments(struct task_struct *task,
61 struct pt_regs *regs, unsigned int i, unsigned int n, 61 struct pt_regs *regs, unsigned long *args)
62 unsigned long *args)
63{ 62{
64 BUG_ON(i + n > 6); 63 *args++ = regs->r4;
65 64 *args++ = regs->r5;
66 switch (i) { 65 *args++ = regs->r6;
67 case 0: 66 *args++ = regs->r7;
68 if (!n--) 67 *args++ = regs->r8;
69 break; 68 *args = regs->r9;
70 *args++ = regs->r4;
71 case 1:
72 if (!n--)
73 break;
74 *args++ = regs->r5;
75 case 2:
76 if (!n--)
77 break;
78 *args++ = regs->r6;
79 case 3:
80 if (!n--)
81 break;
82 *args++ = regs->r7;
83 case 4:
84 if (!n--)
85 break;
86 *args++ = regs->r8;
87 case 5:
88 if (!n--)
89 break;
90 *args++ = regs->r9;
91 case 6:
92 if (!n--)
93 break;
94 default:
95 BUG();
96 }
97} 69}
98 70
99static inline void syscall_set_arguments(struct task_struct *task, 71static inline void syscall_set_arguments(struct task_struct *task,
100 struct pt_regs *regs, unsigned int i, unsigned int n, 72 struct pt_regs *regs, const unsigned long *args)
101 const unsigned long *args)
102{ 73{
103 BUG_ON(i + n > 6); 74 regs->r4 = *args++;
104 75 regs->r5 = *args++;
105 switch (i) { 76 regs->r6 = *args++;
106 case 0: 77 regs->r7 = *args++;
107 if (!n--) 78 regs->r8 = *args++;
108 break; 79 regs->r9 = *args;
109 regs->r4 = *args++;
110 case 1:
111 if (!n--)
112 break;
113 regs->r5 = *args++;
114 case 2:
115 if (!n--)
116 break;
117 regs->r6 = *args++;
118 case 3:
119 if (!n--)
120 break;
121 regs->r7 = *args++;
122 case 4:
123 if (!n--)
124 break;
125 regs->r8 = *args++;
126 case 5:
127 if (!n--)
128 break;
129 regs->r9 = *args++;
130 case 6:
131 if (!n)
132 break;
133 default:
134 BUG();
135 }
136} 80}
137 81
138#endif 82#endif
diff --git a/arch/openrisc/include/asm/syscall.h b/arch/openrisc/include/asm/syscall.h
index 2db9f1cf0694..b4ff07c1baed 100644
--- a/arch/openrisc/include/asm/syscall.h
+++ b/arch/openrisc/include/asm/syscall.h
@@ -56,20 +56,16 @@ syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
56 56
57static inline void 57static inline void
58syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, 58syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
59 unsigned int i, unsigned int n, unsigned long *args) 59 unsigned long *args)
60{ 60{
61 BUG_ON(i + n > 6); 61 memcpy(args, &regs->gpr[3], 6 * sizeof(args[0]));
62
63 memcpy(args, &regs->gpr[3 + i], n * sizeof(args[0]));
64} 62}
65 63
66static inline void 64static inline void
67syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, 65syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
68 unsigned int i, unsigned int n, const unsigned long *args) 66 const unsigned long *args)
69{ 67{
70 BUG_ON(i + n > 6); 68 memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
71
72 memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
73} 69}
74 70
75static inline int syscall_get_arch(void) 71static inline int syscall_get_arch(void)
diff --git a/arch/parisc/include/asm/syscall.h b/arch/parisc/include/asm/syscall.h
index 8bff1a58c97f..62a6d477fae0 100644
--- a/arch/parisc/include/asm/syscall.h
+++ b/arch/parisc/include/asm/syscall.h
@@ -18,29 +18,15 @@ static inline long syscall_get_nr(struct task_struct *tsk,
18} 18}
19 19
20static inline void syscall_get_arguments(struct task_struct *tsk, 20static inline void syscall_get_arguments(struct task_struct *tsk,
21 struct pt_regs *regs, unsigned int i, 21 struct pt_regs *regs,
22 unsigned int n, unsigned long *args) 22 unsigned long *args)
23{ 23{
24 BUG_ON(i); 24 args[5] = regs->gr[21];
25 25 args[4] = regs->gr[22];
26 switch (n) { 26 args[3] = regs->gr[23];
27 case 6: 27 args[2] = regs->gr[24];
28 args[5] = regs->gr[21]; 28 args[1] = regs->gr[25];
29 case 5: 29 args[0] = regs->gr[26];
30 args[4] = regs->gr[22];
31 case 4:
32 args[3] = regs->gr[23];
33 case 3:
34 args[2] = regs->gr[24];
35 case 2:
36 args[1] = regs->gr[25];
37 case 1:
38 args[0] = regs->gr[26];
39 case 0:
40 break;
41 default:
42 BUG();
43 }
44} 30}
45 31
46static inline long syscall_get_return_value(struct task_struct *task, 32static inline long syscall_get_return_value(struct task_struct *task,
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 1a0e7a8b1c81..1243045bad2d 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -65,22 +65,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
65 65
66static inline void syscall_get_arguments(struct task_struct *task, 66static inline void syscall_get_arguments(struct task_struct *task,
67 struct pt_regs *regs, 67 struct pt_regs *regs,
68 unsigned int i, unsigned int n,
69 unsigned long *args) 68 unsigned long *args)
70{ 69{
71 unsigned long val, mask = -1UL; 70 unsigned long val, mask = -1UL;
72 71 unsigned int n = 6;
73 BUG_ON(i + n > 6);
74 72
75#ifdef CONFIG_COMPAT 73#ifdef CONFIG_COMPAT
76 if (test_tsk_thread_flag(task, TIF_32BIT)) 74 if (test_tsk_thread_flag(task, TIF_32BIT))
77 mask = 0xffffffff; 75 mask = 0xffffffff;
78#endif 76#endif
79 while (n--) { 77 while (n--) {
80 if (n == 0 && i == 0) 78 if (n == 0)
81 val = regs->orig_gpr3; 79 val = regs->orig_gpr3;
82 else 80 else
83 val = regs->gpr[3 + i + n]; 81 val = regs->gpr[3 + n];
84 82
85 args[n] = val & mask; 83 args[n] = val & mask;
86 } 84 }
@@ -88,15 +86,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
88 86
89static inline void syscall_set_arguments(struct task_struct *task, 87static inline void syscall_set_arguments(struct task_struct *task,
90 struct pt_regs *regs, 88 struct pt_regs *regs,
91 unsigned int i, unsigned int n,
92 const unsigned long *args) 89 const unsigned long *args)
93{ 90{
94 BUG_ON(i + n > 6); 91 memcpy(&regs->gpr[3], args, 6 * sizeof(args[0]));
95 memcpy(&regs->gpr[3 + i], args, n * sizeof(args[0]));
96 92
97 /* Also copy the first argument into orig_gpr3 */ 93 /* Also copy the first argument into orig_gpr3 */
98 if (i == 0 && n > 0) 94 regs->orig_gpr3 = args[0];
99 regs->orig_gpr3 = args[0];
100} 95}
101 96
102static inline int syscall_get_arch(void) 97static inline int syscall_get_arch(void)
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
index 57afe604b495..c207f6634b91 100644
--- a/arch/riscv/include/asm/fixmap.h
+++ b/arch/riscv/include/asm/fixmap.h
@@ -26,7 +26,7 @@ enum fixed_addresses {
26}; 26};
27 27
28#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE) 28#define FIXADDR_SIZE (__end_of_fixed_addresses * PAGE_SIZE)
29#define FIXADDR_TOP (PAGE_OFFSET) 29#define FIXADDR_TOP (VMALLOC_START)
30#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) 30#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
31 31
32#define FIXMAP_PAGE_IO PAGE_KERNEL 32#define FIXMAP_PAGE_IO PAGE_KERNEL
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index bba3da6ef157..a3d5273ded7c 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -72,32 +72,20 @@ static inline void syscall_set_return_value(struct task_struct *task,
72 72
73static inline void syscall_get_arguments(struct task_struct *task, 73static inline void syscall_get_arguments(struct task_struct *task,
74 struct pt_regs *regs, 74 struct pt_regs *regs,
75 unsigned int i, unsigned int n,
76 unsigned long *args) 75 unsigned long *args)
77{ 76{
78 BUG_ON(i + n > 6); 77 args[0] = regs->orig_a0;
79 if (i == 0) { 78 args++;
80 args[0] = regs->orig_a0; 79 memcpy(args, &regs->a1, 5 * sizeof(args[0]));
81 args++;
82 i++;
83 n--;
84 }
85 memcpy(args, &regs->a1 + i * sizeof(regs->a1), n * sizeof(args[0]));
86} 80}
87 81
88static inline void syscall_set_arguments(struct task_struct *task, 82static inline void syscall_set_arguments(struct task_struct *task,
89 struct pt_regs *regs, 83 struct pt_regs *regs,
90 unsigned int i, unsigned int n,
91 const unsigned long *args) 84 const unsigned long *args)
92{ 85{
93 BUG_ON(i + n > 6); 86 regs->orig_a0 = args[0];
94 if (i == 0) { 87 args++;
95 regs->orig_a0 = args[0]; 88 memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
96 args++;
97 i++;
98 n--;
99 }
100 memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
101} 89}
102 90
103static inline int syscall_get_arch(void) 91static inline int syscall_get_arch(void)
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
index a00168b980d2..fb53a8089e76 100644
--- a/arch/riscv/include/asm/uaccess.h
+++ b/arch/riscv/include/asm/uaccess.h
@@ -300,7 +300,7 @@ do { \
300 " .balign 4\n" \ 300 " .balign 4\n" \
301 "4:\n" \ 301 "4:\n" \
302 " li %0, %6\n" \ 302 " li %0, %6\n" \
303 " jump 2b, %1\n" \ 303 " jump 3b, %1\n" \
304 " .previous\n" \ 304 " .previous\n" \
305 " .section __ex_table,\"a\"\n" \ 305 " .section __ex_table,\"a\"\n" \
306 " .balign " RISCV_SZPTR "\n" \ 306 " .balign " RISCV_SZPTR "\n" \
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index f13f7f276639..598568168d35 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -4,7 +4,6 @@
4 4
5ifdef CONFIG_FTRACE 5ifdef CONFIG_FTRACE
6CFLAGS_REMOVE_ftrace.o = -pg 6CFLAGS_REMOVE_ftrace.o = -pg
7CFLAGS_REMOVE_setup.o = -pg
8endif 7endif
9 8
10extra-y += head.o 9extra-y += head.o
@@ -29,8 +28,6 @@ obj-y += vdso.o
29obj-y += cacheinfo.o 28obj-y += cacheinfo.o
30obj-y += vdso/ 29obj-y += vdso/
31 30
32CFLAGS_setup.o := -mcmodel=medany
33
34obj-$(CONFIG_FPU) += fpu.o 31obj-$(CONFIG_FPU) += fpu.o
35obj-$(CONFIG_SMP) += smpboot.o 32obj-$(CONFIG_SMP) += smpboot.o
36obj-$(CONFIG_SMP) += smp.o 33obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 7dd308129b40..2872edce894d 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -141,7 +141,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
141{ 141{
142 s32 hi20; 142 s32 hi20;
143 143
144 if (IS_ENABLED(CMODEL_MEDLOW)) { 144 if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
145 pr_err( 145 pr_err(
146 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", 146 "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
147 me->name, (long long)v, location); 147 me->name, (long long)v, location);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index ecb654f6a79e..540a331d1376 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -48,14 +48,6 @@ struct screen_info screen_info = {
48}; 48};
49#endif 49#endif
50 50
51unsigned long va_pa_offset;
52EXPORT_SYMBOL(va_pa_offset);
53unsigned long pfn_base;
54EXPORT_SYMBOL(pfn_base);
55
56unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
57EXPORT_SYMBOL(empty_zero_page);
58
59/* The lucky hart to first increment this variable will boot the other cores */ 51/* The lucky hart to first increment this variable will boot the other cores */
60atomic_t hart_lottery; 52atomic_t hart_lottery;
61unsigned long boot_cpu_hartid; 53unsigned long boot_cpu_hartid;
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index eb22ab49b3e0..b68aac701803 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -1,3 +1,9 @@
1
2CFLAGS_init.o := -mcmodel=medany
3ifdef CONFIG_FTRACE
4CFLAGS_REMOVE_init.o = -pg
5endif
6
1obj-y += init.o 7obj-y += init.o
2obj-y += fault.o 8obj-y += fault.o
3obj-y += extable.o 9obj-y += extable.o
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index b379a75ac6a6..5fd8c922e1c2 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -25,6 +25,10 @@
25#include <asm/pgtable.h> 25#include <asm/pgtable.h>
26#include <asm/io.h> 26#include <asm/io.h>
27 27
28unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
29 __page_aligned_bss;
30EXPORT_SYMBOL(empty_zero_page);
31
28static void __init zone_sizes_init(void) 32static void __init zone_sizes_init(void)
29{ 33{
30 unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, }; 34 unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
@@ -143,6 +147,11 @@ void __init setup_bootmem(void)
143 } 147 }
144} 148}
145 149
150unsigned long va_pa_offset;
151EXPORT_SYMBOL(va_pa_offset);
152unsigned long pfn_base;
153EXPORT_SYMBOL(pfn_base);
154
146pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss; 155pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
147pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE); 156pgd_t trampoline_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
148 157
@@ -172,6 +181,25 @@ void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
172 } 181 }
173} 182}
174 183
184/*
185 * setup_vm() is called from head.S with MMU-off.
186 *
187 * Following requirements should be honoured for setup_vm() to work
188 * correctly:
189 * 1) It should use PC-relative addressing for accessing kernel symbols.
190 * To achieve this we always use GCC cmodel=medany.
191 * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
192 * so disable compiler instrumentation when FTRACE is enabled.
193 *
194 * Currently, the above requirements are honoured by using custom CFLAGS
195 * for init.o in mm/Makefile.
196 */
197
198#ifndef __riscv_cmodel_medany
199#error "setup_vm() is called from head.S before relocate so it should "
200 "not use absolute addressing."
201#endif
202
175asmlinkage void __init setup_vm(void) 203asmlinkage void __init setup_vm(void)
176{ 204{
177 extern char _start; 205 extern char _start;
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 96f9a9151fde..59c3e91f2cdb 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -56,40 +56,32 @@ static inline void syscall_set_return_value(struct task_struct *task,
56 56
57static inline void syscall_get_arguments(struct task_struct *task, 57static inline void syscall_get_arguments(struct task_struct *task,
58 struct pt_regs *regs, 58 struct pt_regs *regs,
59 unsigned int i, unsigned int n,
60 unsigned long *args) 59 unsigned long *args)
61{ 60{
62 unsigned long mask = -1UL; 61 unsigned long mask = -1UL;
62 unsigned int n = 6;
63 63
64 /*
65 * No arguments for this syscall, there's nothing to do.
66 */
67 if (!n)
68 return;
69
70 BUG_ON(i + n > 6);
71#ifdef CONFIG_COMPAT 64#ifdef CONFIG_COMPAT
72 if (test_tsk_thread_flag(task, TIF_31BIT)) 65 if (test_tsk_thread_flag(task, TIF_31BIT))
73 mask = 0xffffffff; 66 mask = 0xffffffff;
74#endif 67#endif
75 while (n-- > 0) 68 while (n-- > 0)
76 if (i + n > 0) 69 if (n > 0)
77 args[n] = regs->gprs[2 + i + n] & mask; 70 args[n] = regs->gprs[2 + n] & mask;
78 if (i == 0) 71
79 args[0] = regs->orig_gpr2 & mask; 72 args[0] = regs->orig_gpr2 & mask;
80} 73}
81 74
82static inline void syscall_set_arguments(struct task_struct *task, 75static inline void syscall_set_arguments(struct task_struct *task,
83 struct pt_regs *regs, 76 struct pt_regs *regs,
84 unsigned int i, unsigned int n,
85 const unsigned long *args) 77 const unsigned long *args)
86{ 78{
87 BUG_ON(i + n > 6); 79 unsigned int n = 6;
80
88 while (n-- > 0) 81 while (n-- > 0)
89 if (i + n > 0) 82 if (n > 0)
90 regs->gprs[2 + i + n] = args[n]; 83 regs->gprs[2 + n] = args[n];
91 if (i == 0) 84 regs->orig_gpr2 = args[0];
92 regs->orig_gpr2 = args[0];
93} 85}
94 86
95static inline int syscall_get_arch(void) 87static inline int syscall_get_arch(void)
diff --git a/arch/sh/include/asm/syscall_32.h b/arch/sh/include/asm/syscall_32.h
index 6e118799831c..8c9d7e5e5dcc 100644
--- a/arch/sh/include/asm/syscall_32.h
+++ b/arch/sh/include/asm/syscall_32.h
@@ -48,51 +48,28 @@ static inline void syscall_set_return_value(struct task_struct *task,
48 48
49static inline void syscall_get_arguments(struct task_struct *task, 49static inline void syscall_get_arguments(struct task_struct *task,
50 struct pt_regs *regs, 50 struct pt_regs *regs,
51 unsigned int i, unsigned int n,
52 unsigned long *args) 51 unsigned long *args)
53{ 52{
54 /*
55 * Do this simply for now. If we need to start supporting
56 * fetching arguments from arbitrary indices, this will need some
57 * extra logic. Presently there are no in-tree users that depend
58 * on this behaviour.
59 */
60 BUG_ON(i);
61 53
62 /* Argument pattern is: R4, R5, R6, R7, R0, R1 */ 54 /* Argument pattern is: R4, R5, R6, R7, R0, R1 */
63 switch (n) { 55 args[5] = regs->regs[1];
64 case 6: args[5] = regs->regs[1]; 56 args[4] = regs->regs[0];
65 case 5: args[4] = regs->regs[0]; 57 args[3] = regs->regs[7];
66 case 4: args[3] = regs->regs[7]; 58 args[2] = regs->regs[6];
67 case 3: args[2] = regs->regs[6]; 59 args[1] = regs->regs[5];
68 case 2: args[1] = regs->regs[5]; 60 args[0] = regs->regs[4];
69 case 1: args[0] = regs->regs[4];
70 case 0:
71 break;
72 default:
73 BUG();
74 }
75} 61}
76 62
77static inline void syscall_set_arguments(struct task_struct *task, 63static inline void syscall_set_arguments(struct task_struct *task,
78 struct pt_regs *regs, 64 struct pt_regs *regs,
79 unsigned int i, unsigned int n,
80 const unsigned long *args) 65 const unsigned long *args)
81{ 66{
82 /* Same note as above applies */ 67 regs->regs[1] = args[5];
83 BUG_ON(i); 68 regs->regs[0] = args[4];
84 69 regs->regs[7] = args[3];
85 switch (n) { 70 regs->regs[6] = args[2];
86 case 6: regs->regs[1] = args[5]; 71 regs->regs[5] = args[1];
87 case 5: regs->regs[0] = args[4]; 72 regs->regs[4] = args[0];
88 case 4: regs->regs[7] = args[3];
89 case 3: regs->regs[6] = args[2];
90 case 2: regs->regs[5] = args[1];
91 case 1: regs->regs[4] = args[0];
92 break;
93 default:
94 BUG();
95 }
96} 73}
97 74
98static inline int syscall_get_arch(void) 75static inline int syscall_get_arch(void)
diff --git a/arch/sh/include/asm/syscall_64.h b/arch/sh/include/asm/syscall_64.h
index 43882580c7f9..22fad97da066 100644
--- a/arch/sh/include/asm/syscall_64.h
+++ b/arch/sh/include/asm/syscall_64.h
@@ -47,20 +47,16 @@ static inline void syscall_set_return_value(struct task_struct *task,
47 47
48static inline void syscall_get_arguments(struct task_struct *task, 48static inline void syscall_get_arguments(struct task_struct *task,
49 struct pt_regs *regs, 49 struct pt_regs *regs,
50 unsigned int i, unsigned int n,
51 unsigned long *args) 50 unsigned long *args)
52{ 51{
53 BUG_ON(i + n > 6); 52 memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
54 memcpy(args, &regs->regs[2 + i], n * sizeof(args[0]));
55} 53}
56 54
57static inline void syscall_set_arguments(struct task_struct *task, 55static inline void syscall_set_arguments(struct task_struct *task,
58 struct pt_regs *regs, 56 struct pt_regs *regs,
59 unsigned int i, unsigned int n,
60 const unsigned long *args) 57 const unsigned long *args)
61{ 58{
62 BUG_ON(i + n > 6); 59 memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
63 memcpy(&regs->regs[2 + i], args, n * sizeof(args[0]));
64} 60}
65 61
66static inline int syscall_get_arch(void) 62static inline int syscall_get_arch(void)
diff --git a/arch/sparc/include/asm/syscall.h b/arch/sparc/include/asm/syscall.h
index 053989e3f6a6..4d075434e816 100644
--- a/arch/sparc/include/asm/syscall.h
+++ b/arch/sparc/include/asm/syscall.h
@@ -96,11 +96,11 @@ static inline void syscall_set_return_value(struct task_struct *task,
96 96
97static inline void syscall_get_arguments(struct task_struct *task, 97static inline void syscall_get_arguments(struct task_struct *task,
98 struct pt_regs *regs, 98 struct pt_regs *regs,
99 unsigned int i, unsigned int n,
100 unsigned long *args) 99 unsigned long *args)
101{ 100{
102 int zero_extend = 0; 101 int zero_extend = 0;
103 unsigned int j; 102 unsigned int j;
103 unsigned int n = 6;
104 104
105#ifdef CONFIG_SPARC64 105#ifdef CONFIG_SPARC64
106 if (test_tsk_thread_flag(task, TIF_32BIT)) 106 if (test_tsk_thread_flag(task, TIF_32BIT))
@@ -108,7 +108,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
108#endif 108#endif
109 109
110 for (j = 0; j < n; j++) { 110 for (j = 0; j < n; j++) {
111 unsigned long val = regs->u_regs[UREG_I0 + i + j]; 111 unsigned long val = regs->u_regs[UREG_I0 + j];
112 112
113 if (zero_extend) 113 if (zero_extend)
114 args[j] = (u32) val; 114 args[j] = (u32) val;
@@ -119,13 +119,12 @@ static inline void syscall_get_arguments(struct task_struct *task,
119 119
120static inline void syscall_set_arguments(struct task_struct *task, 120static inline void syscall_set_arguments(struct task_struct *task,
121 struct pt_regs *regs, 121 struct pt_regs *regs,
122 unsigned int i, unsigned int n,
123 const unsigned long *args) 122 const unsigned long *args)
124{ 123{
125 unsigned int j; 124 unsigned int i;
126 125
127 for (j = 0; j < n; j++) 126 for (i = 0; i < 6; i++)
128 regs->u_regs[UREG_I0 + i + j] = args[j]; 127 regs->u_regs[UREG_I0 + i] = args[i];
129} 128}
130 129
131static inline int syscall_get_arch(void) 130static inline int syscall_get_arch(void)
diff --git a/arch/um/include/asm/syscall-generic.h b/arch/um/include/asm/syscall-generic.h
index 9fb9cf8cd39a..98e50c50c12e 100644
--- a/arch/um/include/asm/syscall-generic.h
+++ b/arch/um/include/asm/syscall-generic.h
@@ -53,84 +53,30 @@ static inline void syscall_set_return_value(struct task_struct *task,
53 53
54static inline void syscall_get_arguments(struct task_struct *task, 54static inline void syscall_get_arguments(struct task_struct *task,
55 struct pt_regs *regs, 55 struct pt_regs *regs,
56 unsigned int i, unsigned int n,
57 unsigned long *args) 56 unsigned long *args)
58{ 57{
59 const struct uml_pt_regs *r = &regs->regs; 58 const struct uml_pt_regs *r = &regs->regs;
60 59
61 switch (i) { 60 *args++ = UPT_SYSCALL_ARG1(r);
62 case 0: 61 *args++ = UPT_SYSCALL_ARG2(r);
63 if (!n--) 62 *args++ = UPT_SYSCALL_ARG3(r);
64 break; 63 *args++ = UPT_SYSCALL_ARG4(r);
65 *args++ = UPT_SYSCALL_ARG1(r); 64 *args++ = UPT_SYSCALL_ARG5(r);
66 case 1: 65 *args = UPT_SYSCALL_ARG6(r);
67 if (!n--)
68 break;
69 *args++ = UPT_SYSCALL_ARG2(r);
70 case 2:
71 if (!n--)
72 break;
73 *args++ = UPT_SYSCALL_ARG3(r);
74 case 3:
75 if (!n--)
76 break;
77 *args++ = UPT_SYSCALL_ARG4(r);
78 case 4:
79 if (!n--)
80 break;
81 *args++ = UPT_SYSCALL_ARG5(r);
82 case 5:
83 if (!n--)
84 break;
85 *args++ = UPT_SYSCALL_ARG6(r);
86 case 6:
87 if (!n--)
88 break;
89 default:
90 BUG();
91 break;
92 }
93} 66}
94 67
95static inline void syscall_set_arguments(struct task_struct *task, 68static inline void syscall_set_arguments(struct task_struct *task,
96 struct pt_regs *regs, 69 struct pt_regs *regs,
97 unsigned int i, unsigned int n,
98 const unsigned long *args) 70 const unsigned long *args)
99{ 71{
100 struct uml_pt_regs *r = &regs->regs; 72 struct uml_pt_regs *r = &regs->regs;
101 73
102 switch (i) { 74 UPT_SYSCALL_ARG1(r) = *args++;
103 case 0: 75 UPT_SYSCALL_ARG2(r) = *args++;
104 if (!n--) 76 UPT_SYSCALL_ARG3(r) = *args++;
105 break; 77 UPT_SYSCALL_ARG4(r) = *args++;
106 UPT_SYSCALL_ARG1(r) = *args++; 78 UPT_SYSCALL_ARG5(r) = *args++;
107 case 1: 79 UPT_SYSCALL_ARG6(r) = *args;
108 if (!n--)
109 break;
110 UPT_SYSCALL_ARG2(r) = *args++;
111 case 2:
112 if (!n--)
113 break;
114 UPT_SYSCALL_ARG3(r) = *args++;
115 case 3:
116 if (!n--)
117 break;
118 UPT_SYSCALL_ARG4(r) = *args++;
119 case 4:
120 if (!n--)
121 break;
122 UPT_SYSCALL_ARG5(r) = *args++;
123 case 5:
124 if (!n--)
125 break;
126 UPT_SYSCALL_ARG6(r) = *args++;
127 case 6:
128 if (!n--)
129 break;
130 default:
131 BUG();
132 break;
133 }
134} 80}
135 81
136/* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */ 82/* See arch/x86/um/asm/syscall.h for syscall_get_arch() definition. */
diff --git a/arch/x86/include/asm/syscall.h b/arch/x86/include/asm/syscall.h
index d653139857af..4c305471ec33 100644
--- a/arch/x86/include/asm/syscall.h
+++ b/arch/x86/include/asm/syscall.h
@@ -91,11 +91,9 @@ static inline void syscall_set_return_value(struct task_struct *task,
91 91
92static inline void syscall_get_arguments(struct task_struct *task, 92static inline void syscall_get_arguments(struct task_struct *task,
93 struct pt_regs *regs, 93 struct pt_regs *regs,
94 unsigned int i, unsigned int n,
95 unsigned long *args) 94 unsigned long *args)
96{ 95{
97 BUG_ON(i + n > 6); 96 memcpy(args, &regs->bx, 6 * sizeof(args[0]));
98 memcpy(args, &regs->bx + i, n * sizeof(args[0]));
99} 97}
100 98
101static inline void syscall_set_arguments(struct task_struct *task, 99static inline void syscall_set_arguments(struct task_struct *task,
@@ -116,124 +114,50 @@ static inline int syscall_get_arch(void)
116 114
117static inline void syscall_get_arguments(struct task_struct *task, 115static inline void syscall_get_arguments(struct task_struct *task,
118 struct pt_regs *regs, 116 struct pt_regs *regs,
119 unsigned int i, unsigned int n,
120 unsigned long *args) 117 unsigned long *args)
121{ 118{
122# ifdef CONFIG_IA32_EMULATION 119# ifdef CONFIG_IA32_EMULATION
123 if (task->thread_info.status & TS_COMPAT) 120 if (task->thread_info.status & TS_COMPAT) {
124 switch (i) { 121 *args++ = regs->bx;
125 case 0: 122 *args++ = regs->cx;
126 if (!n--) break; 123 *args++ = regs->dx;
127 *args++ = regs->bx; 124 *args++ = regs->si;
128 case 1: 125 *args++ = regs->di;
129 if (!n--) break; 126 *args = regs->bp;
130 *args++ = regs->cx; 127 } else
131 case 2:
132 if (!n--) break;
133 *args++ = regs->dx;
134 case 3:
135 if (!n--) break;
136 *args++ = regs->si;
137 case 4:
138 if (!n--) break;
139 *args++ = regs->di;
140 case 5:
141 if (!n--) break;
142 *args++ = regs->bp;
143 case 6:
144 if (!n--) break;
145 default:
146 BUG();
147 break;
148 }
149 else
150# endif 128# endif
151 switch (i) { 129 {
152 case 0: 130 *args++ = regs->di;
153 if (!n--) break; 131 *args++ = regs->si;
154 *args++ = regs->di; 132 *args++ = regs->dx;
155 case 1: 133 *args++ = regs->r10;
156 if (!n--) break; 134 *args++ = regs->r8;
157 *args++ = regs->si; 135 *args = regs->r9;
158 case 2: 136 }
159 if (!n--) break;
160 *args++ = regs->dx;
161 case 3:
162 if (!n--) break;
163 *args++ = regs->r10;
164 case 4:
165 if (!n--) break;
166 *args++ = regs->r8;
167 case 5:
168 if (!n--) break;
169 *args++ = regs->r9;
170 case 6:
171 if (!n--) break;
172 default:
173 BUG();
174 break;
175 }
176} 137}
177 138
178static inline void syscall_set_arguments(struct task_struct *task, 139static inline void syscall_set_arguments(struct task_struct *task,
179 struct pt_regs *regs, 140 struct pt_regs *regs,
180 unsigned int i, unsigned int n,
181 const unsigned long *args) 141 const unsigned long *args)
182{ 142{
183# ifdef CONFIG_IA32_EMULATION 143# ifdef CONFIG_IA32_EMULATION
184 if (task->thread_info.status & TS_COMPAT) 144 if (task->thread_info.status & TS_COMPAT) {
185 switch (i) { 145 regs->bx = *args++;
186 case 0: 146 regs->cx = *args++;
187 if (!n--) break; 147 regs->dx = *args++;
188 regs->bx = *args++; 148 regs->si = *args++;
189 case 1: 149 regs->di = *args++;
190 if (!n--) break; 150 regs->bp = *args;
191 regs->cx = *args++; 151 } else
192 case 2:
193 if (!n--) break;
194 regs->dx = *args++;
195 case 3:
196 if (!n--) break;
197 regs->si = *args++;
198 case 4:
199 if (!n--) break;
200 regs->di = *args++;
201 case 5:
202 if (!n--) break;
203 regs->bp = *args++;
204 case 6:
205 if (!n--) break;
206 default:
207 BUG();
208 break;
209 }
210 else
211# endif 152# endif
212 switch (i) { 153 {
213 case 0: 154 regs->di = *args++;
214 if (!n--) break; 155 regs->si = *args++;
215 regs->di = *args++; 156 regs->dx = *args++;
216 case 1: 157 regs->r10 = *args++;
217 if (!n--) break; 158 regs->r8 = *args++;
218 regs->si = *args++; 159 regs->r9 = *args;
219 case 2: 160 }
220 if (!n--) break;
221 regs->dx = *args++;
222 case 3:
223 if (!n--) break;
224 regs->r10 = *args++;
225 case 4:
226 if (!n--) break;
227 regs->r8 = *args++;
228 case 5:
229 if (!n--) break;
230 regs->r9 = *args++;
231 case 6:
232 if (!n--) break;
233 default:
234 BUG();
235 break;
236 }
237} 161}
238 162
239static inline int syscall_get_arch(void) 163static inline int syscall_get_arch(void)
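Together with the s390, sh, sparc and um hunks earlier, the x86 changes above drop the i/n parameters from syscall_get_arguments() and syscall_set_arguments(): callers now always pass a six-entry array and the helpers copy all six argument registers unconditionally. A minimal sketch of a caller under the new prototype; the surrounding function is hypothetical, only the three-argument syscall_get_arguments() signature comes from the hunks above:

    /* Hypothetical tracer helper; only the 3-argument syscall_get_arguments()
     * prototype is taken from the hunks above. */
    static void dump_syscall_args(struct task_struct *task, struct pt_regs *regs)
    {
            unsigned long args[6];  /* always room for all six arguments */
            unsigned int i;

            syscall_get_arguments(task, regs, args);  /* no index/count parameters */
            for (i = 0; i < 6; i++)
                    pr_debug("arg%u = %#lx\n", i, args[i]);
    }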
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 426039285fd1..e0a791c3d4fc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6422,11 +6422,11 @@ e_free:
6422 return ret; 6422 return ret;
6423} 6423}
6424 6424
6425static int get_num_contig_pages(int idx, struct page **inpages, 6425static unsigned long get_num_contig_pages(unsigned long idx,
6426 unsigned long npages) 6426 struct page **inpages, unsigned long npages)
6427{ 6427{
6428 unsigned long paddr, next_paddr; 6428 unsigned long paddr, next_paddr;
6429 int i = idx + 1, pages = 1; 6429 unsigned long i = idx + 1, pages = 1;
6430 6430
6431 /* find the number of contiguous pages starting from idx */ 6431 /* find the number of contiguous pages starting from idx */
6432 paddr = __sme_page_pa(inpages[idx]); 6432 paddr = __sme_page_pa(inpages[idx]);
@@ -6445,12 +6445,12 @@ static int get_num_contig_pages(int idx, struct page **inpages,
6445 6445
6446static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) 6446static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
6447{ 6447{
6448 unsigned long vaddr, vaddr_end, next_vaddr, npages, size; 6448 unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
6449 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; 6449 struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
6450 struct kvm_sev_launch_update_data params; 6450 struct kvm_sev_launch_update_data params;
6451 struct sev_data_launch_update_data *data; 6451 struct sev_data_launch_update_data *data;
6452 struct page **inpages; 6452 struct page **inpages;
6453 int i, ret, pages; 6453 int ret;
6454 6454
6455 if (!sev_guest(kvm)) 6455 if (!sev_guest(kvm))
6456 return -ENOTTY; 6456 return -ENOTTY;
@@ -6799,7 +6799,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6799 struct page **src_p, **dst_p; 6799 struct page **src_p, **dst_p;
6800 struct kvm_sev_dbg debug; 6800 struct kvm_sev_dbg debug;
6801 unsigned long n; 6801 unsigned long n;
6802 int ret, size; 6802 unsigned int size;
6803 int ret;
6803 6804
6804 if (!sev_guest(kvm)) 6805 if (!sev_guest(kvm))
6805 return -ENOTTY; 6806 return -ENOTTY;
@@ -6807,6 +6808,11 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6807 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug))) 6808 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
6808 return -EFAULT; 6809 return -EFAULT;
6809 6810
6811 if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
6812 return -EINVAL;
6813 if (!debug.dst_uaddr)
6814 return -EINVAL;
6815
6810 vaddr = debug.src_uaddr; 6816 vaddr = debug.src_uaddr;
6811 size = debug.len; 6817 size = debug.len;
6812 vaddr_end = vaddr + size; 6818 vaddr_end = vaddr + size;
@@ -6857,8 +6863,8 @@ static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6857 dst_vaddr, 6863 dst_vaddr,
6858 len, &argp->error); 6864 len, &argp->error);
6859 6865
6860 sev_unpin_memory(kvm, src_p, 1); 6866 sev_unpin_memory(kvm, src_p, n);
6861 sev_unpin_memory(kvm, dst_p, 1); 6867 sev_unpin_memory(kvm, dst_p, n);
6862 6868
6863 if (ret) 6869 if (ret)
6864 goto err; 6870 goto err;
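The check added to sev_dbg_crypt() above rejects a zero length and a source range that wraps around the end of the address space. A short worked example of the wrap case, assuming 64-bit unsigned long arithmetic:

    debug.src_uaddr = 0xfffffffffffff000
    debug.len       = 0x2000
    src_uaddr + len = 0x0000000000001000   (wraps past ULONG_MAX)

Because 0x1000 is smaller than src_uaddr, the new src_uaddr + len < src_uaddr test fires and the ioctl returns -EINVAL instead of iterating over a bogus range; the unpin calls at the bottom of the hunk are likewise fixed to release the real number of pinned pages (n) rather than a hard-coded 1.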
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 153e539c29c9..7ec9bb1dd723 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -500,6 +500,17 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
500 } 500 }
501} 501}
502 502
503static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
504 int msr;
505
506 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
507 unsigned word = msr / BITS_PER_LONG;
508
509 msr_bitmap[word] = ~0;
510 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
511 }
512}
513
503/* 514/*
504 * Merge L0's and L1's MSR bitmap, return false to indicate that 515 * Merge L0's and L1's MSR bitmap, return false to indicate that
505 * we do not use the hardware. 516 * we do not use the hardware.
@@ -541,39 +552,44 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
541 return false; 552 return false;
542 553
543 msr_bitmap_l1 = (unsigned long *)kmap(page); 554 msr_bitmap_l1 = (unsigned long *)kmap(page);
544 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
545 /*
546 * L0 need not intercept reads for MSRs between 0x800 and 0x8ff, it
547 * just lets the processor take the value from the virtual-APIC page;
548 * take those 256 bits directly from the L1 bitmap.
549 */
550 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
551 unsigned word = msr / BITS_PER_LONG;
552 msr_bitmap_l0[word] = msr_bitmap_l1[word];
553 msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
554 }
555 } else {
556 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
557 unsigned word = msr / BITS_PER_LONG;
558 msr_bitmap_l0[word] = ~0;
559 msr_bitmap_l0[word + (0x800 / sizeof(long))] = ~0;
560 }
561 }
562 555
563 nested_vmx_disable_intercept_for_msr( 556 /*
564 msr_bitmap_l1, msr_bitmap_l0, 557 * To keep the control flow simple, pay eight 8-byte writes (sixteen
565 X2APIC_MSR(APIC_TASKPRI), 558 * 4-byte writes on 32-bit systems) up front to enable intercepts for
566 MSR_TYPE_W); 559 * the x2APIC MSR range and selectively disable them below.
560 */
561 enable_x2apic_msr_intercepts(msr_bitmap_l0);
562
563 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
564 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
565 /*
566 * L0 need not intercept reads for MSRs between 0x800
567 * and 0x8ff, it just lets the processor take the value
568 * from the virtual-APIC page; take those 256 bits
569 * directly from the L1 bitmap.
570 */
571 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
572 unsigned word = msr / BITS_PER_LONG;
573
574 msr_bitmap_l0[word] = msr_bitmap_l1[word];
575 }
576 }
567 577
568 if (nested_cpu_has_vid(vmcs12)) {
569 nested_vmx_disable_intercept_for_msr(
570 msr_bitmap_l1, msr_bitmap_l0,
571 X2APIC_MSR(APIC_EOI),
572 MSR_TYPE_W);
573 nested_vmx_disable_intercept_for_msr( 578 nested_vmx_disable_intercept_for_msr(
574 msr_bitmap_l1, msr_bitmap_l0, 579 msr_bitmap_l1, msr_bitmap_l0,
575 X2APIC_MSR(APIC_SELF_IPI), 580 X2APIC_MSR(APIC_TASKPRI),
576 MSR_TYPE_W); 581 MSR_TYPE_R | MSR_TYPE_W);
582
583 if (nested_cpu_has_vid(vmcs12)) {
584 nested_vmx_disable_intercept_for_msr(
585 msr_bitmap_l1, msr_bitmap_l0,
586 X2APIC_MSR(APIC_EOI),
587 MSR_TYPE_W);
588 nested_vmx_disable_intercept_for_msr(
589 msr_bitmap_l1, msr_bitmap_l0,
590 X2APIC_MSR(APIC_SELF_IPI),
591 MSR_TYPE_W);
592 }
577 } 593 }
578 594
579 if (spec_ctrl) 595 if (spec_ctrl)
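The new enable_x2apic_msr_intercepts() helper above sets both the read and the write intercept word for every MSR in the x2APIC range 0x800 to 0x8ff before the code selectively clears individual intercepts again. A worked example of the index arithmetic, assuming a 64-bit host (BITS_PER_LONG == 64) and the usual VMX bitmap layout in which the write bitmap for low MSRs starts 0x800 bytes into the page:

    msr = 0x800:  word = 0x800 / 64 = 32
                  msr_bitmap[32]             read-intercept bits for MSRs 0x800-0x83f
                  msr_bitmap[32 + 0x800/8]   = msr_bitmap[288], the matching
                                               write-intercept word

The loop then steps msr through 0x840, 0x880 and 0x8c0, so the whole range is covered with four read words plus four write words, which is the "eight 8-byte writes" mentioned in the new comment.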
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index a168bf81c7f4..91dc06d58060 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -59,45 +59,24 @@ static inline void syscall_set_return_value(struct task_struct *task,
59 59
60static inline void syscall_get_arguments(struct task_struct *task, 60static inline void syscall_get_arguments(struct task_struct *task,
61 struct pt_regs *regs, 61 struct pt_regs *regs,
62 unsigned int i, unsigned int n,
63 unsigned long *args) 62 unsigned long *args)
64{ 63{
65 static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; 64 static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
66 unsigned int j; 65 unsigned int i;
67 66
68 if (n == 0) 67 for (i = 0; i < 6; ++i)
69 return; 68 args[i] = regs->areg[reg[i]];
70
71 WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS);
72
73 for (j = 0; j < n; ++j) {
74 if (i + j < SYSCALL_MAX_ARGS)
75 args[j] = regs->areg[reg[i + j]];
76 else
77 args[j] = 0;
78 }
79} 69}
80 70
81static inline void syscall_set_arguments(struct task_struct *task, 71static inline void syscall_set_arguments(struct task_struct *task,
82 struct pt_regs *regs, 72 struct pt_regs *regs,
83 unsigned int i, unsigned int n,
84 const unsigned long *args) 73 const unsigned long *args)
85{ 74{
86 static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS; 75 static const unsigned int reg[] = XTENSA_SYSCALL_ARGUMENT_REGS;
87 unsigned int j; 76 unsigned int i;
88
89 if (n == 0)
90 return;
91
92 if (WARN_ON_ONCE(i + n > SYSCALL_MAX_ARGS)) {
93 if (i < SYSCALL_MAX_ARGS)
94 n = SYSCALL_MAX_ARGS - i;
95 else
96 return;
97 }
98 77
99 for (j = 0; j < n; ++j) 78 for (i = 0; i < 6; ++i)
100 regs->areg[reg[i + j]] = args[j]; 79 regs->areg[reg[i]] = args[i];
101} 80}
102 81
103asmlinkage long xtensa_rt_sigreturn(struct pt_regs*); 82asmlinkage long xtensa_rt_sigreturn(struct pt_regs*);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 62d3aa74277b..5e9d7348c16f 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -81,8 +81,12 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
81 81
82 ACPI_FUNCTION_TRACE(ev_enable_gpe); 82 ACPI_FUNCTION_TRACE(ev_enable_gpe);
83 83
84 /* Enable the requested GPE */ 84 /* Clear the GPE status */
85 status = acpi_hw_clear_gpe(gpe_event_info);
86 if (ACPI_FAILURE(status))
87 return_ACPI_STATUS(status);
85 88
89 /* Enable the requested GPE */
86 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE); 90 status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
87 return_ACPI_STATUS(status); 91 return_ACPI_STATUS(status);
88} 92}
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 72866a004f07..466ebd84ad17 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -348,7 +348,7 @@ config XILINX_HWICAP
348 348
349config R3964 349config R3964
350 tristate "Siemens R3964 line discipline" 350 tristate "Siemens R3964 line discipline"
351 depends on TTY 351 depends on TTY && BROKEN
352 ---help--- 352 ---help---
353 This driver allows synchronous communication with devices using the 353 This driver allows synchronous communication with devices using the
354 Siemens R3964 packet protocol. Unless you are dealing with special 354 Siemens R3964 packet protocol. Unless you are dealing with special
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b599c7318aab..2986119dd31f 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2596,6 +2596,9 @@ static int __init intel_pstate_init(void)
2596 const struct x86_cpu_id *id; 2596 const struct x86_cpu_id *id;
2597 int rc; 2597 int rc;
2598 2598
2599 if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
2600 return -ENODEV;
2601
2599 if (no_load) 2602 if (no_load)
2600 return -ENODEV; 2603 return -ENODEV;
2601 2604
@@ -2611,7 +2614,7 @@ static int __init intel_pstate_init(void)
2611 } else { 2614 } else {
2612 id = x86_match_cpu(intel_pstate_cpu_ids); 2615 id = x86_match_cpu(intel_pstate_cpu_ids);
2613 if (!id) { 2616 if (!id) {
2614 pr_info("CPU ID not supported\n"); 2617 pr_info("CPU model not supported\n");
2615 return -ENODEV; 2618 return -ENODEV;
2616 } 2619 }
2617 2620
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 4f8fb4ecde34..ac0d646a7b74 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -3625,6 +3625,7 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
3625 struct pci_dev *pdev = adev->pdev; 3625 struct pci_dev *pdev = adev->pdev;
3626 enum pci_bus_speed cur_speed; 3626 enum pci_bus_speed cur_speed;
3627 enum pcie_link_width cur_width; 3627 enum pcie_link_width cur_width;
3628 u32 ret = 1;
3628 3629
3629 *speed = PCI_SPEED_UNKNOWN; 3630 *speed = PCI_SPEED_UNKNOWN;
3630 *width = PCIE_LNK_WIDTH_UNKNOWN; 3631 *width = PCIE_LNK_WIDTH_UNKNOWN;
@@ -3632,6 +3633,10 @@ static void amdgpu_device_get_min_pci_speed_width(struct amdgpu_device *adev,
3632 while (pdev) { 3633 while (pdev) {
3633 cur_speed = pcie_get_speed_cap(pdev); 3634 cur_speed = pcie_get_speed_cap(pdev);
3634 cur_width = pcie_get_width_cap(pdev); 3635 cur_width = pcie_get_width_cap(pdev);
3636 ret = pcie_bandwidth_available(adev->pdev, NULL,
3637 NULL, &cur_width);
3638 if (!ret)
3639 cur_width = PCIE_LNK_WIDTH_RESRV;
3635 3640
3636 if (cur_speed != PCI_SPEED_UNKNOWN) { 3641 if (cur_speed != PCI_SPEED_UNKNOWN) {
3637 if (*speed == PCI_SPEED_UNKNOWN) 3642 if (*speed == PCI_SPEED_UNKNOWN)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index d0309e8c9d12..a11db2b1a63f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2405,8 +2405,6 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
2405 /* disable CG */ 2405 /* disable CG */
2406 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0); 2406 WREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL, 0);
2407 2407
2408 adev->gfx.rlc.funcs->reset(adev);
2409
2410 gfx_v9_0_init_pg(adev); 2408 gfx_v9_0_init_pg(adev);
2411 2409
2412 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { 2410 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 4eba3c4800b6..ea18e9c2d8ce 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -2660,12 +2660,18 @@ void core_link_enable_stream(
2660void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option) 2660void core_link_disable_stream(struct pipe_ctx *pipe_ctx, int option)
2661{ 2661{
2662 struct dc *core_dc = pipe_ctx->stream->ctx->dc; 2662 struct dc *core_dc = pipe_ctx->stream->ctx->dc;
2663 struct dc_stream_state *stream = pipe_ctx->stream;
2663 2664
2664 core_dc->hwss.blank_stream(pipe_ctx); 2665 core_dc->hwss.blank_stream(pipe_ctx);
2665 2666
2666 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) 2667 if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
2667 deallocate_mst_payload(pipe_ctx); 2668 deallocate_mst_payload(pipe_ctx);
2668 2669
2670 if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
2671 dal_ddc_service_write_scdc_data(
2672 stream->link->ddc, 0,
2673 stream->timing.flags.LTE_340MCSC_SCRAMBLE);
2674
2669 core_dc->hwss.disable_stream(pipe_ctx, option); 2675 core_dc->hwss.disable_stream(pipe_ctx, option);
2670 2676
2671 disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal); 2677 disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 9aa7bec1b5fe..23b5b94a4939 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -91,6 +91,12 @@ static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
91 * MP0CLK DS 91 * MP0CLK DS
92 */ 92 */
93 data->registry_data.disallowed_features = 0xE0041C00; 93 data->registry_data.disallowed_features = 0xE0041C00;
94 /* ECC feature should be disabled on old SMUs */
95 smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
96 hwmgr->smu_version = smum_get_argument(hwmgr);
97 if (hwmgr->smu_version < 0x282100)
98 data->registry_data.disallowed_features |= FEATURE_ECC_MASK;
99
94 data->registry_data.od_state_in_dc_support = 0; 100 data->registry_data.od_state_in_dc_support = 0;
95 data->registry_data.thermal_support = 1; 101 data->registry_data.thermal_support = 1;
96 data->registry_data.skip_baco_hardware = 0; 102 data->registry_data.skip_baco_hardware = 0;
@@ -357,6 +363,7 @@ static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
357 data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT; 363 data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
358 data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT; 364 data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
359 data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT; 365 data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
366 data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;
360 367
361 for (i = 0; i < GNLD_FEATURES_MAX; i++) { 368 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
362 data->smu_features[i].smu_feature_bitmap = 369 data->smu_features[i].smu_feature_bitmap =
@@ -3020,7 +3027,8 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
3020 "FCLK_DS", 3027 "FCLK_DS",
3021 "MP1CLK_DS", 3028 "MP1CLK_DS",
3022 "MP0CLK_DS", 3029 "MP0CLK_DS",
3023 "XGMI"}; 3030 "XGMI",
3031 "ECC"};
3024 static const char *output_title[] = { 3032 static const char *output_title[] = {
3025 "FEATURES", 3033 "FEATURES",
3026 "BITMASK", 3034 "BITMASK",
@@ -3462,6 +3470,7 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3462 struct vega20_single_dpm_table *dpm_table; 3470 struct vega20_single_dpm_table *dpm_table;
3463 bool vblank_too_short = false; 3471 bool vblank_too_short = false;
3464 bool disable_mclk_switching; 3472 bool disable_mclk_switching;
3473 bool disable_fclk_switching;
3465 uint32_t i, latency; 3474 uint32_t i, latency;
3466 3475
3467 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) && 3476 disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
@@ -3537,13 +3546,20 @@ static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
3537 if (hwmgr->display_config->nb_pstate_switch_disable) 3546 if (hwmgr->display_config->nb_pstate_switch_disable)
3538 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3547 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3539 3548
3549 if ((disable_mclk_switching &&
3550 (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
3551 hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
3552 disable_fclk_switching = true;
3553 else
3554 disable_fclk_switching = false;
3555
3540 /* fclk */ 3556 /* fclk */
3541 dpm_table = &(data->dpm_table.fclk_table); 3557 dpm_table = &(data->dpm_table.fclk_table);
3542 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value; 3558 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
3543 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT; 3559 dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
3544 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value; 3560 dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
3545 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT; 3561 dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
3546 if (hwmgr->display_config->nb_pstate_switch_disable) 3562 if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
3547 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value; 3563 dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
3548 3564
3549 /* vclk */ 3565 /* vclk */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
index a5bc758ae097..ac2a3118a0ae 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.h
@@ -80,6 +80,7 @@ enum {
80 GNLD_DS_MP1CLK, 80 GNLD_DS_MP1CLK,
81 GNLD_DS_MP0CLK, 81 GNLD_DS_MP0CLK,
82 GNLD_XGMI, 82 GNLD_XGMI,
83 GNLD_ECC,
83 84
84 GNLD_FEATURES_MAX 85 GNLD_FEATURES_MAX
85}; 86};
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
index 63d5cf691549..195c4ae67058 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if.h
@@ -99,7 +99,7 @@
99#define FEATURE_DS_MP1CLK_BIT 30 99#define FEATURE_DS_MP1CLK_BIT 30
100#define FEATURE_DS_MP0CLK_BIT 31 100#define FEATURE_DS_MP0CLK_BIT 31
101#define FEATURE_XGMI_BIT 32 101#define FEATURE_XGMI_BIT 32
102#define FEATURE_SPARE_33_BIT 33 102#define FEATURE_ECC_BIT 33
103#define FEATURE_SPARE_34_BIT 34 103#define FEATURE_SPARE_34_BIT 34
104#define FEATURE_SPARE_35_BIT 35 104#define FEATURE_SPARE_35_BIT 35
105#define FEATURE_SPARE_36_BIT 36 105#define FEATURE_SPARE_36_BIT 36
@@ -165,7 +165,8 @@
165#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) 165#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT )
166#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) 166#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT )
167#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) 167#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT )
168#define FEATURE_XGMI_MASK (1 << FEATURE_XGMI_BIT ) 168#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT )
169#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT )
169 170
170#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 171#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001
171#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 172#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 035479e273be..e3f9caa7839f 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -448,7 +448,7 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
448/** 448/**
449 * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU 449 * intel_vgpu_emulate_hotplug - trigger hotplug event for vGPU
450 * @vgpu: a vGPU 450 * @vgpu: a vGPU
451 * @conncted: link state 451 * @connected: link state
452 * 452 *
453 * This function is used to trigger hotplug interrupt for vGPU 453 * This function is used to trigger hotplug interrupt for vGPU
454 * 454 *
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 3e7e2b80c857..5d887f7cc0d5 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -238,9 +238,6 @@ static int vgpu_get_plane_info(struct drm_device *dev,
238 default: 238 default:
239 gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled); 239 gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
240 } 240 }
241
242 info->size = (((p.stride * p.height * p.bpp) / 8) +
243 (PAGE_SIZE - 1)) >> PAGE_SHIFT;
244 } else if (plane_id == DRM_PLANE_TYPE_CURSOR) { 241 } else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
245 ret = intel_vgpu_decode_cursor_plane(vgpu, &c); 242 ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
246 if (ret) 243 if (ret)
@@ -262,14 +259,13 @@ static int vgpu_get_plane_info(struct drm_device *dev,
262 info->x_hot = UINT_MAX; 259 info->x_hot = UINT_MAX;
263 info->y_hot = UINT_MAX; 260 info->y_hot = UINT_MAX;
264 } 261 }
265
266 info->size = (((info->stride * c.height * c.bpp) / 8)
267 + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
268 } else { 262 } else {
269 gvt_vgpu_err("invalid plane id:%d\n", plane_id); 263 gvt_vgpu_err("invalid plane id:%d\n", plane_id);
270 return -EINVAL; 264 return -EINVAL;
271 } 265 }
272 266
267 info->size = (info->stride * info->height + PAGE_SIZE - 1)
268 >> PAGE_SHIFT;
273 if (info->size == 0) { 269 if (info->size == 0) {
274 gvt_vgpu_err("fb size is zero\n"); 270 gvt_vgpu_err("fb size is zero\n");
275 return -EINVAL; 271 return -EINVAL;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index d7052ab7908c..cf133ef03873 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1946,7 +1946,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
1946 */ 1946 */
1947void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm) 1947void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
1948{ 1948{
1949 atomic_dec(&mm->pincount); 1949 atomic_dec_if_positive(&mm->pincount);
1950} 1950}
1951 1951
1952/** 1952/**
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 159192c097cc..05b953793316 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1486,8 +1486,9 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
1486 intel_runtime_pm_put_unchecked(dev_priv); 1486 intel_runtime_pm_put_unchecked(dev_priv);
1487 } 1487 }
1488 1488
1489 if (ret && (vgpu_is_vm_unhealthy(ret))) { 1489 if (ret) {
1490 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR); 1490 if (vgpu_is_vm_unhealthy(ret))
1491 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1491 intel_vgpu_destroy_workload(workload); 1492 intel_vgpu_destroy_workload(workload);
1492 return ERR_PTR(ret); 1493 return ERR_PTR(ret);
1493 } 1494 }
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0bd890c04fe4..f6f6e5b78e97 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4830,7 +4830,10 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
4830 ret = drm_modeset_lock(&dev->mode_config.connection_mutex, 4830 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4831 &ctx); 4831 &ctx);
4832 if (ret) { 4832 if (ret) {
4833 ret = -EINTR; 4833 if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
4834 try_again = true;
4835 continue;
4836 }
4834 break; 4837 break;
4835 } 4838 }
4836 crtc = connector->state->crtc; 4839 crtc = connector->state->crtc;
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 6ca8d322b487..4ca0cdfa6b33 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -150,6 +150,7 @@ config HID_ASUS
150 tristate "Asus" 150 tristate "Asus"
151 depends on LEDS_CLASS 151 depends on LEDS_CLASS
152 depends on ASUS_WMI || ASUS_WMI=n 152 depends on ASUS_WMI || ASUS_WMI=n
153 select POWER_SUPPLY
153 ---help--- 154 ---help---
154 Support for Asus notebook built-in keyboard and touchpad via i2c, and 155 Support for Asus notebook built-in keyboard and touchpad via i2c, and
155 the Asus Republic of Gamers laptop keyboard special keys. 156 the Asus Republic of Gamers laptop keyboard special keys.
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 9993b692598f..860e21ec6a49 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1301,10 +1301,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
1301u32 hid_field_extract(const struct hid_device *hid, u8 *report, 1301u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1302 unsigned offset, unsigned n) 1302 unsigned offset, unsigned n)
1303{ 1303{
1304 if (n > 32) { 1304 if (n > 256) {
1305 hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n", 1305 hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
1306 n, current->comm); 1306 n, current->comm);
1307 n = 32; 1307 n = 256;
1308 } 1308 }
1309 1309
1310 return __extract(report, offset, n); 1310 return __extract(report, offset, n);
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ac9fda1b5a72..1384e57182af 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1060,10 +1060,15 @@ static int hid_debug_rdesc_show(struct seq_file *f, void *p)
1060 seq_printf(f, "\n\n"); 1060 seq_printf(f, "\n\n");
1061 1061
1062 /* dump parsed data and input mappings */ 1062 /* dump parsed data and input mappings */
1063 if (down_interruptible(&hdev->driver_input_lock))
1064 return 0;
1065
1063 hid_dump_device(hdev, f); 1066 hid_dump_device(hdev, f);
1064 seq_printf(f, "\n"); 1067 seq_printf(f, "\n");
1065 hid_dump_input_mapping(hdev, f); 1068 hid_dump_input_mapping(hdev, f);
1066 1069
1070 up(&hdev->driver_input_lock);
1071
1067 return 0; 1072 return 0;
1068} 1073}
1069 1074
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b6d93f4ad037..adce58f24f76 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -1083,6 +1083,7 @@
1083#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3 1083#define USB_DEVICE_ID_SYNAPTICS_HD 0x0ac3
1084#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3 1084#define USB_DEVICE_ID_SYNAPTICS_QUAD_HD 0x1ac3
1085#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710 1085#define USB_DEVICE_ID_SYNAPTICS_TP_V103 0x5710
1086#define I2C_DEVICE_ID_SYNAPTICS_7E7E 0x7e7e
1086 1087
1087#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047 1088#define USB_VENDOR_ID_TEXAS_INSTRUMENTS 0x2047
1088#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855 1089#define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA 0x0855
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index b10b1922c5bd..1fce0076e7dc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -998,6 +998,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
998 case 0x1b8: map_key_clear(KEY_VIDEO); break; 998 case 0x1b8: map_key_clear(KEY_VIDEO); break;
999 case 0x1bc: map_key_clear(KEY_MESSENGER); break; 999 case 0x1bc: map_key_clear(KEY_MESSENGER); break;
1000 case 0x1bd: map_key_clear(KEY_INFO); break; 1000 case 0x1bd: map_key_clear(KEY_INFO); break;
1001 case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
1001 case 0x201: map_key_clear(KEY_NEW); break; 1002 case 0x201: map_key_clear(KEY_NEW); break;
1002 case 0x202: map_key_clear(KEY_OPEN); break; 1003 case 0x202: map_key_clear(KEY_OPEN); break;
1003 case 0x203: map_key_clear(KEY_CLOSE); break; 1004 case 0x203: map_key_clear(KEY_CLOSE); break;
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 15ed6177a7a3..199cc256e9d9 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -2111,6 +2111,13 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
2111 kfree(data); 2111 kfree(data);
2112 return -ENOMEM; 2112 return -ENOMEM;
2113 } 2113 }
2114 data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
2115 if (!data->wq) {
2116 kfree(data->effect_ids);
2117 kfree(data);
2118 return -ENOMEM;
2119 }
2120
2114 data->hidpp = hidpp; 2121 data->hidpp = hidpp;
2115 data->feature_index = feature_index; 2122 data->feature_index = feature_index;
2116 data->version = version; 2123 data->version = version;
@@ -2155,7 +2162,6 @@ static int hidpp_ff_init(struct hidpp_device *hidpp, u8 feature_index)
2155 /* ignore boost value at response.fap.params[2] */ 2162 /* ignore boost value at response.fap.params[2] */
2156 2163
2157 /* init the hardware command queue */ 2164 /* init the hardware command queue */
2158 data->wq = create_singlethread_workqueue("hidpp-ff-sendqueue");
2159 atomic_set(&data->workqueue_size, 0); 2165 atomic_set(&data->workqueue_size, 0);
2160 2166
2161 /* initialize with zero autocenter to get wheel in usable state */ 2167 /* initialize with zero autocenter to get wheel in usable state */
@@ -2608,8 +2614,9 @@ static int m560_raw_event(struct hid_device *hdev, u8 *data, int size)
2608 input_report_rel(mydata->input, REL_Y, v); 2614 input_report_rel(mydata->input, REL_Y, v);
2609 2615
2610 v = hid_snto32(data[6], 8); 2616 v = hid_snto32(data[6], 8);
2611 hidpp_scroll_counter_handle_scroll( 2617 if (v != 0)
2612 &hidpp->vertical_wheel_counter, v); 2618 hidpp_scroll_counter_handle_scroll(
2619 &hidpp->vertical_wheel_counter, v);
2613 2620
2614 input_sync(mydata->input); 2621 input_sync(mydata->input);
2615 } 2622 }
diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
index 953908f2267c..77ffba48cc73 100644
--- a/drivers/hid/hid-quirks.c
+++ b/drivers/hid/hid-quirks.c
@@ -715,7 +715,6 @@ static const struct hid_device_id hid_ignore_list[] = {
715 { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) }, 715 { HID_USB_DEVICE(USB_VENDOR_ID_DEALEXTREAME, USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701) },
716 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) }, 716 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
717 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) }, 717 { HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
718 { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, 0x0400) },
719 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) }, 718 { HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
720 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) }, 719 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
721 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) }, 720 { HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
@@ -855,7 +854,7 @@ static const struct hid_device_id hid_ignore_list[] = {
855 { } 854 { }
856}; 855};
857 856
858/** 857/*
859 * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer 858 * hid_mouse_ignore_list - mouse devices which should not be handled by the hid layer
860 * 859 *
861 * There are composite devices for which we want to ignore only a certain 860 * There are composite devices for which we want to ignore only a certain
@@ -996,6 +995,10 @@ bool hid_ignore(struct hid_device *hdev)
996 if (hdev->product == 0x0401 && 995 if (hdev->product == 0x0401 &&
997 strncmp(hdev->name, "ELAN0800", 8) != 0) 996 strncmp(hdev->name, "ELAN0800", 8) != 0)
998 return true; 997 return true;
998 /* Same with product id 0x0400 */
999 if (hdev->product == 0x0400 &&
1000 strncmp(hdev->name, "QTEC0001", 8) != 0)
1001 return true;
999 break; 1002 break;
1000 } 1003 }
1001 1004
@@ -1042,7 +1045,7 @@ static struct hid_device_id *hid_exists_dquirk(const struct hid_device *hdev)
1042 } 1045 }
1043 1046
1044 if (bl_entry != NULL) 1047 if (bl_entry != NULL)
1045 dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%hx:0x%hx\n", 1048 dbg_hid("Found dynamic quirk 0x%lx for HID device 0x%04x:0x%04x\n",
1046 bl_entry->driver_data, bl_entry->vendor, 1049 bl_entry->driver_data, bl_entry->vendor,
1047 bl_entry->product); 1050 bl_entry->product);
1048 1051
@@ -1209,7 +1212,7 @@ static unsigned long hid_gets_squirk(const struct hid_device *hdev)
1209 quirks |= bl_entry->driver_data; 1212 quirks |= bl_entry->driver_data;
1210 1213
1211 if (quirks) 1214 if (quirks)
1212 dbg_hid("Found squirk 0x%lx for HID device 0x%hx:0x%hx\n", 1215 dbg_hid("Found squirk 0x%lx for HID device 0x%04x:0x%04x\n",
1213 quirks, hdev->vendor, hdev->product); 1216 quirks, hdev->vendor, hdev->product);
1214 return quirks; 1217 return quirks;
1215} 1218}
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index 8141cadfca0e..8dae0f9b819e 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -499,6 +499,7 @@ static void steam_battery_unregister(struct steam_device *steam)
499static int steam_register(struct steam_device *steam) 499static int steam_register(struct steam_device *steam)
500{ 500{
501 int ret; 501 int ret;
502 bool client_opened;
502 503
503 /* 504 /*
504 * This function can be called several times in a row with the 505 * This function can be called several times in a row with the
@@ -511,9 +512,11 @@ static int steam_register(struct steam_device *steam)
511 * Unlikely, but getting the serial could fail, and it is not so 512 * Unlikely, but getting the serial could fail, and it is not so
512 * important, so make up a serial number and go on. 513 * important, so make up a serial number and go on.
513 */ 514 */
515 mutex_lock(&steam->mutex);
514 if (steam_get_serial(steam) < 0) 516 if (steam_get_serial(steam) < 0)
515 strlcpy(steam->serial_no, "XXXXXXXXXX", 517 strlcpy(steam->serial_no, "XXXXXXXXXX",
516 sizeof(steam->serial_no)); 518 sizeof(steam->serial_no));
519 mutex_unlock(&steam->mutex);
517 520
518 hid_info(steam->hdev, "Steam Controller '%s' connected", 521 hid_info(steam->hdev, "Steam Controller '%s' connected",
519 steam->serial_no); 522 steam->serial_no);
@@ -528,13 +531,15 @@ static int steam_register(struct steam_device *steam)
528 } 531 }
529 532
530 mutex_lock(&steam->mutex); 533 mutex_lock(&steam->mutex);
531 if (!steam->client_opened) { 534 client_opened = steam->client_opened;
535 if (!client_opened)
532 steam_set_lizard_mode(steam, lizard_mode); 536 steam_set_lizard_mode(steam, lizard_mode);
537 mutex_unlock(&steam->mutex);
538
539 if (!client_opened)
533 ret = steam_input_register(steam); 540 ret = steam_input_register(steam);
534 } else { 541 else
535 ret = 0; 542 ret = 0;
536 }
537 mutex_unlock(&steam->mutex);
538 543
539 return ret; 544 return ret;
540} 545}
@@ -630,14 +635,21 @@ static void steam_client_ll_close(struct hid_device *hdev)
630{ 635{
631 struct steam_device *steam = hdev->driver_data; 636 struct steam_device *steam = hdev->driver_data;
632 637
638 unsigned long flags;
639 bool connected;
640
641 spin_lock_irqsave(&steam->lock, flags);
642 connected = steam->connected;
643 spin_unlock_irqrestore(&steam->lock, flags);
644
633 mutex_lock(&steam->mutex); 645 mutex_lock(&steam->mutex);
634 steam->client_opened = false; 646 steam->client_opened = false;
647 if (connected)
648 steam_set_lizard_mode(steam, lizard_mode);
635 mutex_unlock(&steam->mutex); 649 mutex_unlock(&steam->mutex);
636 650
637 if (steam->connected) { 651 if (connected)
638 steam_set_lizard_mode(steam, lizard_mode);
639 steam_input_register(steam); 652 steam_input_register(steam);
640 }
641} 653}
642 654
643static int steam_client_ll_raw_request(struct hid_device *hdev, 655static int steam_client_ll_raw_request(struct hid_device *hdev,
diff --git a/drivers/hid/hid-uclogic-params.c b/drivers/hid/hid-uclogic-params.c
index 7710d9f957da..0187c9f8fc22 100644
--- a/drivers/hid/hid-uclogic-params.c
+++ b/drivers/hid/hid-uclogic-params.c
@@ -735,10 +735,6 @@ static int uclogic_params_huion_init(struct uclogic_params *params,
735 goto cleanup; 735 goto cleanup;
736 } 736 }
737 rc = usb_string(udev, 201, ver_ptr, ver_len); 737 rc = usb_string(udev, 201, ver_ptr, ver_len);
738 if (ver_ptr == NULL) {
739 rc = -ENOMEM;
740 goto cleanup;
741 }
742 if (rc == -EPIPE) { 738 if (rc == -EPIPE) {
743 *ver_ptr = '\0'; 739 *ver_ptr = '\0';
744 } else if (rc < 0) { 740 } else if (rc < 0) {
diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
index 90164fed08d3..4d1f24ee249c 100644
--- a/drivers/hid/i2c-hid/i2c-hid-core.c
+++ b/drivers/hid/i2c-hid/i2c-hid-core.c
@@ -184,6 +184,8 @@ static const struct i2c_hid_quirks {
184 I2C_HID_QUIRK_NO_RUNTIME_PM }, 184 I2C_HID_QUIRK_NO_RUNTIME_PM },
185 { USB_VENDOR_ID_ELAN, HID_ANY_ID, 185 { USB_VENDOR_ID_ELAN, HID_ANY_ID,
186 I2C_HID_QUIRK_BOGUS_IRQ }, 186 I2C_HID_QUIRK_BOGUS_IRQ },
187 { USB_VENDOR_ID_SYNAPTICS, I2C_DEVICE_ID_SYNAPTICS_7E7E,
188 I2C_HID_QUIRK_NO_RUNTIME_PM },
187 { 0, 0 } 189 { 0, 0 }
188}; 190};
189 191
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6f929bfa9fcd..d0f1dfe2bcbb 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1759,6 +1759,7 @@ config SENSORS_VT8231
1759config SENSORS_W83773G 1759config SENSORS_W83773G
1760 tristate "Nuvoton W83773G" 1760 tristate "Nuvoton W83773G"
1761 depends on I2C 1761 depends on I2C
1762 select REGMAP_I2C
1762 help 1763 help
1763 If you say yes here you get support for the Nuvoton W83773G hardware 1764 If you say yes here you get support for the Nuvoton W83773G hardware
1764 monitoring chip. 1765 monitoring chip.
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e4f9f7ce92fa..f9abeeeead9e 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -640,7 +640,7 @@ static const struct hwmon_channel_info ntc_chip = {
640}; 640};
641 641
642static const u32 ntc_temp_config[] = { 642static const u32 ntc_temp_config[] = {
643 HWMON_T_INPUT, HWMON_T_TYPE, 643 HWMON_T_INPUT | HWMON_T_TYPE,
644 0 644 0
645}; 645};
646 646
diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c
index b91a80abf724..4679acb4918e 100644
--- a/drivers/hwmon/occ/common.c
+++ b/drivers/hwmon/occ/common.c
@@ -890,6 +890,8 @@ static int occ_setup_sensor_attrs(struct occ *occ)
890 s++; 890 s++;
891 } 891 }
892 } 892 }
893
894 s = (sensors->power.num_sensors * 4) + 1;
893 } else { 895 } else {
894 for (i = 0; i < sensors->power.num_sensors; ++i) { 896 for (i = 0; i < sensors->power.num_sensors; ++i) {
895 s = i + 1; 897 s = i + 1;
@@ -918,11 +920,11 @@ static int occ_setup_sensor_attrs(struct occ *occ)
918 show_power, NULL, 3, i); 920 show_power, NULL, 3, i);
919 attr++; 921 attr++;
920 } 922 }
921 }
922 923
923 if (sensors->caps.num_sensors >= 1) {
924 s = sensors->power.num_sensors + 1; 924 s = sensors->power.num_sensors + 1;
925 }
925 926
927 if (sensors->caps.num_sensors >= 1) {
926 snprintf(attr->name, sizeof(attr->name), "power%d_label", s); 928 snprintf(attr->name, sizeof(attr->name), "power%d_label", s);
927 attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL, 929 attr->sensor = OCC_INIT_ATTR(attr->name, 0444, show_caps, NULL,
928 0, 0); 930 0, 0);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 0ce2d8dfc5f1..26ad6468d13a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -1246,7 +1246,7 @@ config MFD_STA2X11
1246 1246
1247config MFD_SUN6I_PRCM 1247config MFD_SUN6I_PRCM
1248 bool "Allwinner A31 PRCM controller" 1248 bool "Allwinner A31 PRCM controller"
1249 depends on ARCH_SUNXI 1249 depends on ARCH_SUNXI || COMPILE_TEST
1250 select MFD_CORE 1250 select MFD_CORE
1251 help 1251 help
1252 Support for the PRCM (Power/Reset/Clock Management) unit available 1252 Support for the PRCM (Power/Reset/Clock Management) unit available
diff --git a/drivers/mfd/sprd-sc27xx-spi.c b/drivers/mfd/sprd-sc27xx-spi.c
index 69df27769c21..43ac71691fe4 100644
--- a/drivers/mfd/sprd-sc27xx-spi.c
+++ b/drivers/mfd/sprd-sc27xx-spi.c
@@ -53,67 +53,67 @@ static const struct sprd_pmic_data sc2731_data = {
53static const struct mfd_cell sprd_pmic_devs[] = { 53static const struct mfd_cell sprd_pmic_devs[] = {
54 { 54 {
55 .name = "sc27xx-wdt", 55 .name = "sc27xx-wdt",
56 .of_compatible = "sprd,sc27xx-wdt", 56 .of_compatible = "sprd,sc2731-wdt",
57 }, { 57 }, {
58 .name = "sc27xx-rtc", 58 .name = "sc27xx-rtc",
59 .of_compatible = "sprd,sc27xx-rtc", 59 .of_compatible = "sprd,sc2731-rtc",
60 }, { 60 }, {
61 .name = "sc27xx-charger", 61 .name = "sc27xx-charger",
62 .of_compatible = "sprd,sc27xx-charger", 62 .of_compatible = "sprd,sc2731-charger",
63 }, { 63 }, {
64 .name = "sc27xx-chg-timer", 64 .name = "sc27xx-chg-timer",
65 .of_compatible = "sprd,sc27xx-chg-timer", 65 .of_compatible = "sprd,sc2731-chg-timer",
66 }, { 66 }, {
67 .name = "sc27xx-fast-chg", 67 .name = "sc27xx-fast-chg",
68 .of_compatible = "sprd,sc27xx-fast-chg", 68 .of_compatible = "sprd,sc2731-fast-chg",
69 }, { 69 }, {
70 .name = "sc27xx-chg-wdt", 70 .name = "sc27xx-chg-wdt",
71 .of_compatible = "sprd,sc27xx-chg-wdt", 71 .of_compatible = "sprd,sc2731-chg-wdt",
72 }, { 72 }, {
73 .name = "sc27xx-typec", 73 .name = "sc27xx-typec",
74 .of_compatible = "sprd,sc27xx-typec", 74 .of_compatible = "sprd,sc2731-typec",
75 }, { 75 }, {
76 .name = "sc27xx-flash", 76 .name = "sc27xx-flash",
77 .of_compatible = "sprd,sc27xx-flash", 77 .of_compatible = "sprd,sc2731-flash",
78 }, { 78 }, {
79 .name = "sc27xx-eic", 79 .name = "sc27xx-eic",
80 .of_compatible = "sprd,sc27xx-eic", 80 .of_compatible = "sprd,sc2731-eic",
81 }, { 81 }, {
82 .name = "sc27xx-efuse", 82 .name = "sc27xx-efuse",
83 .of_compatible = "sprd,sc27xx-efuse", 83 .of_compatible = "sprd,sc2731-efuse",
84 }, { 84 }, {
85 .name = "sc27xx-thermal", 85 .name = "sc27xx-thermal",
86 .of_compatible = "sprd,sc27xx-thermal", 86 .of_compatible = "sprd,sc2731-thermal",
87 }, { 87 }, {
88 .name = "sc27xx-adc", 88 .name = "sc27xx-adc",
89 .of_compatible = "sprd,sc27xx-adc", 89 .of_compatible = "sprd,sc2731-adc",
90 }, { 90 }, {
91 .name = "sc27xx-audio-codec", 91 .name = "sc27xx-audio-codec",
92 .of_compatible = "sprd,sc27xx-audio-codec", 92 .of_compatible = "sprd,sc2731-audio-codec",
93 }, { 93 }, {
94 .name = "sc27xx-regulator", 94 .name = "sc27xx-regulator",
95 .of_compatible = "sprd,sc27xx-regulator", 95 .of_compatible = "sprd,sc2731-regulator",
96 }, { 96 }, {
97 .name = "sc27xx-vibrator", 97 .name = "sc27xx-vibrator",
98 .of_compatible = "sprd,sc27xx-vibrator", 98 .of_compatible = "sprd,sc2731-vibrator",
99 }, { 99 }, {
100 .name = "sc27xx-keypad-led", 100 .name = "sc27xx-keypad-led",
101 .of_compatible = "sprd,sc27xx-keypad-led", 101 .of_compatible = "sprd,sc2731-keypad-led",
102 }, { 102 }, {
103 .name = "sc27xx-bltc", 103 .name = "sc27xx-bltc",
104 .of_compatible = "sprd,sc27xx-bltc", 104 .of_compatible = "sprd,sc2731-bltc",
105 }, { 105 }, {
106 .name = "sc27xx-fgu", 106 .name = "sc27xx-fgu",
107 .of_compatible = "sprd,sc27xx-fgu", 107 .of_compatible = "sprd,sc2731-fgu",
108 }, { 108 }, {
109 .name = "sc27xx-7sreset", 109 .name = "sc27xx-7sreset",
110 .of_compatible = "sprd,sc27xx-7sreset", 110 .of_compatible = "sprd,sc2731-7sreset",
111 }, { 111 }, {
112 .name = "sc27xx-poweroff", 112 .name = "sc27xx-poweroff",
113 .of_compatible = "sprd,sc27xx-poweroff", 113 .of_compatible = "sprd,sc2731-poweroff",
114 }, { 114 }, {
115 .name = "sc27xx-syscon", 115 .name = "sc27xx-syscon",
116 .of_compatible = "sprd,sc27xx-syscon", 116 .of_compatible = "sprd,sc2731-syscon",
117 }, 117 },
118}; 118};
119 119
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 299016bc46d9..104477b512a2 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -1245,6 +1245,28 @@ free:
1245 return status; 1245 return status;
1246} 1246}
1247 1247
1248static int __maybe_unused twl_suspend(struct device *dev)
1249{
1250 struct i2c_client *client = to_i2c_client(dev);
1251
1252 if (client->irq)
1253 disable_irq(client->irq);
1254
1255 return 0;
1256}
1257
1258static int __maybe_unused twl_resume(struct device *dev)
1259{
1260 struct i2c_client *client = to_i2c_client(dev);
1261
1262 if (client->irq)
1263 enable_irq(client->irq);
1264
1265 return 0;
1266}
1267
1268static SIMPLE_DEV_PM_OPS(twl_dev_pm_ops, twl_suspend, twl_resume);
1269
1248static const struct i2c_device_id twl_ids[] = { 1270static const struct i2c_device_id twl_ids[] = {
1249 { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ 1271 { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */
1250 { "twl5030", 0 }, /* T2 updated */ 1272 { "twl5030", 0 }, /* T2 updated */
@@ -1262,6 +1284,7 @@ static const struct i2c_device_id twl_ids[] = {
1262/* One Client Driver , 4 Clients */ 1284/* One Client Driver , 4 Clients */
1263static struct i2c_driver twl_driver = { 1285static struct i2c_driver twl_driver = {
1264 .driver.name = DRIVER_NAME, 1286 .driver.name = DRIVER_NAME,
1287 .driver.pm = &twl_dev_pm_ops,
1265 .id_table = twl_ids, 1288 .id_table = twl_ids,
1266 .probe = twl_probe, 1289 .probe = twl_probe,
1267 .remove = twl_remove, 1290 .remove = twl_remove,
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2f120b2ffef0..4985268e2273 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
55 55
56static ssize_t perm_hwaddr_show(struct slave *slave, char *buf) 56static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
57{ 57{
58 return sprintf(buf, "%pM\n", slave->perm_hwaddr); 58 return sprintf(buf, "%*phC\n",
59 slave->dev->addr_len,
60 slave->perm_hwaddr);
59} 61}
60static SLAVE_ATTR_RO(perm_hwaddr); 62static SLAVE_ATTR_RO(perm_hwaddr);
61 63
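A hedged illustration of the "%*phC" specifier used above: the leading int argument gives the buffer length and the bytes are printed as colon-separated hex, so hardware addresses longer than the 6 bytes "%pM" assumes are no longer truncated (buf here stands in for the sysfs buffer):

	u8 addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	/* prints "00:11:22:33:44:55" */
	sprintf(buf, "%*phC\n", 6, addr);
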
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dce84a2a65c7..c44b2822e4dd 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
427 return 0; 427 return 0;
428 428
429 lane = mv88e6390x_serdes_get_lane(chip, port); 429 lane = mv88e6390x_serdes_get_lane(chip, port);
430 if (lane < 0) 430 if (lane < 0 && lane != -ENODEV)
431 return lane; 431 return lane;
432 432
433 if (chip->ports[port].serdes_irq) { 433 if (lane >= 0) {
434 err = mv88e6390_serdes_irq_disable(chip, port, lane); 434 if (chip->ports[port].serdes_irq) {
435 err = mv88e6390_serdes_irq_disable(chip, port, lane);
436 if (err)
437 return err;
438 }
439
440 err = mv88e6390x_serdes_power(chip, port, false);
435 if (err) 441 if (err)
436 return err; 442 return err;
437 } 443 }
438 444
439 err = mv88e6390x_serdes_power(chip, port, false); 445 chip->ports[port].cmode = 0;
440 if (err)
441 return err;
442 446
443 if (cmode) { 447 if (cmode) {
444 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg); 448 err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
452 if (err) 456 if (err)
453 return err; 457 return err;
454 458
459 chip->ports[port].cmode = cmode;
460
461 lane = mv88e6390x_serdes_get_lane(chip, port);
462 if (lane < 0)
463 return lane;
464
455 err = mv88e6390x_serdes_power(chip, port, true); 465 err = mv88e6390x_serdes_power(chip, port, true);
456 if (err) 466 if (err)
457 return err; 467 return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
463 } 473 }
464 } 474 }
465 475
466 chip->ports[port].cmode = cmode;
467
468 return 0; 476 return 0;
469} 477}
470 478
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aa2be4807191..28eac9056211 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1328,10 +1328,11 @@ int nicvf_stop(struct net_device *netdev)
1328 struct nicvf_cq_poll *cq_poll = NULL; 1328 struct nicvf_cq_poll *cq_poll = NULL;
1329 union nic_mbx mbx = {}; 1329 union nic_mbx mbx = {};
1330 1330
1331 cancel_delayed_work_sync(&nic->link_change_work);
1332
1333 /* wait till all queued set_rx_mode tasks completes */ 1331 /* wait till all queued set_rx_mode tasks completes */
1334 drain_workqueue(nic->nicvf_rx_mode_wq); 1332 if (nic->nicvf_rx_mode_wq) {
1333 cancel_delayed_work_sync(&nic->link_change_work);
1334 drain_workqueue(nic->nicvf_rx_mode_wq);
1335 }
1335 1336
1336 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; 1337 mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
1337 nicvf_send_msg_to_pf(nic, &mbx); 1338 nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1453,8 @@ int nicvf_open(struct net_device *netdev)
1452 struct nicvf_cq_poll *cq_poll = NULL; 1453 struct nicvf_cq_poll *cq_poll = NULL;
1453 1454
1454 /* wait till all queued set_rx_mode tasks completes if any */ 1455 /* wait till all queued set_rx_mode tasks completes if any */
1455 drain_workqueue(nic->nicvf_rx_mode_wq); 1456 if (nic->nicvf_rx_mode_wq)
1457 drain_workqueue(nic->nicvf_rx_mode_wq);
1456 1458
1457 netif_carrier_off(netdev); 1459 netif_carrier_off(netdev);
1458 1460
@@ -1550,10 +1552,12 @@ int nicvf_open(struct net_device *netdev)
1550 /* Send VF config done msg to PF */ 1552 /* Send VF config done msg to PF */
1551 nicvf_send_cfg_done(nic); 1553 nicvf_send_cfg_done(nic);
1552 1554
1553 INIT_DELAYED_WORK(&nic->link_change_work, 1555 if (nic->nicvf_rx_mode_wq) {
1554 nicvf_link_status_check_task); 1556 INIT_DELAYED_WORK(&nic->link_change_work,
1555 queue_delayed_work(nic->nicvf_rx_mode_wq, 1557 nicvf_link_status_check_task);
1556 &nic->link_change_work, 0); 1558 queue_delayed_work(nic->nicvf_rx_mode_wq,
1559 &nic->link_change_work, 0);
1560 }
1557 1561
1558 return 0; 1562 return 0;
1559cleanup: 1563cleanup:
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 5b4d3badcb73..e246f9733bb8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
105 /* Check if page can be recycled */ 105 /* Check if page can be recycled */
106 if (page) { 106 if (page) {
107 ref_count = page_ref_count(page); 107 ref_count = page_ref_count(page);
108 /* Check if this page has been used once i.e 'put_page' 108 /* This page can be recycled if internal ref_count and page's
109 * called after packet transmission i.e internal ref_count 109 * ref_count are equal, indicating that the page has been used
110 * and page's ref_count are equal i.e page can be recycled. 110 * once for packet transmission. For non-XDP mode, internal
111 * ref_count is always '1'.
111 */ 112 */
112 if (rbdr->is_xdp && (ref_count == pgcache->ref_count)) 113 if (rbdr->is_xdp) {
113 pgcache->ref_count--; 114 if (ref_count == pgcache->ref_count)
114 else 115 pgcache->ref_count--;
115 page = NULL; 116 else
116 117 page = NULL;
117 /* In non-XDP mode, page's ref_count needs to be '1' for it 118 } else if (ref_count != 1) {
118 * to be recycled.
119 */
120 if (!rbdr->is_xdp && (ref_count != 1))
121 page = NULL; 119 page = NULL;
120 }
122 } 121 }
123 122
124 if (!page) { 123 if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
365 while (head < rbdr->pgcnt) { 364 while (head < rbdr->pgcnt) {
366 pgcache = &rbdr->pgcache[head]; 365 pgcache = &rbdr->pgcache[head];
367 if (pgcache->page && page_ref_count(pgcache->page) != 0) { 366 if (pgcache->page && page_ref_count(pgcache->page) != 0) {
368 if (!rbdr->is_xdp) { 367 if (rbdr->is_xdp) {
369 put_page(pgcache->page); 368 page_ref_sub(pgcache->page,
370 continue; 369 pgcache->ref_count - 1);
371 } 370 }
372 page_ref_sub(pgcache->page, pgcache->ref_count - 1);
373 put_page(pgcache->page); 371 put_page(pgcache->page);
374 } 372 }
375 head++; 373 head++;
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be5f004..e2919005ead3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
354 ppmax = max; 354 ppmax = max;
355 355
356 /* pool size must be multiple of unsigned long */ 356 /* pool size must be multiple of unsigned long */
357 bmap = BITS_TO_LONGS(ppmax); 357 bmap = ppmax / BITS_PER_TYPE(unsigned long);
358 if (!bmap)
359 return NULL;
360
358 ppmax = (bmap * sizeof(unsigned long)) << 3; 361 ppmax = (bmap * sizeof(unsigned long)) << 3;
359 362
360 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap; 363 alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
402 if (reserve_factor) { 405 if (reserve_factor) {
403 ppmax_pool = ppmax / reserve_factor; 406 ppmax_pool = ppmax / reserve_factor;
404 pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max); 407 pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
408 if (!pool) {
409 ppmax_pool = 0;
410 reserve_factor = 0;
411 }
405 412
406 pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n", 413 pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
407 ndev->name, ppmax, ppmax_pool, pool_index_max); 414 ndev->name, ppmax, ppmax_pool, pool_index_max);
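The new !bmap check matters because replacing BITS_TO_LONGS() with a plain division changes the rounding; a small sketch, assuming 64-bit unsigned long:

	unsigned int ppmax = 60;				/* example value              */
	size_t a = BITS_TO_LONGS(ppmax);			/* 1: DIV_ROUND_UP, rounds up */
	size_t b = ppmax / BITS_PER_TYPE(unsigned long);	/* 0: truncates, hence !bmap  */
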
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 79d03f8ee7b1..c7fa97a7e1f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -150,7 +150,6 @@ out_buffer_fail:
150/* free desc along with its attached buffer */ 150/* free desc along with its attached buffer */
151static void hnae_free_desc(struct hnae_ring *ring) 151static void hnae_free_desc(struct hnae_ring *ring)
152{ 152{
153 hnae_free_buffers(ring);
154 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr, 153 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
155 ring->desc_num * sizeof(ring->desc[0]), 154 ring->desc_num * sizeof(ring->desc[0]),
156 ring_to_dma_dir(ring)); 155 ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
183/* fini ring, also free the buffer for the ring */ 182/* fini ring, also free the buffer for the ring */
184static void hnae_fini_ring(struct hnae_ring *ring) 183static void hnae_fini_ring(struct hnae_ring *ring)
185{ 184{
185 if (is_rx_ring(ring))
186 hnae_free_buffers(ring);
187
186 hnae_free_desc(ring); 188 hnae_free_desc(ring);
187 kfree(ring->desc_cb); 189 kfree(ring->desc_cb);
188 ring->desc_cb = NULL; 190 ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 08a750fb60c4..d6fb83437230 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
357}; 357};
358 358
359struct hnae_queue { 359struct hnae_queue {
360 void __iomem *io_base; 360 u8 __iomem *io_base;
361 phys_addr_t phy_base; 361 phys_addr_t phy_base;
362 struct hnae_ae_dev *dev; /* the device who use this queue */ 362 struct hnae_ae_dev *dev; /* the device who use this queue */
363 struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp; 363 struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
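Several hunks in this series switch register bases from void __iomem * to u8 __iomem *. A brief sketch of why, with placeholder resource values:

	/* arithmetic on void * is only a GCC extension; with u8 __iomem * the
	 * byte offsets are plain C and sparse still tracks the __iomem space */
	u8 __iomem *base = ioremap(phys_addr, region_len);	/* placeholders */

	writel(val, base + reg);	/* same byte offset as before */
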
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a97228c93831..6c0507921623 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
370static void hns_mac_param_get(struct mac_params *param, 370static void hns_mac_param_get(struct mac_params *param,
371 struct hns_mac_cb *mac_cb) 371 struct hns_mac_cb *mac_cb)
372{ 372{
373 param->vaddr = (void *)mac_cb->vaddr; 373 param->vaddr = mac_cb->vaddr;
374 param->mac_mode = hns_get_enet_interface(mac_cb); 374 param->mac_mode = hns_get_enet_interface(mac_cb);
375 ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr); 375 ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
376 param->mac_id = mac_cb->mac_id; 376 param->mac_id = mac_cb->mac_id;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index fbc75341bef7..22589799f1a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -187,7 +187,7 @@ struct mac_statistics {
187/*mac para struct ,mac get param from nic or dsaf when initialize*/ 187/*mac para struct ,mac get param from nic or dsaf when initialize*/
188struct mac_params { 188struct mac_params {
189 char addr[ETH_ALEN]; 189 char addr[ETH_ALEN];
190 void *vaddr; /*virtual address*/ 190 u8 __iomem *vaddr; /*virtual address*/
191 struct device *dev; 191 struct device *dev;
192 u8 mac_id; 192 u8 mac_id;
193 /**< Ethernet operation mode (MAC-PHY interface and speed) */ 193 /**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
402 enum mac_mode mac_mode; 402 enum mac_mode mac_mode;
403 u8 mac_id; 403 u8 mac_id;
404 struct hns_mac_cb *mac_cb; 404 struct hns_mac_cb *mac_cb;
405 void __iomem *io_base; 405 u8 __iomem *io_base;
406 unsigned int mac_en_flg;/*you'd better don't enable mac twice*/ 406 unsigned int mac_en_flg;/*you'd better don't enable mac twice*/
407 unsigned int virt_dev_num; 407 unsigned int virt_dev_num;
408 struct device *dev; 408 struct device *dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ac55db065f16..61eea6ac846f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
1602 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id); 1602 DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
1603 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M, 1603 dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
1604 DSAF_TBL_TCAM_KEY_PORT_S, port); 1604 DSAF_TBL_TCAM_KEY_PORT_S, port);
1605
1606 mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
1607} 1605}
1608 1606
1609/** 1607/**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
1663 /* default config dvc to 0 */ 1661 /* default config dvc to 0 */
1664 mac_data.tbl_ucast_dvc = 0; 1662 mac_data.tbl_ucast_dvc = 0;
1665 mac_data.tbl_ucast_out_port = mac_entry->port_num; 1663 mac_data.tbl_ucast_out_port = mac_entry->port_num;
1666 tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); 1664 tcam_data.tbl_tcam_data_high = mac_key.high.val;
1667 tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); 1665 tcam_data.tbl_tcam_data_low = mac_key.low.val;
1668 1666
1669 hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data); 1667 hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
1670 1668
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
1786 0xff, 1784 0xff,
1787 mc_mask); 1785 mc_mask);
1788 1786
1789 mask_key.high.val = le32_to_cpu(mask_key.high.val);
1790 mask_key.low.val = le32_to_cpu(mask_key.low.val);
1791
1792 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); 1787 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
1793 } 1788 }
1794 1789
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
1840 dsaf_dev->ae_dev.name, mac_key.high.val, 1835 dsaf_dev->ae_dev.name, mac_key.high.val,
1841 mac_key.low.val, entry_index); 1836 mac_key.low.val, entry_index);
1842 1837
1843 tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); 1838 tcam_data.tbl_tcam_data_high = mac_key.high.val;
1844 tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); 1839 tcam_data.tbl_tcam_data_low = mac_key.low.val;
1845 1840
1846 /* config mc entry with mask */ 1841 /* config mc entry with mask */
1847 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data, 1842 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
1956 /* config key mask */ 1951 /* config key mask */
1957 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask); 1952 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
1958 1953
1959 mask_key.high.val = le32_to_cpu(mask_key.high.val);
1960 mask_key.low.val = le32_to_cpu(mask_key.low.val);
1961
1962 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key); 1954 pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
1963 } 1955 }
1964 1956
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
2012 soft_mac_entry += entry_index; 2004 soft_mac_entry += entry_index;
2013 soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX; 2005 soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
2014 } else { /* not zero, just del port, update */ 2006 } else { /* not zero, just del port, update */
2015 tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val); 2007 tcam_data.tbl_tcam_data_high = mac_key.high.val;
2016 tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val); 2008 tcam_data.tbl_tcam_data_low = mac_key.low.val;
2017 2009
2018 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, 2010 hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
2019 &tcam_data, 2011 &tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
2750 return DSAF_DUMP_REGS_NUM; 2742 return DSAF_DUMP_REGS_NUM;
2751} 2743}
2752 2744
2745static int hns_dsaf_get_port_id(u8 port)
2746{
2747 if (port < DSAF_SERVICE_NW_NUM)
2748 return port;
2749
2750 if (port >= DSAF_BASE_INNER_PORT_NUM)
2751 return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
2752
2753 return -EINVAL;
2754}
2755
2753static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port) 2756static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
2754{ 2757{
2755 struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80}; 2758 struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
2815 memset(&temp_key, 0x0, sizeof(temp_key)); 2818 memset(&temp_key, 0x0, sizeof(temp_key));
2816 mask_entry.addr[0] = 0x01; 2819 mask_entry.addr[0] = 0x01;
2817 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id, 2820 hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
2818 port, mask_entry.addr); 2821 0xf, mask_entry.addr);
2819 tbl_tcam_mcast.tbl_mcast_item_vld = 1; 2822 tbl_tcam_mcast.tbl_mcast_item_vld = 1;
2820 tbl_tcam_mcast.tbl_mcast_old_en = 0; 2823 tbl_tcam_mcast.tbl_mcast_old_en = 0;
2821 2824
2822 if (port < DSAF_SERVICE_NW_NUM) { 2825 /* set MAC port to handle multicast */
2823 mskid = port; 2826 mskid = hns_dsaf_get_port_id(port);
2824 } else if (port >= DSAF_BASE_INNER_PORT_NUM) { 2827 if (mskid == -EINVAL) {
2825 mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
2826 } else {
2827 dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n", 2828 dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
2828 dsaf_dev->ae_dev.name, port, 2829 dsaf_dev->ae_dev.name, port,
2829 mask_key.high.val, mask_key.low.val); 2830 mask_key.high.val, mask_key.low.val);
2830 return; 2831 return;
2831 } 2832 }
2833 dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
2834 mskid % 32, 1);
2832 2835
2836 /* set pool bit map to handle multicast */
2837 mskid = hns_dsaf_get_port_id(port_num);
2838 if (mskid == -EINVAL) {
2839 dev_err(dsaf_dev->dev,
2840 "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
2841 dsaf_dev->ae_dev.name, port_num,
2842 mask_key.high.val, mask_key.low.val);
2843 return;
2844 }
2833 dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32], 2845 dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
2834 mskid % 32, 1); 2846 mskid % 32, 1);
2847
2835 memcpy(&temp_key, &mask_key, sizeof(mask_key)); 2848 memcpy(&temp_key, &mask_key, sizeof(mask_key));
2836 hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc, 2849 hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
2837 (struct dsaf_tbl_tcam_data *)(&mask_key), 2850 (struct dsaf_tbl_tcam_data *)(&mask_key),
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0e1cd99831a6..76cc8887e1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
467 u8 mac_id, u8 port_num); 467 u8 mac_id, u8 port_num);
468int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port); 468int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
469 469
470int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
471
470#endif /* __HNS_DSAF_MAIN_H__ */ 472#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 16294cd3c954..19b94879691f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
670 dsaf_set_field(origin, 1ull << 10, 10, en); 670 dsaf_set_field(origin, 1ull << 10, 10, en);
671 dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin); 671 dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
672 } else { 672 } else {
673 u8 *base_addr = (u8 *)mac_cb->serdes_vaddr + 673 u8 __iomem *base_addr = mac_cb->serdes_vaddr +
674 (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000); 674 (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
675 dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en); 675 dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
676 } 676 }
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 3d07c8a7639d..17c019106e6e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
61 } 61 }
62} 62}
63 63
64static void __iomem * 64static u8 __iomem *
65hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common) 65hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
66{ 66{
67 return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET; 67 return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
111 dsaf_dev->ppe_common[comm_index] = NULL; 111 dsaf_dev->ppe_common[comm_index] = NULL;
112} 112}
113 113
114static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common, 114static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
115 int ppe_idx) 115 int ppe_idx)
116{ 116{
117 return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET; 117 return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
118} 118}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index f670e63a5a01..110c6e8222c7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
80 struct hns_ppe_hw_stats hw_stats; 80 struct hns_ppe_hw_stats hw_stats;
81 81
82 u8 index; /* index in a ppe common device */ 82 u8 index; /* index in a ppe common device */
83 void __iomem *io_base; 83 u8 __iomem *io_base;
84 int virq; 84 int virq;
85 u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */ 85 u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
86 u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */ 86 u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
89struct ppe_common_cb { 89struct ppe_common_cb {
90 struct device *dev; 90 struct device *dev;
91 struct dsaf_device *dsaf_dev; 91 struct dsaf_device *dsaf_dev;
92 void __iomem *io_base; 92 u8 __iomem *io_base;
93 93
94 enum ppe_common_mode ppe_mode; 94 enum ppe_common_mode ppe_mode;
95 95
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6bf346c11b25..ac3518ca4d7b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
458 mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT; 458 mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
459 } else { 459 } else {
460 ring = &q->tx_ring; 460 ring = &q->tx_ring;
461 ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base + 461 ring->io_base = ring_pair_cb->q.io_base +
462 HNS_RCB_TX_REG_OFFSET; 462 HNS_RCB_TX_REG_OFFSET;
463 irq_idx = HNS_RCB_IRQ_IDX_TX; 463 irq_idx = HNS_RCB_IRQ_IDX_TX;
464 mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT : 464 mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
764 } 764 }
765} 765}
766 766
767static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common) 767static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
768{ 768{
769 struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev; 769 struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
770 770
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b9733b0b8482..b9e7f11f0896 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1018,7 +1018,7 @@
1018#define XGMAC_PAUSE_CTL_RSP_MODE_B 2 1018#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
1019#define XGMAC_PAUSE_CTL_TX_XOFF_B 3 1019#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
1020 1020
1021static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value) 1021static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
1022{ 1022{
1023 writel(value, base + reg); 1023 writel(value, base + reg);
1024} 1024}
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
1053#define dsaf_set_bit(origin, shift, val) \ 1053#define dsaf_set_bit(origin, shift, val) \
1054 dsaf_set_field((origin), (1ull << (shift)), (shift), (val)) 1054 dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
1055 1055
1056static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask, 1056static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
1057 u32 shift, u32 val) 1057 u32 shift, u32 val)
1058{ 1058{
1059 u32 origin = dsaf_read_reg(base, reg); 1059 u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
1073#define dsaf_get_bit(origin, shift) \ 1073#define dsaf_get_bit(origin, shift) \
1074 dsaf_get_field((origin), (1ull << (shift)), (shift)) 1074 dsaf_get_field((origin), (1ull << (shift)), (shift))
1075 1075
1076static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask, 1076static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
1077 u32 shift) 1077 u32 shift)
1078{ 1078{
1079 u32 origin; 1079 u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
1089 dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit)) 1089 dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
1090 1090
1091#define dsaf_write_b(addr, data)\ 1091#define dsaf_write_b(addr, data)\
1092 writeb((data), (__iomem unsigned char *)(addr)) 1092 writeb((data), (__iomem u8 *)(addr))
1093#define dsaf_read_b(addr)\ 1093#define dsaf_read_b(addr)\
1094 readb((__iomem unsigned char *)(addr)) 1094 readb((__iomem u8 *)(addr))
1095 1095
1096#define hns_mac_reg_read64(drv, offset) \ 1096#define hns_mac_reg_read64(drv, offset) \
1097 readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset)))) 1097 readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
1098 1098
1099#endif /* _DSAF_REG_H */ 1099#endif /* _DSAF_REG_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index ba4316910dea..a60f207768fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
129 dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0); 129 dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
130 dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1); 130 dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
131 dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0); 131 dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
132 dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val); 132 dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
133} 133}
134 134
135/** 135/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 60e7d7ae3787..4cd86ba1f050 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -29,9 +29,6 @@
29 29
30#define SERVICE_TIMER_HZ (1 * HZ) 30#define SERVICE_TIMER_HZ (1 * HZ)
31 31
32#define NIC_TX_CLEAN_MAX_NUM 256
33#define NIC_RX_CLEAN_MAX_NUM 64
34
35#define RCB_IRQ_NOT_INITED 0 32#define RCB_IRQ_NOT_INITED 0
36#define RCB_IRQ_INITED 1 33#define RCB_IRQ_INITED 1
37#define HNS_BUFFER_SIZE_2048 2048 34#define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
376 wmb(); /* commit all data before submit */ 373 wmb(); /* commit all data before submit */
377 assert(skb->queue_mapping < priv->ae_handle->q_num); 374 assert(skb->queue_mapping < priv->ae_handle->q_num);
378 hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num); 375 hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
379 ring->stats.tx_pkts++;
380 ring->stats.tx_bytes += skb->len;
381 376
382 return NETDEV_TX_OK; 377 return NETDEV_TX_OK;
383 378
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
999 /* issue prefetch for next Tx descriptor */ 994 /* issue prefetch for next Tx descriptor */
1000 prefetch(&ring->desc_cb[ring->next_to_clean]); 995 prefetch(&ring->desc_cb[ring->next_to_clean]);
1001 } 996 }
997 /* update tx ring statistics. */
998 ring->stats.tx_pkts += pkts;
999 ring->stats.tx_bytes += bytes;
1002 1000
1003 NETIF_TX_UNLOCK(ring); 1001 NETIF_TX_UNLOCK(ring);
1004 1002
@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
2152 hns_nic_tx_fini_pro_v2; 2150 hns_nic_tx_fini_pro_v2;
2153 2151
2154 netif_napi_add(priv->netdev, &rd->napi, 2152 netif_napi_add(priv->netdev, &rd->napi,
2155 hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM); 2153 hns_nic_common_poll, NAPI_POLL_WEIGHT);
2156 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; 2154 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2157 } 2155 }
2158 for (i = h->q_num; i < h->q_num * 2; i++) { 2156 for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
2165 hns_nic_rx_fini_pro_v2; 2163 hns_nic_rx_fini_pro_v2;
2166 2164
2167 netif_napi_add(priv->netdev, &rd->napi, 2165 netif_napi_add(priv->netdev, &rd->napi,
2168 hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM); 2166 hns_nic_common_poll, NAPI_POLL_WEIGHT);
2169 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED; 2167 rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2170 } 2168 }
2171 2169
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index fffe8c1c45d3..0fb61d440d3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -3,7 +3,7 @@
3# Makefile for the HISILICON network device drivers. 3# Makefile for the HISILICON network device drivers.
4# 4#
5 5
6ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 6ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
7 7
8obj-$(CONFIG_HNS3_HCLGE) += hclge.o 8obj-$(CONFIG_HNS3_HCLGE) += hclge.o
9hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o 9hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index fb93bbd35845..6193f8fa7cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -3,7 +3,7 @@
3# Makefile for the HISILICON network device drivers. 3# Makefile for the HISILICON network device drivers.
4# 4#
5 5
6ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 6ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
7 7
8obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o 8obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
9hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o 9hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
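placeholder-not-used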
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc251f32..8b8a7d00e8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
39}; 39};
40 40
41struct hns_mdio_device { 41struct hns_mdio_device {
42 void *vbase; /* mdio reg base address */ 42 u8 __iomem *vbase; /* mdio reg base address */
43 struct regmap *subctrl_vbase; 43 struct regmap *subctrl_vbase;
44 struct hns_mdio_sc_reg sc_reg; 44 struct hns_mdio_sc_reg sc_reg;
45}; 45};
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
96#define MDIO_SC_CLK_ST 0x531C 96#define MDIO_SC_CLK_ST 0x531C
97#define MDIO_SC_RESET_ST 0x5A1C 97#define MDIO_SC_RESET_ST 0x5A1C
98 98
99static void mdio_write_reg(void *base, u32 reg, u32 value) 99static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
100{ 100{
101 u8 __iomem *reg_addr = (u8 __iomem *)base; 101 writel_relaxed(value, base + reg);
102
103 writel_relaxed(value, reg_addr + reg);
104} 102}
105 103
106#define MDIO_WRITE_REG(a, reg, value) \ 104#define MDIO_WRITE_REG(a, reg, value) \
107 mdio_write_reg((a)->vbase, (reg), (value)) 105 mdio_write_reg((a)->vbase, (reg), (value))
108 106
109static u32 mdio_read_reg(void *base, u32 reg) 107static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
110{ 108{
111 u8 __iomem *reg_addr = (u8 __iomem *)base; 109 return readl_relaxed(base + reg);
112
113 return readl_relaxed(reg_addr + reg);
114} 110}
115 111
116#define mdio_set_field(origin, mask, shift, val) \ 112#define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
121 117
122#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask)) 118#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
123 119
124static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift, 120static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
125 u32 val) 121 u32 val)
126{ 122{
127 u32 origin = mdio_read_reg(base, reg); 123 u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
133#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \ 129#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
134 mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val)) 130 mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
135 131
136static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift) 132static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
137{ 133{
138 u32 origin; 134 u32 origin;
139 135
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ecbb1adcf3b..51cfe95f3e24 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
1885 */ 1885 */
1886 adapter->state = VNIC_PROBED; 1886 adapter->state = VNIC_PROBED;
1887 1887
1888 reinit_completion(&adapter->init_done);
1888 rc = init_crq_queue(adapter); 1889 rc = init_crq_queue(adapter);
1889 if (rc) { 1890 if (rc) {
1890 netdev_err(adapter->netdev, 1891 netdev_err(adapter->netdev,
@@ -4625,7 +4626,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
4625 old_num_rx_queues = adapter->req_rx_queues; 4626 old_num_rx_queues = adapter->req_rx_queues;
4626 old_num_tx_queues = adapter->req_tx_queues; 4627 old_num_tx_queues = adapter->req_tx_queues;
4627 4628
4628 init_completion(&adapter->init_done); 4629 reinit_completion(&adapter->init_done);
4629 adapter->init_done_rc = 0; 4630 adapter->init_done_rc = 0;
4630 ibmvnic_send_crq_init(adapter); 4631 ibmvnic_send_crq_init(adapter);
4631 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 4632 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4680,7 +4681,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
4680 4681
4681 adapter->from_passive_init = false; 4682 adapter->from_passive_init = false;
4682 4683
4683 init_completion(&adapter->init_done);
4684 adapter->init_done_rc = 0; 4684 adapter->init_done_rc = 0;
4685 ibmvnic_send_crq_init(adapter); 4685 ibmvnic_send_crq_init(adapter);
4686 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) { 4686 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4759,6 +4759,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
4759 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset); 4759 INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
4760 INIT_LIST_HEAD(&adapter->rwi_list); 4760 INIT_LIST_HEAD(&adapter->rwi_list);
4761 spin_lock_init(&adapter->rwi_lock); 4761 spin_lock_init(&adapter->rwi_lock);
4762 init_completion(&adapter->init_done);
4762 adapter->resetting = false; 4763 adapter->resetting = false;
4763 4764
4764 adapter->mac_change_pending = false; 4765 adapter->mac_change_pending = false;
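The ibmvnic change follows the usual completion pattern: initialise the object once at probe, then only reset the counter before each reuse (running init_completion() again on a completion that may have waiters is unsafe). A minimal sketch with a placeholder timeout:

	struct completion init_done;

	init_completion(&init_done);		/* once, at probe time            */

	/* later, per reset/init attempt: */
	reinit_completion(&init_done);		/* reset the done count only      */
	if (!wait_for_completion_timeout(&init_done, timeout))
		/* handle the timeout */;
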
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5a0419421511..ecef949f3baa 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
41 /* create driver workqueue */ 41 /* create driver workqueue */
42 fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, 42 fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
43 fm10k_driver_name); 43 fm10k_driver_name);
44 if (!fm10k_workqueue)
45 return -ENOMEM;
44 46
45 fm10k_dbg_init(); 47 fm10k_dbg_init();
46 48
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d684998ba2b0..d3cc3427caad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -790,6 +790,8 @@ struct i40e_vsi {
790 790
791 /* VSI specific handlers */ 791 /* VSI specific handlers */
792 irqreturn_t (*irq_handler)(int irq, void *data); 792 irqreturn_t (*irq_handler)(int irq, void *data);
793
794 unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
793} ____cacheline_internodealigned_in_smp; 795} ____cacheline_internodealigned_in_smp;
794 796
795struct i40e_netdev_priv { 797struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
1096 return !!vsi->xdp_prog; 1098 return !!vsi->xdp_prog;
1097} 1099}
1098 1100
1099static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
1100{
1101 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
1102 int qid = ring->queue_index;
1103
1104 if (ring_is_xdp(ring))
1105 qid -= ring->vsi->alloc_queue_pairs;
1106
1107 if (!xdp_on)
1108 return NULL;
1109
1110 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
1111}
1112
1113int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch); 1101int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
1114int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate); 1102int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
1115int i40e_add_del_cloud_filter(struct i40e_vsi *vsi, 1103int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4c885801fa26..7874d0ec7fb0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2573 return -EOPNOTSUPP; 2573 return -EOPNOTSUPP;
2574 2574
2575 /* only magic packet is supported */ 2575 /* only magic packet is supported */
2576 if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) 2576 if (wol->wolopts & ~WAKE_MAGIC)
2577 | (wol->wolopts != WAKE_FILTER))
2578 return -EOPNOTSUPP; 2577 return -EOPNOTSUPP;
2579 2578
2580 /* is this a new value? */ 2579 /* is this a new value? */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index da62218eb70a..b1c265012c8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
3064} 3064}
3065 3065
3066/** 3066/**
3067 * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
3068 * @ring: The Tx or Rx ring
3069 *
3070 * Returns the UMEM or NULL.
3071 **/
3072static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
3073{
3074 bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
3075 int qid = ring->queue_index;
3076
3077 if (ring_is_xdp(ring))
3078 qid -= ring->vsi->alloc_queue_pairs;
3079
3080 if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
3081 return NULL;
3082
3083 return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
3084}
3085
3086/**
3067 * i40e_configure_tx_ring - Configure a transmit ring context and rest 3087 * i40e_configure_tx_ring - Configure a transmit ring context and rest
3068 * @ring: The Tx ring to configure 3088 * @ring: The Tx ring to configure
3069 * 3089 *
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10064 hash_init(vsi->mac_filter_hash); 10084 hash_init(vsi->mac_filter_hash);
10065 vsi->irqs_ready = false; 10085 vsi->irqs_ready = false;
10066 10086
10087 if (type == I40E_VSI_MAIN) {
10088 vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
10089 if (!vsi->af_xdp_zc_qps)
10090 goto err_rings;
10091 }
10092
10067 ret = i40e_set_num_rings_in_vsi(vsi); 10093 ret = i40e_set_num_rings_in_vsi(vsi);
10068 if (ret) 10094 if (ret)
10069 goto err_rings; 10095 goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
10082 goto unlock_pf; 10108 goto unlock_pf;
10083 10109
10084err_rings: 10110err_rings:
10111 bitmap_free(vsi->af_xdp_zc_qps);
10085 pf->next_vsi = i - 1; 10112 pf->next_vsi = i - 1;
10086 kfree(vsi); 10113 kfree(vsi);
10087unlock_pf: 10114unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
10162 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); 10189 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
10163 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); 10190 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
10164 10191
10192 bitmap_free(vsi->af_xdp_zc_qps);
10165 i40e_vsi_free_arrays(vsi, true); 10193 i40e_vsi_free_arrays(vsi, true);
10166 i40e_clear_rss_config_user(vsi); 10194 i40e_clear_rss_config_user(vsi);
10167 10195
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5fb4353c742b..31575c0bb884 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
146static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 146static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
147{ 147{
148 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); 148 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
149 struct timespec64 now; 149 struct timespec64 now, then;
150 150
151 then = ns_to_timespec64(delta);
151 mutex_lock(&pf->tmreg_lock); 152 mutex_lock(&pf->tmreg_lock);
152 153
153 i40e_ptp_read(pf, &now, NULL); 154 i40e_ptp_read(pf, &now, NULL);
154 timespec64_add_ns(&now, delta); 155 now = timespec64_add(now, then);
155 i40e_ptp_write(pf, (const struct timespec64 *)&now); 156 i40e_ptp_write(pf, (const struct timespec64 *)&now);
156 157
157 mutex_unlock(&pf->tmreg_lock); 158 mutex_unlock(&pf->tmreg_lock);
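The i40e_ptp change avoids feeding a signed delta into timespec64_add_ns(), whose nanosecond argument is unsigned. A short sketch of the safe path:

	s64 delta = -500000;			/* negative adjustments are valid */
	struct timespec64 now = { }, then;

	then = ns_to_timespec64(delta);		/* sign is preserved              */
	now = timespec64_add(now, then);	/* works for both signs           */
	/* timespec64_add_ns(&now, delta) would reinterpret delta as a huge u64 */
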
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b5c182e688e3..1b17486543ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
102 if (err) 102 if (err)
103 return err; 103 return err;
104 104
105 set_bit(qid, vsi->af_xdp_zc_qps);
106
105 if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi); 107 if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
106 108
107 if (if_running) { 109 if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
148 return err; 150 return err;
149 } 151 }
150 152
153 clear_bit(qid, vsi->af_xdp_zc_qps);
151 i40e_xsk_umem_dma_unmap(vsi, umem); 154 i40e_xsk_umem_dma_unmap(vsi, umem);
152 155
153 if (if_running) { 156 if (if_running) {
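A condensed sketch of the qid-tracking lifecycle the af_xdp_zc_qps bitmap adds; num_queue_pairs and qid are placeholders:

	unsigned long *zc_qps = bitmap_zalloc(num_queue_pairs, GFP_KERNEL);

	if (!zc_qps)
		return -ENOMEM;		/* allocated at VSI setup          */

	set_bit(qid, zc_qps);		/* UMEM enabled on this queue      */
	if (test_bit(qid, zc_qps))	/* ring config: attach the UMEM    */
		;
	clear_bit(qid, zc_qps);		/* UMEM disabled again             */
	bitmap_free(zc_qps);		/* VSI teardown                    */
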
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 01fcfc6f3415..d2e2c50ce257 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -194,6 +194,8 @@
194/* enable link status from external LINK_0 and LINK_1 pins */ 194/* enable link status from external LINK_0 and LINK_1 pins */
195#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */ 195#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
196#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */ 196#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
197#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
198#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
197#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */ 199#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
198#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */ 200#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
199#define E1000_CTRL_RST 0x04000000 /* Global reset */ 201#define E1000_CTRL_RST 0x04000000 /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 69b230c53fed..3269d8e94744 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8740 struct e1000_hw *hw = &adapter->hw; 8740 struct e1000_hw *hw = &adapter->hw;
8741 u32 ctrl, rctl, status; 8741 u32 ctrl, rctl, status;
8742 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol; 8742 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
8743#ifdef CONFIG_PM 8743 bool wake;
8744 int retval = 0;
8745#endif
8746 8744
8747 rtnl_lock(); 8745 rtnl_lock();
8748 netif_device_detach(netdev); 8746 netif_device_detach(netdev);
@@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8755 igb_clear_interrupt_scheme(adapter); 8753 igb_clear_interrupt_scheme(adapter);
8756 rtnl_unlock(); 8754 rtnl_unlock();
8757 8755
8758#ifdef CONFIG_PM
8759 if (!runtime) {
8760 retval = pci_save_state(pdev);
8761 if (retval)
8762 return retval;
8763 }
8764#endif
8765
8766 status = rd32(E1000_STATUS); 8756 status = rd32(E1000_STATUS);
8767 if (status & E1000_STATUS_LU) 8757 if (status & E1000_STATUS_LU)
8768 wufc &= ~E1000_WUFC_LNKC; 8758 wufc &= ~E1000_WUFC_LNKC;
@@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8779 } 8769 }
8780 8770
8781 ctrl = rd32(E1000_CTRL); 8771 ctrl = rd32(E1000_CTRL);
8782 /* advertise wake from D3Cold */
8783 #define E1000_CTRL_ADVD3WUC 0x00100000
8784 /* phy power management enable */
8785 #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
8786 ctrl |= E1000_CTRL_ADVD3WUC; 8772 ctrl |= E1000_CTRL_ADVD3WUC;
8787 wr32(E1000_CTRL, ctrl); 8773 wr32(E1000_CTRL, ctrl);
8788 8774
@@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
8796 wr32(E1000_WUFC, 0); 8782 wr32(E1000_WUFC, 0);
8797 } 8783 }
8798 8784
8799 *enable_wake = wufc || adapter->en_mng_pt; 8785 wake = wufc || adapter->en_mng_pt;
8800 if (!*enable_wake) 8786 if (!wake)
8801 igb_power_down_link(adapter); 8787 igb_power_down_link(adapter);
8802 else 8788 else
8803 igb_power_up_link(adapter); 8789 igb_power_up_link(adapter);
8804 8790
8791 if (enable_wake)
8792 *enable_wake = wake;
8793
8805 /* Release control of h/w to f/w. If f/w is AMT enabled, this 8794 /* Release control of h/w to f/w. If f/w is AMT enabled, this
8806 * would have already happened in close and is redundant. 8795 * would have already happened in close and is redundant.
8807 */ 8796 */
@@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
8844 8833
8845static int __maybe_unused igb_suspend(struct device *dev) 8834static int __maybe_unused igb_suspend(struct device *dev)
8846{ 8835{
8847 int retval; 8836 return __igb_shutdown(to_pci_dev(dev), NULL, 0);
8848 bool wake;
8849 struct pci_dev *pdev = to_pci_dev(dev);
8850
8851 retval = __igb_shutdown(pdev, &wake, 0);
8852 if (retval)
8853 return retval;
8854
8855 if (wake) {
8856 pci_prepare_to_sleep(pdev);
8857 } else {
8858 pci_wake_from_d3(pdev, false);
8859 pci_set_power_state(pdev, PCI_D3hot);
8860 }
8861
8862 return 0;
8863} 8837}
8864 8838
8865static int __maybe_unused igb_resume(struct device *dev) 8839static int __maybe_unused igb_resume(struct device *dev)
@@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
8930 8904
8931static int __maybe_unused igb_runtime_suspend(struct device *dev) 8905static int __maybe_unused igb_runtime_suspend(struct device *dev)
8932{ 8906{
8933 struct pci_dev *pdev = to_pci_dev(dev); 8907 return __igb_shutdown(to_pci_dev(dev), NULL, 1);
8934 int retval;
8935 bool wake;
8936
8937 retval = __igb_shutdown(pdev, &wake, 1);
8938 if (retval)
8939 return retval;
8940
8941 if (wake) {
8942 pci_prepare_to_sleep(pdev);
8943 } else {
8944 pci_wake_from_d3(pdev, false);
8945 pci_set_power_state(pdev, PCI_D3hot);
8946 }
8947
8948 return 0;
8949} 8908}
8950 8909
8951static int __maybe_unused igb_runtime_resume(struct device *dev) 8910static int __maybe_unused igb_runtime_resume(struct device *dev)
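The igb rework relies on the usual __maybe_unused convention: the PM callbacks are always compiled and the dev_pm_ops macros decide whether they are referenced, which is what lets the CONFIG_PM #ifdefs go. A generic sketch with illustrative names:

	static int __maybe_unused example_suspend(struct device *dev)
	{
		/* device-specific quiesce */
		return 0;
	}

	static int __maybe_unused example_resume(struct device *dev)
	{
		/* device-specific wake-up */
		return 0;
	}

	/* without CONFIG_PM_SLEEP the SET_SYSTEM_SLEEP_PM_OPS() part expands to
	 * nothing, and __maybe_unused keeps the unreferenced callbacks quiet */
	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
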
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index cc4907f9ff02..2fb97967961c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
905 struct pci_dev *pdev = adapter->pdev; 905 struct pci_dev *pdev = adapter->pdev;
906 struct device *dev = &adapter->netdev->dev; 906 struct device *dev = &adapter->netdev->dev;
907 struct mii_bus *bus; 907 struct mii_bus *bus;
908 int err = -ENODEV;
908 909
909 adapter->mii_bus = devm_mdiobus_alloc(dev); 910 bus = devm_mdiobus_alloc(dev);
910 if (!adapter->mii_bus) 911 if (!bus)
911 return -ENOMEM; 912 return -ENOMEM;
912 913
913 bus = adapter->mii_bus;
914
915 switch (hw->device_id) { 914 switch (hw->device_id) {
916 /* C3000 SoCs */ 915 /* C3000 SoCs */
917 case IXGBE_DEV_ID_X550EM_A_KR: 916 case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
949 */ 948 */
950 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; 949 hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
951 950
952 return mdiobus_register(bus); 951 err = mdiobus_register(bus);
952 if (!err) {
953 adapter->mii_bus = bus;
954 return 0;
955 }
953 956
954ixgbe_no_mii_bus: 957ixgbe_no_mii_bus:
955 devm_mdiobus_free(dev, bus); 958 devm_mdiobus_free(dev, bus);
956 adapter->mii_bus = NULL; 959 return err;
957 return -ENODEV;
958} 960}
959 961
960/** 962/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 122927f3a600..d5e5afbdca6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
96 if (!eproto) 96 if (!eproto)
97 return -EINVAL; 97 return -EINVAL;
98 98
99 if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
100 return -EOPNOTSUPP;
101
102 err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port); 99 err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
103 if (err) 100 if (err)
104 return err; 101 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index eac245a93f91..4ab0d030b544 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -122,7 +122,9 @@ out:
122 return err; 122 return err;
123} 123}
124 124
125/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */ 125/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
126 * minimum speed value is 40Gbps
127 */
126static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu) 128static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
127{ 129{
128 u32 speed; 130 u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
130 int err; 132 int err;
131 133
132 err = mlx5e_port_linkspeed(priv->mdev, &speed); 134 err = mlx5e_port_linkspeed(priv->mdev, &speed);
133 if (err) { 135 if (err)
134 mlx5_core_warn(priv->mdev, "cannot get port speed\n"); 136 speed = SPEED_40000;
135 return 0; 137 speed = max_t(u32, speed, SPEED_40000);
136 }
137 138
138 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100; 139 xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
139 140
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
142} 143}
143 144
144static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer, 145static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
145 u32 xoff, unsigned int mtu) 146 u32 xoff, unsigned int max_mtu)
146{ 147{
147 int i; 148 int i;
148 149
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
154 } 155 }
155 156
156 if (port_buffer->buffer[i].size < 157 if (port_buffer->buffer[i].size <
157 (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT))) 158 (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
158 return -ENOMEM; 159 return -ENOMEM;
159 160
160 port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff; 161 port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
161 port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu; 162 port_buffer->buffer[i].xon =
163 port_buffer->buffer[i].xoff - max_mtu;
162 } 164 }
163 165
164 return 0; 166 return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
166 168
167/** 169/**
168 * update_buffer_lossy() 170 * update_buffer_lossy()
169 * mtu: device's MTU 171 * max_mtu: netdev's max_mtu
170 * pfc_en: <input> current pfc configuration 172 * pfc_en: <input> current pfc configuration
171 * buffer: <input> current prio to buffer mapping 173 * buffer: <input> current prio to buffer mapping
172 * xoff: <input> xoff value 174 * xoff: <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
183 * Return 0 if no error. 185 * Return 0 if no error.
184 * Set change to true if buffer configuration is modified. 186 * Set change to true if buffer configuration is modified.
185 */ 187 */
186static int update_buffer_lossy(unsigned int mtu, 188static int update_buffer_lossy(unsigned int max_mtu,
187 u8 pfc_en, u8 *buffer, u32 xoff, 189 u8 pfc_en, u8 *buffer, u32 xoff,
188 struct mlx5e_port_buffer *port_buffer, 190 struct mlx5e_port_buffer *port_buffer,
189 bool *change) 191 bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
220 } 222 }
221 223
222 if (changed) { 224 if (changed) {
223 err = update_xoff_threshold(port_buffer, xoff, mtu); 225 err = update_xoff_threshold(port_buffer, xoff, max_mtu);
224 if (err) 226 if (err)
225 return err; 227 return err;
226 228
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
230 return 0; 232 return 0;
231} 233}
232 234
235#define MINIMUM_MAX_MTU 9216
233int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv, 236int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
234 u32 change, unsigned int mtu, 237 u32 change, unsigned int mtu,
235 struct ieee_pfc *pfc, 238 struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
241 bool update_prio2buffer = false; 244 bool update_prio2buffer = false;
242 u8 buffer[MLX5E_MAX_PRIORITY]; 245 u8 buffer[MLX5E_MAX_PRIORITY];
243 bool update_buffer = false; 246 bool update_buffer = false;
247 unsigned int max_mtu;
244 u32 total_used = 0; 248 u32 total_used = 0;
245 u8 curr_pfc_en; 249 u8 curr_pfc_en;
246 int err; 250 int err;
247 int i; 251 int i;
248 252
249 mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change); 253 mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
254 max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
250 255
251 err = mlx5e_port_query_buffer(priv, &port_buffer); 256 err = mlx5e_port_query_buffer(priv, &port_buffer);
252 if (err) 257 if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
254 259
255 if (change & MLX5E_PORT_BUFFER_CABLE_LEN) { 260 if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
256 update_buffer = true; 261 update_buffer = true;
257 err = update_xoff_threshold(&port_buffer, xoff, mtu); 262 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
258 if (err) 263 if (err)
259 return err; 264 return err;
260 } 265 }
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
264 if (err) 269 if (err)
265 return err; 270 return err;
266 271
267 err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff, 272 err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
268 &port_buffer, &update_buffer); 273 &port_buffer, &update_buffer);
269 if (err) 274 if (err)
270 return err; 275 return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
276 if (err) 281 if (err)
277 return err; 282 return err;
278 283
279 err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff, 284 err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
280 &port_buffer, &update_buffer); 285 xoff, &port_buffer, &update_buffer);
281 if (err) 286 if (err)
282 return err; 287 return err;
283 } 288 }
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
301 return -EINVAL; 306 return -EINVAL;
302 307
303 update_buffer = true; 308 update_buffer = true;
304 err = update_xoff_threshold(&port_buffer, xoff, mtu); 309 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
305 if (err) 310 if (err)
306 return err; 311 return err;
307 } 312 }
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
309 /* Need to update buffer configuration if xoff value is changed */ 314 /* Need to update buffer configuration if xoff value is changed */
310 if (!update_buffer && xoff != priv->dcbx.xoff) { 315 if (!update_buffer && xoff != priv->dcbx.xoff) {
311 update_buffer = true; 316 update_buffer = true;
312 err = update_xoff_threshold(&port_buffer, xoff, mtu); 317 err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
313 if (err) 318 if (err)
314 return err; 319 return err;
315 } 320 }
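Note: the fixed-point arithmetic in calculate_xoff() above can be hard to read at a glance. The standalone C sketch below (not part of the patch) reproduces the same integer math, including the new SPEED_40000 floor the hunk introduces; the cable length, link speed and MTU values are made up for illustration.

#include <stdio.h>
#include <stdint.h>

/* Same integer math as calculate_xoff() above: cable_len in metres,
 * speed in Mbps, mtu in bytes. */
static uint32_t calc_xoff(uint32_t cable_len, uint32_t speed, uint32_t mtu)
{
    return (301 + 216 * cable_len / 100) * speed / 1000 + 272 * mtu / 100;
}

int main(void)
{
    uint32_t speed = 10000;     /* example: a 10 Gbps link */

    if (speed < 40000)          /* mirror the new 40 Gbps minimum */
        speed = 40000;

    printf("xoff = %u bytes\n", (unsigned)calc_xoff(7, speed, 9000));
    return 0;
}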
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3078491cc0d0..1539cf3de5dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
45 if (err) 45 if (err)
46 return err; 46 return err;
47 47
48 mutex_lock(&mdev->mlx5e_res.td.list_lock);
48 list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list); 49 list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
50 mutex_unlock(&mdev->mlx5e_res.td.list_lock);
49 51
50 return 0; 52 return 0;
51} 53}
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
53void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, 55void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
54 struct mlx5e_tir *tir) 56 struct mlx5e_tir *tir)
55{ 57{
58 mutex_lock(&mdev->mlx5e_res.td.list_lock);
56 mlx5_core_destroy_tir(mdev, tir->tirn); 59 mlx5_core_destroy_tir(mdev, tir->tirn);
57 list_del(&tir->list); 60 list_del(&tir->list);
61 mutex_unlock(&mdev->mlx5e_res.td.list_lock);
58} 62}
59 63
60static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, 64static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
114 } 118 }
115 119
116 INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list); 120 INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
121 mutex_init(&mdev->mlx5e_res.td.list_lock);
117 122
118 return 0; 123 return 0;
119 124
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
141{ 146{
142 struct mlx5_core_dev *mdev = priv->mdev; 147 struct mlx5_core_dev *mdev = priv->mdev;
143 struct mlx5e_tir *tir; 148 struct mlx5e_tir *tir;
144 int err = -ENOMEM; 149 int err = 0;
145 u32 tirn = 0; 150 u32 tirn = 0;
146 int inlen; 151 int inlen;
147 void *in; 152 void *in;
148 153
149 inlen = MLX5_ST_SZ_BYTES(modify_tir_in); 154 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
150 in = kvzalloc(inlen, GFP_KERNEL); 155 in = kvzalloc(inlen, GFP_KERNEL);
151 if (!in) 156 if (!in) {
157 err = -ENOMEM;
152 goto out; 158 goto out;
159 }
153 160
154 if (enable_uc_lb) 161 if (enable_uc_lb)
155 MLX5_SET(modify_tir_in, in, ctx.self_lb_block, 162 MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
157 164
158 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1); 165 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
159 166
167 mutex_lock(&mdev->mlx5e_res.td.list_lock);
160 list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) { 168 list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
161 tirn = tir->tirn; 169 tirn = tir->tirn;
162 err = mlx5_core_modify_tir(mdev, tirn, in, inlen); 170 err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
168 kvfree(in); 176 kvfree(in);
169 if (err) 177 if (err)
170 netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err); 178 netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
179 mutex_unlock(&mdev->mlx5e_res.td.list_lock);
171 180
172 return err; 181 return err;
173} 182}
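The en_common.c hunks above serialize every access to the TIR list behind a new list_lock mutex, so create/destroy can no longer race with the refresh loop. Below is a minimal userspace sketch of that pattern, with add and iterate both taken under one mutex; the struct, names and the printf stand-in for the modify-TIR call are illustrative, not the driver's real types.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct tir {
    int tirn;
    struct tir *next;
};

static struct tir *tirs_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void tir_add(struct tir *t)
{
    pthread_mutex_lock(&list_lock);
    t->next = tirs_list;
    tirs_list = t;
    pthread_mutex_unlock(&list_lock);
}

static void tirs_refresh(void)
{
    pthread_mutex_lock(&list_lock);
    for (struct tir *t = tirs_list; t; t = t->next)
        printf("refresh tir 0x%x\n", t->tirn);  /* modify-TIR stand-in */
    pthread_mutex_unlock(&list_lock);
}

int main(void)
{
    struct tir *t = calloc(1, sizeof(*t));

    t->tirn = 0x2a;
    tir_add(t);
    tirs_refresh();
    return 0;
}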
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a0987cc5fe4a..5efce4a3ff79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
603 __ETHTOOL_LINK_MODE_MASK_NBITS); 603 __ETHTOOL_LINK_MODE_MASK_NBITS);
604} 604}
605 605
606static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev, 606static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
607 unsigned long *advertising_modes, 607 u32 eth_proto_cap, bool ext)
608 u32 eth_proto_cap)
609{ 608{
610 unsigned long proto_cap = eth_proto_cap; 609 unsigned long proto_cap = eth_proto_cap;
611 struct ptys2ethtool_config *table; 610 struct ptys2ethtool_config *table;
612 u32 max_size; 611 u32 max_size;
613 int proto; 612 int proto;
614 613
615 mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size); 614 table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
615 max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
616 ARRAY_SIZE(ptys2legacy_ethtool_table);
617
616 for_each_set_bit(proto, &proto_cap, max_size) 618 for_each_set_bit(proto, &proto_cap, max_size)
617 bitmap_or(advertising_modes, advertising_modes, 619 bitmap_or(advertising_modes, advertising_modes,
618 table[proto].advertised, 620 table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
794 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause); 796 ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
795} 797}
796 798
797static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap, 799static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
798 u8 tx_pause, u8 rx_pause, 800 struct ethtool_link_ksettings *link_ksettings,
799 struct ethtool_link_ksettings *link_ksettings) 801 bool ext)
800{ 802{
801 unsigned long *advertising = link_ksettings->link_modes.advertising; 803 unsigned long *advertising = link_ksettings->link_modes.advertising;
802 ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap); 804 ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
803 805
804 if (rx_pause) 806 if (rx_pause)
805 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause); 807 ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
854 struct ethtool_link_ksettings *link_ksettings) 856 struct ethtool_link_ksettings *link_ksettings)
855{ 857{
856 unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising; 858 unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
859 bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
857 860
858 ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp); 861 ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
859} 862}
860 863
861int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, 864int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
872 u8 an_disable_admin; 875 u8 an_disable_admin;
873 u8 an_status; 876 u8 an_status;
874 u8 connector_type; 877 u8 connector_type;
878 bool admin_ext;
875 bool ext; 879 bool ext;
876 int err; 880 int err;
877 881
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
886 eth_proto_capability); 890 eth_proto_capability);
887 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 891 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
888 eth_proto_admin); 892 eth_proto_admin);
893 /* Fields: eth_proto_admin and ext_eth_proto_admin are
894 * mutually exclusive. Hence try reading legacy advertising
895 * when extended advertising is zero.
896 * admin_ext indicates how eth_proto_admin should be
897 * interpreted
898 */
899 admin_ext = ext;
900 if (ext && !eth_proto_admin) {
901 eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
902 eth_proto_admin);
903 admin_ext = false;
904 }
905
889 eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, 906 eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
890 eth_proto_oper); 907 eth_proto_oper);
891 eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise); 908 eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
899 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising); 916 ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
900 917
901 get_supported(mdev, eth_proto_cap, link_ksettings); 918 get_supported(mdev, eth_proto_cap, link_ksettings);
902 get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings); 919 get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
920 admin_ext);
903 get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); 921 get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
904 922
905 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; 923 eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
997 1015
998#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1) 1016#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
999 1017
1000 ext_requested = (link_ksettings->link_modes.advertising[0] > 1018 ext_requested = !!(link_ksettings->link_modes.advertising[0] >
1001 MLX5E_PTYS_EXT); 1019 MLX5E_PTYS_EXT ||
1020 link_ksettings->link_modes.advertising[1]);
1002 ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet); 1021 ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
1003 1022 ext_requested &= ext_supported;
1004 /*when ptys_extended_ethernet is set legacy link modes are deprecated */
1005 if (ext_requested != ext_supported)
1006 return -EPROTONOSUPPORT;
1007 1023
1008 speed = link_ksettings->base.speed; 1024 speed = link_ksettings->base.speed;
1009 ethtool2ptys_adver_func = ext_requested ? 1025 ethtool2ptys_adver_func = ext_requested ?
1010 mlx5e_ethtool2ptys_ext_adver_link : 1026 mlx5e_ethtool2ptys_ext_adver_link :
1011 mlx5e_ethtool2ptys_adver_link; 1027 mlx5e_ethtool2ptys_adver_link;
1012 err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto); 1028 err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
1013 if (err) { 1029 if (err) {
1014 netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n", 1030 netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
1015 __func__, err); 1031 __func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
1037 if (!an_changes && link_modes == eproto.admin) 1053 if (!an_changes && link_modes == eproto.admin)
1038 goto out; 1054 goto out;
1039 1055
1040 mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported); 1056 mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
1041 mlx5_toggle_port_link(mdev); 1057 mlx5_toggle_port_link(mdev);
1042 1058
1043out: 1059out:
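The en_ethtool.c changes above stop assuming that a device with extended-PTYS capability also reports extended admin settings: when the extended admin field reads as zero, the legacy field is used instead and admin_ext records which interpretation applies. The sketch below shows only that fallback decision; the field names and values are placeholders, not the real PTYS register layout.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ptys_fields {
    uint32_t ext_eth_proto_admin;
    uint32_t eth_proto_admin;
};

static uint32_t pick_admin(const struct ptys_fields *f, bool ext_cap,
                           bool *admin_ext)
{
    if (ext_cap && f->ext_eth_proto_admin) {
        *admin_ext = true;                 /* extended interpretation */
        return f->ext_eth_proto_admin;
    }
    *admin_ext = false;                    /* fall back to legacy field */
    return f->eth_proto_admin;
}

int main(void)
{
    struct ptys_fields f = { .ext_eth_proto_admin = 0,
                             .eth_proto_admin = 0x40 };
    bool admin_ext;
    uint32_t admin = pick_admin(&f, true, &admin_ext);

    printf("admin=0x%x interpreted as %s\n", (unsigned)admin,
           admin_ext ? "extended" : "legacy");
    return 0;
}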
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b4967a0ff8c7..d75dc44eb2ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
2158 return true; 2158 return true;
2159} 2159}
2160 2160
2161struct ip_ttl_word {
2162 __u8 ttl;
2163 __u8 protocol;
2164 __sum16 check;
2165};
2166
2167struct ipv6_hoplimit_word {
2168 __be16 payload_len;
2169 __u8 nexthdr;
2170 __u8 hop_limit;
2171};
2172
2173static bool is_action_keys_supported(const struct flow_action_entry *act)
2174{
2175 u32 mask, offset;
2176 u8 htype;
2177
2178 htype = act->mangle.htype;
2179 offset = act->mangle.offset;
2180 mask = ~act->mangle.mask;
2181 /* For IPv4 & IPv6 header check 4 byte word,
2182 * to determine that modified fields
2183 * are NOT ttl & hop_limit only.
2184 */
2185 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
2186 struct ip_ttl_word *ttl_word =
2187 (struct ip_ttl_word *)&mask;
2188
2189 if (offset != offsetof(struct iphdr, ttl) ||
2190 ttl_word->protocol ||
2191 ttl_word->check) {
2192 return true;
2193 }
2194 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2195 struct ipv6_hoplimit_word *hoplimit_word =
2196 (struct ipv6_hoplimit_word *)&mask;
2197
2198 if (offset != offsetof(struct ipv6hdr, payload_len) ||
2199 hoplimit_word->payload_len ||
2200 hoplimit_word->nexthdr) {
2201 return true;
2202 }
2203 }
2204 return false;
2205}
2206
2161static bool modify_header_match_supported(struct mlx5_flow_spec *spec, 2207static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2162 struct flow_action *flow_action, 2208 struct flow_action *flow_action,
2163 u32 actions, 2209 u32 actions,
@@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2165{ 2211{
2166 const struct flow_action_entry *act; 2212 const struct flow_action_entry *act;
2167 bool modify_ip_header; 2213 bool modify_ip_header;
2168 u8 htype, ip_proto;
2169 void *headers_v; 2214 void *headers_v;
2170 u16 ethertype; 2215 u16 ethertype;
2216 u8 ip_proto;
2171 int i; 2217 int i;
2172 2218
2173 if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP) 2219 if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
2187 act->id != FLOW_ACTION_ADD) 2233 act->id != FLOW_ACTION_ADD)
2188 continue; 2234 continue;
2189 2235
2190 htype = act->mangle.htype; 2236 if (is_action_keys_supported(act)) {
2191 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
2192 htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2193 modify_ip_header = true; 2237 modify_ip_header = true;
2194 break; 2238 break;
2195 } 2239 }
@@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
2340 return 0; 2384 return 0;
2341} 2385}
2342 2386
2343static inline int cmp_encap_info(struct ip_tunnel_key *a, 2387struct encap_key {
2344 struct ip_tunnel_key *b) 2388 struct ip_tunnel_key *ip_tun_key;
2389 int tunnel_type;
2390};
2391
2392static inline int cmp_encap_info(struct encap_key *a,
2393 struct encap_key *b)
2345{ 2394{
2346 return memcmp(a, b, sizeof(*a)); 2395 return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
2396 a->tunnel_type != b->tunnel_type;
2347} 2397}
2348 2398
2349static inline int hash_encap_info(struct ip_tunnel_key *key) 2399static inline int hash_encap_info(struct encap_key *key)
2350{ 2400{
2351 return jhash(key, sizeof(*key), 0); 2401 return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
2402 key->tunnel_type);
2352} 2403}
2353 2404
2354 2405
@@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2379 struct mlx5_esw_flow_attr *attr = flow->esw_attr; 2430 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2380 struct mlx5e_tc_flow_parse_attr *parse_attr; 2431 struct mlx5e_tc_flow_parse_attr *parse_attr;
2381 struct ip_tunnel_info *tun_info; 2432 struct ip_tunnel_info *tun_info;
2382 struct ip_tunnel_key *key; 2433 struct encap_key key, e_key;
2383 struct mlx5e_encap_entry *e; 2434 struct mlx5e_encap_entry *e;
2384 unsigned short family; 2435 unsigned short family;
2385 uintptr_t hash_key; 2436 uintptr_t hash_key;
@@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2389 parse_attr = attr->parse_attr; 2440 parse_attr = attr->parse_attr;
2390 tun_info = &parse_attr->tun_info[out_index]; 2441 tun_info = &parse_attr->tun_info[out_index];
2391 family = ip_tunnel_info_af(tun_info); 2442 family = ip_tunnel_info_af(tun_info);
2392 key = &tun_info->key; 2443 key.ip_tun_key = &tun_info->key;
2444 key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
2393 2445
2394 hash_key = hash_encap_info(key); 2446 hash_key = hash_encap_info(&key);
2395 2447
2396 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e, 2448 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2397 encap_hlist, hash_key) { 2449 encap_hlist, hash_key) {
2398 if (!cmp_encap_info(&e->tun_info.key, key)) { 2450 e_key.ip_tun_key = &e->tun_info.key;
2451 e_key.tunnel_type = e->tunnel_type;
2452 if (!cmp_encap_info(&e_key, &key)) {
2399 found = true; 2453 found = true;
2400 break; 2454 break;
2401 } 2455 }
@@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
2657 2711
2658 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits || 2712 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
2659 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) { 2713 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
2660 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL, 2714 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
2661 parse_attr, hdrs, extack); 2715 parse_attr, hdrs, extack);
2662 if (err) 2716 if (err)
2663 return err; 2717 return err;
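is_action_keys_supported() above inverts the 32-bit pedit mask and views it through a small struct to ask whether anything other than ttl/hop_limit is being rewritten. The standalone sketch below demonstrates the same overlay on a made-up IPv4 mask word; it assumes a little-endian host and is only an illustration of the trick, not the driver code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ip_ttl_word {
    uint8_t  ttl;
    uint8_t  protocol;
    uint16_t check;
};

static bool touches_more_than_ttl(uint32_t pedit_mask)
{
    uint32_t mask = ~pedit_mask;       /* bits being modified */
    struct ip_ttl_word w;

    memcpy(&w, &mask, sizeof(w));      /* overlay without aliasing UB */
    return w.protocol || w.check;
}

int main(void)
{
    /* only the first byte (ttl) cleared in the mask -> prints 0 */
    printf("%d\n", touches_more_than_ttl(0xffffff00));
    /* protocol byte cleared as well -> prints 1 */
    printf("%d\n", touches_more_than_ttl(0xffff0000));
    return 0;
}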
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ecd2c747f726..8a67fd197b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
105 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT); 105 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
106 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1); 106 MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
107 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport); 107 MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
108 if (vport) 108 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
109 MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
110 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in, 109 nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
111 in, nic_vport_context); 110 in, nic_vport_context);
112 111
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
134 MLX5_SET(modify_esw_vport_context_in, in, opcode, 133 MLX5_SET(modify_esw_vport_context_in, in, opcode,
135 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT); 134 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
136 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport); 135 MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
137 if (vport) 136 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
138 MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
139 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); 137 return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
140} 138}
141 139
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
431{ 429{
432 int err; 430 int err;
433 431
432 memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
433
434 err = esw_create_legacy_vepa_table(esw); 434 err = esw_create_legacy_vepa_table(esw);
435 if (err) 435 if (err)
436 return err; 436 return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
2157 2157
2158 /* Star rule to forward all traffic to uplink vport */ 2158 /* Star rule to forward all traffic to uplink vport */
2159 memset(spec, 0, sizeof(*spec)); 2159 memset(spec, 0, sizeof(*spec));
2160 memset(&dest, 0, sizeof(dest));
2160 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; 2161 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2161 dest.vport.num = MLX5_VPORT_UPLINK; 2162 dest.vport.num = MLX5_VPORT_UPLINK;
2162 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; 2163 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f2260391be5b..9b2d78ee22b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
1611{ 1611{
1612 int err; 1612 int err;
1613 1613
1614 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
1614 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); 1615 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
1615 1616
1616 err = esw_create_offloads_fdb_tables(esw, nvports); 1617 err = esw_create_offloads_fdb_tables(esw, nvports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 5cf5f2a9d51f..8de64e88c670 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -217,15 +217,21 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
217 void *cmd; 217 void *cmd;
218 int ret; 218 int ret;
219 219
220 rcu_read_lock();
221 flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
222 rcu_read_unlock();
223
224 if (!flow) {
225 WARN_ONCE(1, "Received NULL pointer for handle\n");
226 return -EINVAL;
227 }
228
220 buf = kzalloc(size, GFP_ATOMIC); 229 buf = kzalloc(size, GFP_ATOMIC);
221 if (!buf) 230 if (!buf)
222 return -ENOMEM; 231 return -ENOMEM;
223 232
224 cmd = (buf + 1); 233 cmd = (buf + 1);
225 234
226 rcu_read_lock();
227 flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
228 rcu_read_unlock();
229 mlx5_fpga_tls_flow_to_cmd(flow, cmd); 235 mlx5_fpga_tls_flow_to_cmd(flow, cmd);
230 236
231 MLX5_SET(tls_cmd, cmd, swid, ntohl(handle)); 237 MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
@@ -238,6 +244,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
238 buf->complete = mlx_tls_kfree_complete; 244 buf->complete = mlx_tls_kfree_complete;
239 245
240 ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf); 246 ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
247 if (ret < 0)
248 kfree(buf);
241 249
242 return ret; 250 return ret;
243} 251}
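The fpga/tls.c hunk above makes two related fixes: the idr lookup is validated before the command buffer is allocated, and the buffer is freed when the send fails instead of leaking. A minimal sketch of that shape, with stub lookup and send functions that merely simulate outcomes:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int dummy_flow;                 /* stands in for the idr entry */

static void *lookup_flow(unsigned int handle)
{
    return handle == 42 ? &dummy_flow : NULL;
}

static int send_cmd(void *buf)
{
    (void)buf;
    return -EIO;                        /* simulate a transport error */
}

static int resync_rx(unsigned int handle)
{
    void *flow = lookup_flow(handle);   /* validate before allocating */
    void *buf;
    int ret;

    if (!flow)
        return -EINVAL;

    buf = calloc(1, 64);
    if (!buf)
        return -ENOMEM;

    ret = send_cmd(buf);
    if (ret < 0)
        free(buf);                      /* don't leak on send failure */
    return ret;
}

int main(void)
{
    printf("resync_rx(42) -> %d\n", resync_rx(42));
    printf("resync_rx(7)  -> %d\n", resync_rx(7));
    return 0;
}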
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 70cc906a102b..76716419370d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
164 .size = 8, 164 .size = 8,
165 .limit = 4 165 .limit = 4
166 }, 166 },
167 .mr_cache[16] = {
168 .size = 8,
169 .limit = 4
170 },
171 .mr_cache[17] = {
172 .size = 8,
173 .limit = 4
174 },
175 .mr_cache[18] = {
176 .size = 8,
177 .limit = 4
178 },
179 .mr_cache[19] = {
180 .size = 4,
181 .limit = 2
182 },
183 .mr_cache[20] = {
184 .size = 4,
185 .limit = 2
186 },
187 }, 167 },
188}; 168};
189 169
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index eeda4ed98333..e336f6ee94f5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
48 48
49 tmp_push_vlan_tci = 49 tmp_push_vlan_tci =
50 FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) | 50 FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
51 FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) | 51 FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
52 NFP_FL_PUSH_VLAN_CFI;
53 push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci); 52 push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
54} 53}
55 54
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4fcaf11ed56e..0ed51e79db00 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,7 +26,7 @@
26#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6) 26#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
27 27
28#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13) 28#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
29#define NFP_FLOWER_MASK_VLAN_CFI BIT(12) 29#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
30#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0) 30#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
31 31
32#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12) 32#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
@@ -82,7 +82,6 @@
82#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0) 82#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
83 83
84#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13) 84#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
85#define NFP_FL_PUSH_VLAN_CFI BIT(12)
86#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) 85#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
87 86
88#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) 87#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e03c8ef2c28c..9b8b843d0340 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
30 30
31 flow_rule_match_vlan(rule, &match); 31 flow_rule_match_vlan(rule, &match);
32 /* Populate the tci field. */ 32 /* Populate the tci field. */
33 if (match.key->vlan_id || match.key->vlan_priority) { 33 tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
34 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, 34 tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
35 match.key->vlan_priority) | 35 match.key->vlan_priority) |
36 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, 36 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
37 match.key->vlan_id) | 37 match.key->vlan_id);
38 NFP_FLOWER_MASK_VLAN_CFI; 38 ext->tci = cpu_to_be16(tmp_tci);
39 ext->tci = cpu_to_be16(tmp_tci); 39
40 tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, 40 tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
41 match.mask->vlan_priority) | 41 tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
42 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, 42 match.mask->vlan_priority) |
43 match.mask->vlan_id) | 43 FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
44 NFP_FLOWER_MASK_VLAN_CFI; 44 match.mask->vlan_id);
45 msk->tci = cpu_to_be16(tmp_tci); 45 msk->tci = cpu_to_be16(tmp_tci);
46 }
47 } 46 }
48} 47}
49 48
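After the nfp flower change above, the TCI sent to the firmware always carries the VLAN present bit, with priority and VID OR'd in via FIELD_PREP. The snippet below re-creates GENMASK/FIELD_PREP in plain C so the composition can be tried in userspace; the example priority and VID are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)    (((1u << ((h) - (l) + 1)) - 1) << (l))
#define FIELD_PREP(m, v) (((uint32_t)(v) << __builtin_ctz(m)) & (m))

#define VLAN_PRESENT    (1u << 12)
#define VLAN_PRIO       GENMASK(15, 13)
#define VLAN_VID        GENMASK(11, 0)

int main(void)
{
    uint16_t tci = VLAN_PRESENT |
                   FIELD_PREP(VLAN_PRIO, 3) |    /* example priority */
                   FIELD_PREP(VLAN_VID, 100);    /* example VLAN id  */

    printf("tci=0x%04x\n", tci);
    return 0;
}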
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d2c803bb4e56..94d228c04496 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
195 ret = dev_queue_xmit(skb); 195 ret = dev_queue_xmit(skb);
196 nfp_repr_inc_tx_stats(netdev, len, ret); 196 nfp_repr_inc_tx_stats(netdev, len, ret);
197 197
198 return ret; 198 return NETDEV_TX_OK;
199} 199}
200 200
201static int nfp_repr_stop(struct net_device *netdev) 201static int nfp_repr_stop(struct net_device *netdev)
@@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
383 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 383 netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
384 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; 384 netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
385 385
386 netdev->priv_flags |= IFF_NO_QUEUE; 386 netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
387 netdev->features |= NETIF_F_LLTX; 387 netdev->features |= NETIF_F_LLTX;
388 388
389 if (nfp_app_has_tc(app)) { 389 if (nfp_app_has_tc(app)) {
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7562ccbbb39a..19efa88f3f02 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5460,7 +5460,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
5460 tp->cp_cmd |= PktCntrDisable | INTT_1; 5460 tp->cp_cmd |= PktCntrDisable | INTT_1;
5461 RTL_W16(tp, CPlusCmd, tp->cp_cmd); 5461 RTL_W16(tp, CPlusCmd, tp->cp_cmd);
5462 5462
5463 RTL_W16(tp, IntrMitigate, 0x5151); 5463 RTL_W16(tp, IntrMitigate, 0x5100);
5464 5464
5465 /* Work around for RxFIFO overflow. */ 5465 /* Work around for RxFIFO overflow. */
5466 if (tp->mac_version == RTL_GIGA_MAC_VER_11) { 5466 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40d6356a7e73..3dfb07a78952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -29,11 +29,13 @@
29/* Specific functions used for Ring mode */ 29/* Specific functions used for Ring mode */
30 30
31/* Enhanced descriptors */ 31/* Enhanced descriptors */
32static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end) 32static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
33 int bfsize)
33{ 34{
34 p->des1 |= cpu_to_le32((BUF_SIZE_8KiB 35 if (bfsize == BUF_SIZE_16KiB)
35 << ERDES1_BUFFER2_SIZE_SHIFT) 36 p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
36 & ERDES1_BUFFER2_SIZE_MASK); 37 << ERDES1_BUFFER2_SIZE_SHIFT)
38 & ERDES1_BUFFER2_SIZE_MASK);
37 39
38 if (end) 40 if (end)
39 p->des1 |= cpu_to_le32(ERDES1_END_RING); 41 p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
59} 61}
60 62
61/* Normal descriptors */ 63/* Normal descriptors */
62static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end) 64static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
63{ 65{
64 p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1) 66 if (bfsize >= BUF_SIZE_2KiB) {
65 << RDES1_BUFFER2_SIZE_SHIFT) 67 int bfsize2;
66 & RDES1_BUFFER2_SIZE_MASK); 68
69 bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
70 p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
71 & RDES1_BUFFER2_SIZE_MASK);
72 }
67 73
68 if (end) 74 if (end)
69 p->des1 |= cpu_to_le32(RDES1_END_RING); 75 p->des1 |= cpu_to_le32(RDES1_END_RING);
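The descs_com.h hunks above size descriptor buffer 2 from the actual DMA buffer size instead of hard-coding it: buffer 1 takes up to BUF_SIZE_2KiB - 1 bytes and buffer 2 gets whatever remains, capped the same way. A small arithmetic sketch of that split, using an example 3000-byte buffer:

#include <stdio.h>

#define BUF_SIZE_2KiB 2048

static void split(int bfsize, int *b1, int *b2)
{
    *b1 = bfsize < BUF_SIZE_2KiB - 1 ? bfsize : BUF_SIZE_2KiB - 1;
    *b2 = 0;
    if (bfsize >= BUF_SIZE_2KiB) {
        int rest = bfsize - BUF_SIZE_2KiB + 1;

        *b2 = rest < BUF_SIZE_2KiB - 1 ? rest : BUF_SIZE_2KiB - 1;
    }
}

int main(void)
{
    int b1, b2;

    split(3000, &b1, &b2);              /* e.g. a 3000-byte DMA buffer */
    printf("buf1=%d buf2=%d\n", b1, b2);
    return 0;
}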
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7fbb6a4dbf51..e061e9f5fad7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -296,7 +296,7 @@ exit:
296} 296}
297 297
298static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 298static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
299 int mode, int end) 299 int mode, int end, int bfsize)
300{ 300{
301 dwmac4_set_rx_owner(p, disable_rx_ic); 301 dwmac4_set_rx_owner(p, disable_rx_ic);
302} 302}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 1d858fdec997..98fa471da7c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
123} 123}
124 124
125static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 125static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
126 int mode, int end) 126 int mode, int end, int bfsize)
127{ 127{
128 dwxgmac2_set_rx_owner(p, disable_rx_ic); 128 dwxgmac2_set_rx_owner(p, disable_rx_ic);
129} 129}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 5ef91a790f9d..5202d6ad7919 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
201 if (unlikely(rdes0 & RDES0_OWN)) 201 if (unlikely(rdes0 & RDES0_OWN))
202 return dma_own; 202 return dma_own;
203 203
204 if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
205 stats->rx_length_errors++;
206 return discard_frame;
207 }
208
204 if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) { 209 if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
205 if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) { 210 if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
206 x->rx_desc++; 211 x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
231 * It doesn't match with the information reported into the databook. 236 * It doesn't match with the information reported into the databook.
232 * At any rate, we need to understand if the CSUM hw computation is ok 237 * At any rate, we need to understand if the CSUM hw computation is ok
233 * and report this info to the upper layers. */ 238 * and report this info to the upper layers. */
234 ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR), 239 if (likely(ret == good_frame))
235 !!(rdes0 & RDES0_FRAME_TYPE), 240 ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
236 !!(rdes0 & ERDES0_RX_MAC_ADDR)); 241 !!(rdes0 & RDES0_FRAME_TYPE),
242 !!(rdes0 & ERDES0_RX_MAC_ADDR));
237 243
238 if (unlikely(rdes0 & RDES0_DRIBBLING)) 244 if (unlikely(rdes0 & RDES0_DRIBBLING))
239 x->dribbling_bit++; 245 x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
259} 265}
260 266
261static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 267static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
262 int mode, int end) 268 int mode, int end, int bfsize)
263{ 269{
270 int bfsize1;
271
264 p->des0 |= cpu_to_le32(RDES0_OWN); 272 p->des0 |= cpu_to_le32(RDES0_OWN);
265 p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK); 273
274 bfsize1 = min(bfsize, BUF_SIZE_8KiB);
275 p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
266 276
267 if (mode == STMMAC_CHAIN_MODE) 277 if (mode == STMMAC_CHAIN_MODE)
268 ehn_desc_rx_set_on_chain(p); 278 ehn_desc_rx_set_on_chain(p);
269 else 279 else
270 ehn_desc_rx_set_on_ring(p, end); 280 ehn_desc_rx_set_on_ring(p, end, bfsize);
271 281
272 if (disable_rx_ic) 282 if (disable_rx_ic)
273 p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC); 283 p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 92b8944f26e3..5bb00234d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -33,7 +33,7 @@ struct dma_extended_desc;
33struct stmmac_desc_ops { 33struct stmmac_desc_ops {
34 /* DMA RX descriptor ring initialization */ 34 /* DMA RX descriptor ring initialization */
35 void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode, 35 void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
36 int end); 36 int end, int bfsize);
37 /* DMA TX descriptor ring initialization */ 37 /* DMA TX descriptor ring initialization */
38 void (*init_tx_desc)(struct dma_desc *p, int mode, int end); 38 void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
39 /* Invoked by the xmit function to prepare the tx descriptor */ 39 /* Invoked by the xmit function to prepare the tx descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index de65bb29feba..b7dd4e3c760d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
91 return dma_own; 91 return dma_own;
92 92
93 if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) { 93 if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
94 pr_warn("%s: Oversized frame spanned multiple buffers\n",
95 __func__);
96 stats->rx_length_errors++; 94 stats->rx_length_errors++;
97 return discard_frame; 95 return discard_frame;
98 } 96 }
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
135} 133}
136 134
137static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode, 135static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
138 int end) 136 int end, int bfsize)
139{ 137{
138 int bfsize1;
139
140 p->des0 |= cpu_to_le32(RDES0_OWN); 140 p->des0 |= cpu_to_le32(RDES0_OWN);
141 p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK); 141
142 bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
 143 	p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
142 144
143 if (mode == STMMAC_CHAIN_MODE) 145 if (mode == STMMAC_CHAIN_MODE)
144 ndesc_rx_set_on_chain(p, end); 146 ndesc_rx_set_on_chain(p, end);
145 else 147 else
146 ndesc_rx_set_on_ring(p, end); 148 ndesc_rx_set_on_ring(p, end, bfsize);
147 149
148 if (disable_rx_ic) 150 if (disable_rx_ic)
149 p->des1 |= cpu_to_le32(RDES1_DISABLE_IC); 151 p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6a2e1031a62a..a26e36dbb5df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
1136 if (priv->extend_desc) 1136 if (priv->extend_desc)
1137 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, 1137 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1138 priv->use_riwt, priv->mode, 1138 priv->use_riwt, priv->mode,
1139 (i == DMA_RX_SIZE - 1)); 1139 (i == DMA_RX_SIZE - 1),
1140 priv->dma_buf_sz);
1140 else 1141 else
1141 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], 1142 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1142 priv->use_riwt, priv->mode, 1143 priv->use_riwt, priv->mode,
1143 (i == DMA_RX_SIZE - 1)); 1144 (i == DMA_RX_SIZE - 1),
1145 priv->dma_buf_sz);
1144} 1146}
1145 1147
1146/** 1148/**
@@ -3352,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3352{ 3354{
3353 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; 3355 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3354 struct stmmac_channel *ch = &priv->channel[queue]; 3356 struct stmmac_channel *ch = &priv->channel[queue];
3355 unsigned int entry = rx_q->cur_rx; 3357 unsigned int next_entry = rx_q->cur_rx;
3356 int coe = priv->hw->rx_csum; 3358 int coe = priv->hw->rx_csum;
3357 unsigned int next_entry;
3358 unsigned int count = 0; 3359 unsigned int count = 0;
3359 bool xmac; 3360 bool xmac;
3360 3361
@@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3372 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true); 3373 stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
3373 } 3374 }
3374 while (count < limit) { 3375 while (count < limit) {
3375 int status; 3376 int entry, status;
3376 struct dma_desc *p; 3377 struct dma_desc *p;
3377 struct dma_desc *np; 3378 struct dma_desc *np;
3378 3379
3380 entry = next_entry;
3381
3379 if (priv->extend_desc) 3382 if (priv->extend_desc)
3380 p = (struct dma_desc *)(rx_q->dma_erx + entry); 3383 p = (struct dma_desc *)(rx_q->dma_erx + entry);
3381 else 3384 else
@@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3431 * ignored 3434 * ignored
3432 */ 3435 */
3433 if (frame_len > priv->dma_buf_sz) { 3436 if (frame_len > priv->dma_buf_sz) {
3434 netdev_err(priv->dev, 3437 if (net_ratelimit())
3435 "len %d larger than size (%d)\n", 3438 netdev_err(priv->dev,
3436 frame_len, priv->dma_buf_sz); 3439 "len %d larger than size (%d)\n",
3440 frame_len, priv->dma_buf_sz);
3437 priv->dev->stats.rx_length_errors++; 3441 priv->dev->stats.rx_length_errors++;
3438 break; 3442 continue;
3439 } 3443 }
3440 3444
3441 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3 3445 /* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3470 dev_warn(priv->device, 3474 dev_warn(priv->device,
3471 "packet dropped\n"); 3475 "packet dropped\n");
3472 priv->dev->stats.rx_dropped++; 3476 priv->dev->stats.rx_dropped++;
3473 break; 3477 continue;
3474 } 3478 }
3475 3479
3476 dma_sync_single_for_cpu(priv->device, 3480 dma_sync_single_for_cpu(priv->device,
@@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3490 } else { 3494 } else {
3491 skb = rx_q->rx_skbuff[entry]; 3495 skb = rx_q->rx_skbuff[entry];
3492 if (unlikely(!skb)) { 3496 if (unlikely(!skb)) {
3493 netdev_err(priv->dev, 3497 if (net_ratelimit())
3494 "%s: Inconsistent Rx chain\n", 3498 netdev_err(priv->dev,
3495 priv->dev->name); 3499 "%s: Inconsistent Rx chain\n",
3500 priv->dev->name);
3496 priv->dev->stats.rx_dropped++; 3501 priv->dev->stats.rx_dropped++;
3497 break; 3502 continue;
3498 } 3503 }
3499 prefetch(skb->data - NET_IP_ALIGN); 3504 prefetch(skb->data - NET_IP_ALIGN);
3500 rx_q->rx_skbuff[entry] = NULL; 3505 rx_q->rx_skbuff[entry] = NULL;
@@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
3529 priv->dev->stats.rx_packets++; 3534 priv->dev->stats.rx_packets++;
3530 priv->dev->stats.rx_bytes += frame_len; 3535 priv->dev->stats.rx_bytes += frame_len;
3531 } 3536 }
3532 entry = next_entry;
3533 } 3537 }
3534 3538
3535 stmmac_rx_refill(priv, queue); 3539 stmmac_rx_refill(priv, queue);
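The stmmac_main.c RX-loop changes above latch the ring entry at the top of each iteration and turn the error paths into 'continue', so a single oversized or inconsistent descriptor no longer aborts the whole poll. A toy loop showing only that control-flow shape (the ring and its error flags are simulated):

#include <stdio.h>

#define RING_SIZE 8

int main(void)
{
    int bad[RING_SIZE] = { 0, 1, 0, 0, 1, 0, 0, 0 }; /* example errors */
    unsigned int next_entry = 0, count = 0, limit = RING_SIZE;

    while (count < limit) {
        unsigned int entry = next_entry;  /* latched at loop top */

        count++;
        next_entry = (entry + 1) % RING_SIZE;

        if (bad[entry]) {
            printf("entry %u: dropped, keep going\n", entry);
            continue;                     /* was 'break' before the fix */
        }
        printf("entry %u: delivered\n", entry);
    }
    return 0;
}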
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e859ae2e42d5..49f41b64077b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -987,6 +987,7 @@ struct netvsc_device {
987 987
988 wait_queue_head_t wait_drain; 988 wait_queue_head_t wait_drain;
989 bool destroy; 989 bool destroy;
990 bool tx_disable; /* if true, do not wake up queue again */
990 991
991 /* Receive buffer allocated by us but manages by NetVSP */ 992 /* Receive buffer allocated by us but manages by NetVSP */
992 void *recv_buf; 993 void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 813d195bbd57..e0dce373cdd9 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
110 110
111 init_waitqueue_head(&net_device->wait_drain); 111 init_waitqueue_head(&net_device->wait_drain);
112 net_device->destroy = false; 112 net_device->destroy = false;
113 net_device->tx_disable = false;
113 114
114 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT; 115 net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
115 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT; 116 net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
719 } else { 720 } else {
720 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx); 721 struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
721 722
722 if (netif_tx_queue_stopped(txq) && 723 if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
723 (hv_get_avail_to_write_percent(&channel->outbound) > 724 (hv_get_avail_to_write_percent(&channel->outbound) >
724 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) { 725 RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
725 netif_tx_wake_queue(txq); 726 netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
874 } else if (ret == -EAGAIN) { 875 } else if (ret == -EAGAIN) {
875 netif_tx_stop_queue(txq); 876 netif_tx_stop_queue(txq);
876 ndev_ctx->eth_stats.stop_queue++; 877 ndev_ctx->eth_stats.stop_queue++;
877 if (atomic_read(&nvchan->queue_sends) < 1) { 878 if (atomic_read(&nvchan->queue_sends) < 1 &&
879 !net_device->tx_disable) {
878 netif_tx_wake_queue(txq); 880 netif_tx_wake_queue(txq);
879 ndev_ctx->eth_stats.wake_queue++; 881 ndev_ctx->eth_stats.wake_queue++;
880 ret = -ENOSPC; 882 ret = -ENOSPC;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf4897043e83..b20fb0fb595b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
109 rcu_read_unlock(); 109 rcu_read_unlock();
110} 110}
111 111
112static void netvsc_tx_enable(struct netvsc_device *nvscdev,
113 struct net_device *ndev)
114{
115 nvscdev->tx_disable = false;
116 virt_wmb(); /* ensure queue wake up mechanism is on */
117
118 netif_tx_wake_all_queues(ndev);
119}
120
112static int netvsc_open(struct net_device *net) 121static int netvsc_open(struct net_device *net)
113{ 122{
114 struct net_device_context *ndev_ctx = netdev_priv(net); 123 struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
129 rdev = nvdev->extension; 138 rdev = nvdev->extension;
130 if (!rdev->link_state) { 139 if (!rdev->link_state) {
131 netif_carrier_on(net); 140 netif_carrier_on(net);
132 netif_tx_wake_all_queues(net); 141 netvsc_tx_enable(nvdev, net);
133 } 142 }
134 143
135 if (vf_netdev) { 144 if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
184 } 193 }
185} 194}
186 195
196static void netvsc_tx_disable(struct netvsc_device *nvscdev,
197 struct net_device *ndev)
198{
199 if (nvscdev) {
200 nvscdev->tx_disable = true;
201 virt_wmb(); /* ensure txq will not wake up after stop */
202 }
203
204 netif_tx_disable(ndev);
205}
206
187static int netvsc_close(struct net_device *net) 207static int netvsc_close(struct net_device *net)
188{ 208{
189 struct net_device_context *net_device_ctx = netdev_priv(net); 209 struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
192 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev); 212 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
193 int ret; 213 int ret;
194 214
195 netif_tx_disable(net); 215 netvsc_tx_disable(nvdev, net);
196 216
197 /* No need to close rndis filter if it is removed already */ 217 /* No need to close rndis filter if it is removed already */
198 if (!nvdev) 218 if (!nvdev)
@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
920 940
921 /* If device was up (receiving) then shutdown */ 941 /* If device was up (receiving) then shutdown */
922 if (netif_running(ndev)) { 942 if (netif_running(ndev)) {
923 netif_tx_disable(ndev); 943 netvsc_tx_disable(nvdev, ndev);
924 944
925 ret = rndis_filter_close(nvdev); 945 ret = rndis_filter_close(nvdev);
926 if (ret) { 946 if (ret) {
@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
1908 if (rdev->link_state) { 1928 if (rdev->link_state) {
1909 rdev->link_state = false; 1929 rdev->link_state = false;
1910 netif_carrier_on(net); 1930 netif_carrier_on(net);
1911 netif_tx_wake_all_queues(net); 1931 netvsc_tx_enable(net_device, net);
1912 } else { 1932 } else {
1913 notify = true; 1933 notify = true;
1914 } 1934 }
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
1918 if (!rdev->link_state) { 1938 if (!rdev->link_state) {
1919 rdev->link_state = true; 1939 rdev->link_state = true;
1920 netif_carrier_off(net); 1940 netif_carrier_off(net);
1921 netif_tx_stop_all_queues(net); 1941 netvsc_tx_disable(net_device, net);
1922 } 1942 }
1923 kfree(event); 1943 kfree(event);
1924 break; 1944 break;
@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
1927 if (!rdev->link_state) { 1947 if (!rdev->link_state) {
1928 rdev->link_state = true; 1948 rdev->link_state = true;
1929 netif_carrier_off(net); 1949 netif_carrier_off(net);
1930 netif_tx_stop_all_queues(net); 1950 netvsc_tx_disable(net_device, net);
1931 event->event = RNDIS_STATUS_MEDIA_CONNECT; 1951 event->event = RNDIS_STATUS_MEDIA_CONNECT;
1932 spin_lock_irqsave(&ndev_ctx->lock, flags); 1952 spin_lock_irqsave(&ndev_ctx->lock, flags);
1933 list_add(&event->list, &ndev_ctx->reconfig_events); 1953 list_add(&event->list, &ndev_ctx->reconfig_events);
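The netvsc hunks above add a tx_disable flag, published with virt_wmb() before the queues are stopped, and every wake-up path now checks it, so a completion cannot re-wake a queue that close/detach just stopped. The userspace sketch below uses a C11 atomic as a rough stand-in for the flag-plus-barrier idea; the queue is a stub and nothing here is the real Hyper-V code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool tx_disable;
static bool queue_stopped;

static void tx_complete_wake(void)
{
    if (queue_stopped && !atomic_load(&tx_disable)) {
        queue_stopped = false;
        puts("queue woken by completion");
    } else {
        puts("wake suppressed");
    }
}

static void netvsc_close_path(void)
{
    atomic_store(&tx_disable, true);    /* publish before stopping */
    queue_stopped = true;
    puts("queue stopped for close");
}

int main(void)
{
    netvsc_close_path();
    tx_complete_wake();                 /* must NOT wake the queue */
    return 0;
}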
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 74bebbdb4b15..9195f3476b1d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
1203 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 1203 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
1204 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */ 1204 {QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
1205 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */ 1205 {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
1206 {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
1206 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */ 1207 {QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
1207 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 1208 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
1208 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 1209 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7c1430ed0244..6d1a1abbed27 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,6 +1273,7 @@ static void vrf_setup(struct net_device *dev)
1273 1273
1274 /* default to no qdisc; user can add if desired */ 1274 /* default to no qdisc; user can add if desired */
1275 dev->priv_flags |= IFF_NO_QUEUE; 1275 dev->priv_flags |= IFF_NO_QUEUE;
1276 dev->priv_flags |= IFF_NO_RX_HANDLER;
1276 1277
1277 dev->min_mtu = 0; 1278 dev->min_mtu = 0;
1278 dev->max_mtu = 0; 1279 dev->max_mtu = 0;
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index a25659b5a5d1..3fa20e95a6bb 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -1661,11 +1661,11 @@ static void __init vfio_pci_fill_ids(void)
1661 rc = pci_add_dynid(&vfio_pci_driver, vendor, device, 1661 rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
1662 subvendor, subdevice, class, class_mask, 0); 1662 subvendor, subdevice, class, class_mask, 0);
1663 if (rc) 1663 if (rc)
1664 pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n", 1664 pr_warn("failed to add dynamic id [%04x:%04x[%04x:%04x]] class %#08x/%08x (%d)\n",
1665 vendor, device, subvendor, subdevice, 1665 vendor, device, subvendor, subdevice,
1666 class, class_mask, rc); 1666 class, class_mask, rc);
1667 else 1667 else
1668 pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n", 1668 pr_info("add [%04x:%04x[%04x:%04x]] class %#08x/%08x\n",
1669 vendor, device, subvendor, subdevice, 1669 vendor, device, subvendor, subdevice,
1670 class, class_mask); 1670 class, class_mask);
1671 } 1671 }
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 8dbb270998f4..6b64e45a5269 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1398,7 +1398,7 @@ unlock_exit:
1398 mutex_unlock(&container->lock); 1398 mutex_unlock(&container->lock);
1399} 1399}
1400 1400
1401const struct vfio_iommu_driver_ops tce_iommu_driver_ops = { 1401static const struct vfio_iommu_driver_ops tce_iommu_driver_ops = {
1402 .name = "iommu-vfio-powerpc", 1402 .name = "iommu-vfio-powerpc",
1403 .owner = THIS_MODULE, 1403 .owner = THIS_MODULE,
1404 .open = tce_iommu_open, 1404 .open = tce_iommu_open,
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 73652e21efec..d0f731c9920a 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -58,12 +58,18 @@ module_param_named(disable_hugepages,
58MODULE_PARM_DESC(disable_hugepages, 58MODULE_PARM_DESC(disable_hugepages,
59 "Disable VFIO IOMMU support for IOMMU hugepages."); 59 "Disable VFIO IOMMU support for IOMMU hugepages.");
60 60
61static unsigned int dma_entry_limit __read_mostly = U16_MAX;
62module_param_named(dma_entry_limit, dma_entry_limit, uint, 0644);
63MODULE_PARM_DESC(dma_entry_limit,
64 "Maximum number of user DMA mappings per container (65535).");
65
61struct vfio_iommu { 66struct vfio_iommu {
62 struct list_head domain_list; 67 struct list_head domain_list;
63 struct vfio_domain *external_domain; /* domain for external user */ 68 struct vfio_domain *external_domain; /* domain for external user */
64 struct mutex lock; 69 struct mutex lock;
65 struct rb_root dma_list; 70 struct rb_root dma_list;
66 struct blocking_notifier_head notifier; 71 struct blocking_notifier_head notifier;
72 unsigned int dma_avail;
67 bool v2; 73 bool v2;
68 bool nesting; 74 bool nesting;
69}; 75};
@@ -836,6 +842,7 @@ static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
836 vfio_unlink_dma(iommu, dma); 842 vfio_unlink_dma(iommu, dma);
837 put_task_struct(dma->task); 843 put_task_struct(dma->task);
838 kfree(dma); 844 kfree(dma);
845 iommu->dma_avail++;
839} 846}
840 847
841static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu) 848static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
@@ -1081,12 +1088,18 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
1081 goto out_unlock; 1088 goto out_unlock;
1082 } 1089 }
1083 1090
1091 if (!iommu->dma_avail) {
1092 ret = -ENOSPC;
1093 goto out_unlock;
1094 }
1095
1084 dma = kzalloc(sizeof(*dma), GFP_KERNEL); 1096 dma = kzalloc(sizeof(*dma), GFP_KERNEL);
1085 if (!dma) { 1097 if (!dma) {
1086 ret = -ENOMEM; 1098 ret = -ENOMEM;
1087 goto out_unlock; 1099 goto out_unlock;
1088 } 1100 }
1089 1101
1102 iommu->dma_avail--;
1090 dma->iova = iova; 1103 dma->iova = iova;
1091 dma->vaddr = vaddr; 1104 dma->vaddr = vaddr;
1092 dma->prot = prot; 1105 dma->prot = prot;
@@ -1583,6 +1596,7 @@ static void *vfio_iommu_type1_open(unsigned long arg)
1583 1596
1584 INIT_LIST_HEAD(&iommu->domain_list); 1597 INIT_LIST_HEAD(&iommu->domain_list);
1585 iommu->dma_list = RB_ROOT; 1598 iommu->dma_list = RB_ROOT;
1599 iommu->dma_avail = dma_entry_limit;
1586 mutex_init(&iommu->lock); 1600 mutex_init(&iommu->lock);
1587 BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier); 1601 BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
1588 1602
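The new dma_entry_limit parameter is declared with mode 0644, so it is also writable at run time under /sys/module/vfio_iommu_type1/parameters/. A minimal userspace sketch for inspecting and raising it follows; it assumes the module is loaded and the caller is root, and once a container exhausts its budget VFIO_IOMMU_MAP_DMA fails with -ENOSPC as added above.

/* Sketch: query and raise vfio_iommu_type1.dma_entry_limit via sysfs.
 * Assumes the module is loaded and the caller has root privileges.
 */
#include <stdio.h>

#define PARAM "/sys/module/vfio_iommu_type1/parameters/dma_entry_limit"

int main(void)
{
        FILE *f = fopen(PARAM, "r");
        unsigned int limit;

        if (!f || fscanf(f, "%u", &limit) != 1) {
                perror(PARAM);
                return 1;
        }
        fclose(f);
        printf("current dma_entry_limit: %u\n", limit);

        /* Double the per-container mapping budget (default is 65535). */
        f = fopen(PARAM, "w");
        if (!f || fprintf(f, "%u\n", limit * 2) < 0) {
                perror(PARAM);
                return 1;
        }
        fclose(f);
        return 0;
}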
diff --git a/fs/aio.c b/fs/aio.c
index 38b741aef0bf..a4cc2a1cccb7 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -181,7 +181,7 @@ struct poll_iocb {
181 struct file *file; 181 struct file *file;
182 struct wait_queue_head *head; 182 struct wait_queue_head *head;
183 __poll_t events; 183 __poll_t events;
184 bool woken; 184 bool done;
185 bool cancelled; 185 bool cancelled;
186 struct wait_queue_entry wait; 186 struct wait_queue_entry wait;
187 struct work_struct work; 187 struct work_struct work;
@@ -204,8 +204,7 @@ struct aio_kiocb {
204 struct kioctx *ki_ctx; 204 struct kioctx *ki_ctx;
205 kiocb_cancel_fn *ki_cancel; 205 kiocb_cancel_fn *ki_cancel;
206 206
207 struct iocb __user *ki_user_iocb; /* user's aiocb */ 207 struct io_event ki_res;
208 __u64 ki_user_data; /* user's data for completion */
209 208
210 struct list_head ki_list; /* the aio core uses this 209 struct list_head ki_list; /* the aio core uses this
211 * for cancellation */ 210 * for cancellation */
@@ -1022,6 +1021,9 @@ static bool get_reqs_available(struct kioctx *ctx)
1022/* aio_get_req 1021/* aio_get_req
1023 * Allocate a slot for an aio request. 1022 * Allocate a slot for an aio request.
1024 * Returns NULL if no requests are free. 1023 * Returns NULL if no requests are free.
1024 *
1025 * The refcount is initialized to 2 - one for the async op completion,
1026 * one for the synchronous code that does this.
1025 */ 1027 */
1026static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) 1028static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1027{ 1029{
@@ -1031,10 +1033,15 @@ static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
1031 if (unlikely(!req)) 1033 if (unlikely(!req))
1032 return NULL; 1034 return NULL;
1033 1035
1036 if (unlikely(!get_reqs_available(ctx))) {
1037 kfree(req);
1038 return NULL;
1039 }
1040
1034 percpu_ref_get(&ctx->reqs); 1041 percpu_ref_get(&ctx->reqs);
1035 req->ki_ctx = ctx; 1042 req->ki_ctx = ctx;
1036 INIT_LIST_HEAD(&req->ki_list); 1043 INIT_LIST_HEAD(&req->ki_list);
1037 refcount_set(&req->ki_refcnt, 0); 1044 refcount_set(&req->ki_refcnt, 2);
1038 req->ki_eventfd = NULL; 1045 req->ki_eventfd = NULL;
1039 return req; 1046 return req;
1040} 1047}
@@ -1067,30 +1074,20 @@ out:
1067 return ret; 1074 return ret;
1068} 1075}
1069 1076
1070static inline void iocb_put(struct aio_kiocb *iocb) 1077static inline void iocb_destroy(struct aio_kiocb *iocb)
1071{
1072 if (refcount_read(&iocb->ki_refcnt) == 0 ||
1073 refcount_dec_and_test(&iocb->ki_refcnt)) {
1074 if (iocb->ki_filp)
1075 fput(iocb->ki_filp);
1076 percpu_ref_put(&iocb->ki_ctx->reqs);
1077 kmem_cache_free(kiocb_cachep, iocb);
1078 }
1079}
1080
1081static void aio_fill_event(struct io_event *ev, struct aio_kiocb *iocb,
1082 long res, long res2)
1083{ 1078{
1084 ev->obj = (u64)(unsigned long)iocb->ki_user_iocb; 1079 if (iocb->ki_eventfd)
1085 ev->data = iocb->ki_user_data; 1080 eventfd_ctx_put(iocb->ki_eventfd);
1086 ev->res = res; 1081 if (iocb->ki_filp)
1087 ev->res2 = res2; 1082 fput(iocb->ki_filp);
1083 percpu_ref_put(&iocb->ki_ctx->reqs);
1084 kmem_cache_free(kiocb_cachep, iocb);
1088} 1085}
1089 1086
1090/* aio_complete 1087/* aio_complete
1091 * Called when the io request on the given iocb is complete. 1088 * Called when the io request on the given iocb is complete.
1092 */ 1089 */
1093static void aio_complete(struct aio_kiocb *iocb, long res, long res2) 1090static void aio_complete(struct aio_kiocb *iocb)
1094{ 1091{
1095 struct kioctx *ctx = iocb->ki_ctx; 1092 struct kioctx *ctx = iocb->ki_ctx;
1096 struct aio_ring *ring; 1093 struct aio_ring *ring;
@@ -1114,14 +1111,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
1114 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); 1111 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1115 event = ev_page + pos % AIO_EVENTS_PER_PAGE; 1112 event = ev_page + pos % AIO_EVENTS_PER_PAGE;
1116 1113
1117 aio_fill_event(event, iocb, res, res2); 1114 *event = iocb->ki_res;
1118 1115
1119 kunmap_atomic(ev_page); 1116 kunmap_atomic(ev_page);
1120 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); 1117 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
1121 1118
1122 pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n", 1119 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
1123 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, 1120 (void __user *)(unsigned long)iocb->ki_res.obj,
1124 res, res2); 1121 iocb->ki_res.data, iocb->ki_res.res, iocb->ki_res.res2);
1125 1122
1126 /* after flagging the request as done, we 1123 /* after flagging the request as done, we
1127 * must never even look at it again 1124 * must never even look at it again
@@ -1148,10 +1145,8 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
1148 * eventfd. The eventfd_signal() function is safe to be called 1145 * eventfd. The eventfd_signal() function is safe to be called
1149 * from IRQ context. 1146 * from IRQ context.
1150 */ 1147 */
1151 if (iocb->ki_eventfd) { 1148 if (iocb->ki_eventfd)
1152 eventfd_signal(iocb->ki_eventfd, 1); 1149 eventfd_signal(iocb->ki_eventfd, 1);
1153 eventfd_ctx_put(iocb->ki_eventfd);
1154 }
1155 1150
1156 /* 1151 /*
1157 * We have to order our ring_info tail store above and test 1152 * We have to order our ring_info tail store above and test
@@ -1163,7 +1158,14 @@ static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
1163 1158
1164 if (waitqueue_active(&ctx->wait)) 1159 if (waitqueue_active(&ctx->wait))
1165 wake_up(&ctx->wait); 1160 wake_up(&ctx->wait);
1166 iocb_put(iocb); 1161}
1162
1163static inline void iocb_put(struct aio_kiocb *iocb)
1164{
1165 if (refcount_dec_and_test(&iocb->ki_refcnt)) {
1166 aio_complete(iocb);
1167 iocb_destroy(iocb);
1168 }
1167} 1169}
1168 1170
1169/* aio_read_events_ring 1171/* aio_read_events_ring
@@ -1437,7 +1439,9 @@ static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
1437 file_end_write(kiocb->ki_filp); 1439 file_end_write(kiocb->ki_filp);
1438 } 1440 }
1439 1441
1440 aio_complete(iocb, res, res2); 1442 iocb->ki_res.res = res;
1443 iocb->ki_res.res2 = res2;
1444 iocb_put(iocb);
1441} 1445}
1442 1446
1443static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb) 1447static int aio_prep_rw(struct kiocb *req, const struct iocb *iocb)
@@ -1514,13 +1518,13 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
1514 } 1518 }
1515} 1519}
1516 1520
1517static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb, 1521static int aio_read(struct kiocb *req, const struct iocb *iocb,
1518 bool vectored, bool compat) 1522 bool vectored, bool compat)
1519{ 1523{
1520 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1524 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1521 struct iov_iter iter; 1525 struct iov_iter iter;
1522 struct file *file; 1526 struct file *file;
1523 ssize_t ret; 1527 int ret;
1524 1528
1525 ret = aio_prep_rw(req, iocb); 1529 ret = aio_prep_rw(req, iocb);
1526 if (ret) 1530 if (ret)
@@ -1542,13 +1546,13 @@ static ssize_t aio_read(struct kiocb *req, const struct iocb *iocb,
1542 return ret; 1546 return ret;
1543} 1547}
1544 1548
1545static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb, 1549static int aio_write(struct kiocb *req, const struct iocb *iocb,
1546 bool vectored, bool compat) 1550 bool vectored, bool compat)
1547{ 1551{
1548 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; 1552 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1549 struct iov_iter iter; 1553 struct iov_iter iter;
1550 struct file *file; 1554 struct file *file;
1551 ssize_t ret; 1555 int ret;
1552 1556
1553 ret = aio_prep_rw(req, iocb); 1557 ret = aio_prep_rw(req, iocb);
1554 if (ret) 1558 if (ret)
@@ -1585,11 +1589,10 @@ static ssize_t aio_write(struct kiocb *req, const struct iocb *iocb,
1585 1589
1586static void aio_fsync_work(struct work_struct *work) 1590static void aio_fsync_work(struct work_struct *work)
1587{ 1591{
1588 struct fsync_iocb *req = container_of(work, struct fsync_iocb, work); 1592 struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, fsync.work);
1589 int ret;
1590 1593
1591 ret = vfs_fsync(req->file, req->datasync); 1594 iocb->ki_res.res = vfs_fsync(iocb->fsync.file, iocb->fsync.datasync);
1592 aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0); 1595 iocb_put(iocb);
1593} 1596}
1594 1597
1595static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb, 1598static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
@@ -1608,11 +1611,6 @@ static int aio_fsync(struct fsync_iocb *req, const struct iocb *iocb,
1608 return 0; 1611 return 0;
1609} 1612}
1610 1613
1611static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
1612{
1613 aio_complete(iocb, mangle_poll(mask), 0);
1614}
1615
1616static void aio_poll_complete_work(struct work_struct *work) 1614static void aio_poll_complete_work(struct work_struct *work)
1617{ 1615{
1618 struct poll_iocb *req = container_of(work, struct poll_iocb, work); 1616 struct poll_iocb *req = container_of(work, struct poll_iocb, work);
@@ -1638,9 +1636,11 @@ static void aio_poll_complete_work(struct work_struct *work)
1638 return; 1636 return;
1639 } 1637 }
1640 list_del_init(&iocb->ki_list); 1638 list_del_init(&iocb->ki_list);
1639 iocb->ki_res.res = mangle_poll(mask);
1640 req->done = true;
1641 spin_unlock_irq(&ctx->ctx_lock); 1641 spin_unlock_irq(&ctx->ctx_lock);
1642 1642
1643 aio_poll_complete(iocb, mask); 1643 iocb_put(iocb);
1644} 1644}
1645 1645
1646/* assumes we are called with irqs disabled */ 1646/* assumes we are called with irqs disabled */
@@ -1668,31 +1668,27 @@ static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1668 __poll_t mask = key_to_poll(key); 1668 __poll_t mask = key_to_poll(key);
1669 unsigned long flags; 1669 unsigned long flags;
1670 1670
1671 req->woken = true;
1672
1673 /* for instances that support it check for an event match first: */ 1671 /* for instances that support it check for an event match first: */
1674 if (mask) { 1672 if (mask && !(mask & req->events))
1675 if (!(mask & req->events)) 1673 return 0;
1676 return 0; 1674
1675 list_del_init(&req->wait.entry);
1677 1676
1677 if (mask && spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
1678 /* 1678 /*
1679 * Try to complete the iocb inline if we can. Use 1679 * Try to complete the iocb inline if we can. Use
1680 * irqsave/irqrestore because not all filesystems (e.g. fuse) 1680 * irqsave/irqrestore because not all filesystems (e.g. fuse)
1681 * call this function with IRQs disabled and because IRQs 1681 * call this function with IRQs disabled and because IRQs
1682 * have to be disabled before ctx_lock is obtained. 1682 * have to be disabled before ctx_lock is obtained.
1683 */ 1683 */
1684 if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) { 1684 list_del(&iocb->ki_list);
1685 list_del(&iocb->ki_list); 1685 iocb->ki_res.res = mangle_poll(mask);
1686 spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags); 1686 req->done = true;
1687 1687 spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
1688 list_del_init(&req->wait.entry); 1688 iocb_put(iocb);
1689 aio_poll_complete(iocb, mask); 1689 } else {
1690 return 1; 1690 schedule_work(&req->work);
1691 }
1692 } 1691 }
1693
1694 list_del_init(&req->wait.entry);
1695 schedule_work(&req->work);
1696 return 1; 1692 return 1;
1697} 1693}
1698 1694
@@ -1719,11 +1715,12 @@ aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1719 add_wait_queue(head, &pt->iocb->poll.wait); 1715 add_wait_queue(head, &pt->iocb->poll.wait);
1720} 1716}
1721 1717
1722static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb) 1718static int aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1723{ 1719{
1724 struct kioctx *ctx = aiocb->ki_ctx; 1720 struct kioctx *ctx = aiocb->ki_ctx;
1725 struct poll_iocb *req = &aiocb->poll; 1721 struct poll_iocb *req = &aiocb->poll;
1726 struct aio_poll_table apt; 1722 struct aio_poll_table apt;
1723 bool cancel = false;
1727 __poll_t mask; 1724 __poll_t mask;
1728 1725
1729 /* reject any unknown events outside the normal event mask. */ 1726 /* reject any unknown events outside the normal event mask. */
@@ -1737,7 +1734,7 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1737 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP; 1734 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1738 1735
1739 req->head = NULL; 1736 req->head = NULL;
1740 req->woken = false; 1737 req->done = false;
1741 req->cancelled = false; 1738 req->cancelled = false;
1742 1739
1743 apt.pt._qproc = aio_poll_queue_proc; 1740 apt.pt._qproc = aio_poll_queue_proc;
@@ -1749,156 +1746,135 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
1749 INIT_LIST_HEAD(&req->wait.entry); 1746 INIT_LIST_HEAD(&req->wait.entry);
1750 init_waitqueue_func_entry(&req->wait, aio_poll_wake); 1747 init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1751 1748
1752 /* one for removal from waitqueue, one for this function */
1753 refcount_set(&aiocb->ki_refcnt, 2);
1754
1755 mask = vfs_poll(req->file, &apt.pt) & req->events; 1749 mask = vfs_poll(req->file, &apt.pt) & req->events;
1756 if (unlikely(!req->head)) {
1757 /* we did not manage to set up a waitqueue, done */
1758 goto out;
1759 }
1760
1761 spin_lock_irq(&ctx->ctx_lock); 1750 spin_lock_irq(&ctx->ctx_lock);
1762 spin_lock(&req->head->lock); 1751 if (likely(req->head)) {
1763 if (req->woken) { 1752 spin_lock(&req->head->lock);
1764 /* wake_up context handles the rest */ 1753 if (unlikely(list_empty(&req->wait.entry))) {
1765 mask = 0; 1754 if (apt.error)
1755 cancel = true;
1756 apt.error = 0;
1757 mask = 0;
1758 }
1759 if (mask || apt.error) {
1760 list_del_init(&req->wait.entry);
1761 } else if (cancel) {
1762 WRITE_ONCE(req->cancelled, true);
1763 } else if (!req->done) { /* actually waiting for an event */
1764 list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1765 aiocb->ki_cancel = aio_poll_cancel;
1766 }
1767 spin_unlock(&req->head->lock);
1768 }
1769 if (mask) { /* no async, we'd stolen it */
1770 aiocb->ki_res.res = mangle_poll(mask);
1766 apt.error = 0; 1771 apt.error = 0;
1767 } else if (mask || apt.error) {
1768 /* if we get an error or a mask we are done */
1769 WARN_ON_ONCE(list_empty(&req->wait.entry));
1770 list_del_init(&req->wait.entry);
1771 } else {
1772 /* actually waiting for an event */
1773 list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1774 aiocb->ki_cancel = aio_poll_cancel;
1775 } 1772 }
1776 spin_unlock(&req->head->lock);
1777 spin_unlock_irq(&ctx->ctx_lock); 1773 spin_unlock_irq(&ctx->ctx_lock);
1778
1779out:
1780 if (unlikely(apt.error))
1781 return apt.error;
1782
1783 if (mask) 1774 if (mask)
1784 aio_poll_complete(aiocb, mask); 1775 iocb_put(aiocb);
1785 iocb_put(aiocb); 1776 return apt.error;
1786 return 0;
1787} 1777}
1788 1778
1789static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, 1779static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
1790 struct iocb __user *user_iocb, bool compat) 1780 struct iocb __user *user_iocb, struct aio_kiocb *req,
1781 bool compat)
1791{ 1782{
1792 struct aio_kiocb *req;
1793 ssize_t ret;
1794
1795 /* enforce forwards compatibility on users */
1796 if (unlikely(iocb->aio_reserved2)) {
1797 pr_debug("EINVAL: reserve field set\n");
1798 return -EINVAL;
1799 }
1800
1801 /* prevent overflows */
1802 if (unlikely(
1803 (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
1804 (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
1805 ((ssize_t)iocb->aio_nbytes < 0)
1806 )) {
1807 pr_debug("EINVAL: overflow check\n");
1808 return -EINVAL;
1809 }
1810
1811 if (!get_reqs_available(ctx))
1812 return -EAGAIN;
1813
1814 ret = -EAGAIN;
1815 req = aio_get_req(ctx);
1816 if (unlikely(!req))
1817 goto out_put_reqs_available;
1818
1819 req->ki_filp = fget(iocb->aio_fildes); 1783 req->ki_filp = fget(iocb->aio_fildes);
1820 ret = -EBADF;
1821 if (unlikely(!req->ki_filp)) 1784 if (unlikely(!req->ki_filp))
1822 goto out_put_req; 1785 return -EBADF;
1823 1786
1824 if (iocb->aio_flags & IOCB_FLAG_RESFD) { 1787 if (iocb->aio_flags & IOCB_FLAG_RESFD) {
1788 struct eventfd_ctx *eventfd;
1825 /* 1789 /*
1826 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an 1790 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1827 * instance of the file* now. The file descriptor must be 1791 * instance of the file* now. The file descriptor must be
1828 * an eventfd() fd, and will be signaled for each completed 1792 * an eventfd() fd, and will be signaled for each completed
1829 * event using the eventfd_signal() function. 1793 * event using the eventfd_signal() function.
1830 */ 1794 */
1831 req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd); 1795 eventfd = eventfd_ctx_fdget(iocb->aio_resfd);
1832 if (IS_ERR(req->ki_eventfd)) { 1796 if (IS_ERR(eventfd))
1833 ret = PTR_ERR(req->ki_eventfd); 1797 return PTR_ERR(req->ki_eventfd);
1834 req->ki_eventfd = NULL; 1798
1835 goto out_put_req; 1799 req->ki_eventfd = eventfd;
1836 }
1837 } 1800 }
1838 1801
1839 ret = put_user(KIOCB_KEY, &user_iocb->aio_key); 1802 if (unlikely(put_user(KIOCB_KEY, &user_iocb->aio_key))) {
1840 if (unlikely(ret)) {
1841 pr_debug("EFAULT: aio_key\n"); 1803 pr_debug("EFAULT: aio_key\n");
1842 goto out_put_req; 1804 return -EFAULT;
1843 } 1805 }
1844 1806
1845 req->ki_user_iocb = user_iocb; 1807 req->ki_res.obj = (u64)(unsigned long)user_iocb;
1846 req->ki_user_data = iocb->aio_data; 1808 req->ki_res.data = iocb->aio_data;
1809 req->ki_res.res = 0;
1810 req->ki_res.res2 = 0;
1847 1811
1848 switch (iocb->aio_lio_opcode) { 1812 switch (iocb->aio_lio_opcode) {
1849 case IOCB_CMD_PREAD: 1813 case IOCB_CMD_PREAD:
1850 ret = aio_read(&req->rw, iocb, false, compat); 1814 return aio_read(&req->rw, iocb, false, compat);
1851 break;
1852 case IOCB_CMD_PWRITE: 1815 case IOCB_CMD_PWRITE:
1853 ret = aio_write(&req->rw, iocb, false, compat); 1816 return aio_write(&req->rw, iocb, false, compat);
1854 break;
1855 case IOCB_CMD_PREADV: 1817 case IOCB_CMD_PREADV:
1856 ret = aio_read(&req->rw, iocb, true, compat); 1818 return aio_read(&req->rw, iocb, true, compat);
1857 break;
1858 case IOCB_CMD_PWRITEV: 1819 case IOCB_CMD_PWRITEV:
1859 ret = aio_write(&req->rw, iocb, true, compat); 1820 return aio_write(&req->rw, iocb, true, compat);
1860 break;
1861 case IOCB_CMD_FSYNC: 1821 case IOCB_CMD_FSYNC:
1862 ret = aio_fsync(&req->fsync, iocb, false); 1822 return aio_fsync(&req->fsync, iocb, false);
1863 break;
1864 case IOCB_CMD_FDSYNC: 1823 case IOCB_CMD_FDSYNC:
1865 ret = aio_fsync(&req->fsync, iocb, true); 1824 return aio_fsync(&req->fsync, iocb, true);
1866 break;
1867 case IOCB_CMD_POLL: 1825 case IOCB_CMD_POLL:
1868 ret = aio_poll(req, iocb); 1826 return aio_poll(req, iocb);
1869 break;
1870 default: 1827 default:
1871 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode); 1828 pr_debug("invalid aio operation %d\n", iocb->aio_lio_opcode);
1872 ret = -EINVAL; 1829 return -EINVAL;
1873 break;
1874 } 1830 }
1875
1876 /*
1877 * If ret is 0, we'd either done aio_complete() ourselves or have
1878 * arranged for that to be done asynchronously. Anything non-zero
1879 * means that we need to destroy req ourselves.
1880 */
1881 if (ret)
1882 goto out_put_req;
1883 return 0;
1884out_put_req:
1885 if (req->ki_eventfd)
1886 eventfd_ctx_put(req->ki_eventfd);
1887 iocb_put(req);
1888out_put_reqs_available:
1889 put_reqs_available(ctx, 1);
1890 return ret;
1891} 1831}
1892 1832
1893static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, 1833static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1894 bool compat) 1834 bool compat)
1895{ 1835{
1836 struct aio_kiocb *req;
1896 struct iocb iocb; 1837 struct iocb iocb;
1838 int err;
1897 1839
1898 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb)))) 1840 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
1899 return -EFAULT; 1841 return -EFAULT;
1900 1842
1901 return __io_submit_one(ctx, &iocb, user_iocb, compat); 1843 /* enforce forwards compatibility on users */
1844 if (unlikely(iocb.aio_reserved2)) {
1845 pr_debug("EINVAL: reserve field set\n");
1846 return -EINVAL;
1847 }
1848
1849 /* prevent overflows */
1850 if (unlikely(
1851 (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
1852 (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
1853 ((ssize_t)iocb.aio_nbytes < 0)
1854 )) {
1855 pr_debug("EINVAL: overflow check\n");
1856 return -EINVAL;
1857 }
1858
1859 req = aio_get_req(ctx);
1860 if (unlikely(!req))
1861 return -EAGAIN;
1862
1863 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat);
1864
1865 /* Done with the synchronous reference */
1866 iocb_put(req);
1867
1868 /*
1869 * If err is 0, we'd either done aio_complete() ourselves or have
1870 * arranged for that to be done asynchronously. Anything non-zero
1871 * means that we need to destroy req ourselves.
1872 */
1873 if (unlikely(err)) {
1874 iocb_destroy(req);
1875 put_reqs_available(ctx, 1);
1876 }
1877 return err;
1902} 1878}
1903 1879
1904/* sys_io_submit: 1880/* sys_io_submit:
@@ -1997,24 +1973,6 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
1997} 1973}
1998#endif 1974#endif
1999 1975
2000/* lookup_kiocb
2001 * Finds a given iocb for cancellation.
2002 */
2003static struct aio_kiocb *
2004lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
2005{
2006 struct aio_kiocb *kiocb;
2007
2008 assert_spin_locked(&ctx->ctx_lock);
2009
2010 /* TODO: use a hash or array, this sucks. */
2011 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2012 if (kiocb->ki_user_iocb == iocb)
2013 return kiocb;
2014 }
2015 return NULL;
2016}
2017
2018/* sys_io_cancel: 1976/* sys_io_cancel:
2019 * Attempts to cancel an iocb previously passed to io_submit. If 1977 * Attempts to cancel an iocb previously passed to io_submit. If
2020 * the operation is successfully cancelled, the resulting event is 1978 * the operation is successfully cancelled, the resulting event is
@@ -2032,6 +1990,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2032 struct aio_kiocb *kiocb; 1990 struct aio_kiocb *kiocb;
2033 int ret = -EINVAL; 1991 int ret = -EINVAL;
2034 u32 key; 1992 u32 key;
1993 u64 obj = (u64)(unsigned long)iocb;
2035 1994
2036 if (unlikely(get_user(key, &iocb->aio_key))) 1995 if (unlikely(get_user(key, &iocb->aio_key)))
2037 return -EFAULT; 1996 return -EFAULT;
@@ -2043,10 +2002,13 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2043 return -EINVAL; 2002 return -EINVAL;
2044 2003
2045 spin_lock_irq(&ctx->ctx_lock); 2004 spin_lock_irq(&ctx->ctx_lock);
2046 kiocb = lookup_kiocb(ctx, iocb); 2005 /* TODO: use a hash or array, this sucks. */
2047 if (kiocb) { 2006 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
2048 ret = kiocb->ki_cancel(&kiocb->rw); 2007 if (kiocb->ki_res.obj == obj) {
2049 list_del_init(&kiocb->ki_list); 2008 ret = kiocb->ki_cancel(&kiocb->rw);
2009 list_del_init(&kiocb->ki_list);
2010 break;
2011 }
2050 } 2012 }
2051 spin_unlock_irq(&ctx->ctx_lock); 2013 spin_unlock_irq(&ctx->ctx_lock);
2052 2014
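With lookup_kiocb() gone, io_cancel(2) now matches a request by the userspace iocb address recorded in ki_res.obj. A hedged userspace sketch of that flow, using the raw syscalls so no libaio is needed: it submits an IOCB_CMD_POLL on a pipe that never becomes readable, cancels it, and reaps the completion from the ring (io_cancel() typically reports -EINPROGRESS and the event is delivered through io_getevents()).

/* Sketch: submit an aio poll request, cancel it, reap the event. */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <time.h>
#include <poll.h>

static long io_setup(unsigned nr, aio_context_t *ctx)
{
        return syscall(SYS_io_setup, nr, ctx);
}

static long io_submit(aio_context_t ctx, long nr, struct iocb **iocbpp)
{
        return syscall(SYS_io_submit, ctx, nr, iocbpp);
}

static long io_cancel(aio_context_t ctx, struct iocb *iocb, struct io_event *ev)
{
        return syscall(SYS_io_cancel, ctx, iocb, ev);
}

int main(void)
{
        aio_context_t ctx = 0;
        struct iocb cb;
        struct iocb *cbs[1] = { &cb };
        struct io_event ev;
        struct timespec ts = { .tv_sec = 1 };
        int pipefd[2];

        if (pipe(pipefd) || io_setup(8, &ctx) < 0) {
                perror("setup");
                return 1;
        }

        memset(&cb, 0, sizeof(cb));
        cb.aio_lio_opcode = IOCB_CMD_POLL;
        cb.aio_fildes = pipefd[0];      /* read end never becomes readable */
        cb.aio_buf = POLLIN;            /* poll mask goes in aio_buf */
        cb.aio_data = 0x1234;           /* echoed back in io_event.data */

        if (io_submit(ctx, 1, cbs) != 1) {
                perror("io_submit");
                return 1;
        }

        /* The kernel locates this request by the iocb's user address. */
        if (io_cancel(ctx, &cb, &ev) < 0 && errno != EINPROGRESS) {
                perror("io_cancel");
                return 1;
        }

        /* The cancelled request still completes through the ring. */
        if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, &ts) == 1)
                printf("reaped: data=%llx res=%lld\n",
                       (unsigned long long)ev.data, (long long)ev.res);

        syscall(SYS_io_destroy, ctx);
        return 0;
}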
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index f9b71c12cc9f..a05bf1d6e1d0 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -559,6 +559,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
559 tcon->ses->server->echo_interval / HZ); 559 tcon->ses->server->echo_interval / HZ);
560 if (tcon->snapshot_time) 560 if (tcon->snapshot_time)
561 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time); 561 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
562 if (tcon->handle_timeout)
563 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
562 /* convert actimeo and display it in seconds */ 564 /* convert actimeo and display it in seconds */
563 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ); 565 seq_printf(s, ",actimeo=%lu", cifs_sb->actimeo / HZ);
564 566
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 38feae812b47..5b18d4585740 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -60,6 +60,12 @@
60#define CIFS_MAX_ACTIMEO (1 << 30) 60#define CIFS_MAX_ACTIMEO (1 << 30)
61 61
62/* 62/*
63 * Max persistent and resilient handle timeout (milliseconds).
64 * Windows durable max was 960000 (16 minutes)
65 */
66#define SMB3_MAX_HANDLE_TIMEOUT 960000
67
68/*
63 * MAX_REQ is the maximum number of requests that WE will send 69 * MAX_REQ is the maximum number of requests that WE will send
64 * on one socket concurrently. 70 * on one socket concurrently.
65 */ 71 */
@@ -586,6 +592,7 @@ struct smb_vol {
586 struct nls_table *local_nls; 592 struct nls_table *local_nls;
587 unsigned int echo_interval; /* echo interval in secs */ 593 unsigned int echo_interval; /* echo interval in secs */
588 __u64 snapshot_time; /* needed for timewarp tokens */ 594 __u64 snapshot_time; /* needed for timewarp tokens */
595 __u32 handle_timeout; /* persistent and durable handle timeout in ms */
589 unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */ 596 unsigned int max_credits; /* smb3 max_credits 10 < credits < 60000 */
590}; 597};
591 598
@@ -1058,6 +1065,7 @@ struct cifs_tcon {
1058 __u32 vol_serial_number; 1065 __u32 vol_serial_number;
1059 __le64 vol_create_time; 1066 __le64 vol_create_time;
1060 __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */ 1067 __u64 snapshot_time; /* for timewarp tokens - timestamp of snapshot */
1068 __u32 handle_timeout; /* persistent and durable handle timeout in ms */
1061 __u32 ss_flags; /* sector size flags */ 1069 __u32 ss_flags; /* sector size flags */
1062 __u32 perf_sector_size; /* best sector size for perf */ 1070 __u32 perf_sector_size; /* best sector size for perf */
1063 __u32 max_chunks; 1071 __u32 max_chunks;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a8e9738db691..4c0e44489f21 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -103,7 +103,7 @@ enum {
103 Opt_cruid, Opt_gid, Opt_file_mode, 103 Opt_cruid, Opt_gid, Opt_file_mode,
104 Opt_dirmode, Opt_port, 104 Opt_dirmode, Opt_port,
105 Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo, 105 Opt_blocksize, Opt_rsize, Opt_wsize, Opt_actimeo,
106 Opt_echo_interval, Opt_max_credits, 106 Opt_echo_interval, Opt_max_credits, Opt_handletimeout,
107 Opt_snapshot, 107 Opt_snapshot,
108 108
109 /* Mount options which take string value */ 109 /* Mount options which take string value */
@@ -208,6 +208,7 @@ static const match_table_t cifs_mount_option_tokens = {
208 { Opt_rsize, "rsize=%s" }, 208 { Opt_rsize, "rsize=%s" },
209 { Opt_wsize, "wsize=%s" }, 209 { Opt_wsize, "wsize=%s" },
210 { Opt_actimeo, "actimeo=%s" }, 210 { Opt_actimeo, "actimeo=%s" },
211 { Opt_handletimeout, "handletimeout=%s" },
211 { Opt_echo_interval, "echo_interval=%s" }, 212 { Opt_echo_interval, "echo_interval=%s" },
212 { Opt_max_credits, "max_credits=%s" }, 213 { Opt_max_credits, "max_credits=%s" },
213 { Opt_snapshot, "snapshot=%s" }, 214 { Opt_snapshot, "snapshot=%s" },
@@ -1619,6 +1620,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1619 1620
1620 vol->actimeo = CIFS_DEF_ACTIMEO; 1621 vol->actimeo = CIFS_DEF_ACTIMEO;
1621 1622
1623 /* Most clients set timeout to 0, allows server to use its default */
1624 vol->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
1625
1622 /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */ 1626 /* offer SMB2.1 and later (SMB3 etc). Secure and widely accepted */
1623 vol->ops = &smb30_operations; 1627 vol->ops = &smb30_operations;
1624 vol->vals = &smbdefault_values; 1628 vol->vals = &smbdefault_values;
@@ -2017,6 +2021,18 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
2017 goto cifs_parse_mount_err; 2021 goto cifs_parse_mount_err;
2018 } 2022 }
2019 break; 2023 break;
2024 case Opt_handletimeout:
2025 if (get_option_ul(args, &option)) {
2026 cifs_dbg(VFS, "%s: Invalid handletimeout value\n",
2027 __func__);
2028 goto cifs_parse_mount_err;
2029 }
2030 vol->handle_timeout = option;
2031 if (vol->handle_timeout > SMB3_MAX_HANDLE_TIMEOUT) {
2032 cifs_dbg(VFS, "Invalid handle cache timeout, longer than 16 minutes\n");
2033 goto cifs_parse_mount_err;
2034 }
2035 break;
2020 case Opt_echo_interval: 2036 case Opt_echo_interval:
2021 if (get_option_ul(args, &option)) { 2037 if (get_option_ul(args, &option)) {
2022 cifs_dbg(VFS, "%s: Invalid echo interval value\n", 2038 cifs_dbg(VFS, "%s: Invalid echo interval value\n",
@@ -3183,6 +3199,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
3183 return 0; 3199 return 0;
3184 if (tcon->snapshot_time != volume_info->snapshot_time) 3200 if (tcon->snapshot_time != volume_info->snapshot_time)
3185 return 0; 3201 return 0;
3202 if (tcon->handle_timeout != volume_info->handle_timeout)
3203 return 0;
3186 return 1; 3204 return 1;
3187} 3205}
3188 3206
@@ -3297,6 +3315,16 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
3297 tcon->snapshot_time = volume_info->snapshot_time; 3315 tcon->snapshot_time = volume_info->snapshot_time;
3298 } 3316 }
3299 3317
3318 if (volume_info->handle_timeout) {
3319 if (ses->server->vals->protocol_id == 0) {
3320 cifs_dbg(VFS,
3321 "Use SMB2.1 or later for handle timeout option\n");
3322 rc = -EOPNOTSUPP;
3323 goto out_fail;
3324 } else
3325 tcon->handle_timeout = volume_info->handle_timeout;
3326 }
3327
3300 tcon->ses = ses; 3328 tcon->ses = ses;
3301 if (volume_info->password) { 3329 if (volume_info->password) {
3302 tcon->password = kstrdup(volume_info->password, GFP_KERNEL); 3330 tcon->password = kstrdup(volume_info->password, GFP_KERNEL);
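The new handletimeout= option is given in milliseconds and rejected above SMB3_MAX_HANDLE_TIMEOUT (16 minutes). In practice it is passed via mount.cifs(8), e.g. "mount -t cifs -o ...,handletimeout=30000"; the C sketch below is the raw mount(2) equivalent. The server path, mount point, credentials and ip= address are placeholders, and a hostname would normally be resolved by the mount.cifs helper rather than by the kernel.

/* Sketch: mount an SMB3 share with a 30 s (30000 ms) handle timeout. */
#include <sys/mount.h>
#include <stdio.h>

int main(void)
{
        const char *opts =
                "username=guest,password=,vers=3.0,ip=192.0.2.1,"
                "handletimeout=30000";

        if (mount("//server/share", "/mnt/smb", "cifs", 0, opts)) {
                perror("mount");
                return 1;
        }
        return 0;
}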
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index b204e84b87fb..54bffb2a1786 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -68,13 +68,15 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
68 68
69 69
70 if (oparms->tcon->use_resilient) { 70 if (oparms->tcon->use_resilient) {
71 nr_ioctl_req.Timeout = 0; /* use server default (120 seconds) */ 71 /* default timeout is 0, servers pick default (120 seconds) */
72 nr_ioctl_req.Timeout =
73 cpu_to_le32(oparms->tcon->handle_timeout);
72 nr_ioctl_req.Reserved = 0; 74 nr_ioctl_req.Reserved = 0;
73 rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid, 75 rc = SMB2_ioctl(xid, oparms->tcon, fid->persistent_fid,
74 fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY, 76 fid->volatile_fid, FSCTL_LMR_REQUEST_RESILIENCY,
75 true /* is_fsctl */, 77 true /* is_fsctl */,
76 (char *)&nr_ioctl_req, sizeof(nr_ioctl_req), 78 (char *)&nr_ioctl_req, sizeof(nr_ioctl_req),
77 NULL, NULL /* no return info */); 79 CIFSMaxBufSize, NULL, NULL /* no return info */);
78 if (rc == -EOPNOTSUPP) { 80 if (rc == -EOPNOTSUPP) {
79 cifs_dbg(VFS, 81 cifs_dbg(VFS,
80 "resiliency not supported by server, disabling\n"); 82 "resiliency not supported by server, disabling\n");
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 1022a3771e14..00225e699d03 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -581,7 +581,7 @@ SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
581 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, 581 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
582 FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */, 582 FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
583 NULL /* no data input */, 0 /* no data input */, 583 NULL /* no data input */, 0 /* no data input */,
584 (char **)&out_buf, &ret_data_len); 584 CIFSMaxBufSize, (char **)&out_buf, &ret_data_len);
585 if (rc == -EOPNOTSUPP) { 585 if (rc == -EOPNOTSUPP) {
586 cifs_dbg(FYI, 586 cifs_dbg(FYI,
587 "server does not support query network interfaces\n"); 587 "server does not support query network interfaces\n");
@@ -717,32 +717,28 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
717 oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId); 717 oparms.fid->mid = le64_to_cpu(o_rsp->sync_hdr.MessageId);
718#endif /* CIFS_DEBUG2 */ 718#endif /* CIFS_DEBUG2 */
719 719
720 if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE)
721 oplock = smb2_parse_lease_state(server, o_rsp,
722 &oparms.fid->epoch,
723 oparms.fid->lease_key);
724 else
725 goto oshr_exit;
726
727
728 memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid)); 720 memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
729 tcon->crfid.tcon = tcon; 721 tcon->crfid.tcon = tcon;
730 tcon->crfid.is_valid = true; 722 tcon->crfid.is_valid = true;
731 kref_init(&tcon->crfid.refcount); 723 kref_init(&tcon->crfid.refcount);
732 kref_get(&tcon->crfid.refcount);
733 724
725 if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) {
726 kref_get(&tcon->crfid.refcount);
727 oplock = smb2_parse_lease_state(server, o_rsp,
728 &oparms.fid->epoch,
729 oparms.fid->lease_key);
730 } else
731 goto oshr_exit;
734 732
735 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; 733 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
736 if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) 734 if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
737 goto oshr_exit; 735 goto oshr_exit;
738 rc = smb2_validate_and_copy_iov( 736 if (!smb2_validate_and_copy_iov(
739 le16_to_cpu(qi_rsp->OutputBufferOffset), 737 le16_to_cpu(qi_rsp->OutputBufferOffset),
740 sizeof(struct smb2_file_all_info), 738 sizeof(struct smb2_file_all_info),
741 &rsp_iov[1], sizeof(struct smb2_file_all_info), 739 &rsp_iov[1], sizeof(struct smb2_file_all_info),
742 (char *)&tcon->crfid.file_all_info); 740 (char *)&tcon->crfid.file_all_info))
743 if (rc) 741 tcon->crfid.file_all_info_is_valid = 1;
744 goto oshr_exit;
745 tcon->crfid.file_all_info_is_valid = 1;
746 742
747 oshr_exit: 743 oshr_exit:
748 mutex_unlock(&tcon->crfid.fid_mutex); 744 mutex_unlock(&tcon->crfid.fid_mutex);
@@ -1299,7 +1295,7 @@ SMB2_request_res_key(const unsigned int xid, struct cifs_tcon *tcon,
1299 1295
1300 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, 1296 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
1301 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */, 1297 FSCTL_SRV_REQUEST_RESUME_KEY, true /* is_fsctl */,
1302 NULL, 0 /* no input */, 1298 NULL, 0 /* no input */, CIFSMaxBufSize,
1303 (char **)&res_key, &ret_data_len); 1299 (char **)&res_key, &ret_data_len);
1304 1300
1305 if (rc) { 1301 if (rc) {
@@ -1404,7 +1400,7 @@ smb2_ioctl_query_info(const unsigned int xid,
1404 rc = SMB2_ioctl_init(tcon, &rqst[1], 1400 rc = SMB2_ioctl_init(tcon, &rqst[1],
1405 COMPOUND_FID, COMPOUND_FID, 1401 COMPOUND_FID, COMPOUND_FID,
1406 qi.info_type, true, NULL, 1402 qi.info_type, true, NULL,
1407 0); 1403 0, CIFSMaxBufSize);
1408 } 1404 }
1409 } else if (qi.flags == PASSTHRU_QUERY_INFO) { 1405 } else if (qi.flags == PASSTHRU_QUERY_INFO) {
1410 memset(&qi_iov, 0, sizeof(qi_iov)); 1406 memset(&qi_iov, 0, sizeof(qi_iov));
@@ -1532,8 +1528,8 @@ smb2_copychunk_range(const unsigned int xid,
1532 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid, 1528 rc = SMB2_ioctl(xid, tcon, trgtfile->fid.persistent_fid,
1533 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE, 1529 trgtfile->fid.volatile_fid, FSCTL_SRV_COPYCHUNK_WRITE,
1534 true /* is_fsctl */, (char *)pcchunk, 1530 true /* is_fsctl */, (char *)pcchunk,
1535 sizeof(struct copychunk_ioctl), (char **)&retbuf, 1531 sizeof(struct copychunk_ioctl), CIFSMaxBufSize,
1536 &ret_data_len); 1532 (char **)&retbuf, &ret_data_len);
1537 if (rc == 0) { 1533 if (rc == 0) {
1538 if (ret_data_len != 1534 if (ret_data_len !=
1539 sizeof(struct copychunk_ioctl_rsp)) { 1535 sizeof(struct copychunk_ioctl_rsp)) {
@@ -1693,7 +1689,7 @@ static bool smb2_set_sparse(const unsigned int xid, struct cifs_tcon *tcon,
1693 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 1689 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1694 cfile->fid.volatile_fid, FSCTL_SET_SPARSE, 1690 cfile->fid.volatile_fid, FSCTL_SET_SPARSE,
1695 true /* is_fctl */, 1691 true /* is_fctl */,
1696 &setsparse, 1, NULL, NULL); 1692 &setsparse, 1, CIFSMaxBufSize, NULL, NULL);
1697 if (rc) { 1693 if (rc) {
1698 tcon->broken_sparse_sup = true; 1694 tcon->broken_sparse_sup = true;
1699 cifs_dbg(FYI, "set sparse rc = %d\n", rc); 1695 cifs_dbg(FYI, "set sparse rc = %d\n", rc);
@@ -1766,7 +1762,7 @@ smb2_duplicate_extents(const unsigned int xid,
1766 true /* is_fsctl */, 1762 true /* is_fsctl */,
1767 (char *)&dup_ext_buf, 1763 (char *)&dup_ext_buf,
1768 sizeof(struct duplicate_extents_to_file), 1764 sizeof(struct duplicate_extents_to_file),
1769 NULL, 1765 CIFSMaxBufSize, NULL,
1770 &ret_data_len); 1766 &ret_data_len);
1771 1767
1772 if (ret_data_len > 0) 1768 if (ret_data_len > 0)
@@ -1801,7 +1797,7 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1801 true /* is_fsctl */, 1797 true /* is_fsctl */,
1802 (char *)&integr_info, 1798 (char *)&integr_info,
1803 sizeof(struct fsctl_set_integrity_information_req), 1799 sizeof(struct fsctl_set_integrity_information_req),
1804 NULL, 1800 CIFSMaxBufSize, NULL,
1805 &ret_data_len); 1801 &ret_data_len);
1806 1802
1807} 1803}
@@ -1809,6 +1805,8 @@ smb3_set_integrity(const unsigned int xid, struct cifs_tcon *tcon,
1809/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */ 1805/* GMT Token is @GMT-YYYY.MM.DD-HH.MM.SS Unicode which is 48 bytes + null */
1810#define GMT_TOKEN_SIZE 50 1806#define GMT_TOKEN_SIZE 50
1811 1807
1808#define MIN_SNAPSHOT_ARRAY_SIZE 16 /* See MS-SMB2 section 3.3.5.15.1 */
1809
1812/* 1810/*
1813 * Input buffer contains (empty) struct smb_snapshot array with size filled in 1811 * Input buffer contains (empty) struct smb_snapshot array with size filled in
1814 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2 1812 * For output see struct SRV_SNAPSHOT_ARRAY in MS-SMB2 section 2.2.32.2
@@ -1820,13 +1818,29 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
1820 char *retbuf = NULL; 1818 char *retbuf = NULL;
1821 unsigned int ret_data_len = 0; 1819 unsigned int ret_data_len = 0;
1822 int rc; 1820 int rc;
1821 u32 max_response_size;
1823 struct smb_snapshot_array snapshot_in; 1822 struct smb_snapshot_array snapshot_in;
1824 1823
1824 if (get_user(ret_data_len, (unsigned int __user *)ioc_buf))
1825 return -EFAULT;
1826
1827 /*
1828 * Note that for snapshot queries that servers like Azure expect that
1829 * the first query be minimal size (and just used to get the number/size
1830 * of previous versions) so response size must be specified as EXACTLY
1831 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
1832 * of eight bytes.
1833 */
1834 if (ret_data_len == 0)
1835 max_response_size = MIN_SNAPSHOT_ARRAY_SIZE;
1836 else
1837 max_response_size = CIFSMaxBufSize;
1838
1825 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 1839 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
1826 cfile->fid.volatile_fid, 1840 cfile->fid.volatile_fid,
1827 FSCTL_SRV_ENUMERATE_SNAPSHOTS, 1841 FSCTL_SRV_ENUMERATE_SNAPSHOTS,
1828 true /* is_fsctl */, 1842 true /* is_fsctl */,
1829 NULL, 0 /* no input data */, 1843 NULL, 0 /* no input data */, max_response_size,
1830 (char **)&retbuf, 1844 (char **)&retbuf,
1831 &ret_data_len); 1845 &ret_data_len);
1832 cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n", 1846 cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
@@ -2304,7 +2318,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
2304 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, 2318 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
2305 FSCTL_DFS_GET_REFERRALS, 2319 FSCTL_DFS_GET_REFERRALS,
2306 true /* is_fsctl */, 2320 true /* is_fsctl */,
2307 (char *)dfs_req, dfs_req_size, 2321 (char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
2308 (char **)&dfs_rsp, &dfs_rsp_size); 2322 (char **)&dfs_rsp, &dfs_rsp_size);
2309 } while (rc == -EAGAIN); 2323 } while (rc == -EAGAIN);
2310 2324
@@ -2658,7 +2672,8 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
2658 rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid, 2672 rc = SMB2_ioctl_init(tcon, &rqst[num++], cfile->fid.persistent_fid,
2659 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, 2673 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
2660 true /* is_fctl */, (char *)&fsctl_buf, 2674 true /* is_fctl */, (char *)&fsctl_buf,
2661 sizeof(struct file_zero_data_information)); 2675 sizeof(struct file_zero_data_information),
2676 CIFSMaxBufSize);
2662 if (rc) 2677 if (rc)
2663 goto zero_range_exit; 2678 goto zero_range_exit;
2664 2679
@@ -2735,7 +2750,8 @@ static long smb3_punch_hole(struct file *file, struct cifs_tcon *tcon,
2735 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid, 2750 rc = SMB2_ioctl(xid, tcon, cfile->fid.persistent_fid,
2736 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA, 2751 cfile->fid.volatile_fid, FSCTL_SET_ZERO_DATA,
2737 true /* is_fctl */, (char *)&fsctl_buf, 2752 true /* is_fctl */, (char *)&fsctl_buf,
2738 sizeof(struct file_zero_data_information), NULL, NULL); 2753 sizeof(struct file_zero_data_information),
2754 CIFSMaxBufSize, NULL, NULL);
2739 free_xid(xid); 2755 free_xid(xid);
2740 return rc; 2756 return rc;
2741} 2757}
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 21ac19ff19cb..21ad01d55ab2 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -1002,7 +1002,8 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
1002 1002
1003 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID, 1003 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
1004 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */, 1004 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
1005 (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen); 1005 (char *)pneg_inbuf, inbuflen, CIFSMaxBufSize,
1006 (char **)&pneg_rsp, &rsplen);
1006 if (rc == -EOPNOTSUPP) { 1007 if (rc == -EOPNOTSUPP) {
1007 /* 1008 /*
1008 * Old Windows versions or Netapp SMB server can return 1009 * Old Windows versions or Netapp SMB server can return
@@ -1858,8 +1859,9 @@ add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
1858} 1859}
1859 1860
1860static struct create_durable_v2 * 1861static struct create_durable_v2 *
1861create_durable_v2_buf(struct cifs_fid *pfid) 1862create_durable_v2_buf(struct cifs_open_parms *oparms)
1862{ 1863{
1864 struct cifs_fid *pfid = oparms->fid;
1863 struct create_durable_v2 *buf; 1865 struct create_durable_v2 *buf;
1864 1866
1865 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL); 1867 buf = kzalloc(sizeof(struct create_durable_v2), GFP_KERNEL);
@@ -1873,7 +1875,14 @@ create_durable_v2_buf(struct cifs_fid *pfid)
1873 (struct create_durable_v2, Name)); 1875 (struct create_durable_v2, Name));
1874 buf->ccontext.NameLength = cpu_to_le16(4); 1876 buf->ccontext.NameLength = cpu_to_le16(4);
1875 1877
1876 buf->dcontext.Timeout = 0; /* Should this be configurable by workload */ 1878 /*
1879 * NB: Handle timeout defaults to 0, which allows server to choose
1880 * (most servers default to 120 seconds) and most clients default to 0.
1881 * This can be overridden at mount ("handletimeout=") if the user wants
1882 * a different persistent (or resilient) handle timeout for all opens
1883 * opens on a particular SMB3 mount.
1884 */
1885 buf->dcontext.Timeout = cpu_to_le32(oparms->tcon->handle_timeout);
1877 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT); 1886 buf->dcontext.Flags = cpu_to_le32(SMB2_DHANDLE_FLAG_PERSISTENT);
1878 generate_random_uuid(buf->dcontext.CreateGuid); 1887 generate_random_uuid(buf->dcontext.CreateGuid);
1879 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16); 1888 memcpy(pfid->create_guid, buf->dcontext.CreateGuid, 16);
@@ -1926,7 +1935,7 @@ add_durable_v2_context(struct kvec *iov, unsigned int *num_iovec,
1926 struct smb2_create_req *req = iov[0].iov_base; 1935 struct smb2_create_req *req = iov[0].iov_base;
1927 unsigned int num = *num_iovec; 1936 unsigned int num = *num_iovec;
1928 1937
1929 iov[num].iov_base = create_durable_v2_buf(oparms->fid); 1938 iov[num].iov_base = create_durable_v2_buf(oparms);
1930 if (iov[num].iov_base == NULL) 1939 if (iov[num].iov_base == NULL)
1931 return -ENOMEM; 1940 return -ENOMEM;
1932 iov[num].iov_len = sizeof(struct create_durable_v2); 1941 iov[num].iov_len = sizeof(struct create_durable_v2);
@@ -2478,7 +2487,8 @@ creat_exit:
2478int 2487int
2479SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, 2488SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
2480 u64 persistent_fid, u64 volatile_fid, u32 opcode, 2489 u64 persistent_fid, u64 volatile_fid, u32 opcode,
2481 bool is_fsctl, char *in_data, u32 indatalen) 2490 bool is_fsctl, char *in_data, u32 indatalen,
2491 __u32 max_response_size)
2482{ 2492{
2483 struct smb2_ioctl_req *req; 2493 struct smb2_ioctl_req *req;
2484 struct kvec *iov = rqst->rq_iov; 2494 struct kvec *iov = rqst->rq_iov;
@@ -2520,16 +2530,21 @@ SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
2520 req->OutputCount = 0; /* MBZ */ 2530 req->OutputCount = 0; /* MBZ */
2521 2531
2522 /* 2532 /*
2523 * Could increase MaxOutputResponse, but that would require more 2533 * In most cases max_response_size is set to 16K (CIFSMaxBufSize)
2524 * than one credit. Windows typically sets this smaller, but for some 2534 * We Could increase default MaxOutputResponse, but that could require
2535 * more credits. Windows typically sets this smaller, but for some
2525 * ioctls it may be useful to allow server to send more. No point 2536 * ioctls it may be useful to allow server to send more. No point
2526 * limiting what the server can send as long as fits in one credit 2537 * limiting what the server can send as long as fits in one credit
2527 * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE 2538 * We can not handle more than CIFS_MAX_BUF_SIZE yet but may want
2528 * (by default, note that it can be overridden to make max larger) 2539 * to increase this limit up in the future.
2529 * in responses (except for read responses which can be bigger. 2540 * Note that for snapshot queries that servers like Azure expect that
2530 * We may want to bump this limit up 2541 * the first query be minimal size (and just used to get the number/size
2542 * of previous versions) so response size must be specified as EXACTLY
2543 * sizeof(struct snapshot_array) which is 16 when rounded up to multiple
2544 * of eight bytes. Currently that is the only case where we set max
2545 * response size smaller.
2531 */ 2546 */
2532 req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize); 2547 req->MaxOutputResponse = cpu_to_le32(max_response_size);
2533 2548
2534 if (is_fsctl) 2549 if (is_fsctl)
2535 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); 2550 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
@@ -2550,13 +2565,14 @@ SMB2_ioctl_free(struct smb_rqst *rqst)
2550 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */ 2565 cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
2551} 2566}
2552 2567
2568
2553/* 2569/*
2554 * SMB2 IOCTL is used for both IOCTLs and FSCTLs 2570 * SMB2 IOCTL is used for both IOCTLs and FSCTLs
2555 */ 2571 */
2556int 2572int
2557SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, 2573SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
2558 u64 volatile_fid, u32 opcode, bool is_fsctl, 2574 u64 volatile_fid, u32 opcode, bool is_fsctl,
2559 char *in_data, u32 indatalen, 2575 char *in_data, u32 indatalen, u32 max_out_data_len,
2560 char **out_data, u32 *plen /* returned data len */) 2576 char **out_data, u32 *plen /* returned data len */)
2561{ 2577{
2562 struct smb_rqst rqst; 2578 struct smb_rqst rqst;
@@ -2593,8 +2609,8 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
2593 rqst.rq_iov = iov; 2609 rqst.rq_iov = iov;
2594 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE; 2610 rqst.rq_nvec = SMB2_IOCTL_IOV_SIZE;
2595 2611
2596 rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, 2612 rc = SMB2_ioctl_init(tcon, &rqst, persistent_fid, volatile_fid, opcode,
2597 opcode, is_fsctl, in_data, indatalen); 2613 is_fsctl, in_data, indatalen, max_out_data_len);
2598 if (rc) 2614 if (rc)
2599 goto ioctl_exit; 2615 goto ioctl_exit;
2600 2616
@@ -2672,7 +2688,8 @@ SMB2_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
2672 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid, 2688 rc = SMB2_ioctl(xid, tcon, persistent_fid, volatile_fid,
2673 FSCTL_SET_COMPRESSION, true /* is_fsctl */, 2689 FSCTL_SET_COMPRESSION, true /* is_fsctl */,
2674 (char *)&fsctl_input /* data input */, 2690 (char *)&fsctl_input /* data input */,
2675 2 /* in data len */, &ret_data /* out data */, NULL); 2691 2 /* in data len */, CIFSMaxBufSize /* max out data */,
2692 &ret_data /* out data */, NULL);
2676 2693
2677 cifs_dbg(FYI, "set compression rc %d\n", rc); 2694 cifs_dbg(FYI, "set compression rc %d\n", rc);
2678 2695
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 3c32d0cfea69..52df125e9189 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -142,11 +142,12 @@ extern int SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
142extern void SMB2_open_free(struct smb_rqst *rqst); 142extern void SMB2_open_free(struct smb_rqst *rqst);
143extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, 143extern int SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon,
144 u64 persistent_fid, u64 volatile_fid, u32 opcode, 144 u64 persistent_fid, u64 volatile_fid, u32 opcode,
145 bool is_fsctl, char *in_data, u32 indatalen, 145 bool is_fsctl, char *in_data, u32 indatalen, u32 maxoutlen,
146 char **out_data, u32 *plen /* returned data len */); 146 char **out_data, u32 *plen /* returned data len */);
147extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, 147extern int SMB2_ioctl_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
148 u64 persistent_fid, u64 volatile_fid, u32 opcode, 148 u64 persistent_fid, u64 volatile_fid, u32 opcode,
149 bool is_fsctl, char *in_data, u32 indatalen); 149 bool is_fsctl, char *in_data, u32 indatalen,
150 __u32 max_response_size);
150extern void SMB2_ioctl_free(struct smb_rqst *rqst); 151extern void SMB2_ioctl_free(struct smb_rqst *rqst);
151extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon, 152extern int SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
152 u64 persistent_file_id, u64 volatile_file_id); 153 u64 persistent_file_id, u64 volatile_file_id);
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 95b5e78c22b1..f25daa207421 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -163,19 +163,24 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
163 return 0; 163 return 0;
164} 164}
165 165
166static void debugfs_evict_inode(struct inode *inode) 166static void debugfs_i_callback(struct rcu_head *head)
167{ 167{
168 truncate_inode_pages_final(&inode->i_data); 168 struct inode *inode = container_of(head, struct inode, i_rcu);
169 clear_inode(inode);
170 if (S_ISLNK(inode->i_mode)) 169 if (S_ISLNK(inode->i_mode))
171 kfree(inode->i_link); 170 kfree(inode->i_link);
171 free_inode_nonrcu(inode);
172}
173
174static void debugfs_destroy_inode(struct inode *inode)
175{
176 call_rcu(&inode->i_rcu, debugfs_i_callback);
172} 177}
173 178
174static const struct super_operations debugfs_super_operations = { 179static const struct super_operations debugfs_super_operations = {
175 .statfs = simple_statfs, 180 .statfs = simple_statfs,
176 .remount_fs = debugfs_remount, 181 .remount_fs = debugfs_remount,
177 .show_options = debugfs_show_options, 182 .show_options = debugfs_show_options,
178 .evict_inode = debugfs_evict_inode, 183 .destroy_inode = debugfs_destroy_inode,
179}; 184};
180 185
181static void debugfs_release_dentry(struct dentry *dentry) 186static void debugfs_release_dentry(struct dentry *dentry)
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 389ea53ea487..bccfc40b3a74 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1414,11 +1414,6 @@ void jffs2_do_clear_inode(struct jffs2_sb_info *c, struct jffs2_inode_info *f)
1414 1414
1415 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL); 1415 jffs2_kill_fragtree(&f->fragtree, deleted?c:NULL);
1416 1416
1417 if (f->target) {
1418 kfree(f->target);
1419 f->target = NULL;
1420 }
1421
1422 fds = f->dents; 1417 fds = f->dents;
1423 while(fds) { 1418 while(fds) {
1424 fd = fds; 1419 fd = fds;
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index bb6ae387469f..05d892c79339 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -47,7 +47,10 @@ static struct inode *jffs2_alloc_inode(struct super_block *sb)
47static void jffs2_i_callback(struct rcu_head *head) 47static void jffs2_i_callback(struct rcu_head *head)
48{ 48{
49 struct inode *inode = container_of(head, struct inode, i_rcu); 49 struct inode *inode = container_of(head, struct inode, i_rcu);
50 kmem_cache_free(jffs2_inode_cachep, JFFS2_INODE_INFO(inode)); 50 struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
51
52 kfree(f->target);
53 kmem_cache_free(jffs2_inode_cachep, f);
51} 54}
52 55
53static void jffs2_destroy_inode(struct inode *inode) 56static void jffs2_destroy_inode(struct inode *inode)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ddef482f1334..6a803a0b75df 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -616,24 +616,25 @@ static int proc_pid_limits(struct seq_file *m, struct pid_namespace *ns,
616static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns, 616static int proc_pid_syscall(struct seq_file *m, struct pid_namespace *ns,
617 struct pid *pid, struct task_struct *task) 617 struct pid *pid, struct task_struct *task)
618{ 618{
619 long nr; 619 struct syscall_info info;
620 unsigned long args[6], sp, pc; 620 u64 *args = &info.data.args[0];
621 int res; 621 int res;
622 622
623 res = lock_trace(task); 623 res = lock_trace(task);
624 if (res) 624 if (res)
625 return res; 625 return res;
626 626
627 if (task_current_syscall(task, &nr, args, 6, &sp, &pc)) 627 if (task_current_syscall(task, &info))
628 seq_puts(m, "running\n"); 628 seq_puts(m, "running\n");
629 else if (nr < 0) 629 else if (info.data.nr < 0)
630 seq_printf(m, "%ld 0x%lx 0x%lx\n", nr, sp, pc); 630 seq_printf(m, "%d 0x%llx 0x%llx\n",
631 info.data.nr, info.sp, info.data.instruction_pointer);
631 else 632 else
632 seq_printf(m, 633 seq_printf(m,
633 "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n", 634 "%d 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx 0x%llx\n",
634 nr, 635 info.data.nr,
635 args[0], args[1], args[2], args[3], args[4], args[5], 636 args[0], args[1], args[2], args[3], args[4], args[5],
636 sp, pc); 637 info.sp, info.data.instruction_pointer);
637 unlock_trace(task); 638 unlock_trace(task);
638 639
639 return 0; 640 return 0;
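The user-visible format of /proc/<pid>/syscall is unchanged by the switch to struct syscall_info: either "running", or "-1 <sp> <pc>", or "<nr> <arg0>..<arg5> <sp> <pc>". A small sketch that dumps it for a given pid, with minimal error handling:

/* Sketch: print /proc/<pid>/syscall for the pid given on the command line. */
#include <stdio.h>

int main(int argc, char **argv)
{
        char path[64], line[256];
        FILE *f;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <pid>\n", argv[0]);
                return 1;
        }
        snprintf(path, sizeof(path), "/proc/%s/syscall", argv[1]);
        f = fopen(path, "r");
        if (!f || !fgets(line, sizeof(line), f)) {
                perror(path);
                return 1;
        }
        printf("%s", line);
        fclose(f);
        return 0;
}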
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 8dc2818fdd84..12628184772c 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -276,14 +276,12 @@ static void ubifs_i_callback(struct rcu_head *head)
276{ 276{
277 struct inode *inode = container_of(head, struct inode, i_rcu); 277 struct inode *inode = container_of(head, struct inode, i_rcu);
278 struct ubifs_inode *ui = ubifs_inode(inode); 278 struct ubifs_inode *ui = ubifs_inode(inode);
279 kfree(ui->data);
279 kmem_cache_free(ubifs_inode_slab, ui); 280 kmem_cache_free(ubifs_inode_slab, ui);
280} 281}
281 282
282static void ubifs_destroy_inode(struct inode *inode) 283static void ubifs_destroy_inode(struct inode *inode)
283{ 284{
284 struct ubifs_inode *ui = ubifs_inode(inode);
285
286 kfree(ui->data);
287 call_rcu(&inode->i_rcu, ubifs_i_callback); 285 call_rcu(&inode->i_rcu, ubifs_i_callback);
288} 286}
289 287
diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h
index 0c938a4354f6..b88239e9efe4 100644
--- a/include/asm-generic/syscall.h
+++ b/include/asm-generic/syscall.h
@@ -105,41 +105,30 @@ void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
105 * syscall_get_arguments - extract system call parameter values 105 * syscall_get_arguments - extract system call parameter values
106 * @task: task of interest, must be blocked 106 * @task: task of interest, must be blocked
107 * @regs: task_pt_regs() of @task 107 * @regs: task_pt_regs() of @task
108 * @i: argument index [0,5]
109 * @n: number of arguments; n+i must be [1,6].
110 * @args: array filled with argument values 108 * @args: array filled with argument values
111 * 109 *
112 * Fetches @n arguments to the system call starting with the @i'th argument 110 * Fetches 6 arguments to the system call. First argument is stored in
 113 * (from 0 through 5). Argument @i is stored in @args[0], and so on. 111 * @args[0], and so on.
114 * An arch inline version is probably optimal when @i and @n are constants.
115 * 112 *
116 * It's only valid to call this when @task is stopped for tracing on 113 * It's only valid to call this when @task is stopped for tracing on
117 * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. 114 * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
118 * It's invalid to call this with @i + @n > 6; we only support system calls
119 * taking up to 6 arguments.
120 */ 115 */
121void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs, 116void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
122 unsigned int i, unsigned int n, unsigned long *args); 117 unsigned long *args);
123 118
124/** 119/**
125 * syscall_set_arguments - change system call parameter value 120 * syscall_set_arguments - change system call parameter value
126 * @task: task of interest, must be in system call entry tracing 121 * @task: task of interest, must be in system call entry tracing
127 * @regs: task_pt_regs() of @task 122 * @regs: task_pt_regs() of @task
128 * @i: argument index [0,5]
129 * @n: number of arguments; n+i must be [1,6].
130 * @args: array of argument values to store 123 * @args: array of argument values to store
131 * 124 *
132 * Changes @n arguments to the system call starting with the @i'th argument. 125 * Changes 6 arguments to the system call.
133 * Argument @i gets value @args[0], and so on. 126 * The first argument gets value @args[0], and so on.
134 * An arch inline version is probably optimal when @i and @n are constants.
135 * 127 *
136 * It's only valid to call this when @task is stopped for tracing on 128 * It's only valid to call this when @task is stopped for tracing on
137 * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT. 129 * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
138 * It's invalid to call this with @i + @n > 6; we only support system calls
139 * taking up to 6 arguments.
140 */ 130 */
141void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs, 131void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
142 unsigned int i, unsigned int n,
143 const unsigned long *args); 132 const unsigned long *args);
144 133
145/** 134/**
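
With the @i/@n parameters gone, syscall_get_arguments()/syscall_set_arguments() always transfer all six argument slots, so each architecture can copy its argument registers in one straight-line block. The following userspace sketch shows roughly what an arch-side implementation reduces to; the pt_regs layout is invented for illustration and does not correspond to any real architecture.

#include <stdio.h>
#include <string.h>

/* Invented register layout; real architectures define their own pt_regs. */
struct toy_pt_regs {
	unsigned long regs[8];   /* regs[0] = syscall nr, regs[1..6] = args */
};

/* After the API change there is no partial window: always six values. */
static void toy_syscall_get_arguments(const struct toy_pt_regs *regs,
				      unsigned long *args)
{
	memcpy(args, &regs->regs[1], 6 * sizeof(args[0]));
}

static void toy_syscall_set_arguments(struct toy_pt_regs *regs,
				      const unsigned long *args)
{
	memcpy(&regs->regs[1], args, 6 * sizeof(args[0]));
}

int main(void)
{
	struct toy_pt_regs regs = { .regs = { 42, 1, 2, 3, 4, 5, 6, 0 } };
	unsigned long args[6];

	toy_syscall_get_arguments(&regs, args);
	printf("arg0=%lu arg5=%lu\n", args[0], args[5]);

	args[0] = 99;
	toy_syscall_set_arguments(&regs, args);
	printf("reg1 now %lu\n", regs.regs[1]);
	return 0;
}
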
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 6fee8b1a4400..5cd824c1c0ca 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising)
469 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, 469 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
470 advertising)) 470 advertising))
471 lcl_adv |= ADVERTISE_PAUSE_CAP; 471 lcl_adv |= ADVERTISE_PAUSE_CAP;
472 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, 472 if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
473 advertising)) 473 advertising))
474 lcl_adv |= ADVERTISE_PAUSE_ASYM; 474 lcl_adv |= ADVERTISE_PAUSE_ASYM;
475 475
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 022541dc5dbf..0d0729648844 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -594,6 +594,8 @@ enum mlx5_pagefault_type_flags {
594}; 594};
595 595
596struct mlx5_td { 596struct mlx5_td {
597 /* protects tirs list changes while tirs refresh */
598 struct mutex list_lock;
597 struct list_head tirs_list; 599 struct list_head tirs_list;
598 u32 tdn; 600 u32 tdn;
599}; 601};
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index edb9b040c94c..d5084ebd9f03 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -9,6 +9,13 @@
9#include <linux/bug.h> /* For BUG_ON. */ 9#include <linux/bug.h> /* For BUG_ON. */
10#include <linux/pid_namespace.h> /* For task_active_pid_ns. */ 10#include <linux/pid_namespace.h> /* For task_active_pid_ns. */
11#include <uapi/linux/ptrace.h> 11#include <uapi/linux/ptrace.h>
12#include <linux/seccomp.h>
13
14/* Add sp to seccomp_data, as seccomp is user API, we don't want to modify it */
15struct syscall_info {
16 __u64 sp;
17 struct seccomp_data data;
18};
12 19
13extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, 20extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr,
14 void *buf, int len, unsigned int gup_flags); 21 void *buf, int len, unsigned int gup_flags);
@@ -407,9 +414,7 @@ static inline void user_single_step_report(struct pt_regs *regs)
407#define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) 414#define current_user_stack_pointer() user_stack_pointer(current_pt_regs())
408#endif 415#endif
409 416
410extern int task_current_syscall(struct task_struct *target, long *callno, 417extern int task_current_syscall(struct task_struct *target, struct syscall_info *info);
411 unsigned long args[6], unsigned int maxargs,
412 unsigned long *sp, unsigned long *pc);
413 418
414extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); 419extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact);
415#endif 420#endif
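
struct syscall_info wraps the existing user-visible struct seccomp_data and only adds the stack pointer beside it, so callers such as proc_pid_syscall() read the number, arguments, PC and SP from a single object. A userspace sketch of consuming such a structure follows; the seccomp_data layout is a local copy of the UAPI definition, everything else is illustrative.

#include <stdio.h>
#include <stdint.h>

/* Local copy of the uapi struct seccomp_data layout. */
struct seccomp_data {
	int32_t  nr;
	uint32_t arch;
	uint64_t instruction_pointer;
	uint64_t args[6];
};

/* Same shape as the new struct syscall_info: sp lives outside
 * seccomp_data because that struct is user ABI and must not grow. */
struct syscall_info {
	uint64_t sp;
	struct seccomp_data data;
};

/* Roughly what the /proc/<pid>/syscall formatting boils down to. */
static void print_syscall_info(const struct syscall_info *info)
{
	if (info->data.nr < 0) {
		printf("%d 0x%llx 0x%llx\n", info->data.nr,
		       (unsigned long long)info->sp,
		       (unsigned long long)info->data.instruction_pointer);
		return;
	}
	printf("%d", info->data.nr);
	for (int i = 0; i < 6; i++)
		printf(" 0x%llx", (unsigned long long)info->data.args[i]);
	printf(" 0x%llx 0x%llx\n", (unsigned long long)info->sp,
	       (unsigned long long)info->data.instruction_pointer);
}

int main(void)
{
	struct syscall_info info = {
		.sp = 0x7ffdf000,
		.data = { .nr = 1, .args = { 1, 0x5000, 64 },
			  .instruction_pointer = 0x401000 },
	};

	print_syscall_info(&info);
	return 0;
}
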
diff --git a/include/net/ip.h b/include/net/ip.h
index be3cad9c2e4c..583526aad1d0 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -677,7 +677,7 @@ int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
677 unsigned char __user *data, int optlen); 677 unsigned char __user *data, int optlen);
678void ip_options_undo(struct ip_options *opt); 678void ip_options_undo(struct ip_options *opt);
679void ip_forward_options(struct sk_buff *skb); 679void ip_forward_options(struct sk_buff *skb);
680int ip_options_rcv_srr(struct sk_buff *skb); 680int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev);
681 681
682/* 682/*
683 * Functions provided by ip_sockglue.c 683 * Functions provided by ip_sockglue.c
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index a68ced28d8f4..12689ddfc24c 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -59,6 +59,7 @@ struct net {
59 */ 59 */
60 spinlock_t rules_mod_lock; 60 spinlock_t rules_mod_lock;
61 61
62 u32 hash_mix;
62 atomic64_t cookie_gen; 63 atomic64_t cookie_gen;
63 64
64 struct list_head list; /* list of network namespaces */ 65 struct list_head list; /* list of network namespaces */
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 16a842456189..d9b665151f3d 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -2,16 +2,10 @@
2#ifndef __NET_NS_HASH_H__ 2#ifndef __NET_NS_HASH_H__
3#define __NET_NS_HASH_H__ 3#define __NET_NS_HASH_H__
4 4
5#include <asm/cache.h> 5#include <net/net_namespace.h>
6
7struct net;
8 6
9static inline u32 net_hash_mix(const struct net *net) 7static inline u32 net_hash_mix(const struct net *net)
10{ 8{
11#ifdef CONFIG_NET_NS 9 return net->hash_mix;
12 return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
13#else
14 return 0;
15#endif
16} 10}
17#endif 11#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 7d1a0483a17b..a2b38b3deeca 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -923,6 +923,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
923 sch->qstats.overlimits++; 923 sch->qstats.overlimits++;
924} 924}
925 925
926static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
927{
928 __u32 qlen = qdisc_qlen_sum(sch);
929
930 return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
931}
932
933static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
934 __u32 *backlog)
935{
936 struct gnet_stats_queue qstats = { 0 };
937 __u32 len = qdisc_qlen_sum(sch);
938
939 __gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
940 *qlen = qstats.qlen;
941 *backlog = qstats.backlog;
942}
943
944static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
945{
946 __u32 qlen, backlog;
947
948 qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
949 qdisc_tree_reduce_backlog(sch, qlen, backlog);
950}
951
952static inline void qdisc_purge_queue(struct Qdisc *sch)
953{
954 __u32 qlen, backlog;
955
956 qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
957 qdisc_reset(sch);
958 qdisc_tree_reduce_backlog(sch, qlen, backlog);
959}
960
926static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh) 961static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
927{ 962{
928 qh->head = NULL; 963 qh->head = NULL;
@@ -1106,13 +1141,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
1106 sch_tree_lock(sch); 1141 sch_tree_lock(sch);
1107 old = *pold; 1142 old = *pold;
1108 *pold = new; 1143 *pold = new;
1109 if (old != NULL) { 1144 if (old != NULL)
1110 unsigned int qlen = old->q.qlen; 1145 qdisc_tree_flush_backlog(old);
1111 unsigned int backlog = old->qstats.backlog;
1112
1113 qdisc_reset(old);
1114 qdisc_tree_reduce_backlog(old, qlen, backlog);
1115 }
1116 sch_tree_unlock(sch); 1146 sch_tree_unlock(sch);
1117 1147
1118 return old; 1148 return old;
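
The new qdisc_tree_flush_backlog()/qdisc_purge_queue() helpers capture qlen and backlog with the per-cpu-aware accounting before the queue is reset, then propagate exactly that reduction up the tree, rather than reading possibly stale counters directly as the old qdisc_replace() body did. A toy sketch of that ordering, with plain counters instead of per-cpu statistics and invented names:

#include <stdio.h>

struct toy_qdisc {
	unsigned int qlen;       /* packets queued */
	unsigned int backlog;    /* bytes queued */
};

/* Snapshot the counters before anything is dropped. */
static void qstats_qlen_backlog(const struct toy_qdisc *q,
				unsigned int *qlen, unsigned int *backlog)
{
	*qlen = q->qlen;
	*backlog = q->backlog;
}

static void toy_reset(struct toy_qdisc *q)
{
	q->qlen = 0;
	q->backlog = 0;
}

/* Parent bookkeeping: subtract what the child just threw away. */
static void tree_reduce_backlog(struct toy_qdisc *parent,
				unsigned int qlen, unsigned int backlog)
{
	parent->qlen -= qlen;
	parent->backlog -= backlog;
}

/* Equivalent of qdisc_purge_queue(): snapshot, reset, then propagate. */
static void purge_queue(struct toy_qdisc *parent, struct toy_qdisc *child)
{
	unsigned int qlen, backlog;

	qstats_qlen_backlog(child, &qlen, &backlog);
	toy_reset(child);
	tree_reduce_backlog(parent, qlen, backlog);
}

int main(void)
{
	struct toy_qdisc parent = { .qlen = 10, .backlog = 15000 };
	struct toy_qdisc child  = { .qlen = 4,  .backlog = 6000 };

	purge_queue(&parent, &child);
	printf("parent now qlen=%u backlog=%u\n", parent.qlen, parent.backlog);
	return 0;
}
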
diff --git a/include/trace/events/syscalls.h b/include/trace/events/syscalls.h
index 44a3259ed4a5..b6e0cbc2c71f 100644
--- a/include/trace/events/syscalls.h
+++ b/include/trace/events/syscalls.h
@@ -28,7 +28,7 @@ TRACE_EVENT_FN(sys_enter,
28 28
29 TP_fast_assign( 29 TP_fast_assign(
30 __entry->id = id; 30 __entry->id = id;
31 syscall_get_arguments(current, regs, 0, 6, __entry->args); 31 syscall_get_arguments(current, regs, __entry->args);
32 ), 32 ),
33 33
34 TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)", 34 TP_printk("NR %ld (%lx, %lx, %lx, %lx, %lx, %lx)",
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 8974b3755670..3c18260403dd 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -162,10 +162,14 @@ static void cpu_map_kthread_stop(struct work_struct *work)
162static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu, 162static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
163 struct xdp_frame *xdpf) 163 struct xdp_frame *xdpf)
164{ 164{
165 unsigned int hard_start_headroom;
165 unsigned int frame_size; 166 unsigned int frame_size;
166 void *pkt_data_start; 167 void *pkt_data_start;
167 struct sk_buff *skb; 168 struct sk_buff *skb;
168 169
170 /* Part of headroom was reserved to xdpf */
171 hard_start_headroom = sizeof(struct xdp_frame) + xdpf->headroom;
172
169 /* build_skb need to place skb_shared_info after SKB end, and 173 /* build_skb need to place skb_shared_info after SKB end, and
170 * also want to know the memory "truesize". Thus, need to 174 * also want to know the memory "truesize". Thus, need to
171 * know the memory frame size backing xdp_buff. 175 * know the memory frame size backing xdp_buff.
@@ -183,15 +187,15 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
183 * is not at a fixed memory location, with mixed length 187 * is not at a fixed memory location, with mixed length
184 * packets, which is bad for cache-line hotness. 188 * packets, which is bad for cache-line hotness.
185 */ 189 */
186 frame_size = SKB_DATA_ALIGN(xdpf->len + xdpf->headroom) + 190 frame_size = SKB_DATA_ALIGN(xdpf->len + hard_start_headroom) +
187 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 191 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
188 192
189 pkt_data_start = xdpf->data - xdpf->headroom; 193 pkt_data_start = xdpf->data - hard_start_headroom;
190 skb = build_skb(pkt_data_start, frame_size); 194 skb = build_skb(pkt_data_start, frame_size);
191 if (!skb) 195 if (!skb)
192 return NULL; 196 return NULL;
193 197
194 skb_reserve(skb, xdpf->headroom); 198 skb_reserve(skb, hard_start_headroom);
195 __skb_put(skb, xdpf->len); 199 __skb_put(skb, xdpf->len);
196 if (xdpf->metasize) 200 if (xdpf->metasize)
197 skb_metadata_set(skb, xdpf->metasize); 201 skb_metadata_set(skb, xdpf->metasize);
@@ -205,6 +209,9 @@ static struct sk_buff *cpu_map_build_skb(struct bpf_cpu_map_entry *rcpu,
205 * - RX ring dev queue index (skb_record_rx_queue) 209 * - RX ring dev queue index (skb_record_rx_queue)
206 */ 210 */
207 211
212 /* Allow SKB to reuse area used by xdp_frame */
213 xdp_scrub_frame(xdpf);
214
208 return skb; 215 return skb;
209} 216}
210 217
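
The cpumap fix accounts for the struct xdp_frame stored at the very start of the headroom: the skb must be built from the true hard start of the buffer and then reserve the full headroom (frame metadata plus XDP headroom), otherwise the tail-room calculation is short by sizeof(struct xdp_frame). A compact userspace sketch of the size arithmetic, with a stand-in alignment macro and illustrative values:

#include <stdio.h>
#include <stddef.h>

/* Stand-in for SKB_DATA_ALIGN(): round up to the cacheline size. */
#define CACHELINE	64
#define ALIGN_UP(x)	(((x) + CACHELINE - 1) & ~(size_t)(CACHELINE - 1))

struct toy_xdp_frame {		/* metadata stored at the buffer start */
	void *data;
	unsigned short len;
	unsigned short headroom;
};

struct toy_shared_info {	/* tail structure, like skb_shared_info */
	unsigned char pad[320];
};

int main(void)
{
	struct toy_xdp_frame f = { .len = 1200, .headroom = 192 };

	/* Headroom as seen from the hard start of the buffer: the
	 * xdp_frame itself sits in front of the XDP headroom. */
	size_t hard_start_headroom = sizeof(struct toy_xdp_frame) + f.headroom;

	size_t frame_size = ALIGN_UP(f.len + hard_start_headroom) +
			    ALIGN_UP(sizeof(struct toy_shared_info));

	printf("hard_start_headroom=%zu frame_size=%zu\n",
	       hard_start_headroom, frame_size);
	return 0;
}
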
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 2ada5e21dfa6..4a8f390a2b82 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -554,19 +554,6 @@ struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type typ
554} 554}
555EXPORT_SYMBOL(bpf_prog_get_type_path); 555EXPORT_SYMBOL(bpf_prog_get_type_path);
556 556
557static void bpf_evict_inode(struct inode *inode)
558{
559 enum bpf_type type;
560
561 truncate_inode_pages_final(&inode->i_data);
562 clear_inode(inode);
563
564 if (S_ISLNK(inode->i_mode))
565 kfree(inode->i_link);
566 if (!bpf_inode_type(inode, &type))
567 bpf_any_put(inode->i_private, type);
568}
569
570/* 557/*
571 * Display the mount options in /proc/mounts. 558 * Display the mount options in /proc/mounts.
572 */ 559 */
@@ -579,11 +566,28 @@ static int bpf_show_options(struct seq_file *m, struct dentry *root)
579 return 0; 566 return 0;
580} 567}
581 568
569static void bpf_destroy_inode_deferred(struct rcu_head *head)
570{
571 struct inode *inode = container_of(head, struct inode, i_rcu);
572 enum bpf_type type;
573
574 if (S_ISLNK(inode->i_mode))
575 kfree(inode->i_link);
576 if (!bpf_inode_type(inode, &type))
577 bpf_any_put(inode->i_private, type);
578 free_inode_nonrcu(inode);
579}
580
581static void bpf_destroy_inode(struct inode *inode)
582{
583 call_rcu(&inode->i_rcu, bpf_destroy_inode_deferred);
584}
585
582static const struct super_operations bpf_super_ops = { 586static const struct super_operations bpf_super_ops = {
583 .statfs = simple_statfs, 587 .statfs = simple_statfs,
584 .drop_inode = generic_delete_inode, 588 .drop_inode = generic_delete_inode,
585 .show_options = bpf_show_options, 589 .show_options = bpf_show_options,
586 .evict_inode = bpf_evict_inode, 590 .destroy_inode = bpf_destroy_inode,
587}; 591};
588 592
589enum { 593enum {
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index fd502c1f71eb..6c5a41f7f338 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1897,8 +1897,9 @@ continue_func:
1897 } 1897 }
1898 frame++; 1898 frame++;
1899 if (frame >= MAX_CALL_FRAMES) { 1899 if (frame >= MAX_CALL_FRAMES) {
1900 WARN_ONCE(1, "verifier bug. Call stack is too deep\n"); 1900 verbose(env, "the call stack of %d frames is too deep !\n",
1901 return -EFAULT; 1901 frame);
1902 return -E2BIG;
1902 } 1903 }
1903 goto process_func; 1904 goto process_func;
1904 } 1905 }
diff --git a/kernel/seccomp.c b/kernel/seccomp.c
index 54a0347ca812..df27e499956a 100644
--- a/kernel/seccomp.c
+++ b/kernel/seccomp.c
@@ -149,7 +149,7 @@ static void populate_seccomp_data(struct seccomp_data *sd)
149 149
150 sd->nr = syscall_get_nr(task, regs); 150 sd->nr = syscall_get_nr(task, regs);
151 sd->arch = syscall_get_arch(); 151 sd->arch = syscall_get_arch();
152 syscall_get_arguments(task, regs, 0, 6, args); 152 syscall_get_arguments(task, regs, args);
153 sd->args[0] = args[0]; 153 sd->args[0] = args[0];
154 sd->args[1] = args[1]; 154 sd->args[1] = args[1];
155 sd->args[2] = args[2]; 155 sd->args[2] = args[2];
diff --git a/kernel/signal.c b/kernel/signal.c
index b7953934aa99..f98448cf2def 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -3605,16 +3605,11 @@ SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3605 if (unlikely(sig != kinfo.si_signo)) 3605 if (unlikely(sig != kinfo.si_signo))
3606 goto err; 3606 goto err;
3607 3607
3608 /* Only allow sending arbitrary signals to yourself. */
3609 ret = -EPERM;
3608 if ((task_pid(current) != pid) && 3610 if ((task_pid(current) != pid) &&
3609 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) { 3611 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3610 /* Only allow sending arbitrary signals to yourself. */ 3612 goto err;
3611 ret = -EPERM;
3612 if (kinfo.si_code != SI_USER)
3613 goto err;
3614
3615 /* Turn this into a regular kill signal. */
3616 prepare_kill_siginfo(sig, &kinfo);
3617 }
3618 } else { 3613 } else {
3619 prepare_kill_siginfo(sig, &kinfo); 3614 prepare_kill_siginfo(sig, &kinfo);
3620 } 3615 }
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index f93a56d2db27..fa8fbff736d6 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -314,6 +314,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
314 struct ring_buffer_event *event; 314 struct ring_buffer_event *event;
315 struct ring_buffer *buffer; 315 struct ring_buffer *buffer;
316 unsigned long irq_flags; 316 unsigned long irq_flags;
317 unsigned long args[6];
317 int pc; 318 int pc;
318 int syscall_nr; 319 int syscall_nr;
319 int size; 320 int size;
@@ -347,7 +348,8 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
347 348
348 entry = ring_buffer_event_data(event); 349 entry = ring_buffer_event_data(event);
349 entry->nr = syscall_nr; 350 entry->nr = syscall_nr;
350 syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args); 351 syscall_get_arguments(current, regs, args);
352 memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
351 353
352 event_trigger_unlock_commit(trace_file, buffer, event, entry, 354 event_trigger_unlock_commit(trace_file, buffer, event, entry,
353 irq_flags, pc); 355 irq_flags, pc);
@@ -583,6 +585,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
583 struct syscall_metadata *sys_data; 585 struct syscall_metadata *sys_data;
584 struct syscall_trace_enter *rec; 586 struct syscall_trace_enter *rec;
585 struct hlist_head *head; 587 struct hlist_head *head;
588 unsigned long args[6];
586 bool valid_prog_array; 589 bool valid_prog_array;
587 int syscall_nr; 590 int syscall_nr;
588 int rctx; 591 int rctx;
@@ -613,8 +616,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
613 return; 616 return;
614 617
615 rec->nr = syscall_nr; 618 rec->nr = syscall_nr;
616 syscall_get_arguments(current, regs, 0, sys_data->nb_args, 619 syscall_get_arguments(current, regs, args);
617 (unsigned long *)&rec->args); 620 memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
618 621
619 if ((valid_prog_array && 622 if ((valid_prog_array &&
620 !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) || 623 !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
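
Because the helper now always writes six values, the tracers fetch into a full six-element scratch array and then copy only the nb_args entries the event record has room for; copying straight into the record would overrun events for syscalls that take fewer arguments. A small standalone sketch of that fetch-then-truncate step, with made-up names:

#include <stdio.h>
#include <string.h>

#define MAX_SYSCALL_ARGS 6

/* Pretend event record: room only for the args this syscall defines. */
struct toy_enter_record {
	int nr;
	unsigned long args[3];	/* e.g. a 3-argument syscall */
};

/* The helper always produces all six slots. */
static void fetch_all_args(unsigned long *args)
{
	for (int i = 0; i < MAX_SYSCALL_ARGS; i++)
		args[i] = 100 + i;
}

int main(void)
{
	unsigned long args[MAX_SYSCALL_ARGS];
	struct toy_enter_record rec = { .nr = 42 };
	unsigned int nb_args = 3;	/* from the syscall metadata */

	fetch_all_args(args);
	/* Copy only what the record can hold. */
	memcpy(rec.args, args, sizeof(unsigned long) * nb_args);

	printf("nr=%d args=%lu,%lu,%lu\n", rec.nr,
	       rec.args[0], rec.args[1], rec.args[2]);
	return 0;
}
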
diff --git a/lib/syscall.c b/lib/syscall.c
index 1a7077f20eae..fb328e7ccb08 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -5,16 +5,14 @@
5#include <linux/export.h> 5#include <linux/export.h>
6#include <asm/syscall.h> 6#include <asm/syscall.h>
7 7
8static int collect_syscall(struct task_struct *target, long *callno, 8static int collect_syscall(struct task_struct *target, struct syscall_info *info)
9 unsigned long args[6], unsigned int maxargs,
10 unsigned long *sp, unsigned long *pc)
11{ 9{
12 struct pt_regs *regs; 10 struct pt_regs *regs;
13 11
14 if (!try_get_task_stack(target)) { 12 if (!try_get_task_stack(target)) {
15 /* Task has no stack, so the task isn't in a syscall. */ 13 /* Task has no stack, so the task isn't in a syscall. */
16 *sp = *pc = 0; 14 memset(info, 0, sizeof(*info));
17 *callno = -1; 15 info->data.nr = -1;
18 return 0; 16 return 0;
19 } 17 }
20 18
@@ -24,12 +22,13 @@ static int collect_syscall(struct task_struct *target, long *callno,
24 return -EAGAIN; 22 return -EAGAIN;
25 } 23 }
26 24
27 *sp = user_stack_pointer(regs); 25 info->sp = user_stack_pointer(regs);
28 *pc = instruction_pointer(regs); 26 info->data.instruction_pointer = instruction_pointer(regs);
29 27
30 *callno = syscall_get_nr(target, regs); 28 info->data.nr = syscall_get_nr(target, regs);
31 if (*callno != -1L && maxargs > 0) 29 if (info->data.nr != -1L)
32 syscall_get_arguments(target, regs, 0, maxargs, args); 30 syscall_get_arguments(target, regs,
31 (unsigned long *)&info->data.args[0]);
33 32
34 put_task_stack(target); 33 put_task_stack(target);
35 return 0; 34 return 0;
@@ -38,41 +37,35 @@ static int collect_syscall(struct task_struct *target, long *callno,
38/** 37/**
39 * task_current_syscall - Discover what a blocked task is doing. 38 * task_current_syscall - Discover what a blocked task is doing.
40 * @target: thread to examine 39 * @target: thread to examine
41 * @callno: filled with system call number or -1 40 * @info: structure with the following fields:
42 * @args: filled with @maxargs system call arguments 41 * .sp - filled with user stack pointer
43 * @maxargs: number of elements in @args to fill 42 * .data.nr - filled with system call number or -1
44 * @sp: filled with user stack pointer 43 * .data.args - filled with @maxargs system call arguments
45 * @pc: filled with user PC 44 * .data.instruction_pointer - filled with user PC
46 * 45 *
47 * If @target is blocked in a system call, returns zero with *@callno 46 * If @target is blocked in a system call, returns zero with @info.data.nr
48 * set to the the call's number and @args filled in with its arguments. 47 * set to the the call's number and @info.data.args filled in with its
49 * Registers not used for system call arguments may not be available and 48 * arguments. Registers not used for system call arguments may not be available
50 * it is not kosher to use &struct user_regset calls while the system 49 * and it is not kosher to use &struct user_regset calls while the system
51 * call is still in progress. Note we may get this result if @target 50 * call is still in progress. Note we may get this result if @target
52 * has finished its system call but not yet returned to user mode, such 51 * has finished its system call but not yet returned to user mode, such
53 * as when it's stopped for signal handling or syscall exit tracing. 52 * as when it's stopped for signal handling or syscall exit tracing.
54 * 53 *
55 * If @target is blocked in the kernel during a fault or exception, 54 * If @target is blocked in the kernel during a fault or exception,
56 * returns zero with *@callno set to -1 and does not fill in @args. 55 * returns zero with *@info.data.nr set to -1 and does not fill in
57 * If so, it's now safe to examine @target using &struct user_regset 56 * @info.data.args. If so, it's now safe to examine @target using
58 * get() calls as long as we're sure @target won't return to user mode. 57 * &struct user_regset get() calls as long as we're sure @target won't return
58 * to user mode.
59 * 59 *
60 * Returns -%EAGAIN if @target does not remain blocked. 60 * Returns -%EAGAIN if @target does not remain blocked.
61 *
62 * Returns -%EINVAL if @maxargs is too large (maximum is six).
63 */ 61 */
64int task_current_syscall(struct task_struct *target, long *callno, 62int task_current_syscall(struct task_struct *target, struct syscall_info *info)
65 unsigned long args[6], unsigned int maxargs,
66 unsigned long *sp, unsigned long *pc)
67{ 63{
68 long state; 64 long state;
69 unsigned long ncsw; 65 unsigned long ncsw;
70 66
71 if (unlikely(maxargs > 6))
72 return -EINVAL;
73
74 if (target == current) 67 if (target == current)
75 return collect_syscall(target, callno, args, maxargs, sp, pc); 68 return collect_syscall(target, info);
76 69
77 state = target->state; 70 state = target->state;
78 if (unlikely(!state)) 71 if (unlikely(!state))
@@ -80,7 +73,7 @@ int task_current_syscall(struct task_struct *target, long *callno,
80 73
81 ncsw = wait_task_inactive(target, state); 74 ncsw = wait_task_inactive(target, state);
82 if (unlikely(!ncsw) || 75 if (unlikely(!ncsw) ||
83 unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || 76 unlikely(collect_syscall(target, info)) ||
84 unlikely(wait_task_inactive(target, state) != ncsw)) 77 unlikely(wait_task_inactive(target, state) != ncsw))
85 return -EAGAIN; 78 return -EAGAIN;
86 79
diff --git a/mm/compaction.c b/mm/compaction.c
index f171a83707ce..3319e0872d01 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -242,6 +242,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
242 bool check_target) 242 bool check_target)
243{ 243{
244 struct page *page = pfn_to_online_page(pfn); 244 struct page *page = pfn_to_online_page(pfn);
245 struct page *block_page;
245 struct page *end_page; 246 struct page *end_page;
246 unsigned long block_pfn; 247 unsigned long block_pfn;
247 248
@@ -267,20 +268,26 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
267 get_pageblock_migratetype(page) != MIGRATE_MOVABLE) 268 get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
268 return false; 269 return false;
269 270
271 /* Ensure the start of the pageblock or zone is online and valid */
272 block_pfn = pageblock_start_pfn(pfn);
273 block_page = pfn_to_online_page(max(block_pfn, zone->zone_start_pfn));
274 if (block_page) {
275 page = block_page;
276 pfn = block_pfn;
277 }
278
279 /* Ensure the end of the pageblock or zone is online and valid */
280 block_pfn += pageblock_nr_pages;
281 block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
282 end_page = pfn_to_online_page(block_pfn);
283 if (!end_page)
284 return false;
285
270 /* 286 /*
271 * Only clear the hint if a sample indicates there is either a 287 * Only clear the hint if a sample indicates there is either a
272 * free page or an LRU page in the block. One or other condition 288 * free page or an LRU page in the block. One or other condition
273 * is necessary for the block to be a migration source/target. 289 * is necessary for the block to be a migration source/target.
274 */ 290 */
275 block_pfn = pageblock_start_pfn(pfn);
276 pfn = max(block_pfn, zone->zone_start_pfn);
277 page = pfn_to_page(pfn);
278 if (zone != page_zone(page))
279 return false;
280 pfn = block_pfn + pageblock_nr_pages;
281 pfn = min(pfn, zone_end_pfn(zone));
282 end_page = pfn_to_page(pfn);
283
284 do { 291 do {
285 if (pfn_valid_within(pfn)) { 292 if (pfn_valid_within(pfn)) {
286 if (check_source && PageLRU(page)) { 293 if (check_source && PageLRU(page)) {
@@ -309,7 +316,7 @@ __reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
309static void __reset_isolation_suitable(struct zone *zone) 316static void __reset_isolation_suitable(struct zone *zone)
310{ 317{
311 unsigned long migrate_pfn = zone->zone_start_pfn; 318 unsigned long migrate_pfn = zone->zone_start_pfn;
312 unsigned long free_pfn = zone_end_pfn(zone); 319 unsigned long free_pfn = zone_end_pfn(zone) - 1;
313 unsigned long reset_migrate = free_pfn; 320 unsigned long reset_migrate = free_pfn;
314 unsigned long reset_free = migrate_pfn; 321 unsigned long reset_free = migrate_pfn;
315 bool source_set = false; 322 bool source_set = false;
@@ -1363,7 +1370,7 @@ fast_isolate_freepages(struct compact_control *cc)
1363 count_compact_events(COMPACTISOLATED, nr_isolated); 1370 count_compact_events(COMPACTISOLATED, nr_isolated);
1364 } else { 1371 } else {
1365 /* If isolation fails, abort the search */ 1372 /* If isolation fails, abort the search */
1366 order = -1; 1373 order = cc->search_order + 1;
1367 page = NULL; 1374 page = NULL;
1368 } 1375 }
1369 } 1376 }
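
The compaction fix stops assuming that the first and last pfn of a pageblock are backed by valid, online memory: it clamps the block start to the zone start, the block end to the last pfn of the zone, and bails out if either boundary page cannot be resolved. A minimal sketch of just the boundary clamping, ignoring the online/valid checks; the constants are illustrative:

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long zone_start_pfn = 0x100;   /* zone start, mid-pageblock */
	unsigned long zone_end_pfn   = 0x8050;  /* first pfn past the zone   */
	unsigned long pfn            = 0x123;

	/* Start of the pageblock, clamped to the zone start. */
	unsigned long block_pfn = pfn & ~(PAGEBLOCK_NR_PAGES - 1);
	unsigned long start = max_ul(block_pfn, zone_start_pfn);

	/* End of the pageblock, clamped to the last pfn of the zone. */
	unsigned long end = min_ul(block_pfn + PAGEBLOCK_NR_PAGES,
				   zone_end_pfn - 1);

	printf("scan [%#lx, %#lx]\n", start, end);
	return 0;
}
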
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 15293c2a5dd8..8d77b6ee4477 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -443,27 +443,29 @@ static int vlan_dev_fcoe_disable(struct net_device *dev)
443 return rc; 443 return rc;
444} 444}
445 445
446static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) 446static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid,
447 struct scatterlist *sgl, unsigned int sgc)
447{ 448{
448 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; 449 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
449 const struct net_device_ops *ops = real_dev->netdev_ops; 450 const struct net_device_ops *ops = real_dev->netdev_ops;
450 int rc = -EINVAL; 451 int rc = 0;
452
453 if (ops->ndo_fcoe_ddp_target)
454 rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
451 455
452 if (ops->ndo_fcoe_get_wwn)
453 rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
454 return rc; 456 return rc;
455} 457}
458#endif
456 459
457static int vlan_dev_fcoe_ddp_target(struct net_device *dev, u16 xid, 460#ifdef NETDEV_FCOE_WWNN
458 struct scatterlist *sgl, unsigned int sgc) 461static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
459{ 462{
460 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev; 463 struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
461 const struct net_device_ops *ops = real_dev->netdev_ops; 464 const struct net_device_ops *ops = real_dev->netdev_ops;
462 int rc = 0; 465 int rc = -EINVAL;
463
464 if (ops->ndo_fcoe_ddp_target)
465 rc = ops->ndo_fcoe_ddp_target(real_dev, xid, sgl, sgc);
466 466
467 if (ops->ndo_fcoe_get_wwn)
468 rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type);
467 return rc; 469 return rc;
468} 470}
469#endif 471#endif
@@ -794,9 +796,11 @@ static const struct net_device_ops vlan_netdev_ops = {
794 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done, 796 .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
795 .ndo_fcoe_enable = vlan_dev_fcoe_enable, 797 .ndo_fcoe_enable = vlan_dev_fcoe_enable,
796 .ndo_fcoe_disable = vlan_dev_fcoe_disable, 798 .ndo_fcoe_disable = vlan_dev_fcoe_disable,
797 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
798 .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target, 799 .ndo_fcoe_ddp_target = vlan_dev_fcoe_ddp_target,
799#endif 800#endif
801#ifdef NETDEV_FCOE_WWNN
802 .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
803#endif
800#ifdef CONFIG_NET_POLL_CONTROLLER 804#ifdef CONFIG_NET_POLL_CONTROLLER
801 .ndo_poll_controller = vlan_dev_poll_controller, 805 .ndo_poll_controller = vlan_dev_poll_controller,
802 .ndo_netpoll_setup = vlan_dev_netpoll_setup, 806 .ndo_netpoll_setup = vlan_dev_netpoll_setup,
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index a9b7919c9de5..d5df0114f08a 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -104,8 +104,10 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
104 104
105 ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo); 105 ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
106 106
107 /* free the TID stats immediately */ 107 if (!ret) {
108 cfg80211_sinfo_release_content(&sinfo); 108 /* free the TID stats immediately */
109 cfg80211_sinfo_release_content(&sinfo);
110 }
109 111
110 dev_put(real_netdev); 112 dev_put(real_netdev);
111 if (ret == -ENOENT) { 113 if (ret == -ENOENT) {
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index ef39aabdb694..4fb01108e5f5 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -803,6 +803,8 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
803 const u8 *mac, const unsigned short vid) 803 const u8 *mac, const unsigned short vid)
804{ 804{
805 struct batadv_bla_claim search_claim, *claim; 805 struct batadv_bla_claim search_claim, *claim;
806 struct batadv_bla_claim *claim_removed_entry;
807 struct hlist_node *claim_removed_node;
806 808
807 ether_addr_copy(search_claim.addr, mac); 809 ether_addr_copy(search_claim.addr, mac);
808 search_claim.vid = vid; 810 search_claim.vid = vid;
@@ -813,10 +815,18 @@ static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
813 batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__, 815 batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
814 mac, batadv_print_vid(vid)); 816 mac, batadv_print_vid(vid));
815 817
816 batadv_hash_remove(bat_priv->bla.claim_hash, batadv_compare_claim, 818 claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
817 batadv_choose_claim, claim); 819 batadv_compare_claim,
818 batadv_claim_put(claim); /* reference from the hash is gone */ 820 batadv_choose_claim, claim);
821 if (!claim_removed_node)
822 goto free_claim;
819 823
824 /* reference from the hash is gone */
825 claim_removed_entry = hlist_entry(claim_removed_node,
826 struct batadv_bla_claim, hash_entry);
827 batadv_claim_put(claim_removed_entry);
828
829free_claim:
820 /* don't need the reference from hash_find() anymore */ 830 /* don't need the reference from hash_find() anymore */
821 batadv_claim_put(claim); 831 batadv_claim_put(claim);
822} 832}
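
The claim and translation-table fixes drop the hash reference on the entry that batadv_hash_remove() actually unlinked (recovered from the returned hlist_node) rather than on the entry found by the earlier lookup; if someone else already removed it, no extra reference is dropped. A toy refcounted sketch of the same rule, single-threaded and with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	char key[16];
	int refcount;
	struct entry *next;	/* singly linked "hash bucket" */
};

static struct entry *bucket;

static void entry_put(struct entry *e)
{
	if (--e->refcount == 0) {
		printf("freeing %s\n", e->key);
		free(e);
	}
}

/* Unlink by key and return the removed entry, or NULL if absent. */
static struct entry *hash_remove(const char *key)
{
	struct entry **pp = &bucket;

	for (; *pp; pp = &(*pp)->next) {
		if (strcmp((*pp)->key, key) == 0) {
			struct entry *e = *pp;
			*pp = e->next;
			return e;
		}
	}
	return NULL;
}

int main(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	strcpy(e->key, "aa:bb:cc");
	e->refcount = 2;	/* one for the hash, one for our lookup */
	bucket = e;

	/* Only drop the hash reference if we really removed something. */
	struct entry *removed = hash_remove("aa:bb:cc");
	if (removed)
		entry_put(removed);	/* reference held by the hash */

	entry_put(e);			/* reference from our lookup */
	return 0;
}
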
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 0b4b3fb778a6..208655cf6717 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1116,9 +1116,9 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
1116 struct attribute *attr, 1116 struct attribute *attr,
1117 char *buff, size_t count) 1117 char *buff, size_t count)
1118{ 1118{
1119 struct batadv_priv *bat_priv = batadv_kobj_to_batpriv(kobj);
1120 struct net_device *net_dev = batadv_kobj_to_netdev(kobj); 1119 struct net_device *net_dev = batadv_kobj_to_netdev(kobj);
1121 struct batadv_hard_iface *hard_iface; 1120 struct batadv_hard_iface *hard_iface;
1121 struct batadv_priv *bat_priv;
1122 u32 tp_override; 1122 u32 tp_override;
1123 u32 old_tp_override; 1123 u32 old_tp_override;
1124 bool ret; 1124 bool ret;
@@ -1147,7 +1147,10 @@ static ssize_t batadv_store_throughput_override(struct kobject *kobj,
1147 1147
1148 atomic_set(&hard_iface->bat_v.throughput_override, tp_override); 1148 atomic_set(&hard_iface->bat_v.throughput_override, tp_override);
1149 1149
1150 batadv_netlink_notify_hardif(bat_priv, hard_iface); 1150 if (hard_iface->soft_iface) {
1151 bat_priv = netdev_priv(hard_iface->soft_iface);
1152 batadv_netlink_notify_hardif(bat_priv, hard_iface);
1153 }
1151 1154
1152out: 1155out:
1153 batadv_hardif_put(hard_iface); 1156 batadv_hardif_put(hard_iface);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index f73d79139ae7..26c4e2493ddf 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -616,14 +616,26 @@ static void batadv_tt_global_free(struct batadv_priv *bat_priv,
616 struct batadv_tt_global_entry *tt_global, 616 struct batadv_tt_global_entry *tt_global,
617 const char *message) 617 const char *message)
618{ 618{
619 struct batadv_tt_global_entry *tt_removed_entry;
620 struct hlist_node *tt_removed_node;
621
619 batadv_dbg(BATADV_DBG_TT, bat_priv, 622 batadv_dbg(BATADV_DBG_TT, bat_priv,
620 "Deleting global tt entry %pM (vid: %d): %s\n", 623 "Deleting global tt entry %pM (vid: %d): %s\n",
621 tt_global->common.addr, 624 tt_global->common.addr,
622 batadv_print_vid(tt_global->common.vid), message); 625 batadv_print_vid(tt_global->common.vid), message);
623 626
624 batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt, 627 tt_removed_node = batadv_hash_remove(bat_priv->tt.global_hash,
625 batadv_choose_tt, &tt_global->common); 628 batadv_compare_tt,
626 batadv_tt_global_entry_put(tt_global); 629 batadv_choose_tt,
630 &tt_global->common);
631 if (!tt_removed_node)
632 return;
633
634 /* drop reference of remove hash entry */
635 tt_removed_entry = hlist_entry(tt_removed_node,
636 struct batadv_tt_global_entry,
637 common.hash_entry);
638 batadv_tt_global_entry_put(tt_removed_entry);
627} 639}
628 640
629/** 641/**
@@ -1337,9 +1349,10 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
1337 unsigned short vid, const char *message, 1349 unsigned short vid, const char *message,
1338 bool roaming) 1350 bool roaming)
1339{ 1351{
1352 struct batadv_tt_local_entry *tt_removed_entry;
1340 struct batadv_tt_local_entry *tt_local_entry; 1353 struct batadv_tt_local_entry *tt_local_entry;
1341 u16 flags, curr_flags = BATADV_NO_FLAGS; 1354 u16 flags, curr_flags = BATADV_NO_FLAGS;
1342 void *tt_entry_exists; 1355 struct hlist_node *tt_removed_node;
1343 1356
1344 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1357 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
1345 if (!tt_local_entry) 1358 if (!tt_local_entry)
@@ -1368,15 +1381,18 @@ u16 batadv_tt_local_remove(struct batadv_priv *bat_priv, const u8 *addr,
1368 */ 1381 */
1369 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); 1382 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
1370 1383
1371 tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash, 1384 tt_removed_node = batadv_hash_remove(bat_priv->tt.local_hash,
1372 batadv_compare_tt, 1385 batadv_compare_tt,
1373 batadv_choose_tt, 1386 batadv_choose_tt,
1374 &tt_local_entry->common); 1387 &tt_local_entry->common);
1375 if (!tt_entry_exists) 1388 if (!tt_removed_node)
1376 goto out; 1389 goto out;
1377 1390
1378 /* extra call to free the local tt entry */ 1391 /* drop reference of remove hash entry */
1379 batadv_tt_local_entry_put(tt_local_entry); 1392 tt_removed_entry = hlist_entry(tt_removed_node,
1393 struct batadv_tt_local_entry,
1394 common.hash_entry);
1395 batadv_tt_local_entry_put(tt_removed_entry);
1380 1396
1381out: 1397out:
1382 if (tt_local_entry) 1398 if (tt_local_entry)
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a0e369179f6d..02da21d771c9 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -601,6 +601,7 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
601 if (ipv4_is_local_multicast(group)) 601 if (ipv4_is_local_multicast(group))
602 return 0; 602 return 0;
603 603
604 memset(&br_group, 0, sizeof(br_group));
604 br_group.u.ip4 = group; 605 br_group.u.ip4 = group;
605 br_group.proto = htons(ETH_P_IP); 606 br_group.proto = htons(ETH_P_IP);
606 br_group.vid = vid; 607 br_group.vid = vid;
@@ -1497,6 +1498,7 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
1497 1498
1498 own_query = port ? &port->ip4_own_query : &br->ip4_own_query; 1499 own_query = port ? &port->ip4_own_query : &br->ip4_own_query;
1499 1500
1501 memset(&br_group, 0, sizeof(br_group));
1500 br_group.u.ip4 = group; 1502 br_group.u.ip4 = group;
1501 br_group.proto = htons(ETH_P_IP); 1503 br_group.proto = htons(ETH_P_IP);
1502 br_group.vid = vid; 1504 br_group.vid = vid;
@@ -1520,6 +1522,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1520 1522
1521 own_query = port ? &port->ip6_own_query : &br->ip6_own_query; 1523 own_query = port ? &port->ip6_own_query : &br->ip6_own_query;
1522 1524
1525 memset(&br_group, 0, sizeof(br_group));
1523 br_group.u.ip6 = *group; 1526 br_group.u.ip6 = *group;
1524 br_group.proto = htons(ETH_P_IPV6); 1527 br_group.proto = htons(ETH_P_IPV6);
1525 br_group.vid = vid; 1528 br_group.vid = vid;
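
The bridge fix zero-fills struct br_ip before setting individual members; the structure is later hashed and compared as raw bytes, so uninitialized padding or unused union bytes would make otherwise identical groups look different. A tiny demonstration of why the memset matters for such key structures; the layout here is invented for the example:

#include <stdio.h>
#include <string.h>

struct key {
	union {
		unsigned int   ip4;
		unsigned char  ip6[16];
	} u;
	unsigned short proto;
	unsigned short vid;
};

/* Fill the same logical key twice: once over dirty stack bytes, once
 * after memset(). Only the second pair compares equal bytewise. */
static void fill(struct key *k, int zero_first)
{
	if (zero_first)
		memset(k, 0, sizeof(*k));
	k->u.ip4 = 0xe0000001;	/* 224.0.0.1 */
	k->proto = 0x0800;
	k->vid = 1;
}

int main(void)
{
	struct key a, b;

	memset(&a, 0xaa, sizeof(a));	/* simulate dirty stack garbage */
	memset(&b, 0x55, sizeof(b));
	fill(&a, 0);
	fill(&b, 0);
	printf("without memset: %s\n",
	       memcmp(&a, &b, sizeof(a)) ? "keys differ" : "keys equal");

	fill(&a, 1);
	fill(&b, 1);
	printf("with memset:    %s\n",
	       memcmp(&a, &b, sizeof(a)) ? "keys differ" : "keys equal");
	return 0;
}
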
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b2651bb6d2a3..e657289db4ac 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -279,7 +279,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
279 break; 279 break;
280 280
281 sk_busy_loop(sk, flags & MSG_DONTWAIT); 281 sk_busy_loop(sk, flags & MSG_DONTWAIT);
282 } while (!skb_queue_empty(&sk->sk_receive_queue)); 282 } while (sk->sk_receive_queue.prev != *last);
283 283
284 error = -EAGAIN; 284 error = -EAGAIN;
285 285
diff --git a/net/core/dev.c b/net/core/dev.c
index 2b67f2aa59dd..fdcff29df915 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5014,8 +5014,10 @@ static inline void __netif_receive_skb_list_ptype(struct list_head *head,
5014 if (pt_prev->list_func != NULL) 5014 if (pt_prev->list_func != NULL)
5015 pt_prev->list_func(head, pt_prev, orig_dev); 5015 pt_prev->list_func(head, pt_prev, orig_dev);
5016 else 5016 else
5017 list_for_each_entry_safe(skb, next, head, list) 5017 list_for_each_entry_safe(skb, next, head, list) {
5018 skb_list_del_init(skb);
5018 pt_prev->func(skb, skb->dev, pt_prev, orig_dev); 5019 pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
5020 }
5019} 5021}
5020 5022
5021static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc) 5023static void __netif_receive_skb_list_core(struct list_head *head, bool pfmemalloc)
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index b1eb32419732..36ed619faf36 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1797,11 +1797,16 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
1797 WARN_ON_ONCE(!ret); 1797 WARN_ON_ONCE(!ret);
1798 1798
1799 gstrings.len = ret; 1799 gstrings.len = ret;
1800 data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
1801 if (gstrings.len && !data)
1802 return -ENOMEM;
1803 1800
1804 __ethtool_get_strings(dev, gstrings.string_set, data); 1801 if (gstrings.len) {
1802 data = vzalloc(array_size(gstrings.len, ETH_GSTRING_LEN));
1803 if (!data)
1804 return -ENOMEM;
1805
1806 __ethtool_get_strings(dev, gstrings.string_set, data);
1807 } else {
1808 data = NULL;
1809 }
1805 1810
1806 ret = -EFAULT; 1811 ret = -EFAULT;
1807 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) 1812 if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
@@ -1897,11 +1902,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
1897 return -EFAULT; 1902 return -EFAULT;
1898 1903
1899 stats.n_stats = n_stats; 1904 stats.n_stats = n_stats;
1900 data = vzalloc(array_size(n_stats, sizeof(u64)));
1901 if (n_stats && !data)
1902 return -ENOMEM;
1903 1905
1904 ops->get_ethtool_stats(dev, &stats, data); 1906 if (n_stats) {
1907 data = vzalloc(array_size(n_stats, sizeof(u64)));
1908 if (!data)
1909 return -ENOMEM;
1910 ops->get_ethtool_stats(dev, &stats, data);
1911 } else {
1912 data = NULL;
1913 }
1905 1914
1906 ret = -EFAULT; 1915 ret = -EFAULT;
1907 if (copy_to_user(useraddr, &stats, sizeof(stats))) 1916 if (copy_to_user(useraddr, &stats, sizeof(stats)))
@@ -1941,16 +1950,21 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
1941 return -EFAULT; 1950 return -EFAULT;
1942 1951
1943 stats.n_stats = n_stats; 1952 stats.n_stats = n_stats;
1944 data = vzalloc(array_size(n_stats, sizeof(u64)));
1945 if (n_stats && !data)
1946 return -ENOMEM;
1947 1953
1948 if (dev->phydev && !ops->get_ethtool_phy_stats) { 1954 if (n_stats) {
1949 ret = phy_ethtool_get_stats(dev->phydev, &stats, data); 1955 data = vzalloc(array_size(n_stats, sizeof(u64)));
1950 if (ret < 0) 1956 if (!data)
1951 return ret; 1957 return -ENOMEM;
1958
1959 if (dev->phydev && !ops->get_ethtool_phy_stats) {
1960 ret = phy_ethtool_get_stats(dev->phydev, &stats, data);
1961 if (ret < 0)
1962 goto out;
1963 } else {
1964 ops->get_ethtool_phy_stats(dev, &stats, data);
1965 }
1952 } else { 1966 } else {
1953 ops->get_ethtool_phy_stats(dev, &stats, data); 1967 data = NULL;
1954 } 1968 }
1955 1969
1956 ret = -EFAULT; 1970 ret = -EFAULT;
diff --git a/net/core/filter.c b/net/core/filter.c
index 647c63a7b25b..fc92ebc4e200 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -6613,14 +6613,8 @@ static bool flow_dissector_is_valid_access(int off, int size,
6613 const struct bpf_prog *prog, 6613 const struct bpf_prog *prog,
6614 struct bpf_insn_access_aux *info) 6614 struct bpf_insn_access_aux *info)
6615{ 6615{
6616 if (type == BPF_WRITE) { 6616 if (type == BPF_WRITE)
6617 switch (off) { 6617 return false;
6618 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6619 break;
6620 default:
6621 return false;
6622 }
6623 }
6624 6618
6625 switch (off) { 6619 switch (off) {
6626 case bpf_ctx_range(struct __sk_buff, data): 6620 case bpf_ctx_range(struct __sk_buff, data):
@@ -6632,11 +6626,7 @@ static bool flow_dissector_is_valid_access(int off, int size,
6632 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): 6626 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
6633 info->reg_type = PTR_TO_FLOW_KEYS; 6627 info->reg_type = PTR_TO_FLOW_KEYS;
6634 break; 6628 break;
6635 case bpf_ctx_range(struct __sk_buff, tc_classid): 6629 default:
6636 case bpf_ctx_range(struct __sk_buff, data_meta):
6637 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6638 case bpf_ctx_range(struct __sk_buff, tstamp):
6639 case bpf_ctx_range(struct __sk_buff, wire_len):
6640 return false; 6630 return false;
6641 } 6631 }
6642 6632
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index bb1a54747d64..94a450b2191a 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -707,6 +707,7 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
707 /* Pass parameters to the BPF program */ 707 /* Pass parameters to the BPF program */
708 memset(flow_keys, 0, sizeof(*flow_keys)); 708 memset(flow_keys, 0, sizeof(*flow_keys));
709 cb->qdisc_cb.flow_keys = flow_keys; 709 cb->qdisc_cb.flow_keys = flow_keys;
710 flow_keys->n_proto = skb->protocol;
710 flow_keys->nhoff = skb_network_offset(skb); 711 flow_keys->nhoff = skb_network_offset(skb);
711 flow_keys->thoff = flow_keys->nhoff; 712 flow_keys->thoff = flow_keys->nhoff;
712 713
@@ -716,7 +717,8 @@ bool __skb_flow_bpf_dissect(struct bpf_prog *prog,
716 /* Restore state */ 717 /* Restore state */
717 memcpy(cb, &cb_saved, sizeof(cb_saved)); 718 memcpy(cb, &cb_saved, sizeof(cb_saved));
718 719
719 flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff, 0, skb->len); 720 flow_keys->nhoff = clamp_t(u16, flow_keys->nhoff,
721 skb_network_offset(skb), skb->len);
720 flow_keys->thoff = clamp_t(u16, flow_keys->thoff, 722 flow_keys->thoff = clamp_t(u16, flow_keys->thoff,
721 flow_keys->nhoff, skb->len); 723 flow_keys->nhoff, skb->len);
722 724
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 17f36317363d..7e6dcc625701 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -304,6 +304,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
304 304
305 refcount_set(&net->count, 1); 305 refcount_set(&net->count, 1);
306 refcount_set(&net->passive, 1); 306 refcount_set(&net->passive, 1);
307 get_random_bytes(&net->hash_mix, sizeof(u32));
307 net->dev_base_seq = 1; 308 net->dev_base_seq = 1;
308 net->user_ns = user_ns; 309 net->user_ns = user_ns;
309 idr_init(&net->netns_ids); 310 idr_init(&net->netns_ids);
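
Together with the netns/hash.h change earlier in this diff, every network namespace now carries a random hash_mix that is initialized in setup_net(), so hash bucket choices derived from it are no longer predictable from kernel pointer values. A userspace sketch of mixing a per-namespace random salt into a lookup hash; the mixer and names are illustrative, not the kernel's jhash-based code:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct toy_net {
	unsigned int hash_mix;	/* random per-namespace salt */
};

static unsigned int net_hash_mix(const struct toy_net *net)
{
	return net->hash_mix;
}

/* Toy 32-bit mixer standing in for jhash(); not the kernel's function. */
static unsigned int mix(unsigned int x)
{
	x ^= x >> 16;
	x *= 0x7feb352dU;
	x ^= x >> 15;
	x *= 0x846ca68bU;
	x ^= x >> 16;
	return x;
}

static unsigned int bucket_for(const struct toy_net *net,
			       unsigned int key, unsigned int nbuckets)
{
	return mix(key ^ net_hash_mix(net)) % nbuckets;
}

int main(void)
{
	struct toy_net a, b;

	srand((unsigned int)time(NULL));
	a.hash_mix = (unsigned int)rand();	/* get_random_bytes() stand-in */
	b.hash_mix = (unsigned int)rand();

	/* The same key lands in different buckets per namespace. */
	printf("ns A: %u, ns B: %u\n",
	       bucket_for(&a, 0xc0a80001, 256), bucket_for(&b, 0xc0a80001, 256));
	return 0;
}
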
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2415d9cb9b89..ef2cd5712098 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3801,7 +3801,7 @@ int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
3801 unsigned int delta_truesize; 3801 unsigned int delta_truesize;
3802 struct sk_buff *lp; 3802 struct sk_buff *lp;
3803 3803
3804 if (unlikely(p->len + len >= 65536)) 3804 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
3805 return -E2BIG; 3805 return -E2BIG;
3806 3806
3807 lp = NAPI_GRO_CB(p)->last; 3807 lp = NAPI_GRO_CB(p)->last;
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index f227f002c73d..db87d9f58019 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -738,7 +738,12 @@ static int __feat_register_sp(struct list_head *fn, u8 feat, u8 is_local,
738 if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len)) 738 if (dccp_feat_clone_sp_val(&fval, sp_val, sp_len))
739 return -ENOMEM; 739 return -ENOMEM;
740 740
741 return dccp_feat_push_change(fn, feat, is_local, mandatory, &fval); 741 if (dccp_feat_push_change(fn, feat, is_local, mandatory, &fval)) {
742 kfree(fval.sp.vec);
743 return -ENOMEM;
744 }
745
746 return 0;
742} 747}
743 748
744/** 749/**
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index ed4f6dc26365..85c22ada4744 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -98,8 +98,18 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
98 return skb; 98 return skb;
99} 99}
100 100
101static int qca_tag_flow_dissect(const struct sk_buff *skb, __be16 *proto,
102 int *offset)
103{
104 *offset = QCA_HDR_LEN;
105 *proto = ((__be16 *)skb->data)[0];
106
107 return 0;
108}
109
101const struct dsa_device_ops qca_netdev_ops = { 110const struct dsa_device_ops qca_netdev_ops = {
102 .xmit = qca_tag_xmit, 111 .xmit = qca_tag_xmit,
103 .rcv = qca_tag_rcv, 112 .rcv = qca_tag_rcv,
113 .flow_dissect = qca_tag_flow_dissect,
104 .overhead = QCA_HDR_LEN, 114 .overhead = QCA_HDR_LEN,
105}; 115};
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index ecce2dc78f17..1132d6d1796a 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -257,11 +257,10 @@ int ip_local_deliver(struct sk_buff *skb)
257 ip_local_deliver_finish); 257 ip_local_deliver_finish);
258} 258}
259 259
260static inline bool ip_rcv_options(struct sk_buff *skb) 260static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
261{ 261{
262 struct ip_options *opt; 262 struct ip_options *opt;
263 const struct iphdr *iph; 263 const struct iphdr *iph;
264 struct net_device *dev = skb->dev;
265 264
266 /* It looks as overkill, because not all 265 /* It looks as overkill, because not all
267 IP options require packet mangling. 266 IP options require packet mangling.
@@ -297,7 +296,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
297 } 296 }
298 } 297 }
299 298
300 if (ip_options_rcv_srr(skb)) 299 if (ip_options_rcv_srr(skb, dev))
301 goto drop; 300 goto drop;
302 } 301 }
303 302
@@ -353,7 +352,7 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
353 } 352 }
354#endif 353#endif
355 354
356 if (iph->ihl > 5 && ip_rcv_options(skb)) 355 if (iph->ihl > 5 && ip_rcv_options(skb, dev))
357 goto drop; 356 goto drop;
358 357
359 rt = skb_rtable(skb); 358 rt = skb_rtable(skb);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 32a35043c9f5..3db31bb9df50 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -612,7 +612,7 @@ void ip_forward_options(struct sk_buff *skb)
612 } 612 }
613} 613}
614 614
615int ip_options_rcv_srr(struct sk_buff *skb) 615int ip_options_rcv_srr(struct sk_buff *skb, struct net_device *dev)
616{ 616{
617 struct ip_options *opt = &(IPCB(skb)->opt); 617 struct ip_options *opt = &(IPCB(skb)->opt);
618 int srrspace, srrptr; 618 int srrspace, srrptr;
@@ -647,7 +647,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
647 647
648 orefdst = skb->_skb_refdst; 648 orefdst = skb->_skb_refdst;
649 skb_dst_set(skb, NULL); 649 skb_dst_set(skb, NULL);
650 err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); 650 err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, dev);
651 rt2 = skb_rtable(skb); 651 rt2 = skb_rtable(skb);
652 if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { 652 if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) {
653 skb_dst_drop(skb); 653 skb_dst_drop(skb);
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index cd4814f7e962..359da68d7c06 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -67,11 +67,6 @@ static unsigned int dctcp_alpha_on_init __read_mostly = DCTCP_MAX_ALPHA;
67module_param(dctcp_alpha_on_init, uint, 0644); 67module_param(dctcp_alpha_on_init, uint, 0644);
68MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value"); 68MODULE_PARM_DESC(dctcp_alpha_on_init, "parameter for initial alpha value");
69 69
70static unsigned int dctcp_clamp_alpha_on_loss __read_mostly;
71module_param(dctcp_clamp_alpha_on_loss, uint, 0644);
72MODULE_PARM_DESC(dctcp_clamp_alpha_on_loss,
73 "parameter for clamping alpha on loss");
74
75static struct tcp_congestion_ops dctcp_reno; 70static struct tcp_congestion_ops dctcp_reno;
76 71
77static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca) 72static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
@@ -164,21 +159,23 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
164 } 159 }
165} 160}
166 161
167static void dctcp_state(struct sock *sk, u8 new_state) 162static void dctcp_react_to_loss(struct sock *sk)
168{ 163{
169 if (dctcp_clamp_alpha_on_loss && new_state == TCP_CA_Loss) { 164 struct dctcp *ca = inet_csk_ca(sk);
170 struct dctcp *ca = inet_csk_ca(sk); 165 struct tcp_sock *tp = tcp_sk(sk);
171 166
172 /* If this extension is enabled, we clamp dctcp_alpha to 167 ca->loss_cwnd = tp->snd_cwnd;
173 * max on packet loss; the motivation is that dctcp_alpha 168 tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
174 * is an indicator to the extend of congestion and packet 169}
175 * loss is an indicator of extreme congestion; setting 170
176 * this in practice turned out to be beneficial, and 171static void dctcp_state(struct sock *sk, u8 new_state)
177 * effectively assumes total congestion which reduces the 172{
178 * window by half. 173 if (new_state == TCP_CA_Recovery &&
179 */ 174 new_state != inet_csk(sk)->icsk_ca_state)
180 ca->dctcp_alpha = DCTCP_MAX_ALPHA; 175 dctcp_react_to_loss(sk);
181 } 176 /* We handle RTO in dctcp_cwnd_event to ensure that we perform only
177 * one loss-adjustment per RTT.
178 */
182} 179}
183 180
184static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev) 181static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
@@ -190,6 +187,9 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
190 case CA_EVENT_ECN_NO_CE: 187 case CA_EVENT_ECN_NO_CE:
191 dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state); 188 dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
192 break; 189 break;
190 case CA_EVENT_LOSS:
191 dctcp_react_to_loss(sk);
192 break;
193 default: 193 default:
194 /* Don't care for the rest. */ 194 /* Don't care for the rest. */
195 break; 195 break;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 277d71239d75..2f8039a26b08 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2578,7 +2578,8 @@ static void __net_exit tcp_sk_exit(struct net *net)
2578{ 2578{
2579 int cpu; 2579 int cpu;
2580 2580
2581 module_put(net->ipv4.tcp_congestion_control->owner); 2581 if (net->ipv4.tcp_congestion_control)
2582 module_put(net->ipv4.tcp_congestion_control->owner);
2582 2583
2583 for_each_possible_cpu(cpu) 2584 for_each_possible_cpu(cpu)
2584 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu)); 2585 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
diff --git a/net/ipv6/ila/ila_xlat.c b/net/ipv6/ila/ila_xlat.c
index 79d2e43c05c5..5fc1f4e0c0cf 100644
--- a/net/ipv6/ila/ila_xlat.c
+++ b/net/ipv6/ila/ila_xlat.c
@@ -417,6 +417,7 @@ int ila_xlat_nl_cmd_flush(struct sk_buff *skb, struct genl_info *info)
417 417
418done: 418done:
419 rhashtable_walk_stop(&iter); 419 rhashtable_walk_stop(&iter);
420 rhashtable_walk_exit(&iter);
420 return ret; 421 return ret;
421} 422}
422 423
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index edbd12067170..e51f3c648b09 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -601,7 +601,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
601 inet6_sk(skb->sk) : NULL; 601 inet6_sk(skb->sk) : NULL;
602 struct ipv6hdr *tmp_hdr; 602 struct ipv6hdr *tmp_hdr;
603 struct frag_hdr *fh; 603 struct frag_hdr *fh;
604 unsigned int mtu, hlen, left, len; 604 unsigned int mtu, hlen, left, len, nexthdr_offset;
605 int hroom, troom; 605 int hroom, troom;
606 __be32 frag_id; 606 __be32 frag_id;
607 int ptr, offset = 0, err = 0; 607 int ptr, offset = 0, err = 0;
@@ -612,6 +612,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
612 goto fail; 612 goto fail;
613 hlen = err; 613 hlen = err;
614 nexthdr = *prevhdr; 614 nexthdr = *prevhdr;
615 nexthdr_offset = prevhdr - skb_network_header(skb);
615 616
616 mtu = ip6_skb_dst_mtu(skb); 617 mtu = ip6_skb_dst_mtu(skb);
617 618
@@ -646,6 +647,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
646 (err = skb_checksum_help(skb))) 647 (err = skb_checksum_help(skb)))
647 goto fail; 648 goto fail;
648 649
650 prevhdr = skb_network_header(skb) + nexthdr_offset;
649 hroom = LL_RESERVED_SPACE(rt->dst.dev); 651 hroom = LL_RESERVED_SPACE(rt->dst.dev);
650 if (skb_has_frag_list(skb)) { 652 if (skb_has_frag_list(skb)) {
651 unsigned int first_len = skb_pagelen(skb); 653 unsigned int first_len = skb_pagelen(skb);
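
skb_checksum_help() (and, in the sit.c hunk below, iptunnel_pull_header()) may reallocate or unclone the packet data, so any raw pointer computed beforehand can go stale; the fix remembers prevhdr as an offset from the network header and recomputes the pointer afterwards. The same defensive pattern in plain C with realloc(), purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(64);

	strcpy(buf, "version=6;nexthdr=44;payload");

	/* Pointer into the buffer that we will need again later. */
	char *nexthdr = strstr(buf, "nexthdr");
	size_t nexthdr_offset = (size_t)(nexthdr - buf);	/* keep the offset */

	/* Something that may move the data, like skb_checksum_help()
	 * expanding or uncloning the skb. */
	buf = realloc(buf, 4096);

	/* Recompute the pointer from the saved offset; the old 'nexthdr'
	 * value must not be dereferenced after the possible move. */
	nexthdr = buf + nexthdr_offset;
	printf("still points at: %.7s\n", nexthdr);

	free(buf);
	return 0;
}
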
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0c6403cf8b52..ade1390c6348 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -627,7 +627,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
 					   eiph->daddr, eiph->saddr, 0, 0,
 					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
-		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL) {
+		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
 			if (!IS_ERR(rt))
 				ip_rt_put(rt);
 			goto out;
@@ -636,7 +636,7 @@ ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	} else {
 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
 				   skb2->dev) ||
-		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL)
+		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
 			goto out;
 	}
 
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 07e21a82ce4c..b2109b74857d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -669,6 +669,10 @@ static int ipip6_rcv(struct sk_buff *skb)
 			     !net_eq(tunnel->net, dev_net(tunnel->dev))))
 			goto out;
 
+		/* skb can be uncloned in iptunnel_pull_header, so
+		 * old iph is no longer valid
+		 */
+		iph = (const struct iphdr *)skb_mac_header(skb);
 		err = IP_ECN_decapsulate(iph, skb);
 		if (unlikely(err)) {
 			if (log_ecn_error)
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index c5c5ab6c5a1c..44fdc641710d 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -2054,14 +2054,14 @@ static int __init kcm_init(void)
 	if (err)
 		goto fail;
 
-	err = sock_register(&kcm_family_ops);
-	if (err)
-		goto sock_register_fail;
-
 	err = register_pernet_device(&kcm_net_ops);
 	if (err)
 		goto net_ops_fail;
 
+	err = sock_register(&kcm_family_ops);
+	if (err)
+		goto sock_register_fail;
+
 	err = kcm_proc_init();
 	if (err)
 		goto proc_init_fail;
@@ -2069,12 +2069,12 @@ static int __init kcm_init(void)
 	return 0;
 
 proc_init_fail:
-	unregister_pernet_device(&kcm_net_ops);
-
-net_ops_fail:
 	sock_unregister(PF_KCM);
 
 sock_register_fail:
+	unregister_pernet_device(&kcm_net_ops);
+
+net_ops_fail:
 	proto_unregister(&kcm_proto);
 
 fail:
@@ -2090,8 +2090,8 @@ fail:
 static void __exit kcm_exit(void)
 {
 	kcm_proc_exit();
-	unregister_pernet_device(&kcm_net_ops);
 	sock_unregister(PF_KCM);
+	unregister_pernet_device(&kcm_net_ops);
 	proto_unregister(&kcm_proto);
 	destroy_workqueue(kcm_wq);
 
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 691da853bef5..4bdf5e3ac208 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2306,14 +2306,14 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
 
 	struct sw_flow_actions *acts;
 	int new_acts_size;
-	int req_size = NLA_ALIGN(attr_len);
+	size_t req_size = NLA_ALIGN(attr_len);
 	int next_offset = offsetof(struct sw_flow_actions, actions) +
 					(*sfa)->actions_len;
 
 	if (req_size <= (ksize(*sfa) - next_offset))
 		goto out;
 
-	new_acts_size = ksize(*sfa) * 2;
+	new_acts_size = max(next_offset + req_size, ksize(*sfa) * 2);
 
 	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
 		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index fd2694174607..faf726e00e27 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -608,7 +608,7 @@ static void rds_tcp_kill_sock(struct net *net)
 	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
 		struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
 
-		if (net != c_net || !tc->t_sock)
+		if (net != c_net)
 			continue;
 		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn)) {
 			list_move_tail(&tc->t_tcp_node, &tmp_list);
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
index 4060b0955c97..0f82d50ea232 100644
--- a/net/sched/act_sample.c
+++ b/net/sched/act_sample.c
@@ -45,8 +45,8 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 	struct nlattr *tb[TCA_SAMPLE_MAX + 1];
 	struct psample_group *psample_group;
 	struct tcf_chain *goto_ch = NULL;
+	u32 psample_group_num, rate;
 	struct tc_sample *parm;
-	u32 psample_group_num;
 	struct tcf_sample *s;
 	bool exists = false;
 	int ret, err;
@@ -85,6 +85,12 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 	if (err < 0)
 		goto release_idr;
 
+	rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+	if (!rate) {
+		NL_SET_ERR_MSG(extack, "invalid sample rate");
+		err = -EINVAL;
+		goto put_chain;
+	}
 	psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
 	psample_group = psample_group_get(net, psample_group_num);
 	if (!psample_group) {
@@ -96,7 +102,7 @@ static int tcf_sample_init(struct net *net, struct nlattr *nla,
 
 	spin_lock_bh(&s->tcf_lock);
 	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
-	s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+	s->rate = rate;
 	s->psample_group_num = psample_group_num;
 	RCU_INIT_POINTER(s->psample_group, psample_group);
 
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 459921bd3d87..a13bc351a414 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -130,6 +130,11 @@ static void mall_destroy(struct tcf_proto *tp, bool rtnl_held,
 
 static void *mall_get(struct tcf_proto *tp, u32 handle)
 {
+	struct cls_mall_head *head = rtnl_dereference(tp->root);
+
+	if (head && head->handle == handle)
+		return head;
+
 	return NULL;
 }
 
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index acc9b9da985f..259d97bc2abd 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1517,16 +1517,27 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
 
 static u8 cake_handle_diffserv(struct sk_buff *skb, u16 wash)
 {
+	int wlen = skb_network_offset(skb);
 	u8 dscp;
 
-	switch (skb->protocol) {
+	switch (tc_skb_protocol(skb)) {
 	case htons(ETH_P_IP):
+		wlen += sizeof(struct iphdr);
+		if (!pskb_may_pull(skb, wlen) ||
+		    skb_try_make_writable(skb, wlen))
+			return 0;
+
 		dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
 		if (wash && dscp)
 			ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, 0);
 		return dscp;
 
 	case htons(ETH_P_IPV6):
+		wlen += sizeof(struct ipv6hdr);
+		if (!pskb_may_pull(skb, wlen) ||
+		    skb_try_make_writable(skb, wlen))
+			return 0;
+
 		dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
 		if (wash && dscp)
 			ipv6_change_dsfield(ipv6_hdr(skb), INET_ECN_MASK, 0);
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 4dc05409e3fb..114b9048ea7e 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;
+	__u32 qlen;
 
 	cl->xstats.avgidle = cl->avgidle;
 	cl->xstats.undertime = 0;
+	qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
 
 	if (cl->undertime != PSCHED_PASTPERFECT)
 		cl->xstats.undertime = cl->undertime - q->now;
@@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
+	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1665,17 +1667,13 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;
-	unsigned int qlen, backlog;
 
 	if (cl->filters || cl->children || cl == &q->link)
 		return -EBUSY;
 
 	sch_tree_lock(sch);
 
-	qlen = cl->q->q.qlen;
-	backlog = cl->q->qstats.backlog;
-	qdisc_reset(cl->q);
-	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+	qdisc_purge_queue(cl->q);
 
 	if (cl->next_alive)
 		cbq_deactivate_class(cl);
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index 09b800991065..430df9a55ec4 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -50,15 +50,6 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
 	return container_of(clc, struct drr_class, common);
 }
 
-static void drr_purge_queue(struct drr_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
 	[TCA_DRR_QUANTUM] = { .type = NLA_U32 },
 };
@@ -167,7 +158,7 @@ static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
 
 	sch_tree_lock(sch);
 
-	drr_purge_queue(cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
 
 	sch_tree_unlock(sch);
@@ -269,7 +260,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 			  struct gnet_dump *d)
 {
 	struct drr_class *cl = (struct drr_class *)arg;
-	__u32 qlen = cl->qdisc->q.qlen;
+	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
+	struct Qdisc *cl_q = cl->qdisc;
 	struct tc_drr_stats xstats;
 
 	memset(&xstats, 0, sizeof(xstats));
@@ -279,7 +271,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
+	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 24cc220a3218..d2ab463f22ae 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -845,16 +845,6 @@ qdisc_peek_len(struct Qdisc *sch)
 }
 
 static void
-hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
-static void
 hfsc_adjust_levels(struct hfsc_class *cl)
 {
 	struct hfsc_class *p;
@@ -1076,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
 	list_add_tail(&cl->siblings, &parent->children);
 	if (parent->level == 0)
-		hfsc_purge_queue(sch, parent);
+		qdisc_purge_queue(parent->qdisc);
 	hfsc_adjust_levels(parent);
 	sch_tree_unlock(sch);
 
@@ -1112,7 +1102,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
 	list_del(&cl->siblings);
 	hfsc_adjust_levels(cl->cl_parent);
 
-	hfsc_purge_queue(sch, cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
 
 	sch_tree_unlock(sch);
@@ -1328,8 +1318,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
 	struct tc_hfsc_stats xstats;
+	__u32 qlen;
 
-	cl->qstats.backlog = cl->qdisc->qstats.backlog;
+	qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
 	xstats.level  = cl->level;
 	xstats.period = cl->cl_vtperiod;
 	xstats.work   = cl->cl_total;
@@ -1337,7 +1328,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
+	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 30f9da7e1076..2f9883b196e8 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 	};
 	__u32 qlen = 0;
 
-	if (!cl->level && cl->leaf.q) {
-		qlen = cl->leaf.q->q.qlen;
-		qs.backlog = cl->leaf.q->qstats.backlog;
-	}
+	if (!cl->level && cl->leaf.q)
+		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
+
 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
 				    INT_MIN, INT_MAX);
 	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
@@ -1270,13 +1269,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 	sch_tree_lock(sch);
 
-	if (!cl->level) {
-		unsigned int qlen = cl->leaf.q->q.qlen;
-		unsigned int backlog = cl->leaf.q->qstats.backlog;
-
-		qdisc_reset(cl->leaf.q);
-		qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
-	}
+	if (!cl->level)
+		qdisc_purge_queue(cl->leaf.q);
 
 	/* delete from hash and active; remainder in destroy_class */
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
@@ -1404,12 +1398,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 					  classid, NULL);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
-			unsigned int qlen = parent->leaf.q->q.qlen;
-			unsigned int backlog = parent->leaf.q->qstats.backlog;
-
 			/* turn parent into inner node */
-			qdisc_reset(parent->leaf.q);
-			qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+			qdisc_purge_queue(parent->leaf.q);
 			qdisc_put(parent->leaf.q);
 			if (parent->prio_activity)
 				htb_deactivate(q, parent);
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 203659bc3906..3a3312467692 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
 	sch = dev_queue->qdisc_sleeping;
 	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+	    qdisc_qstats_copy(d, sch) < 0)
 		return -1;
 	return 0;
 }
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index d364e63c396d..ea0dc112b38d 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		sch = dev_queue->qdisc_sleeping;
 		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 					  d, NULL, &sch->bstats) < 0 ||
-		    gnet_stats_copy_queue(d, NULL,
-					  &sch->qstats, sch->q.qlen) < 0)
+		    qdisc_qstats_copy(d, sch) < 0)
 			return -1;
 	}
 	return 0;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 7410ce4d0321..35b03ae08e0f 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -201,9 +201,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
 	for (i = q->bands; i < q->max_bands; i++) {
 		if (q->queues[i] != &noop_qdisc) {
 			struct Qdisc *child = q->queues[i];
+
 			q->queues[i] = &noop_qdisc;
-			qdisc_tree_reduce_backlog(child, child->q.qlen,
-						  child->qstats.backlog);
+			qdisc_tree_flush_backlog(child);
 			qdisc_put(child);
 		}
 	}
@@ -225,9 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
 			qdisc_hash_add(child, true);
 
 		if (old != &noop_qdisc) {
-			qdisc_tree_reduce_backlog(old,
-						  old->q.qlen,
-						  old->qstats.backlog);
+			qdisc_tree_flush_backlog(old);
 			qdisc_put(old);
 		}
 	sch_tree_unlock(sch);
@@ -344,7 +342,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	cl_q = q->queues[cl - 1];
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl_q->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+	    qdisc_qstats_copy(d, cl_q) < 0)
 		return -1;
 
 	return 0;
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 847141cd900f..d519b21535b3 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -216,12 +216,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-	for (i = q->bands; i < oldbands; i++) {
-		struct Qdisc *child = q->queues[i];
-
-		qdisc_tree_reduce_backlog(child, child->q.qlen,
-					  child->qstats.backlog);
-	}
+	for (i = q->bands; i < oldbands; i++)
+		qdisc_tree_flush_backlog(q->queues[i]);
 
 	for (i = oldbands; i < q->bands; i++) {
 		q->queues[i] = queues[i];
@@ -365,7 +361,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	cl_q = q->queues[cl - 1];
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl_q->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+	    qdisc_qstats_copy(d, cl_q) < 0)
 		return -1;
 
 	return 0;
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index 29f5c4a24688..1589364b54da 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -217,15 +217,6 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
 	return container_of(clc, struct qfq_class, common);
 }
 
-static void qfq_purge_queue(struct qfq_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
 	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
 	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
@@ -551,7 +542,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
 
 	sch_tree_lock(sch);
 
-	qfq_purge_queue(cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
 
 	sch_tree_unlock(sch);
@@ -655,8 +646,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL,
-				  &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+	    qdisc_qstats_copy(d, cl->qdisc) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 9df9942340ea..4e8c0abf6194 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -233,8 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
 	q->flags = ctl->flags;
 	q->limit = ctl->limit;
 	if (child) {
-		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-					  q->qdisc->qstats.backlog);
+		qdisc_tree_flush_backlog(q->qdisc);
 		old_child = q->qdisc;
 		q->qdisc = child;
 	}
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index bab506b01a32..2419fdb75966 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -521,8 +521,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
 		qdisc_hash_add(child, true);
 	sch_tree_lock(sch);
 
-	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-				  q->qdisc->qstats.backlog);
+	qdisc_tree_flush_backlog(q->qdisc);
 	qdisc_put(q->qdisc);
 	q->qdisc = child;
 
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 206e4dbed12f..c7041999eb5d 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -895,7 +895,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 
 	sch = dev_queue->qdisc_sleeping;
 	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+	    qdisc_qstats_copy(d, sch) < 0)
 		return -1;
 	return 0;
 }
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 7f272a9070c5..f71578dbb9e3 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -391,8 +391,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
 
 	sch_tree_lock(sch);
 	if (child) {
-		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-					  q->qdisc->qstats.backlog);
+		qdisc_tree_flush_backlog(q->qdisc);
 		qdisc_put(q->qdisc);
 		q->qdisc = child;
 	}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 6abc8b274270..951afdeea5e9 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -600,6 +600,7 @@ out:
 static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
 {
 	/* No address mapping for V4 sockets */
+	memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
 	return sizeof(struct sockaddr_in);
 }
 
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index 4ad3586da8f0..340a6e7c43a7 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -267,8 +267,14 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
 	if (msg->rep_type)
 		tipc_tlv_init(msg->rep, msg->rep_type);
 
-	if (cmd->header)
-		(*cmd->header)(msg);
+	if (cmd->header) {
+		err = (*cmd->header)(msg);
+		if (err) {
+			kfree_skb(msg->rep);
+			msg->rep = NULL;
+			return err;
+		}
+	}
 
 	arg = nlmsg_new(0, GFP_KERNEL);
 	if (!arg) {
@@ -397,7 +403,12 @@ static int tipc_nl_compat_bearer_enable(struct tipc_nl_compat_cmd_doit *cmd,
 	if (!bearer)
 		return -EMSGSIZE;
 
-	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_BEARER_NAME);
+	len = TLV_GET_DATA_LEN(msg->req);
+	len -= offsetof(struct tipc_bearer_config, name);
+	if (len <= 0)
+		return -EINVAL;
+
+	len = min_t(int, len, TIPC_MAX_BEARER_NAME);
 	if (!string_is_valid(b->name, len))
 		return -EINVAL;
 
@@ -766,7 +777,12 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
 
 	lc = (struct tipc_link_config *)TLV_DATA(msg->req);
 
-	len = min_t(int, TLV_GET_DATA_LEN(msg->req), TIPC_MAX_LINK_NAME);
+	len = TLV_GET_DATA_LEN(msg->req);
+	len -= offsetof(struct tipc_link_config, name);
+	if (len <= 0)
+		return -EINVAL;
+
+	len = min_t(int, len, TIPC_MAX_LINK_NAME);
 	if (!string_is_valid(lc->name, len))
 		return -EINVAL;
 
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 425351ac2a9b..20b191227969 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -1484,6 +1484,8 @@ static int decrypt_skb_update(struct sock *sk, struct sk_buff *skb,
 
 			return err;
 		}
+	} else {
+		*zc = false;
 	}
 
 	rxm->full_len -= padding_length(ctx, tls_ctx, skb);
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index 5bf8e52c41fc..8e7c56e9590f 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -177,7 +177,7 @@ $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION)
 
 $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN)
 	$(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \
-		    -Wl,--version-script=$(VERSION_SCRIPT) $^ -o $@
+		    -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@
 	@ln -sf $(@F) $(OUTPUT)libbpf.so
 	@ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION)
 
@@ -220,8 +220,9 @@ install_lib: all_cmd
 install_headers:
 	$(call QUIET_INSTALL, headers) \
 		$(call do_install,bpf.h,$(prefix)/include/bpf,644); \
-		$(call do_install,libbpf.h,$(prefix)/include/bpf,644);
-		$(call do_install,btf.h,$(prefix)/include/bpf,644);
+		$(call do_install,libbpf.h,$(prefix)/include/bpf,644); \
+		$(call do_install,btf.h,$(prefix)/include/bpf,644); \
+		$(call do_install,xsk.h,$(prefix)/include/bpf,644);
 
 install: install_lib
 
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 87e3020ac1bc..cf119c9b6f27 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -2107,6 +2107,9 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
 		return fwd_kind == real_kind;
 	}
 
+	if (cand_kind != canon_kind)
+		return 0;
+
 	switch (cand_kind) {
 	case BTF_KIND_INT:
 		return btf_equal_int(cand_type, canon_type);
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index c3fad065c89c..c7727be9719f 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -44,6 +44,7 @@
 #include <cpuid.h>
 #include <linux/capability.h>
 #include <errno.h>
+#include <math.h>
 
 char *proc_stat = "/proc/stat";
 FILE *outf;
@@ -63,7 +64,6 @@ unsigned int dump_only;
 unsigned int do_snb_cstates;
 unsigned int do_knl_cstates;
 unsigned int do_slm_cstates;
-unsigned int do_cnl_cstates;
 unsigned int use_c1_residency_msr;
 unsigned int has_aperf;
 unsigned int has_epb;
@@ -141,9 +141,21 @@ unsigned int first_counter_read = 1;
 
 #define RAPL_CORES_ENERGY_STATUS	(1 << 9)
 					/* 0x639 MSR_PP0_ENERGY_STATUS */
+#define RAPL_PER_CORE_ENERGY	(1 << 10)
+					/* Indicates cores energy collection is per-core,
+					 * not per-package. */
+#define RAPL_AMD_F17H		(1 << 11)
+					/* 0xc0010299 MSR_RAPL_PWR_UNIT */
+					/* 0xc001029a MSR_CORE_ENERGY_STAT */
+					/* 0xc001029b MSR_PKG_ENERGY_STAT */
 #define RAPL_CORES (RAPL_CORES_ENERGY_STATUS | RAPL_CORES_POWER_LIMIT)
 #define	TJMAX_DEFAULT	100
 
+/* MSRs that are not yet in the kernel-provided header. */
+#define MSR_RAPL_PWR_UNIT	0xc0010299
+#define MSR_CORE_ENERGY_STAT	0xc001029a
+#define MSR_PKG_ENERGY_STAT	0xc001029b
+
 #define MAX(a, b) ((a) > (b) ? (a) : (b))
 
 /*
@@ -187,6 +199,7 @@ struct core_data {
 	unsigned long long c7;
 	unsigned long long mc6_us;	/* duplicate as per-core for now, even though per module */
 	unsigned int core_temp_c;
+	unsigned int core_energy;	/* MSR_CORE_ENERGY_STAT */
 	unsigned int core_id;
 	unsigned long long counter[MAX_ADDED_COUNTERS];
 } *core_even, *core_odd;
@@ -273,6 +286,7 @@ struct system_summary {
273 286
274struct cpu_topology { 287struct cpu_topology {
275 int physical_package_id; 288 int physical_package_id;
289 int die_id;
276 int logical_cpu_id; 290 int logical_cpu_id;
277 int physical_node_id; 291 int physical_node_id;
278 int logical_node_id; /* 0-based count within the package */ 292 int logical_node_id; /* 0-based count within the package */
@@ -283,6 +297,7 @@ struct cpu_topology {
283 297
284struct topo_params { 298struct topo_params {
285 int num_packages; 299 int num_packages;
300 int num_die;
286 int num_cpus; 301 int num_cpus;
287 int num_cores; 302 int num_cores;
288 int max_cpu_num; 303 int max_cpu_num;
@@ -314,9 +329,8 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg
 	int retval, pkg_no, core_no, thread_no, node_no;
 
 	for (pkg_no = 0; pkg_no < topo.num_packages; ++pkg_no) {
-		for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
-			for (node_no = 0; node_no < topo.nodes_per_pkg;
-			     node_no++) {
+		for (node_no = 0; node_no < topo.nodes_per_pkg; node_no++) {
+			for (core_no = 0; core_no < topo.cores_per_node; ++core_no) {
 				for (thread_no = 0; thread_no <
 					topo.threads_per_core; ++thread_no) {
 					struct thread_data *t;
@@ -442,6 +456,7 @@ struct msr_counter bic[] = {
 	{ 0x0, "CPU" },
 	{ 0x0, "APIC" },
 	{ 0x0, "X2APIC" },
+	{ 0x0, "Die" },
 };
 
 #define	MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
@@ -495,6 +510,7 @@ struct msr_counter bic[] = {
 #define	BIC_CPU		(1ULL << 47)
 #define	BIC_APIC	(1ULL << 48)
 #define	BIC_X2APIC	(1ULL << 49)
+#define	BIC_Die		(1ULL << 50)
 
 #define BIC_DISABLED_BY_DEFAULT	(BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
 
@@ -621,6 +637,8 @@ void print_header(char *delim)
 		outp += sprintf(outp, "%sTime_Of_Day_Seconds", (printed++ ? delim : ""));
 	if (DO_BIC(BIC_Package))
 		outp += sprintf(outp, "%sPackage", (printed++ ? delim : ""));
+	if (DO_BIC(BIC_Die))
+		outp += sprintf(outp, "%sDie", (printed++ ? delim : ""));
 	if (DO_BIC(BIC_Node))
 		outp += sprintf(outp, "%sNode", (printed++ ? delim : ""));
 	if (DO_BIC(BIC_Core))
@@ -667,7 +685,7 @@ void print_header(char *delim)
 
 	if (DO_BIC(BIC_CPU_c1))
 		outp += sprintf(outp, "%sCPU%%c1", (printed++ ? delim : ""));
-	if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+	if (DO_BIC(BIC_CPU_c3))
 		outp += sprintf(outp, "%sCPU%%c3", (printed++ ? delim : ""));
 	if (DO_BIC(BIC_CPU_c6))
 		outp += sprintf(outp, "%sCPU%%c6", (printed++ ? delim : ""));
@@ -680,6 +698,14 @@ void print_header(char *delim)
 	if (DO_BIC(BIC_CoreTmp))
 		outp += sprintf(outp, "%sCoreTmp", (printed++ ? delim : ""));
 
+	if (do_rapl && !rapl_joules) {
+		if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+			outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
+	} else if (do_rapl && rapl_joules) {
+		if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+			outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
+	}
+
 	for (mp = sys.cp; mp; mp = mp->next) {
 		if (mp->format == FORMAT_RAW) {
 			if (mp->width == 64)
@@ -734,7 +760,7 @@ void print_header(char *delim)
 	if (do_rapl && !rapl_joules) {
 		if (DO_BIC(BIC_PkgWatt))
 			outp += sprintf(outp, "%sPkgWatt", (printed++ ? delim : ""));
-		if (DO_BIC(BIC_CorWatt))
+		if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
 			outp += sprintf(outp, "%sCorWatt", (printed++ ? delim : ""));
 		if (DO_BIC(BIC_GFXWatt))
 			outp += sprintf(outp, "%sGFXWatt", (printed++ ? delim : ""));
@@ -747,7 +773,7 @@ void print_header(char *delim)
 	} else if (do_rapl && rapl_joules) {
 		if (DO_BIC(BIC_Pkg_J))
 			outp += sprintf(outp, "%sPkg_J", (printed++ ? delim : ""));
-		if (DO_BIC(BIC_Cor_J))
+		if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
 			outp += sprintf(outp, "%sCor_J", (printed++ ? delim : ""));
 		if (DO_BIC(BIC_GFX_J))
 			outp += sprintf(outp, "%sGFX_J", (printed++ ? delim : ""));
@@ -808,6 +834,7 @@ int dump_counters(struct thread_data *t, struct core_data *c,
 	outp += sprintf(outp, "c6: %016llX\n", c->c6);
 	outp += sprintf(outp, "c7: %016llX\n", c->c7);
 	outp += sprintf(outp, "DTS: %dC\n", c->core_temp_c);
+	outp += sprintf(outp, "Joules: %0X\n", c->core_energy);
 
 	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
 		outp += sprintf(outp, "cADDED [%d] msr0x%x: %08llX\n",
@@ -904,6 +931,8 @@ int format_counters(struct thread_data *t, struct core_data *c,
 	if (t == &average.threads) {
 		if (DO_BIC(BIC_Package))
 			outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+		if (DO_BIC(BIC_Die))
+			outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
 		if (DO_BIC(BIC_Node))
 			outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
 		if (DO_BIC(BIC_Core))
@@ -921,6 +950,12 @@ int format_counters(struct thread_data *t, struct core_data *c,
 		else
 			outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
 	}
+	if (DO_BIC(BIC_Die)) {
+		if (c)
+			outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), cpus[t->cpu_id].die_id);
+		else
+			outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+	}
 	if (DO_BIC(BIC_Node)) {
 		if (t)
 			outp += sprintf(outp, "%s%d",
@@ -1003,7 +1038,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
 		goto done;
 
-	if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates)
+	if (DO_BIC(BIC_CPU_c3))
 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c3/tsc);
 	if (DO_BIC(BIC_CPU_c6))
 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * c->c6/tsc);
@@ -1033,6 +1068,20 @@ int format_counters(struct thread_data *t, struct core_data *c,
 		}
 	}
 
+	/*
+	 * If measurement interval exceeds minimum RAPL Joule Counter range,
+	 * indicate that results are suspect by printing "**" in fraction place.
+	 */
+	if (interval_float < rapl_joule_counter_range)
+		fmt8 = "%s%.2f";
+	else
+		fmt8 = "%6.0f**";
+
+	if (DO_BIC(BIC_CorWatt) && (do_rapl & RAPL_PER_CORE_ENERGY))
+		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units / interval_float);
+	if (DO_BIC(BIC_Cor_J) && (do_rapl & RAPL_PER_CORE_ENERGY))
+		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), c->core_energy * rapl_energy_units);
+
 	/* print per-package data only for 1st core in package */
 	if (!(t->flags & CPU_IS_FIRST_CORE_IN_PACKAGE))
 		goto done;
@@ -1085,18 +1134,9 @@ int format_counters(struct thread_data *t, struct core_data *c,
 	if (DO_BIC(BIC_SYS_LPI))
 		outp += sprintf(outp, "%s%.2f", (printed++ ? delim : ""), 100.0 * p->sys_lpi / 1000000.0 / interval_float);
 
-	/*
-	 * If measurement interval exceeds minimum RAPL Joule Counter range,
-	 * indicate that results are suspect by printing "**" in fraction place.
-	 */
-	if (interval_float < rapl_joule_counter_range)
-		fmt8 = "%s%.2f";
-	else
-		fmt8 = "%6.0f**";
-
 	if (DO_BIC(BIC_PkgWatt))
 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units / interval_float);
-	if (DO_BIC(BIC_CorWatt))
+	if (DO_BIC(BIC_CorWatt) && !(do_rapl & RAPL_PER_CORE_ENERGY))
 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units / interval_float);
 	if (DO_BIC(BIC_GFXWatt))
 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units / interval_float);
@@ -1104,7 +1144,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_dram * rapl_dram_energy_units / interval_float);
 	if (DO_BIC(BIC_Pkg_J))
 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_pkg * rapl_energy_units);
-	if (DO_BIC(BIC_Cor_J))
+	if (DO_BIC(BIC_Cor_J) && !(do_rapl & RAPL_PER_CORE_ENERGY))
 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_cores * rapl_energy_units);
 	if (DO_BIC(BIC_GFX_J))
 		outp += sprintf(outp, fmt8, (printed++ ? delim : ""), p->energy_gfx * rapl_energy_units);
@@ -1249,6 +1289,8 @@ delta_core(struct core_data *new, struct core_data *old)
 	old->core_temp_c = new->core_temp_c;
 	old->mc6_us = new->mc6_us - old->mc6_us;
 
+	DELTA_WRAP32(new->core_energy, old->core_energy);
+
 	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
 		if (mp->format == FORMAT_RAW)
 			old->counter[i] = new->counter[i];
@@ -1391,6 +1433,7 @@ void clear_counters(struct thread_data *t, struct core_data *c, struct pkg_data
 	c->c7 = 0;
 	c->mc6_us = 0;
 	c->core_temp_c = 0;
+	c->core_energy = 0;
 
 	p->pkg_wtd_core_c0 = 0;
 	p->pkg_any_core_c0 = 0;
@@ -1473,6 +1516,8 @@ int sum_counters(struct thread_data *t, struct core_data *c,
 
 	average.cores.core_temp_c = MAX(average.cores.core_temp_c, c->core_temp_c);
 
+	average.cores.core_energy += c->core_energy;
+
 	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
 		if (mp->format == FORMAT_RAW)
 			continue;
@@ -1818,7 +1863,7 @@ retry:
 	if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
 		goto done;
 
-	if (DO_BIC(BIC_CPU_c3) && !do_slm_cstates && !do_knl_cstates && !do_cnl_cstates) {
+	if (DO_BIC(BIC_CPU_c3)) {
 		if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
 			return -6;
 	}
@@ -1845,6 +1890,12 @@ retry:
 		c->core_temp_c = tcc_activation_temp - ((msr >> 16) & 0x7F);
 	}
 
+	if (do_rapl & RAPL_AMD_F17H) {
+		if (get_msr(cpu, MSR_CORE_ENERGY_STAT, &msr))
+			return -14;
+		c->core_energy = msr & 0xFFFFFFFF;
+	}
+
 	for (i = 0, mp = sys.cp; mp; i++, mp = mp->next) {
 		if (get_mp(cpu, mp, &c->counter[i]))
 			return -10;
@@ -1934,6 +1985,11 @@ retry:
 			return -16;
 		p->rapl_dram_perf_status = msr & 0xFFFFFFFF;
 	}
+	if (do_rapl & RAPL_AMD_F17H) {
+		if (get_msr(cpu, MSR_PKG_ENERGY_STAT, &msr))
+			return -13;
+		p->energy_pkg = msr & 0xFFFFFFFF;
+	}
 	if (DO_BIC(BIC_PkgTmp)) {
 		if (get_msr(cpu, MSR_IA32_PACKAGE_THERM_STATUS, &msr))
 			return -17;
@@ -2456,6 +2512,8 @@ void free_all_buffers(void)
 
 /*
  * Parse a file containing a single int.
+ * Return 0 if file can not be opened
+ * Exit if file can be opened, but can not be parsed
  */
 int parse_int_file(const char *fmt, ...)
 {
@@ -2467,7 +2525,9 @@ int parse_int_file(const char *fmt, ...)
 	va_start(args, fmt);
 	vsnprintf(path, sizeof(path), fmt, args);
 	va_end(args);
-	filep = fopen_or_die(path, "r");
+	filep = fopen(path, "r");
+	if (!filep)
+		return 0;
 	if (fscanf(filep, "%d", &value) != 1)
 		err(1, "%s: failed to parse number from file", path);
 	fclose(filep);
@@ -2488,6 +2548,11 @@ int get_physical_package_id(int cpu)
 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
 }
 
+int get_die_id(int cpu)
+{
+	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
+}
+
 int get_core_id(int cpu)
 {
 	return parse_int_file("/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
@@ -2578,7 +2643,8 @@ int get_thread_siblings(struct cpu_topology *thiscpu)
 	filep = fopen_or_die(path, "r");
 	do {
 		offset -= BITMASK_SIZE;
-		fscanf(filep, "%lx%c", &map, &character);
+		if (fscanf(filep, "%lx%c", &map, &character) != 2)
+			err(1, "%s: failed to parse file", path);
 		for (shift = 0; shift < BITMASK_SIZE; shift++) {
 			if ((map >> shift) & 0x1) {
 				so = shift + offset;
@@ -2855,8 +2921,11 @@ int snapshot_cpu_lpi_us(void)
 	fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_cpu_residency_us", "r");
 
 	retval = fscanf(fp, "%lld", &cpuidle_cur_cpu_lpi_us);
-	if (retval != 1)
-		err(1, "CPU LPI");
+	if (retval != 1) {
+		fprintf(stderr, "Disabling Low Power Idle CPU output\n");
+		BIC_NOT_PRESENT(BIC_CPU_LPI);
+		return -1;
+	}
 
 	fclose(fp);
 
@@ -2878,9 +2947,11 @@ int snapshot_sys_lpi_us(void)
 	fp = fopen_or_die("/sys/devices/system/cpu/cpuidle/low_power_idle_system_residency_us", "r");
 
 	retval = fscanf(fp, "%lld", &cpuidle_cur_sys_lpi_us);
-	if (retval != 1)
-		err(1, "SYS LPI");
-
+	if (retval != 1) {
+		fprintf(stderr, "Disabling Low Power Idle System output\n");
+		BIC_NOT_PRESENT(BIC_SYS_LPI);
+		return -1;
+	}
 	fclose(fp);
 
 	return 0;
@@ -3410,14 +3481,14 @@ dump_sysfs_cstate_config(void)
 		input = fopen(path, "r");
 		if (input == NULL)
 			continue;
-		fgets(name_buf, sizeof(name_buf), input);
+		if (!fgets(name_buf, sizeof(name_buf), input))
+			err(1, "%s: failed to read file", path);
 
 		/* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
 		sp = strchr(name_buf, '-');
 		if (!sp)
 			sp = strchrnul(name_buf, '\n');
 		*sp = '\0';
-
 		fclose(input);
 
 		sprintf(path, "/sys/devices/system/cpu/cpu%d/cpuidle/state%d/desc",
@@ -3425,7 +3496,8 @@ dump_sysfs_cstate_config(void)
 		input = fopen(path, "r");
 		if (input == NULL)
 			continue;
-		fgets(desc, sizeof(desc), input);
+		if (!fgets(desc, sizeof(desc), input))
+			err(1, "%s: failed to read file", path);
 
 		fprintf(outf, "cpu%d: %s: %s", base_cpu, name_buf, desc);
 		fclose(input);
@@ -3444,20 +3516,22 @@ dump_sysfs_pstate_config(void)
 		base_cpu);
 	input = fopen(path, "r");
 	if (input == NULL) {
-		fprintf(stderr, "NSFOD %s\n", path);
+		fprintf(outf, "NSFOD %s\n", path);
 		return;
 	}
-	fgets(driver_buf, sizeof(driver_buf), input);
+	if (!fgets(driver_buf, sizeof(driver_buf), input))
+		err(1, "%s: failed to read file", path);
 	fclose(input);
 
 	sprintf(path, "/sys/devices/system/cpu/cpu%d/cpufreq/scaling_governor",
 		base_cpu);
 	input = fopen(path, "r");
 	if (input == NULL) {
-		fprintf(stderr, "NSFOD %s\n", path);
+		fprintf(outf, "NSFOD %s\n", path);
 		return;
 	}
-	fgets(governor_buf, sizeof(governor_buf), input);
+	if (!fgets(governor_buf, sizeof(governor_buf), input))
+		err(1, "%s: failed to read file", path);
 	fclose(input);
 
 	fprintf(outf, "cpu%d: cpufreq driver: %s", base_cpu, driver_buf);
@@ -3466,7 +3540,8 @@ dump_sysfs_pstate_config(void)
3466 sprintf(path, "/sys/devices/system/cpu/cpufreq/boost"); 3540 sprintf(path, "/sys/devices/system/cpu/cpufreq/boost");
3467 input = fopen(path, "r"); 3541 input = fopen(path, "r");
3468 if (input != NULL) { 3542 if (input != NULL) {
3469 fscanf(input, "%d", &turbo); 3543 if (fscanf(input, "%d", &turbo) != 1)
3544 err(1, "%s: failed to parse number from file", path);
3470 fprintf(outf, "cpufreq boost: %d\n", turbo); 3545 fprintf(outf, "cpufreq boost: %d\n", turbo);
3471 fclose(input); 3546 fclose(input);
3472 } 3547 }
@@ -3474,7 +3549,8 @@ dump_sysfs_pstate_config(void)
3474 sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo"); 3549 sprintf(path, "/sys/devices/system/cpu/intel_pstate/no_turbo");
3475 input = fopen(path, "r"); 3550 input = fopen(path, "r");
3476 if (input != NULL) { 3551 if (input != NULL) {
3477 fscanf(input, "%d", &turbo); 3552 if (fscanf(input, "%d", &turbo) != 1)
3553 err(1, "%s: failed to parse number from file", path);
3478 fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo); 3554 fprintf(outf, "cpufreq intel_pstate no_turbo: %d\n", turbo);
3479 fclose(input); 3555 fclose(input);
3480 } 3556 }
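
The dump_sysfs_cstate_config() and dump_sysfs_pstate_config() hunks above add return-value checks on fgets()/fscanf() and bail out through err(1, ...) on a short read, rather than silently continuing with stale buffers. A hedged sketch of that checked-read idiom, using err(3) from <err.h> as the hunks do (the path and buffer size below are illustrative only):

#include <err.h>
#include <stdio.h>

/* Read one line from a sysfs file, failing loudly on any error (sketch). */
static void read_sysfs_line(const char *path, char *buf, int len)
{
	FILE *input = fopen(path, "r");

	if (input == NULL)
		err(1, "%s: open failed", path);
	if (!fgets(buf, len, input))
		err(1, "%s: failed to read file", path);
	fclose(input);
}

int main(void)
{
	char governor[64];

	read_sysfs_line("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor",
			governor, sizeof(governor));
	printf("governor: %s", governor);
	return 0;
}
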
@@ -3718,7 +3794,7 @@ int print_perf_limit(struct thread_data *t, struct core_data *c, struct pkg_data
3718#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */ 3794#define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
3719#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */ 3795#define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
3720 3796
3721double get_tdp(unsigned int model) 3797double get_tdp_intel(unsigned int model)
3722{ 3798{
3723 unsigned long long msr; 3799 unsigned long long msr;
3724 3800
@@ -3735,6 +3811,16 @@ double get_tdp(unsigned int model)
3735 } 3811 }
3736} 3812}
3737 3813
3814double get_tdp_amd(unsigned int family)
3815{
3816 switch (family) {
3817 case 0x17:
3818 default:
3819 /* This is the max stock TDP of HEDT/Server Fam17h chips */
3820 return 250.0;
3821 }
3822}
3823
3738/* 3824/*
3739 * rapl_dram_energy_units_probe() 3825 * rapl_dram_energy_units_probe()
3740 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR. 3826 * Energy units are either hard-coded, or come from RAPL Energy Unit MSR.
@@ -3754,21 +3840,12 @@ rapl_dram_energy_units_probe(int model, double rapl_energy_units)
3754 } 3840 }
3755} 3841}
3756 3842
3757 3843void rapl_probe_intel(unsigned int family, unsigned int model)
3758/*
3759 * rapl_probe()
3760 *
3761 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
3762 */
3763void rapl_probe(unsigned int family, unsigned int model)
3764{ 3844{
3765 unsigned long long msr; 3845 unsigned long long msr;
3766 unsigned int time_unit; 3846 unsigned int time_unit;
3767 double tdp; 3847 double tdp;
3768 3848
3769 if (!genuine_intel)
3770 return;
3771
3772 if (family != 6) 3849 if (family != 6)
3773 return; 3850 return;
3774 3851
@@ -3892,13 +3969,69 @@ void rapl_probe(unsigned int family, unsigned int model)
3892 3969
3893 rapl_time_units = 1.0 / (1 << (time_unit)); 3970 rapl_time_units = 1.0 / (1 << (time_unit));
3894 3971
3895 tdp = get_tdp(model); 3972 tdp = get_tdp_intel(model);
3896 3973
3897 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp; 3974 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
3898 if (!quiet) 3975 if (!quiet)
3899 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp); 3976 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
3977}
3900 3978
3901 return; 3979void rapl_probe_amd(unsigned int family, unsigned int model)
3980{
3981 unsigned long long msr;
3982 unsigned int eax, ebx, ecx, edx;
3983 unsigned int has_rapl = 0;
3984 double tdp;
3985
3986 if (max_extended_level >= 0x80000007) {
3987 __cpuid(0x80000007, eax, ebx, ecx, edx);
3988 /* RAPL (Fam 17h) */
3989 has_rapl = edx & (1 << 14);
3990 }
3991
3992 if (!has_rapl)
3993 return;
3994
3995 switch (family) {
3996 case 0x17: /* Zen, Zen+ */
3997 do_rapl = RAPL_AMD_F17H | RAPL_PER_CORE_ENERGY;
3998 if (rapl_joules) {
3999 BIC_PRESENT(BIC_Pkg_J);
4000 BIC_PRESENT(BIC_Cor_J);
4001 } else {
4002 BIC_PRESENT(BIC_PkgWatt);
4003 BIC_PRESENT(BIC_CorWatt);
4004 }
4005 break;
4006 default:
4007 return;
4008 }
4009
4010 if (get_msr(base_cpu, MSR_RAPL_PWR_UNIT, &msr))
4011 return;
4012
4013 rapl_time_units = ldexp(1.0, -(msr >> 16 & 0xf));
4014 rapl_energy_units = ldexp(1.0, -(msr >> 8 & 0x1f));
4015 rapl_power_units = ldexp(1.0, -(msr & 0xf));
4016
4017 tdp = get_tdp_amd(model);
4018
4019 rapl_joule_counter_range = 0xFFFFFFFF * rapl_energy_units / tdp;
4020 if (!quiet)
4021 fprintf(outf, "RAPL: %.0f sec. Joule Counter Range, at %.0f Watts\n", rapl_joule_counter_range, tdp);
4022}
4023
4024/*
4025 * rapl_probe()
4026 *
4027 * sets do_rapl, rapl_power_units, rapl_energy_units, rapl_time_units
4028 */
4029void rapl_probe(unsigned int family, unsigned int model)
4030{
4031 if (genuine_intel)
4032 rapl_probe_intel(family, model);
4033 if (authentic_amd)
4034 rapl_probe_amd(family, model);
3902} 4035}
3903 4036
3904void perf_limit_reasons_probe(unsigned int family, unsigned int model) 4037void perf_limit_reasons_probe(unsigned int family, unsigned int model)
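
rapl_probe_amd() above decodes MSR_RAPL_PWR_UNIT by treating each field as a negative power-of-two exponent via ldexp(): time units in bits 19:16, energy units in bits 12:8, power units in bits 3:0; the joule counter range then follows from 0xFFFFFFFF * energy_units / TDP. A worked example of that arithmetic, using a commonly reported raw value for Fam17h parts (illustrative; the real value comes from get_msr() on the target CPU):

#include <math.h>
#include <stdio.h>

int main(void)
{
	unsigned long long msr = 0xa1003;	/* assumed raw MSR_RAPL_PWR_UNIT value */

	double rapl_time_units   = ldexp(1.0, -(int)(msr >> 16 & 0xf));  /* 2^-10 s */
	double rapl_energy_units = ldexp(1.0, -(int)(msr >>  8 & 0x1f)); /* 2^-16 J */
	double rapl_power_units  = ldexp(1.0, -(int)(msr       & 0xf));  /* 2^-3  W */

	double tdp = 250.0;	/* get_tdp_amd() default for Fam17h */
	double range = 0xFFFFFFFF * rapl_energy_units / tdp;

	printf("%g s, %g J, %g W per LSB; joule counter wraps after ~%.0f s at %g W\n",
	       rapl_time_units, rapl_energy_units, rapl_power_units, range, tdp);
	return 0;
}
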
@@ -4003,6 +4136,7 @@ void print_power_limit_msr(int cpu, unsigned long long msr, char *label)
4003int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p) 4136int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
4004{ 4137{
4005 unsigned long long msr; 4138 unsigned long long msr;
4139 const char *msr_name;
4006 int cpu; 4140 int cpu;
4007 4141
4008 if (!do_rapl) 4142 if (!do_rapl)
@@ -4018,10 +4152,17 @@ int print_rapl(struct thread_data *t, struct core_data *c, struct pkg_data *p)
4018 return -1; 4152 return -1;
4019 } 4153 }
4020 4154
4021 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr)) 4155 if (do_rapl & RAPL_AMD_F17H) {
4022 return -1; 4156 msr_name = "MSR_RAPL_PWR_UNIT";
4157 if (get_msr(cpu, MSR_RAPL_PWR_UNIT, &msr))
4158 return -1;
4159 } else {
4160 msr_name = "MSR_RAPL_POWER_UNIT";
4161 if (get_msr(cpu, MSR_RAPL_POWER_UNIT, &msr))
4162 return -1;
4163 }
4023 4164
4024 fprintf(outf, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr, 4165 fprintf(outf, "cpu%d: %s: 0x%08llx (%f Watts, %f Joules, %f sec.)\n", cpu, msr_name, msr,
4025 rapl_power_units, rapl_energy_units, rapl_time_units); 4166 rapl_power_units, rapl_energy_units, rapl_time_units);
4026 4167
4027 if (do_rapl & RAPL_PKG_POWER_INFO) { 4168 if (do_rapl & RAPL_PKG_POWER_INFO) {
@@ -4451,6 +4592,9 @@ unsigned int intel_model_duplicates(unsigned int model)
4451 case INTEL_FAM6_KABYLAKE_MOBILE: 4592 case INTEL_FAM6_KABYLAKE_MOBILE:
4452 case INTEL_FAM6_KABYLAKE_DESKTOP: 4593 case INTEL_FAM6_KABYLAKE_DESKTOP:
4453 return INTEL_FAM6_SKYLAKE_MOBILE; 4594 return INTEL_FAM6_SKYLAKE_MOBILE;
4595
4596 case INTEL_FAM6_ICELAKE_MOBILE:
4597 return INTEL_FAM6_CANNONLAKE_MOBILE;
4454 } 4598 }
4455 return model; 4599 return model;
4456} 4600}
@@ -4702,7 +4846,9 @@ void process_cpuid()
4702 } 4846 }
4703 do_slm_cstates = is_slm(family, model); 4847 do_slm_cstates = is_slm(family, model);
4704 do_knl_cstates = is_knl(family, model); 4848 do_knl_cstates = is_knl(family, model);
4705 do_cnl_cstates = is_cnl(family, model); 4849
4850 if (do_slm_cstates || do_knl_cstates || is_cnl(family, model))
4851 BIC_NOT_PRESENT(BIC_CPU_c3);
4706 4852
4707 if (!quiet) 4853 if (!quiet)
4708 decode_misc_pwr_mgmt_msr(); 4854 decode_misc_pwr_mgmt_msr();
@@ -4769,6 +4915,7 @@ void topology_probe()
4769 int i; 4915 int i;
4770 int max_core_id = 0; 4916 int max_core_id = 0;
4771 int max_package_id = 0; 4917 int max_package_id = 0;
4918 int max_die_id = 0;
4772 int max_siblings = 0; 4919 int max_siblings = 0;
4773 4920
4774 /* Initialize num_cpus, max_cpu_num */ 4921 /* Initialize num_cpus, max_cpu_num */
@@ -4835,6 +4982,11 @@ void topology_probe()
4835 if (cpus[i].physical_package_id > max_package_id) 4982 if (cpus[i].physical_package_id > max_package_id)
4836 max_package_id = cpus[i].physical_package_id; 4983 max_package_id = cpus[i].physical_package_id;
4837 4984
4985 /* get die information */
4986 cpus[i].die_id = get_die_id(i);
4987 if (cpus[i].die_id > max_die_id)
4988 max_die_id = cpus[i].die_id;
4989
4838 /* get numa node information */ 4990 /* get numa node information */
4839 cpus[i].physical_node_id = get_physical_node_id(&cpus[i]); 4991 cpus[i].physical_node_id = get_physical_node_id(&cpus[i]);
4840 if (cpus[i].physical_node_id > topo.max_node_num) 4992 if (cpus[i].physical_node_id > topo.max_node_num)
@@ -4860,6 +5012,13 @@ void topology_probe()
4860 if (!summary_only && topo.cores_per_node > 1) 5012 if (!summary_only && topo.cores_per_node > 1)
4861 BIC_PRESENT(BIC_Core); 5013 BIC_PRESENT(BIC_Core);
4862 5014
5015 topo.num_die = max_die_id + 1;
5016 if (debug > 1)
5017 fprintf(outf, "max_die_id %d, sizing for %d die\n",
5018 max_die_id, topo.num_die);
5019 if (!summary_only && topo.num_die > 1)
5020 BIC_PRESENT(BIC_Die);
5021
4863 topo.num_packages = max_package_id + 1; 5022 topo.num_packages = max_package_id + 1;
4864 if (debug > 1) 5023 if (debug > 1)
4865 fprintf(outf, "max_package_id %d, sizing for %d packages\n", 5024 fprintf(outf, "max_package_id %d, sizing for %d packages\n",
@@ -4884,8 +5043,8 @@ void topology_probe()
4884 if (cpu_is_not_present(i)) 5043 if (cpu_is_not_present(i))
4885 continue; 5044 continue;
4886 fprintf(outf, 5045 fprintf(outf,
4887 "cpu %d pkg %d node %d lnode %d core %d thread %d\n", 5046 "cpu %d pkg %d die %d node %d lnode %d core %d thread %d\n",
4888 i, cpus[i].physical_package_id, 5047 i, cpus[i].physical_package_id, cpus[i].die_id,
4889 cpus[i].physical_node_id, 5048 cpus[i].physical_node_id,
4890 cpus[i].logical_node_id, 5049 cpus[i].logical_node_id,
4891 cpus[i].physical_core_id, 5050 cpus[i].physical_core_id,
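
topology_probe() now records a die_id per CPU and sizes topo.num_die from the largest id seen, enabling the new Die column whenever a package holds more than one die. get_die_id() itself is outside this hunk; on recent kernels the value is exported under the CPU topology directory in sysfs, so a minimal reader might look like the following (the sysfs path is an assumption here, not taken from the diff):

#include <stdio.h>

/* Sketch: read a CPU's die id from sysfs; -1 means "no die information",
 * which callers can treat as a single-die topology. */
static int get_die_id_sketch(int cpu)
{
	char path[80];
	FILE *fp;
	int die_id = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/die_id", cpu);
	fp = fopen(path, "r");
	if (!fp)
		return -1;
	if (fscanf(fp, "%d", &die_id) != 1)
		die_id = -1;
	fclose(fp);
	return die_id;
}

int main(void)
{
	printf("cpu0 die_id: %d\n", get_die_id_sketch(0));
	return 0;
}
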
@@ -5122,7 +5281,7 @@ int get_and_dump_counters(void)
5122} 5281}
5123 5282
5124void print_version() { 5283void print_version() {
5125 fprintf(outf, "turbostat version 18.07.27" 5284 fprintf(outf, "turbostat version 19.03.20"
5126 " - Len Brown <lenb@kernel.org>\n"); 5285 " - Len Brown <lenb@kernel.org>\n");
5127} 5286}
5128 5287
@@ -5319,7 +5478,8 @@ void probe_sysfs(void)
5319 input = fopen(path, "r"); 5478 input = fopen(path, "r");
5320 if (input == NULL) 5479 if (input == NULL)
5321 continue; 5480 continue;
5322 fgets(name_buf, sizeof(name_buf), input); 5481 if (!fgets(name_buf, sizeof(name_buf), input))
5482 err(1, "%s: failed to read file", path);
5323 5483
5324 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ 5484 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
5325 sp = strchr(name_buf, '-'); 5485 sp = strchr(name_buf, '-');
@@ -5346,7 +5506,8 @@ void probe_sysfs(void)
5346 input = fopen(path, "r"); 5506 input = fopen(path, "r");
5347 if (input == NULL) 5507 if (input == NULL)
5348 continue; 5508 continue;
5349 fgets(name_buf, sizeof(name_buf), input); 5509 if (!fgets(name_buf, sizeof(name_buf), input))
5510 err(1, "%s: failed to read file", path);
5350 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */ 5511 /* truncate "C1-HSW\n" to "C1", or truncate "C1\n" to "C1" */
5351 sp = strchr(name_buf, '-'); 5512 sp = strchr(name_buf, '-');
5352 if (!sp) 5513 if (!sp)
diff --git a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
index bcbd928c96ab..fc818bc1d729 100644
--- a/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
+++ b/tools/testing/selftests/bpf/prog_tests/flow_dissector.c
@@ -39,6 +39,58 @@ static struct bpf_flow_keys pkt_v6_flow_keys = {
39 .n_proto = __bpf_constant_htons(ETH_P_IPV6), 39 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
40}; 40};
41 41
42#define VLAN_HLEN 4
43
44static struct {
45 struct ethhdr eth;
46 __u16 vlan_tci;
47 __u16 vlan_proto;
48 struct iphdr iph;
49 struct tcphdr tcp;
50} __packed pkt_vlan_v4 = {
51 .eth.h_proto = __bpf_constant_htons(ETH_P_8021Q),
52 .vlan_proto = __bpf_constant_htons(ETH_P_IP),
53 .iph.ihl = 5,
54 .iph.protocol = IPPROTO_TCP,
55 .iph.tot_len = __bpf_constant_htons(MAGIC_BYTES),
56 .tcp.urg_ptr = 123,
57 .tcp.doff = 5,
58};
59
60static struct bpf_flow_keys pkt_vlan_v4_flow_keys = {
61 .nhoff = VLAN_HLEN,
62 .thoff = VLAN_HLEN + sizeof(struct iphdr),
63 .addr_proto = ETH_P_IP,
64 .ip_proto = IPPROTO_TCP,
65 .n_proto = __bpf_constant_htons(ETH_P_IP),
66};
67
68static struct {
69 struct ethhdr eth;
70 __u16 vlan_tci;
71 __u16 vlan_proto;
72 __u16 vlan_tci2;
73 __u16 vlan_proto2;
74 struct ipv6hdr iph;
75 struct tcphdr tcp;
76} __packed pkt_vlan_v6 = {
77 .eth.h_proto = __bpf_constant_htons(ETH_P_8021AD),
78 .vlan_proto = __bpf_constant_htons(ETH_P_8021Q),
79 .vlan_proto2 = __bpf_constant_htons(ETH_P_IPV6),
80 .iph.nexthdr = IPPROTO_TCP,
81 .iph.payload_len = __bpf_constant_htons(MAGIC_BYTES),
82 .tcp.urg_ptr = 123,
83 .tcp.doff = 5,
84};
85
86static struct bpf_flow_keys pkt_vlan_v6_flow_keys = {
87 .nhoff = VLAN_HLEN * 2,
88 .thoff = VLAN_HLEN * 2 + sizeof(struct ipv6hdr),
89 .addr_proto = ETH_P_IPV6,
90 .ip_proto = IPPROTO_TCP,
91 .n_proto = __bpf_constant_htons(ETH_P_IPV6),
92};
93
42void test_flow_dissector(void) 94void test_flow_dissector(void)
43{ 95{
44 struct bpf_flow_keys flow_keys; 96 struct bpf_flow_keys flow_keys;
@@ -68,5 +120,21 @@ void test_flow_dissector(void)
68 err, errno, retval, duration, size, sizeof(flow_keys)); 120 err, errno, retval, duration, size, sizeof(flow_keys));
69 CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys); 121 CHECK_FLOW_KEYS("ipv6_flow_keys", flow_keys, pkt_v6_flow_keys);
70 122
123 err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v4, sizeof(pkt_vlan_v4),
124 &flow_keys, &size, &retval, &duration);
125 CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv4",
126 "err %d errno %d retval %d duration %d size %u/%lu\n",
127 err, errno, retval, duration, size, sizeof(flow_keys));
128 CHECK_FLOW_KEYS("vlan_ipv4_flow_keys", flow_keys,
129 pkt_vlan_v4_flow_keys);
130
131 err = bpf_prog_test_run(prog_fd, 10, &pkt_vlan_v6, sizeof(pkt_vlan_v6),
132 &flow_keys, &size, &retval, &duration);
133 CHECK(size != sizeof(flow_keys) || err || retval != 1, "vlan_ipv6",
134 "err %d errno %d retval %d duration %d size %u/%lu\n",
135 err, errno, retval, duration, size, sizeof(flow_keys));
136 CHECK_FLOW_KEYS("vlan_ipv6_flow_keys", flow_keys,
137 pkt_vlan_v6_flow_keys);
138
71 bpf_object__close(obj); 139 bpf_object__close(obj);
72} 140}
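
The expected flow keys for the new VLAN test packets simply push the network and transport offsets out by one VLAN header per tag: the single-tagged IPv4 case expects nhoff = VLAN_HLEN = 4 and thoff = 4 + sizeof(struct iphdr) = 24, while the double-tagged 802.1AD/802.1Q IPv6 case expects 8 and 8 + 40 = 48. A small sketch of that arithmetic in plain C (header sizes hard-coded rather than taken from kernel headers):

#include <stdio.h>

#define VLAN_HLEN	4
#define IPV4_HLEN	20	/* sizeof(struct iphdr), no options */
#define IPV6_HLEN	40	/* sizeof(struct ipv6hdr) */

int main(void)
{
	/* single 802.1Q tag in front of an IPv4 header */
	printf("vlan_v4: nhoff=%d thoff=%d\n",
	       VLAN_HLEN, VLAN_HLEN + IPV4_HLEN);		/* 4, 24 */

	/* double-tagged (802.1AD outer, 802.1Q inner) IPv6 */
	printf("vlan_v6: nhoff=%d thoff=%d\n",
	       VLAN_HLEN * 2, VLAN_HLEN * 2 + IPV6_HLEN);	/* 8, 48 */
	return 0;
}
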
diff --git a/tools/testing/selftests/bpf/progs/bpf_flow.c b/tools/testing/selftests/bpf/progs/bpf_flow.c
index 284660f5aa95..75b17cada539 100644
--- a/tools/testing/selftests/bpf/progs/bpf_flow.c
+++ b/tools/testing/selftests/bpf/progs/bpf_flow.c
@@ -92,7 +92,6 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
92{ 92{
93 struct bpf_flow_keys *keys = skb->flow_keys; 93 struct bpf_flow_keys *keys = skb->flow_keys;
94 94
95 keys->n_proto = proto;
96 switch (proto) { 95 switch (proto) {
97 case bpf_htons(ETH_P_IP): 96 case bpf_htons(ETH_P_IP):
98 bpf_tail_call(skb, &jmp_table, IP); 97 bpf_tail_call(skb, &jmp_table, IP);
@@ -119,10 +118,9 @@ static __always_inline int parse_eth_proto(struct __sk_buff *skb, __be16 proto)
119SEC("flow_dissector") 118SEC("flow_dissector")
120int _dissect(struct __sk_buff *skb) 119int _dissect(struct __sk_buff *skb)
121{ 120{
122 if (!skb->vlan_present) 121 struct bpf_flow_keys *keys = skb->flow_keys;
123 return parse_eth_proto(skb, skb->protocol); 122
124 else 123 return parse_eth_proto(skb, keys->n_proto);
125 return parse_eth_proto(skb, skb->vlan_proto);
126} 124}
127 125
128/* Parses on IPPROTO_* */ 126/* Parses on IPPROTO_* */
@@ -336,15 +334,9 @@ PROG(VLAN)(struct __sk_buff *skb)
336{ 334{
337 struct bpf_flow_keys *keys = skb->flow_keys; 335 struct bpf_flow_keys *keys = skb->flow_keys;
338 struct vlan_hdr *vlan, _vlan; 336 struct vlan_hdr *vlan, _vlan;
339 __be16 proto;
340
341 /* Peek back to see if single or double-tagging */
342 if (bpf_skb_load_bytes(skb, keys->thoff - sizeof(proto), &proto,
343 sizeof(proto)))
344 return BPF_DROP;
345 337
346 /* Account for double-tagging */ 338 /* Account for double-tagging */
347 if (proto == bpf_htons(ETH_P_8021AD)) { 339 if (keys->n_proto == bpf_htons(ETH_P_8021AD)) {
348 vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); 340 vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan);
349 if (!vlan) 341 if (!vlan)
350 return BPF_DROP; 342 return BPF_DROP;
@@ -352,6 +344,7 @@ PROG(VLAN)(struct __sk_buff *skb)
352 if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q)) 344 if (vlan->h_vlan_encapsulated_proto != bpf_htons(ETH_P_8021Q))
353 return BPF_DROP; 345 return BPF_DROP;
354 346
347 keys->nhoff += sizeof(*vlan);
355 keys->thoff += sizeof(*vlan); 348 keys->thoff += sizeof(*vlan);
356 } 349 }
357 350
@@ -359,12 +352,14 @@ PROG(VLAN)(struct __sk_buff *skb)
359 if (!vlan) 352 if (!vlan)
360 return BPF_DROP; 353 return BPF_DROP;
361 354
355 keys->nhoff += sizeof(*vlan);
362 keys->thoff += sizeof(*vlan); 356 keys->thoff += sizeof(*vlan);
363 /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/ 357 /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/
364 if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) || 358 if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) ||
365 vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q)) 359 vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q))
366 return BPF_DROP; 360 return BPF_DROP;
367 361
362 keys->n_proto = vlan->h_vlan_encapsulated_proto;
368 return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto); 363 return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);
369} 364}
370 365
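
With this change bpf_flow.c no longer consults skb->vlan_present/vlan_proto: _dissect() dispatches on keys->n_proto, which the caller is expected to have pre-populated with the EtherType, and PROG(VLAN) advances both nhoff and thoff per consumed tag and writes the encapsulated protocol back into keys->n_proto before re-dispatching. A condensed sketch of that update logic in ordinary user-space C (struct and field names are simplified stand-ins, byte order ignored for brevity):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_P_IP	0x0800
#define ETH_P_8021Q	0x8100
#define ETH_P_8021AD	0x88A8

struct flow_keys { uint16_t nhoff, thoff, n_proto; };
struct vlan_tag  { uint16_t tci, encap_proto; };

/* Consume one (or, for 802.1AD, two) VLAN tags, keeping the offsets and
 * n_proto in sync; the caller re-dispatches on the inner protocol. */
static int parse_vlan(const uint8_t *pkt, struct flow_keys *keys)
{
	struct vlan_tag vlan;

	if (keys->n_proto == ETH_P_8021AD) {		/* outer service tag */
		memcpy(&vlan, pkt + keys->nhoff, sizeof(vlan));
		if (vlan.encap_proto != ETH_P_8021Q)
			return -1;			/* only 8021AD + 8021Q allowed */
		keys->nhoff += sizeof(vlan);
		keys->thoff += sizeof(vlan);
	}

	memcpy(&vlan, pkt + keys->nhoff, sizeof(vlan));
	keys->nhoff += sizeof(vlan);
	keys->thoff += sizeof(vlan);
	if (vlan.encap_proto == ETH_P_8021AD || vlan.encap_proto == ETH_P_8021Q)
		return -1;				/* no triple tagging */

	keys->n_proto = vlan.encap_proto;		/* inner EtherType */
	return 0;
}

int main(void)
{
	uint8_t pkt[64] = {0};
	struct flow_keys keys = { .nhoff = 0, .thoff = 0, .n_proto = ETH_P_8021Q };
	struct vlan_tag tag = { .tci = 0, .encap_proto = ETH_P_IP };

	memcpy(pkt, &tag, sizeof(tag));
	if (parse_vlan(pkt, &keys) == 0)
		printf("nhoff=%u thoff=%u n_proto=0x%04x\n",
		       keys.nhoff, keys.thoff, keys.n_proto);
	return 0;
}
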
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 23e3b314ca60..ec5794e4205b 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -5777,6 +5777,53 @@ const struct btf_dedup_test dedup_tests[] = {
5777 }, 5777 },
5778}, 5778},
5779{ 5779{
5780 .descr = "dedup: void equiv check",
5781 /*
5782 * // CU 1:
5783 * struct s {
5784 * struct {} *x;
5785 * };
5786 * // CU 2:
5787 * struct s {
5788 * int *x;
5789 * };
5790 */
5791 .input = {
5792 .raw_types = {
5793 /* CU 1 */
5794 BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
5795 BTF_PTR_ENC(1), /* [2] ptr -> [1] */
5796 BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
5797 BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
5798 /* CU 2 */
5799 BTF_PTR_ENC(0), /* [4] ptr -> void */
5800 BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
5801 BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
5802 BTF_END_RAW,
5803 },
5804 BTF_STR_SEC("\0s\0x"),
5805 },
5806 .expect = {
5807 .raw_types = {
5808 /* CU 1 */
5809 BTF_STRUCT_ENC(0, 0, 1), /* [1] struct {} */
5810 BTF_PTR_ENC(1), /* [2] ptr -> [1] */
5811 BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [3] struct s */
5812 BTF_MEMBER_ENC(NAME_NTH(2), 2, 0),
5813 /* CU 2 */
5814 BTF_PTR_ENC(0), /* [4] ptr -> void */
5815 BTF_STRUCT_ENC(NAME_NTH(1), 1, 8), /* [5] struct s */
5816 BTF_MEMBER_ENC(NAME_NTH(2), 4, 0),
5817 BTF_END_RAW,
5818 },
5819 BTF_STR_SEC("\0s\0x"),
5820 },
5821 .opts = {
5822 .dont_resolve_fwds = false,
5823 .dedup_table_size = 1, /* force hash collisions */
5824 },
5825},
5826{
5780 .descr = "dedup: all possible kinds (no duplicates)", 5827 .descr = "dedup: all possible kinds (no duplicates)",
5781 .input = { 5828 .input = {
5782 .raw_types = { 5829 .raw_types = {
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index f2ccae39ee66..fb11240b758b 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -908,6 +908,44 @@
908 .result = REJECT, 908 .result = REJECT,
909}, 909},
910{ 910{
911 "calls: stack depth check in dead code",
912 .insns = {
913 /* main */
914 BPF_MOV64_IMM(BPF_REG_1, 0),
915 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
916 BPF_EXIT_INSN(),
917 /* A */
918 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
919 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2), /* call B */
920 BPF_MOV64_IMM(BPF_REG_0, 0),
921 BPF_EXIT_INSN(),
922 /* B */
923 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
924 BPF_EXIT_INSN(),
925 /* C */
926 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
927 BPF_EXIT_INSN(),
928 /* D */
929 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
930 BPF_EXIT_INSN(),
931 /* E */
932 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
933 BPF_EXIT_INSN(),
934 /* F */
935 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
936 BPF_EXIT_INSN(),
937 /* G */
938 BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
939 BPF_EXIT_INSN(),
940 /* H */
941 BPF_MOV64_IMM(BPF_REG_0, 0),
942 BPF_EXIT_INSN(),
943 },
944 .prog_type = BPF_PROG_TYPE_XDP,
945 .errstr = "call stack",
946 .result = REJECT,
947},
948{
911 "calls: spill into caller stack frame", 949 "calls: spill into caller stack frame",
912 .insns = { 950 .insns = {
913 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 951 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
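
The new "stack depth check in dead code" case builds a chain of nine frames (main plus subprograms A through H). A only calls B when R1 is non-zero, and main passes R1 = 0, so frames B through H are unreachable at run time; the verifier's stack accounting must nonetheless walk the whole call graph and reject the program with "call stack", since the chain exceeds MAX_CALL_FRAMES (8 in the in-kernel verifier). A toy illustration of that accounting outside the verifier (the array encoding below is my own, not the verifier's data structures):

#include <stdio.h>

#define MAX_CALL_FRAMES	8

/* callee[i] is the single function that frame i calls, or -1 for a leaf:
 * main(0) -> A(1) -> B(2) -> ... -> H(8), nine frames in total. */
static const int callee[] = { 1, 2, 3, 4, 5, 6, 7, 8, -1 };

static int call_chain_depth(int frame)
{
	int depth = 1;

	while (callee[frame] != -1) {
		frame = callee[frame];
		depth++;
	}
	return depth;
}

int main(void)
{
	int depth = call_chain_depth(0);

	printf("call chain depth %d: %s\n", depth,
	       depth > MAX_CALL_FRAMES ? "reject (\"call stack\")" : "accept");
	return 0;
}
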
diff --git a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
index 27f0acaed880..ddabb160a11b 100644
--- a/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
+++ b/tools/testing/selftests/tc-testing/tc-tests/actions/sample.json
@@ -144,6 +144,30 @@
144 ] 144 ]
145 }, 145 },
146 { 146 {
147 "id": "7571",
148 "name": "Add sample action with invalid rate",
149 "category": [
150 "actions",
151 "sample"
152 ],
153 "setup": [
154 [
155 "$TC actions flush action sample",
156 0,
157 1,
158 255
159 ]
160 ],
161 "cmdUnderTest": "$TC actions add action sample rate 0 group 1 index 2",
162 "expExitCode": "255",
163 "verifyCmd": "$TC actions get action sample index 2",
164 "matchPattern": "action order [0-9]+: sample rate 1/0 group 1.*index 2 ref",
165 "matchCount": "0",
166 "teardown": [
167 "$TC actions flush action sample"
168 ]
169 },
170 {
147 "id": "b6d4", 171 "id": "b6d4",
148 "name": "Add sample action with mandatory arguments and invalid control action", 172 "name": "Add sample action with mandatory arguments and invalid control action",
149 "category": [ 173 "category": [