summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2019-11-08 21:21:05 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2019-11-08 21:21:05 -0500
commit0058b0a506e40d9a2c62015fe92eb64a44d78cd9 (patch)
treeb92b6bb31d6308a3f049b49e3780e525ff67a2e5
parent5cb8418cb533222709f362d264653a634eb8c7ac (diff)
parenta2582cdc32f071422e0197a6c59bd1235b426ce2 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from David Miller: 1) BPF sample build fixes from Björn Töpel 2) Fix powerpc bpf tail call implementation, from Eric Dumazet. 3) DCCP leaks jiffies on the wire, fix also from Eric Dumazet. 4) Fix crash in ebtables when using dnat target, from Florian Westphal. 5) Fix port disable handling when removing bcm_sf2 driver, from Florian Fainelli. 6) Fix kTLS sk_msg trim on fallback to copy mode, from Jakub Kicinski. 7) Various KCSAN fixes all over the networking, from Eric Dumazet. 8) Memory leaks in mlx5 driver, from Alex Vesker. 9) SMC interface refcounting fix, from Ursula Braun. 10) TSO descriptor handling fixes in stmmac driver, from Jose Abreu. 11) Add a TX lock to synchronize the kTLS TX path properly with crypto operations. From Jakub Kicinski. 12) Sock refcount during shutdown fix in vsock/virtio code, from Stefano Garzarella. 13) Infinite loop in Intel ice driver, from Colin Ian King. * git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (108 commits) ixgbe: need_wakeup flag might not be set for Tx i40e: need_wakeup flag might not be set for Tx igb/igc: use ktime accessors for skb->tstamp i40e: Fix for ethtool -m issue on X722 NIC iavf: initialize ITRN registers with correct values ice: fix potential infinite loop because loop counter being too small qede: fix NULL pointer deref in __qede_remove() net: fix data-race in neigh_event_send() vsock/virtio: fix sock refcnt holding during the shutdown net: ethernet: octeon_mgmt: Account for second possible VLAN header mac80211: fix station inactive_time shortly after boot net/fq_impl: Switch to kvmalloc() for memory allocation mac80211: fix ieee80211_txq_setup_flows() failure path ipv4: Fix table id reference in fib_sync_down_addr ipv6: fixes rt6_probe() and fib6_nh->last_probe init net: hns: Fix the stray netpoll locks causing deadlock in NAPI path net: usb: qmi_wwan: add support for DW5821e with eSIM support CDC-NCM: handle incomplete transfer of MTU nfc: netlink: fix double device 
reference drop NFC: st21nfca: fix double free ...
-rw-r--r--Documentation/networking/tls-offload.rst4
-rw-r--r--MAINTAINERS1
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c13
-rw-r--r--drivers/net/bonding/bond_main.c44
-rw-r--r--drivers/net/can/c_can/c_can.c71
-rw-r--r--drivers/net/can/c_can/c_can.h1
-rw-r--r--drivers/net/can/dev.c1
-rw-r--r--drivers/net/can/flexcan.c11
-rw-r--r--drivers/net/can/rx-offload.c102
-rw-r--r--drivers/net/can/spi/mcp251x.c2
-rw-r--r--drivers/net/can/ti_hecc.c232
-rw-r--r--drivers/net/can/usb/gs_usb.c1
-rw-r--r--drivers/net/can/usb/mcba_usb.c3
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c32
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c2
-rw-r--r--drivers/net/can/usb/usb_8dev.c3
-rw-r--r--drivers/net/can/xilinx_can.c1
-rw-r--r--drivers/net/dsa/bcm_sf2.c4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c35
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c145
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h3
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_common.c3
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c10
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c2
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c4
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c9
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c12
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c6
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c70
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c134
-rw-r--r--drivers/net/usb/cdc_ncm.c6
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/nfc/fdp/i2c.c2
-rw-r--r--drivers/nfc/st21nfca/core.c1
-rw-r--r--include/linux/bpf.h4
-rw-r--r--include/linux/skmsg.h9
-rw-r--r--include/net/bonding.h3
-rw-r--r--include/net/fq_impl.h4
-rw-r--r--include/net/neighbour.h4
-rw-r--r--include/net/netfilter/nf_tables.h3
-rw-r--r--include/net/sch_generic.h4
-rw-r--r--include/net/sock.h4
-rw-r--r--include/net/tls.h5
-rw-r--r--include/uapi/linux/can.h2
-rw-r--r--include/uapi/linux/can/bcm.h2
-rw-r--r--include/uapi/linux/can/error.h2
-rw-r--r--include/uapi/linux/can/gw.h2
-rw-r--r--include/uapi/linux/can/j1939.h2
-rw-r--r--include/uapi/linux/can/netlink.h2
-rw-r--r--include/uapi/linux/can/raw.h2
-rw-r--r--include/uapi/linux/can/vxcan.h2
-rw-r--r--kernel/bpf/cgroup.c4
-rw-r--r--kernel/bpf/syscall.c7
-rw-r--r--net/bridge/netfilter/ebt_dnat.c19
-rw-r--r--net/can/j1939/socket.c9
-rw-r--r--net/can/j1939/transport.c20
-rw-r--r--net/core/skmsg.c20
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/ipv4/fib_semantics.c2
-rw-r--r--net/ipv6/route.c13
-rw-r--r--net/mac80211/main.c2
-rw-r--r--net/mac80211/sta_info.c3
-rw-r--r--net/netfilter/ipset/ip_set_core.c49
-rw-r--r--net/netfilter/ipset/ip_set_hash_ipmac.c2
-rw-r--r--net/netfilter/ipset/ip_set_hash_net.c1
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c1
-rw-r--r--net/netfilter/nf_tables_api.c7
-rw-r--r--net/netfilter/nf_tables_offload.c3
-rw-r--r--net/netfilter/nft_bitwise.c5
-rw-r--r--net/netfilter/nft_cmp.c2
-rw-r--r--net/nfc/netlink.c2
-rw-r--r--net/sched/cls_api.c83
-rw-r--r--net/sched/sch_taprio.c5
-rw-r--r--net/smc/smc_pnet.c2
-rw-r--r--net/tls/tls_device.c10
-rw-r--r--net/tls/tls_main.c2
-rw-r--r--net/tls/tls_sw.c30
-rw-r--r--net/vmw_vsock/virtio_transport_common.c8
-rw-r--r--samples/bpf/Makefile1
-rw-r--r--tools/perf/perf-sys.h6
-rw-r--r--tools/testing/selftests/bpf/test_sysctl.c8
-rw-r--r--tools/testing/selftests/net/tls.c108
111 files changed, 1093 insertions, 483 deletions
diff --git a/Documentation/networking/tls-offload.rst b/Documentation/networking/tls-offload.rst
index 0dd3f748239f..f914e81fd3a6 100644
--- a/Documentation/networking/tls-offload.rst
+++ b/Documentation/networking/tls-offload.rst
@@ -436,6 +436,10 @@ by the driver:
436 encryption. 436 encryption.
437 * ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream 437 * ``tx_tls_ooo`` - number of TX packets which were part of a TLS stream
438 but did not arrive in the expected order. 438 but did not arrive in the expected order.
439 * ``tx_tls_skip_no_sync_data`` - number of TX packets which were part of
440 a TLS stream and arrived out-of-order, but skipped the HW offload routine
441 and went to the regular transmit flow as they were retransmissions of the
442 connection handshake.
439 * ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of 443 * ``tx_tls_drop_no_sync_data`` - number of TX packets which were part of
440 a TLS stream dropped, because they arrived out of order and associated 444 a TLS stream dropped, because they arrived out of order and associated
441 record could not be found. 445 record could not be found.
diff --git a/MAINTAINERS b/MAINTAINERS
index 2a427d1e9f01..8f148f499660 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3053,6 +3053,7 @@ M: Daniel Borkmann <daniel@iogearbox.net>
3053R: Martin KaFai Lau <kafai@fb.com> 3053R: Martin KaFai Lau <kafai@fb.com>
3054R: Song Liu <songliubraving@fb.com> 3054R: Song Liu <songliubraving@fb.com>
3055R: Yonghong Song <yhs@fb.com> 3055R: Yonghong Song <yhs@fb.com>
3056R: Andrii Nakryiko <andriin@fb.com>
3056L: netdev@vger.kernel.org 3057L: netdev@vger.kernel.org
3057L: bpf@vger.kernel.org 3058L: bpf@vger.kernel.org
3058T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git 3059T: git git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf.git
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 02a59946a78a..be3517ef0574 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -1142,6 +1142,19 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
1142 } 1142 }
1143 1143
1144 /* 1144 /*
1145 * If we have seen a tail call, we need a second pass.
1146 * This is because bpf_jit_emit_common_epilogue() is called
1147 * from bpf_jit_emit_tail_call() with a not yet stable ctx->seen.
1148 */
1149 if (cgctx.seen & SEEN_TAILCALL) {
1150 cgctx.idx = 0;
1151 if (bpf_jit_build_body(fp, 0, &cgctx, addrs, false)) {
1152 fp = org_fp;
1153 goto out_addrs;
1154 }
1155 }
1156
1157 /*
1145 * Pretend to build prologue, given the features we've seen. This will 1158 * Pretend to build prologue, given the features we've seen. This will
1146 * update ctgtx.idx as it pretends to output instructions, then we can 1159 * update ctgtx.idx as it pretends to output instructions, then we can
1147 * calculate total size from idx. 1160 * calculate total size from idx.
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 480f9459b402..62f65573eb04 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2083,8 +2083,7 @@ static int bond_miimon_inspect(struct bonding *bond)
2083 ignore_updelay = !rcu_dereference(bond->curr_active_slave); 2083 ignore_updelay = !rcu_dereference(bond->curr_active_slave);
2084 2084
2085 bond_for_each_slave_rcu(bond, slave, iter) { 2085 bond_for_each_slave_rcu(bond, slave, iter) {
2086 slave->new_link = BOND_LINK_NOCHANGE; 2086 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2087 slave->link_new_state = slave->link;
2088 2087
2089 link_state = bond_check_dev_link(bond, slave->dev, 0); 2088 link_state = bond_check_dev_link(bond, slave->dev, 0);
2090 2089
@@ -2118,7 +2117,7 @@ static int bond_miimon_inspect(struct bonding *bond)
2118 } 2117 }
2119 2118
2120 if (slave->delay <= 0) { 2119 if (slave->delay <= 0) {
2121 slave->new_link = BOND_LINK_DOWN; 2120 bond_propose_link_state(slave, BOND_LINK_DOWN);
2122 commit++; 2121 commit++;
2123 continue; 2122 continue;
2124 } 2123 }
@@ -2155,7 +2154,7 @@ static int bond_miimon_inspect(struct bonding *bond)
2155 slave->delay = 0; 2154 slave->delay = 0;
2156 2155
2157 if (slave->delay <= 0) { 2156 if (slave->delay <= 0) {
2158 slave->new_link = BOND_LINK_UP; 2157 bond_propose_link_state(slave, BOND_LINK_UP);
2159 commit++; 2158 commit++;
2160 ignore_updelay = false; 2159 ignore_updelay = false;
2161 continue; 2160 continue;
@@ -2193,7 +2192,7 @@ static void bond_miimon_commit(struct bonding *bond)
2193 struct slave *slave, *primary; 2192 struct slave *slave, *primary;
2194 2193
2195 bond_for_each_slave(bond, slave, iter) { 2194 bond_for_each_slave(bond, slave, iter) {
2196 switch (slave->new_link) { 2195 switch (slave->link_new_state) {
2197 case BOND_LINK_NOCHANGE: 2196 case BOND_LINK_NOCHANGE:
2198 /* For 802.3ad mode, check current slave speed and 2197 /* For 802.3ad mode, check current slave speed and
2199 * duplex again in case its port was disabled after 2198 * duplex again in case its port was disabled after
@@ -2265,8 +2264,8 @@ static void bond_miimon_commit(struct bonding *bond)
2265 2264
2266 default: 2265 default:
2267 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n", 2266 slave_err(bond->dev, slave->dev, "invalid new link %d on slave\n",
2268 slave->new_link); 2267 slave->link_new_state);
2269 slave->new_link = BOND_LINK_NOCHANGE; 2268 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2270 2269
2271 continue; 2270 continue;
2272 } 2271 }
@@ -2674,13 +2673,13 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2674 bond_for_each_slave_rcu(bond, slave, iter) { 2673 bond_for_each_slave_rcu(bond, slave, iter) {
2675 unsigned long trans_start = dev_trans_start(slave->dev); 2674 unsigned long trans_start = dev_trans_start(slave->dev);
2676 2675
2677 slave->new_link = BOND_LINK_NOCHANGE; 2676 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2678 2677
2679 if (slave->link != BOND_LINK_UP) { 2678 if (slave->link != BOND_LINK_UP) {
2680 if (bond_time_in_interval(bond, trans_start, 1) && 2679 if (bond_time_in_interval(bond, trans_start, 1) &&
2681 bond_time_in_interval(bond, slave->last_rx, 1)) { 2680 bond_time_in_interval(bond, slave->last_rx, 1)) {
2682 2681
2683 slave->new_link = BOND_LINK_UP; 2682 bond_propose_link_state(slave, BOND_LINK_UP);
2684 slave_state_changed = 1; 2683 slave_state_changed = 1;
2685 2684
2686 /* primary_slave has no meaning in round-robin 2685 /* primary_slave has no meaning in round-robin
@@ -2705,7 +2704,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2705 if (!bond_time_in_interval(bond, trans_start, 2) || 2704 if (!bond_time_in_interval(bond, trans_start, 2) ||
2706 !bond_time_in_interval(bond, slave->last_rx, 2)) { 2705 !bond_time_in_interval(bond, slave->last_rx, 2)) {
2707 2706
2708 slave->new_link = BOND_LINK_DOWN; 2707 bond_propose_link_state(slave, BOND_LINK_DOWN);
2709 slave_state_changed = 1; 2708 slave_state_changed = 1;
2710 2709
2711 if (slave->link_failure_count < UINT_MAX) 2710 if (slave->link_failure_count < UINT_MAX)
@@ -2736,8 +2735,8 @@ static void bond_loadbalance_arp_mon(struct bonding *bond)
2736 goto re_arm; 2735 goto re_arm;
2737 2736
2738 bond_for_each_slave(bond, slave, iter) { 2737 bond_for_each_slave(bond, slave, iter) {
2739 if (slave->new_link != BOND_LINK_NOCHANGE) 2738 if (slave->link_new_state != BOND_LINK_NOCHANGE)
2740 slave->link = slave->new_link; 2739 slave->link = slave->link_new_state;
2741 } 2740 }
2742 2741
2743 if (slave_state_changed) { 2742 if (slave_state_changed) {
@@ -2760,9 +2759,9 @@ re_arm:
2760} 2759}
2761 2760
2762/* Called to inspect slaves for active-backup mode ARP monitor link state 2761/* Called to inspect slaves for active-backup mode ARP monitor link state
2763 * changes. Sets new_link in slaves to specify what action should take 2762 * changes. Sets proposed link state in slaves to specify what action
2764 * place for the slave. Returns 0 if no changes are found, >0 if changes 2763 * should take place for the slave. Returns 0 if no changes are found, >0
2765 * to link states must be committed. 2764 * if changes to link states must be committed.
2766 * 2765 *
2767 * Called with rcu_read_lock held. 2766 * Called with rcu_read_lock held.
2768 */ 2767 */
@@ -2774,12 +2773,12 @@ static int bond_ab_arp_inspect(struct bonding *bond)
2774 int commit = 0; 2773 int commit = 0;
2775 2774
2776 bond_for_each_slave_rcu(bond, slave, iter) { 2775 bond_for_each_slave_rcu(bond, slave, iter) {
2777 slave->new_link = BOND_LINK_NOCHANGE; 2776 bond_propose_link_state(slave, BOND_LINK_NOCHANGE);
2778 last_rx = slave_last_rx(bond, slave); 2777 last_rx = slave_last_rx(bond, slave);
2779 2778
2780 if (slave->link != BOND_LINK_UP) { 2779 if (slave->link != BOND_LINK_UP) {
2781 if (bond_time_in_interval(bond, last_rx, 1)) { 2780 if (bond_time_in_interval(bond, last_rx, 1)) {
2782 slave->new_link = BOND_LINK_UP; 2781 bond_propose_link_state(slave, BOND_LINK_UP);
2783 commit++; 2782 commit++;
2784 } 2783 }
2785 continue; 2784 continue;
@@ -2807,7 +2806,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
2807 if (!bond_is_active_slave(slave) && 2806 if (!bond_is_active_slave(slave) &&
2808 !rcu_access_pointer(bond->current_arp_slave) && 2807 !rcu_access_pointer(bond->current_arp_slave) &&
2809 !bond_time_in_interval(bond, last_rx, 3)) { 2808 !bond_time_in_interval(bond, last_rx, 3)) {
2810 slave->new_link = BOND_LINK_DOWN; 2809 bond_propose_link_state(slave, BOND_LINK_DOWN);
2811 commit++; 2810 commit++;
2812 } 2811 }
2813 2812
@@ -2820,7 +2819,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
2820 if (bond_is_active_slave(slave) && 2819 if (bond_is_active_slave(slave) &&
2821 (!bond_time_in_interval(bond, trans_start, 2) || 2820 (!bond_time_in_interval(bond, trans_start, 2) ||
2822 !bond_time_in_interval(bond, last_rx, 2))) { 2821 !bond_time_in_interval(bond, last_rx, 2))) {
2823 slave->new_link = BOND_LINK_DOWN; 2822 bond_propose_link_state(slave, BOND_LINK_DOWN);
2824 commit++; 2823 commit++;
2825 } 2824 }
2826 } 2825 }
@@ -2840,7 +2839,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
2840 struct slave *slave; 2839 struct slave *slave;
2841 2840
2842 bond_for_each_slave(bond, slave, iter) { 2841 bond_for_each_slave(bond, slave, iter) {
2843 switch (slave->new_link) { 2842 switch (slave->link_new_state) {
2844 case BOND_LINK_NOCHANGE: 2843 case BOND_LINK_NOCHANGE:
2845 continue; 2844 continue;
2846 2845
@@ -2890,8 +2889,9 @@ static void bond_ab_arp_commit(struct bonding *bond)
2890 continue; 2889 continue;
2891 2890
2892 default: 2891 default:
2893 slave_err(bond->dev, slave->dev, "impossible: new_link %d on slave\n", 2892 slave_err(bond->dev, slave->dev,
2894 slave->new_link); 2893 "impossible: link_new_state %d on slave\n",
2894 slave->link_new_state);
2895 continue; 2895 continue;
2896 } 2896 }
2897 2897
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 606b7d8ffe13..8e9f5620c9a2 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -52,6 +52,7 @@
52#define CONTROL_EX_PDR BIT(8) 52#define CONTROL_EX_PDR BIT(8)
53 53
54/* control register */ 54/* control register */
55#define CONTROL_SWR BIT(15)
55#define CONTROL_TEST BIT(7) 56#define CONTROL_TEST BIT(7)
56#define CONTROL_CCE BIT(6) 57#define CONTROL_CCE BIT(6)
57#define CONTROL_DISABLE_AR BIT(5) 58#define CONTROL_DISABLE_AR BIT(5)
@@ -97,6 +98,9 @@
97#define BTR_TSEG2_SHIFT 12 98#define BTR_TSEG2_SHIFT 12
98#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT) 99#define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT)
99 100
101/* interrupt register */
102#define INT_STS_PENDING 0x8000
103
100/* brp extension register */ 104/* brp extension register */
101#define BRP_EXT_BRPE_MASK 0x0f 105#define BRP_EXT_BRPE_MASK 0x0f
102#define BRP_EXT_BRPE_SHIFT 0 106#define BRP_EXT_BRPE_SHIFT 0
@@ -569,6 +573,26 @@ static void c_can_configure_msg_objects(struct net_device *dev)
569 IF_MCONT_RCV_EOB); 573 IF_MCONT_RCV_EOB);
570} 574}
571 575
576static int c_can_software_reset(struct net_device *dev)
577{
578 struct c_can_priv *priv = netdev_priv(dev);
579 int retry = 0;
580
581 if (priv->type != BOSCH_D_CAN)
582 return 0;
583
584 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT);
585 while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) {
586 msleep(20);
587 if (retry++ > 100) {
588 netdev_err(dev, "CCTRL: software reset failed\n");
589 return -EIO;
590 }
591 }
592
593 return 0;
594}
595
572/* 596/*
573 * Configure C_CAN chip: 597 * Configure C_CAN chip:
574 * - enable/disable auto-retransmission 598 * - enable/disable auto-retransmission
@@ -578,6 +602,11 @@ static void c_can_configure_msg_objects(struct net_device *dev)
578static int c_can_chip_config(struct net_device *dev) 602static int c_can_chip_config(struct net_device *dev)
579{ 603{
580 struct c_can_priv *priv = netdev_priv(dev); 604 struct c_can_priv *priv = netdev_priv(dev);
605 int err;
606
607 err = c_can_software_reset(dev);
608 if (err)
609 return err;
581 610
582 /* enable automatic retransmission */ 611 /* enable automatic retransmission */
583 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR); 612 priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
@@ -886,6 +915,9 @@ static int c_can_handle_state_change(struct net_device *dev,
886 struct can_berr_counter bec; 915 struct can_berr_counter bec;
887 916
888 switch (error_type) { 917 switch (error_type) {
918 case C_CAN_NO_ERROR:
919 priv->can.state = CAN_STATE_ERROR_ACTIVE;
920 break;
889 case C_CAN_ERROR_WARNING: 921 case C_CAN_ERROR_WARNING:
890 /* error warning state */ 922 /* error warning state */
891 priv->can.can_stats.error_warning++; 923 priv->can.can_stats.error_warning++;
@@ -916,6 +948,13 @@ static int c_can_handle_state_change(struct net_device *dev,
916 ERR_CNT_RP_SHIFT; 948 ERR_CNT_RP_SHIFT;
917 949
918 switch (error_type) { 950 switch (error_type) {
951 case C_CAN_NO_ERROR:
952 /* error warning state */
953 cf->can_id |= CAN_ERR_CRTL;
954 cf->data[1] = CAN_ERR_CRTL_ACTIVE;
955 cf->data[6] = bec.txerr;
956 cf->data[7] = bec.rxerr;
957 break;
919 case C_CAN_ERROR_WARNING: 958 case C_CAN_ERROR_WARNING:
920 /* error warning state */ 959 /* error warning state */
921 cf->can_id |= CAN_ERR_CRTL; 960 cf->can_id |= CAN_ERR_CRTL;
@@ -1029,10 +1068,16 @@ static int c_can_poll(struct napi_struct *napi, int quota)
1029 u16 curr, last = priv->last_status; 1068 u16 curr, last = priv->last_status;
1030 int work_done = 0; 1069 int work_done = 0;
1031 1070
1032 priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); 1071 /* Only read the status register if a status interrupt was pending */
1033 /* Ack status on C_CAN. D_CAN is self clearing */ 1072 if (atomic_xchg(&priv->sie_pending, 0)) {
1034 if (priv->type != BOSCH_D_CAN) 1073 priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
1035 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); 1074 /* Ack status on C_CAN. D_CAN is self clearing */
1075 if (priv->type != BOSCH_D_CAN)
1076 priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
1077 } else {
1078 /* no change detected ... */
1079 curr = last;
1080 }
1036 1081
1037 /* handle state changes */ 1082 /* handle state changes */
1038 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { 1083 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
@@ -1054,11 +1099,17 @@ static int c_can_poll(struct napi_struct *napi, int quota)
1054 /* handle bus recovery events */ 1099 /* handle bus recovery events */
1055 if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) { 1100 if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
1056 netdev_dbg(dev, "left bus off state\n"); 1101 netdev_dbg(dev, "left bus off state\n");
1057 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1102 work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
1058 } 1103 }
1104
1059 if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) { 1105 if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
1060 netdev_dbg(dev, "left error passive state\n"); 1106 netdev_dbg(dev, "left error passive state\n");
1061 priv->can.state = CAN_STATE_ERROR_ACTIVE; 1107 work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
1108 }
1109
1110 if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) {
1111 netdev_dbg(dev, "left error warning state\n");
1112 work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR);
1062 } 1113 }
1063 1114
1064 /* handle lec errors on the bus */ 1115 /* handle lec errors on the bus */
@@ -1083,10 +1134,16 @@ static irqreturn_t c_can_isr(int irq, void *dev_id)
1083{ 1134{
1084 struct net_device *dev = (struct net_device *)dev_id; 1135 struct net_device *dev = (struct net_device *)dev_id;
1085 struct c_can_priv *priv = netdev_priv(dev); 1136 struct c_can_priv *priv = netdev_priv(dev);
1137 int reg_int;
1086 1138
1087 if (!priv->read_reg(priv, C_CAN_INT_REG)) 1139 reg_int = priv->read_reg(priv, C_CAN_INT_REG);
1140 if (!reg_int)
1088 return IRQ_NONE; 1141 return IRQ_NONE;
1089 1142
1143 /* save for later use */
1144 if (reg_int & INT_STS_PENDING)
1145 atomic_set(&priv->sie_pending, 1);
1146
1090 /* disable all interrupts and schedule the NAPI */ 1147 /* disable all interrupts and schedule the NAPI */
1091 c_can_irq_control(priv, false); 1148 c_can_irq_control(priv, false);
1092 napi_schedule(&priv->napi); 1149 napi_schedule(&priv->napi);
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 8acdc7fa4792..d5567a7c1c6d 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -198,6 +198,7 @@ struct c_can_priv {
198 struct net_device *dev; 198 struct net_device *dev;
199 struct device *device; 199 struct device *device;
200 atomic_t tx_active; 200 atomic_t tx_active;
201 atomic_t sie_pending;
201 unsigned long tx_dir; 202 unsigned long tx_dir;
202 int last_status; 203 int last_status;
203 u16 (*read_reg) (const struct c_can_priv *priv, enum reg index); 204 u16 (*read_reg) (const struct c_can_priv *priv, enum reg index);
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index ac86be52b461..1c88c361938c 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -848,6 +848,7 @@ void of_can_transceiver(struct net_device *dev)
848 return; 848 return;
849 849
850 ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max); 850 ret = of_property_read_u32(dn, "max-bitrate", &priv->bitrate_max);
851 of_node_put(dn);
851 if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max)) 852 if ((ret && ret != -EINVAL) || (!ret && !priv->bitrate_max))
852 netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n"); 853 netdev_warn(dev, "Invalid value for transceiver max bitrate. Ignoring bitrate limit.\n");
853} 854}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index dc5695dffc2e..57f9a2f51085 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -677,6 +677,7 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
677 struct can_frame *cf; 677 struct can_frame *cf;
678 bool rx_errors = false, tx_errors = false; 678 bool rx_errors = false, tx_errors = false;
679 u32 timestamp; 679 u32 timestamp;
680 int err;
680 681
681 timestamp = priv->read(&regs->timer) << 16; 682 timestamp = priv->read(&regs->timer) << 16;
682 683
@@ -725,7 +726,9 @@ static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr)
725 if (tx_errors) 726 if (tx_errors)
726 dev->stats.tx_errors++; 727 dev->stats.tx_errors++;
727 728
728 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 729 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
730 if (err)
731 dev->stats.rx_fifo_errors++;
729} 732}
730 733
731static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) 734static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
@@ -738,6 +741,7 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
738 int flt; 741 int flt;
739 struct can_berr_counter bec; 742 struct can_berr_counter bec;
740 u32 timestamp; 743 u32 timestamp;
744 int err;
741 745
742 timestamp = priv->read(&regs->timer) << 16; 746 timestamp = priv->read(&regs->timer) << 16;
743 747
@@ -769,7 +773,9 @@ static void flexcan_irq_state(struct net_device *dev, u32 reg_esr)
769 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 773 if (unlikely(new_state == CAN_STATE_BUS_OFF))
770 can_bus_off(dev); 774 can_bus_off(dev);
771 775
772 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp); 776 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
777 if (err)
778 dev->stats.rx_fifo_errors++;
773} 779}
774 780
775static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) 781static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload)
@@ -1188,6 +1194,7 @@ static int flexcan_chip_start(struct net_device *dev)
1188 reg_mecr = priv->read(&regs->mecr); 1194 reg_mecr = priv->read(&regs->mecr);
1189 reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; 1195 reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
1190 priv->write(reg_mecr, &regs->mecr); 1196 priv->write(reg_mecr, &regs->mecr);
1197 reg_mecr |= FLEXCAN_MECR_ECCDIS;
1191 reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | 1198 reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
1192 FLEXCAN_MECR_FANCEI_MSK); 1199 FLEXCAN_MECR_FANCEI_MSK);
1193 priv->write(reg_mecr, &regs->mecr); 1200 priv->write(reg_mecr, &regs->mecr);
diff --git a/drivers/net/can/rx-offload.c b/drivers/net/can/rx-offload.c
index e6a668ee7730..84cae167e42f 100644
--- a/drivers/net/can/rx-offload.c
+++ b/drivers/net/can/rx-offload.c
@@ -107,37 +107,95 @@ static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
107 return cb_b->timestamp - cb_a->timestamp; 107 return cb_b->timestamp - cb_a->timestamp;
108} 108}
109 109
110static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n) 110/**
111 * can_rx_offload_offload_one() - Read one CAN frame from HW
112 * @offload: pointer to rx_offload context
113 * @n: number of mailbox to read
114 *
115 * The task of this function is to read a CAN frame from mailbox @n
116 * from the device and return the mailbox's content as a struct
117 * sk_buff.
118 *
119 * If the struct can_rx_offload::skb_queue exceeds the maximal queue
120 * length (struct can_rx_offload::skb_queue_len_max) or no skb can be
121 * allocated, the mailbox contents is discarded by reading it into an
122 * overflow buffer. This way the mailbox is marked as free by the
123 * driver.
124 *
125 * Return: A pointer to skb containing the CAN frame on success.
126 *
127 * NULL if the mailbox @n is empty.
128 *
129 * ERR_PTR() in case of an error
130 */
131static struct sk_buff *
132can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
111{ 133{
112 struct sk_buff *skb = NULL; 134 struct sk_buff *skb = NULL, *skb_error = NULL;
113 struct can_rx_offload_cb *cb; 135 struct can_rx_offload_cb *cb;
114 struct can_frame *cf; 136 struct can_frame *cf;
115 int ret; 137 int ret;
116 138
117 /* If queue is full or skb not available, read to discard mailbox */ 139 if (likely(skb_queue_len(&offload->skb_queue) <
118 if (likely(skb_queue_len(&offload->skb_queue) <= 140 offload->skb_queue_len_max)) {
119 offload->skb_queue_len_max))
120 skb = alloc_can_skb(offload->dev, &cf); 141 skb = alloc_can_skb(offload->dev, &cf);
142 if (unlikely(!skb))
143 skb_error = ERR_PTR(-ENOMEM); /* skb alloc failed */
144 } else {
145 skb_error = ERR_PTR(-ENOBUFS); /* skb_queue is full */
146 }
121 147
122 if (!skb) { 148 /* If queue is full or skb not available, drop by reading into
149 * overflow buffer.
150 */
151 if (unlikely(skb_error)) {
123 struct can_frame cf_overflow; 152 struct can_frame cf_overflow;
124 u32 timestamp; 153 u32 timestamp;
125 154
126 ret = offload->mailbox_read(offload, &cf_overflow, 155 ret = offload->mailbox_read(offload, &cf_overflow,
127 &timestamp, n); 156 &timestamp, n);
128 if (ret)
129 offload->dev->stats.rx_dropped++;
130 157
131 return NULL; 158 /* Mailbox was empty. */
159 if (unlikely(!ret))
160 return NULL;
161
162 /* Mailbox has been read and we're dropping it or
163 * there was a problem reading the mailbox.
164 *
165 * Increment error counters in any case.
166 */
167 offload->dev->stats.rx_dropped++;
168 offload->dev->stats.rx_fifo_errors++;
169
170 /* There was a problem reading the mailbox, propagate
171 * error value.
172 */
173 if (unlikely(ret < 0))
174 return ERR_PTR(ret);
175
176 return skb_error;
132 } 177 }
133 178
134 cb = can_rx_offload_get_cb(skb); 179 cb = can_rx_offload_get_cb(skb);
135 ret = offload->mailbox_read(offload, cf, &cb->timestamp, n); 180 ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
136 if (!ret) { 181
182 /* Mailbox was empty. */
183 if (unlikely(!ret)) {
137 kfree_skb(skb); 184 kfree_skb(skb);
138 return NULL; 185 return NULL;
139 } 186 }
140 187
188 /* There was a problem reading the mailbox, propagate error value. */
189 if (unlikely(ret < 0)) {
190 kfree_skb(skb);
191
192 offload->dev->stats.rx_dropped++;
193 offload->dev->stats.rx_fifo_errors++;
194
195 return ERR_PTR(ret);
196 }
197
198 /* Mailbox was read. */
141 return skb; 199 return skb;
142} 200}
143 201
@@ -157,8 +215,8 @@ int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pen
157 continue; 215 continue;
158 216
159 skb = can_rx_offload_offload_one(offload, i); 217 skb = can_rx_offload_offload_one(offload, i);
160 if (!skb) 218 if (IS_ERR_OR_NULL(skb))
161 break; 219 continue;
162 220
163 __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare); 221 __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
164 } 222 }
@@ -188,7 +246,13 @@ int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
188 struct sk_buff *skb; 246 struct sk_buff *skb;
189 int received = 0; 247 int received = 0;
190 248
191 while ((skb = can_rx_offload_offload_one(offload, 0))) { 249 while (1) {
250 skb = can_rx_offload_offload_one(offload, 0);
251 if (IS_ERR(skb))
252 continue;
253 if (!skb)
254 break;
255
192 skb_queue_tail(&offload->skb_queue, skb); 256 skb_queue_tail(&offload->skb_queue, skb);
193 received++; 257 received++;
194 } 258 }
@@ -207,8 +271,10 @@ int can_rx_offload_queue_sorted(struct can_rx_offload *offload,
207 unsigned long flags; 271 unsigned long flags;
208 272
209 if (skb_queue_len(&offload->skb_queue) > 273 if (skb_queue_len(&offload->skb_queue) >
210 offload->skb_queue_len_max) 274 offload->skb_queue_len_max) {
211 return -ENOMEM; 275 kfree_skb(skb);
276 return -ENOBUFS;
277 }
212 278
213 cb = can_rx_offload_get_cb(skb); 279 cb = can_rx_offload_get_cb(skb);
214 cb->timestamp = timestamp; 280 cb->timestamp = timestamp;
@@ -250,8 +316,10 @@ int can_rx_offload_queue_tail(struct can_rx_offload *offload,
250 struct sk_buff *skb) 316 struct sk_buff *skb)
251{ 317{
252 if (skb_queue_len(&offload->skb_queue) > 318 if (skb_queue_len(&offload->skb_queue) >
253 offload->skb_queue_len_max) 319 offload->skb_queue_len_max) {
254 return -ENOMEM; 320 kfree_skb(skb);
321 return -ENOBUFS;
322 }
255 323
256 skb_queue_tail(&offload->skb_queue, skb); 324 skb_queue_tail(&offload->skb_queue, skb);
257 can_rx_offload_schedule(offload); 325 can_rx_offload_schedule(offload);
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index bee9f7b8dad6..bb20a9b75cc6 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -717,6 +717,7 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
717 if (priv->after_suspend) { 717 if (priv->after_suspend) {
718 mcp251x_hw_reset(spi); 718 mcp251x_hw_reset(spi);
719 mcp251x_setup(net, spi); 719 mcp251x_setup(net, spi);
720 priv->force_quit = 0;
720 if (priv->after_suspend & AFTER_SUSPEND_RESTART) { 721 if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
721 mcp251x_set_normal_mode(spi); 722 mcp251x_set_normal_mode(spi);
722 } else if (priv->after_suspend & AFTER_SUSPEND_UP) { 723 } else if (priv->after_suspend & AFTER_SUSPEND_UP) {
@@ -728,7 +729,6 @@ static void mcp251x_restart_work_handler(struct work_struct *ws)
728 mcp251x_hw_sleep(spi); 729 mcp251x_hw_sleep(spi);
729 } 730 }
730 priv->after_suspend = 0; 731 priv->after_suspend = 0;
731 priv->force_quit = 0;
732 } 732 }
733 733
734 if (priv->restart_tx) { 734 if (priv->restart_tx) {
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index f8b19eef5d26..31ad364a89bb 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -73,6 +73,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
73 */ 73 */
74#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX) 74#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
75#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1) 75#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
76#define HECC_RX_LAST_MBOX (HECC_MAX_TX_MBOX)
76 77
77/* TI HECC module registers */ 78/* TI HECC module registers */
78#define HECC_CANME 0x0 /* Mailbox enable */ 79#define HECC_CANME 0x0 /* Mailbox enable */
@@ -82,7 +83,7 @@ MODULE_VERSION(HECC_MODULE_VERSION);
82#define HECC_CANTA 0x10 /* Transmission acknowledge */ 83#define HECC_CANTA 0x10 /* Transmission acknowledge */
83#define HECC_CANAA 0x14 /* Abort acknowledge */ 84#define HECC_CANAA 0x14 /* Abort acknowledge */
84#define HECC_CANRMP 0x18 /* Receive message pending */ 85#define HECC_CANRMP 0x18 /* Receive message pending */
85#define HECC_CANRML 0x1C /* Remote message lost */ 86#define HECC_CANRML 0x1C /* Receive message lost */
86#define HECC_CANRFP 0x20 /* Remote frame pending */ 87#define HECC_CANRFP 0x20 /* Remote frame pending */
87#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */ 88#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */
88#define HECC_CANMC 0x28 /* Master control */ 89#define HECC_CANMC 0x28 /* Master control */
@@ -149,6 +150,8 @@ MODULE_VERSION(HECC_MODULE_VERSION);
149#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\ 150#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\
150 HECC_CANES_CRCE | HECC_CANES_SE |\ 151 HECC_CANES_CRCE | HECC_CANES_SE |\
151 HECC_CANES_ACKE) 152 HECC_CANES_ACKE)
153#define HECC_CANES_FLAGS (HECC_BUS_ERROR | HECC_CANES_BO |\
154 HECC_CANES_EP | HECC_CANES_EW)
152 155
153#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */ 156#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */
154 157
@@ -382,8 +385,18 @@ static void ti_hecc_start(struct net_device *ndev)
382 hecc_set_bit(priv, HECC_CANMIM, mbx_mask); 385 hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
383 } 386 }
384 387
385 /* Prevent message over-write & Enable interrupts */ 388 /* Enable tx interrupts */
386 hecc_write(priv, HECC_CANOPC, HECC_SET_REG); 389 hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1);
390
391 /* Prevent message over-write to create a rx fifo, but not for
392 * the lowest priority mailbox, since that allows detecting
393 * overflows instead of the hardware silently dropping the
394 * messages.
395 */
396 mbx_mask = ~BIT(HECC_RX_LAST_MBOX);
397 hecc_write(priv, HECC_CANOPC, mbx_mask);
398
399 /* Enable interrupts */
387 if (priv->use_hecc1int) { 400 if (priv->use_hecc1int) {
388 hecc_write(priv, HECC_CANMIL, HECC_SET_REG); 401 hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
389 hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK | 402 hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
@@ -400,6 +413,9 @@ static void ti_hecc_stop(struct net_device *ndev)
400{ 413{
401 struct ti_hecc_priv *priv = netdev_priv(ndev); 414 struct ti_hecc_priv *priv = netdev_priv(ndev);
402 415
416 /* Disable the CPK; stop sending, erroring and acking */
417 hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
418
403 /* Disable interrupts and disable mailboxes */ 419 /* Disable interrupts and disable mailboxes */
404 hecc_write(priv, HECC_CANGIM, 0); 420 hecc_write(priv, HECC_CANGIM, 0);
405 hecc_write(priv, HECC_CANMIM, 0); 421 hecc_write(priv, HECC_CANMIM, 0);
@@ -508,8 +524,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
508 hecc_set_bit(priv, HECC_CANME, mbx_mask); 524 hecc_set_bit(priv, HECC_CANME, mbx_mask);
509 spin_unlock_irqrestore(&priv->mbx_lock, flags); 525 spin_unlock_irqrestore(&priv->mbx_lock, flags);
510 526
511 hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
512 hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
513 hecc_write(priv, HECC_CANTRS, mbx_mask); 527 hecc_write(priv, HECC_CANTRS, mbx_mask);
514 528
515 return NETDEV_TX_OK; 529 return NETDEV_TX_OK;
@@ -526,8 +540,10 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
526 u32 *timestamp, unsigned int mbxno) 540 u32 *timestamp, unsigned int mbxno)
527{ 541{
528 struct ti_hecc_priv *priv = rx_offload_to_priv(offload); 542 struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
529 u32 data; 543 u32 data, mbx_mask;
544 int ret = 1;
530 545
546 mbx_mask = BIT(mbxno);
531 data = hecc_read_mbx(priv, mbxno, HECC_CANMID); 547 data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
532 if (data & HECC_CANMID_IDE) 548 if (data & HECC_CANMID_IDE)
533 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; 549 cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
@@ -548,7 +564,25 @@ static unsigned int ti_hecc_mailbox_read(struct can_rx_offload *offload,
548 564
549 *timestamp = hecc_read_stamp(priv, mbxno); 565 *timestamp = hecc_read_stamp(priv, mbxno);
550 566
551 return 1; 567 /* Check for FIFO overrun.
568 *
569 * All but the last RX mailbox have activated overwrite
570 * protection. So skip check for overrun, if we're not
571 * handling the last RX mailbox.
572 *
573 * As the overwrite protection for the last RX mailbox is
574 * disabled, the CAN core might update while we're reading
575 * it. This means the skb might be inconsistent.
576 *
577 * Return an error to let rx-offload discard this CAN frame.
578 */
579 if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
580 hecc_read(priv, HECC_CANRML) & mbx_mask))
581 ret = -ENOBUFS;
582
583 hecc_write(priv, HECC_CANRMP, mbx_mask);
584
585 return ret;
552} 586}
553 587
554static int ti_hecc_error(struct net_device *ndev, int int_status, 588static int ti_hecc_error(struct net_device *ndev, int int_status,
@@ -558,92 +592,73 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
558 struct can_frame *cf; 592 struct can_frame *cf;
559 struct sk_buff *skb; 593 struct sk_buff *skb;
560 u32 timestamp; 594 u32 timestamp;
595 int err;
561 596
562 /* propagate the error condition to the can stack */ 597 if (err_status & HECC_BUS_ERROR) {
563 skb = alloc_can_err_skb(ndev, &cf); 598 /* propagate the error condition to the can stack */
564 if (!skb) { 599 skb = alloc_can_err_skb(ndev, &cf);
565 if (printk_ratelimit()) 600 if (!skb) {
566 netdev_err(priv->ndev, 601 if (net_ratelimit())
567 "%s: alloc_can_err_skb() failed\n", 602 netdev_err(priv->ndev,
568 __func__); 603 "%s: alloc_can_err_skb() failed\n",
569 return -ENOMEM; 604 __func__);
570 } 605 return -ENOMEM;
571
572 if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
573 if ((int_status & HECC_CANGIF_BOIF) == 0) {
574 priv->can.state = CAN_STATE_ERROR_WARNING;
575 ++priv->can.can_stats.error_warning;
576 cf->can_id |= CAN_ERR_CRTL;
577 if (hecc_read(priv, HECC_CANTEC) > 96)
578 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
579 if (hecc_read(priv, HECC_CANREC) > 96)
580 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
581 }
582 hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
583 netdev_dbg(priv->ndev, "Error Warning interrupt\n");
584 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
585 }
586
587 if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
588 if ((int_status & HECC_CANGIF_BOIF) == 0) {
589 priv->can.state = CAN_STATE_ERROR_PASSIVE;
590 ++priv->can.can_stats.error_passive;
591 cf->can_id |= CAN_ERR_CRTL;
592 if (hecc_read(priv, HECC_CANTEC) > 127)
593 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
594 if (hecc_read(priv, HECC_CANREC) > 127)
595 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
596 } 606 }
597 hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
598 netdev_dbg(priv->ndev, "Error passive interrupt\n");
599 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
600 }
601
602 /* Need to check busoff condition in error status register too to
603 * ensure warning interrupts don't hog the system
604 */
605 if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
606 priv->can.state = CAN_STATE_BUS_OFF;
607 cf->can_id |= CAN_ERR_BUSOFF;
608 hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
609 hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
610 /* Disable all interrupts in bus-off to avoid int hog */
611 hecc_write(priv, HECC_CANGIM, 0);
612 ++priv->can.can_stats.bus_off;
613 can_bus_off(ndev);
614 }
615 607
616 if (err_status & HECC_BUS_ERROR) {
617 ++priv->can.can_stats.bus_error; 608 ++priv->can.can_stats.bus_error;
618 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; 609 cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
619 if (err_status & HECC_CANES_FE) { 610 if (err_status & HECC_CANES_FE)
620 hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
621 cf->data[2] |= CAN_ERR_PROT_FORM; 611 cf->data[2] |= CAN_ERR_PROT_FORM;
622 } 612 if (err_status & HECC_CANES_BE)
623 if (err_status & HECC_CANES_BE) {
624 hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
625 cf->data[2] |= CAN_ERR_PROT_BIT; 613 cf->data[2] |= CAN_ERR_PROT_BIT;
626 } 614 if (err_status & HECC_CANES_SE)
627 if (err_status & HECC_CANES_SE) {
628 hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
629 cf->data[2] |= CAN_ERR_PROT_STUFF; 615 cf->data[2] |= CAN_ERR_PROT_STUFF;
630 } 616 if (err_status & HECC_CANES_CRCE)
631 if (err_status & HECC_CANES_CRCE) {
632 hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
633 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; 617 cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
634 } 618 if (err_status & HECC_CANES_ACKE)
635 if (err_status & HECC_CANES_ACKE) {
636 hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
637 cf->data[3] = CAN_ERR_PROT_LOC_ACK; 619 cf->data[3] = CAN_ERR_PROT_LOC_ACK;
638 } 620
621 timestamp = hecc_read(priv, HECC_CANLNT);
622 err = can_rx_offload_queue_sorted(&priv->offload, skb,
623 timestamp);
624 if (err)
625 ndev->stats.rx_fifo_errors++;
639 } 626 }
640 627
641 timestamp = hecc_read(priv, HECC_CANLNT); 628 hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS);
642 can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
643 629
644 return 0; 630 return 0;
645} 631}
646 632
633static void ti_hecc_change_state(struct net_device *ndev,
634 enum can_state rx_state,
635 enum can_state tx_state)
636{
637 struct ti_hecc_priv *priv = netdev_priv(ndev);
638 struct can_frame *cf;
639 struct sk_buff *skb;
640 u32 timestamp;
641 int err;
642
643 skb = alloc_can_err_skb(priv->ndev, &cf);
644 if (unlikely(!skb)) {
645 priv->can.state = max(tx_state, rx_state);
646 return;
647 }
648
649 can_change_state(priv->ndev, cf, tx_state, rx_state);
650
651 if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
652 cf->data[6] = hecc_read(priv, HECC_CANTEC);
653 cf->data[7] = hecc_read(priv, HECC_CANREC);
654 }
655
656 timestamp = hecc_read(priv, HECC_CANLNT);
657 err = can_rx_offload_queue_sorted(&priv->offload, skb, timestamp);
658 if (err)
659 ndev->stats.rx_fifo_errors++;
660}
661
647static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id) 662static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
648{ 663{
649 struct net_device *ndev = (struct net_device *)dev_id; 664 struct net_device *ndev = (struct net_device *)dev_id;
@@ -651,6 +666,7 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
651 struct net_device_stats *stats = &ndev->stats; 666 struct net_device_stats *stats = &ndev->stats;
652 u32 mbxno, mbx_mask, int_status, err_status, stamp; 667 u32 mbxno, mbx_mask, int_status, err_status, stamp;
653 unsigned long flags, rx_pending; 668 unsigned long flags, rx_pending;
669 u32 handled = 0;
654 670
655 int_status = hecc_read(priv, 671 int_status = hecc_read(priv,
656 priv->use_hecc1int ? 672 priv->use_hecc1int ?
@@ -660,17 +676,66 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
660 return IRQ_NONE; 676 return IRQ_NONE;
661 677
662 err_status = hecc_read(priv, HECC_CANES); 678 err_status = hecc_read(priv, HECC_CANES);
663 if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO | 679 if (unlikely(err_status & HECC_CANES_FLAGS))
664 HECC_CANES_EP | HECC_CANES_EW))
665 ti_hecc_error(ndev, int_status, err_status); 680 ti_hecc_error(ndev, int_status, err_status);
666 681
682 if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) {
683 enum can_state rx_state, tx_state;
684 u32 rec = hecc_read(priv, HECC_CANREC);
685 u32 tec = hecc_read(priv, HECC_CANTEC);
686
687 if (int_status & HECC_CANGIF_WLIF) {
688 handled |= HECC_CANGIF_WLIF;
689 rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0;
690 tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0;
691 netdev_dbg(priv->ndev, "Error Warning interrupt\n");
692 ti_hecc_change_state(ndev, rx_state, tx_state);
693 }
694
695 if (int_status & HECC_CANGIF_EPIF) {
696 handled |= HECC_CANGIF_EPIF;
697 rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0;
698 tx_state = rec <= tec ? CAN_STATE_ERROR_PASSIVE : 0;
699 netdev_dbg(priv->ndev, "Error passive interrupt\n");
700 ti_hecc_change_state(ndev, rx_state, tx_state);
701 }
702
703 if (int_status & HECC_CANGIF_BOIF) {
704 handled |= HECC_CANGIF_BOIF;
705 rx_state = CAN_STATE_BUS_OFF;
706 tx_state = CAN_STATE_BUS_OFF;
707 netdev_dbg(priv->ndev, "Bus off interrupt\n");
708
709 /* Disable all interrupts */
710 hecc_write(priv, HECC_CANGIM, 0);
711 can_bus_off(ndev);
712 ti_hecc_change_state(ndev, rx_state, tx_state);
713 }
714 } else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
715 enum can_state new_state, tx_state, rx_state;
716 u32 rec = hecc_read(priv, HECC_CANREC);
717 u32 tec = hecc_read(priv, HECC_CANTEC);
718
719 if (rec >= 128 || tec >= 128)
720 new_state = CAN_STATE_ERROR_PASSIVE;
721 else if (rec >= 96 || tec >= 96)
722 new_state = CAN_STATE_ERROR_WARNING;
723 else
724 new_state = CAN_STATE_ERROR_ACTIVE;
725
726 if (new_state < priv->can.state) {
727 rx_state = rec >= tec ? new_state : 0;
728 tx_state = rec <= tec ? new_state : 0;
729 ti_hecc_change_state(ndev, rx_state, tx_state);
730 }
731 }
732
667 if (int_status & HECC_CANGIF_GMIF) { 733 if (int_status & HECC_CANGIF_GMIF) {
668 while (priv->tx_tail - priv->tx_head > 0) { 734 while (priv->tx_tail - priv->tx_head > 0) {
669 mbxno = get_tx_tail_mb(priv); 735 mbxno = get_tx_tail_mb(priv);
670 mbx_mask = BIT(mbxno); 736 mbx_mask = BIT(mbxno);
671 if (!(mbx_mask & hecc_read(priv, HECC_CANTA))) 737 if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
672 break; 738 break;
673 hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
674 hecc_write(priv, HECC_CANTA, mbx_mask); 739 hecc_write(priv, HECC_CANTA, mbx_mask);
675 spin_lock_irqsave(&priv->mbx_lock, flags); 740 spin_lock_irqsave(&priv->mbx_lock, flags);
676 hecc_clear_bit(priv, HECC_CANME, mbx_mask); 741 hecc_clear_bit(priv, HECC_CANME, mbx_mask);
@@ -695,16 +760,15 @@ static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
695 while ((rx_pending = hecc_read(priv, HECC_CANRMP))) { 760 while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
696 can_rx_offload_irq_offload_timestamp(&priv->offload, 761 can_rx_offload_irq_offload_timestamp(&priv->offload,
697 rx_pending); 762 rx_pending);
698 hecc_write(priv, HECC_CANRMP, rx_pending);
699 } 763 }
700 } 764 }
701 765
702 /* clear all interrupt conditions - read back to avoid spurious ints */ 766 /* clear all interrupt conditions - read back to avoid spurious ints */
703 if (priv->use_hecc1int) { 767 if (priv->use_hecc1int) {
704 hecc_write(priv, HECC_CANGIF1, HECC_SET_REG); 768 hecc_write(priv, HECC_CANGIF1, handled);
705 int_status = hecc_read(priv, HECC_CANGIF1); 769 int_status = hecc_read(priv, HECC_CANGIF1);
706 } else { 770 } else {
707 hecc_write(priv, HECC_CANGIF0, HECC_SET_REG); 771 hecc_write(priv, HECC_CANGIF0, handled);
708 int_status = hecc_read(priv, HECC_CANGIF0); 772 int_status = hecc_read(priv, HECC_CANGIF0);
709 } 773 }
710 774
@@ -877,7 +941,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
877 941
878 priv->offload.mailbox_read = ti_hecc_mailbox_read; 942 priv->offload.mailbox_read = ti_hecc_mailbox_read;
879 priv->offload.mb_first = HECC_RX_FIRST_MBOX; 943 priv->offload.mb_first = HECC_RX_FIRST_MBOX;
880 priv->offload.mb_last = HECC_MAX_TX_MBOX; 944 priv->offload.mb_last = HECC_RX_LAST_MBOX;
881 err = can_rx_offload_add_timestamp(ndev, &priv->offload); 945 err = can_rx_offload_add_timestamp(ndev, &priv->offload);
882 if (err) { 946 if (err) {
883 dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n"); 947 dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index bd6eb9967630..2f74f6704c12 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -623,6 +623,7 @@ static int gs_can_open(struct net_device *netdev)
623 rc); 623 rc);
624 624
625 usb_unanchor_urb(urb); 625 usb_unanchor_urb(urb);
626 usb_free_urb(urb);
626 break; 627 break;
627 } 628 }
628 629
diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c
index 19a702ac49e4..21faa2ec4632 100644
--- a/drivers/net/can/usb/mcba_usb.c
+++ b/drivers/net/can/usb/mcba_usb.c
@@ -876,9 +876,8 @@ static void mcba_usb_disconnect(struct usb_interface *intf)
876 netdev_info(priv->netdev, "device disconnected\n"); 876 netdev_info(priv->netdev, "device disconnected\n");
877 877
878 unregister_candev(priv->netdev); 878 unregister_candev(priv->netdev);
879 free_candev(priv->netdev);
880
881 mcba_urb_unlink(priv); 879 mcba_urb_unlink(priv);
880 free_candev(priv->netdev);
882} 881}
883 882
884static struct usb_driver mcba_usb_driver = { 883static struct usb_driver mcba_usb_driver = {
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 617da295b6c1..d2539c95adb6 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -100,7 +100,7 @@ struct pcan_usb_msg_context {
100 u8 *end; 100 u8 *end;
101 u8 rec_cnt; 101 u8 rec_cnt;
102 u8 rec_idx; 102 u8 rec_idx;
103 u8 rec_data_idx; 103 u8 rec_ts_idx;
104 struct net_device *netdev; 104 struct net_device *netdev;
105 struct pcan_usb *pdev; 105 struct pcan_usb *pdev;
106}; 106};
@@ -436,8 +436,8 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
436 } 436 }
437 if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) { 437 if ((n & PCAN_USB_ERROR_BUS_LIGHT) == 0) {
438 /* no error (back to active state) */ 438 /* no error (back to active state) */
439 mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; 439 new_state = CAN_STATE_ERROR_ACTIVE;
440 return 0; 440 break;
441 } 441 }
442 break; 442 break;
443 443
@@ -460,9 +460,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
460 } 460 }
461 461
462 if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) { 462 if ((n & PCAN_USB_ERROR_BUS_HEAVY) == 0) {
463 /* no error (back to active state) */ 463 /* no error (back to warning state) */
464 mc->pdev->dev.can.state = CAN_STATE_ERROR_ACTIVE; 464 new_state = CAN_STATE_ERROR_WARNING;
465 return 0; 465 break;
466 } 466 }
467 break; 467 break;
468 468
@@ -501,6 +501,11 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
501 mc->pdev->dev.can.can_stats.error_warning++; 501 mc->pdev->dev.can.can_stats.error_warning++;
502 break; 502 break;
503 503
504 case CAN_STATE_ERROR_ACTIVE:
505 cf->can_id |= CAN_ERR_CRTL;
506 cf->data[1] = CAN_ERR_CRTL_ACTIVE;
507 break;
508
504 default: 509 default:
505 /* CAN_STATE_MAX (trick to handle other errors) */ 510 /* CAN_STATE_MAX (trick to handle other errors) */
506 cf->can_id |= CAN_ERR_CRTL; 511 cf->can_id |= CAN_ERR_CRTL;
@@ -547,10 +552,15 @@ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc,
547 mc->ptr += PCAN_USB_CMD_ARGS; 552 mc->ptr += PCAN_USB_CMD_ARGS;
548 553
549 if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { 554 if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) {
550 int err = pcan_usb_decode_ts(mc, !mc->rec_idx); 555 int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx);
551 556
552 if (err) 557 if (err)
553 return err; 558 return err;
559
560 /* Next packet in the buffer will have a timestamp on a single
561 * byte
562 */
563 mc->rec_ts_idx++;
554 } 564 }
555 565
556 switch (f) { 566 switch (f) {
@@ -632,10 +642,13 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
632 642
633 cf->can_dlc = get_can_dlc(rec_len); 643 cf->can_dlc = get_can_dlc(rec_len);
634 644
635 /* first data packet timestamp is a word */ 645 /* Only first packet timestamp is a word */
636 if (pcan_usb_decode_ts(mc, !mc->rec_data_idx)) 646 if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx))
637 goto decode_failed; 647 goto decode_failed;
638 648
649 /* Next packet in the buffer will have a timestamp on a single byte */
650 mc->rec_ts_idx++;
651
639 /* read data */ 652 /* read data */
640 memset(cf->data, 0x0, sizeof(cf->data)); 653 memset(cf->data, 0x0, sizeof(cf->data));
641 if (status_len & PCAN_USB_STATUSLEN_RTR) { 654 if (status_len & PCAN_USB_STATUSLEN_RTR) {
@@ -688,7 +701,6 @@ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf)
688 /* handle normal can frames here */ 701 /* handle normal can frames here */
689 } else { 702 } else {
690 err = pcan_usb_decode_data(&mc, sl); 703 err = pcan_usb_decode_data(&mc, sl);
691 mc.rec_data_idx++;
692 } 704 }
693 } 705 }
694 706
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 65dce642b86b..0b7766b715fd 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -750,7 +750,7 @@ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter,
750 dev = netdev_priv(netdev); 750 dev = netdev_priv(netdev);
751 751
752 /* allocate a buffer large enough to send commands */ 752 /* allocate a buffer large enough to send commands */
753 dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); 753 dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
754 if (!dev->cmd_buf) { 754 if (!dev->cmd_buf) {
755 err = -ENOMEM; 755 err = -ENOMEM;
756 goto lbl_free_candev; 756 goto lbl_free_candev;
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index d596a2ad7f78..8fa224b28218 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -996,9 +996,8 @@ static void usb_8dev_disconnect(struct usb_interface *intf)
996 netdev_info(priv->netdev, "device disconnected\n"); 996 netdev_info(priv->netdev, "device disconnected\n");
997 997
998 unregister_netdev(priv->netdev); 998 unregister_netdev(priv->netdev);
999 free_candev(priv->netdev);
1000
1001 unlink_all_urbs(priv); 999 unlink_all_urbs(priv);
1000 free_candev(priv->netdev);
1002 } 1001 }
1003 1002
1004} 1003}
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 911b34316c9d..7c482b2d78d2 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -1599,7 +1599,6 @@ static const struct xcan_devtype_data xcan_zynq_data = {
1599 1599
1600static const struct xcan_devtype_data xcan_axi_data = { 1600static const struct xcan_devtype_data xcan_axi_data = {
1601 .cantype = XAXI_CAN, 1601 .cantype = XAXI_CAN,
1602 .flags = XCAN_FLAG_TXFEMP,
1603 .bittiming_const = &xcan_bittiming_const, 1602 .bittiming_const = &xcan_bittiming_const,
1604 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, 1603 .btr_ts2_shift = XCAN_BTR_TS2_SHIFT,
1605 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, 1604 .btr_sjw_shift = XCAN_BTR_SJW_SHIFT,
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index d44651ad520c..69fc13046ac7 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -1215,10 +1215,10 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
1215 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev); 1215 struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
1216 1216
1217 priv->wol_ports_mask = 0; 1217 priv->wol_ports_mask = 0;
1218 /* Disable interrupts */
1219 bcm_sf2_intr_disable(priv);
1218 dsa_unregister_switch(priv->dev->ds); 1220 dsa_unregister_switch(priv->dev->ds);
1219 bcm_sf2_cfp_exit(priv->dev->ds); 1221 bcm_sf2_cfp_exit(priv->dev->ds);
1220 /* Disable all ports and interrupts */
1221 bcm_sf2_sw_suspend(priv->dev->ds);
1222 bcm_sf2_mdio_unregister(priv); 1222 bcm_sf2_mdio_unregister(priv);
1223 1223
1224 return 0; 1224 return 0;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0f138280315a..1de51811fcb4 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1996,8 +1996,6 @@ static void reset_umac(struct bcmgenet_priv *priv)
1996 1996
1997 /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */ 1997 /* issue soft reset with (rg)mii loopback to ensure a stable rxclk */
1998 bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD); 1998 bcmgenet_umac_writel(priv, CMD_SW_RESET | CMD_LCL_LOOP_EN, UMAC_CMD);
1999 udelay(2);
2000 bcmgenet_umac_writel(priv, 0, UMAC_CMD);
2001} 1999}
2002 2000
2003static void bcmgenet_intr_disable(struct bcmgenet_priv *priv) 2001static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
@@ -2614,8 +2612,10 @@ static void bcmgenet_irq_task(struct work_struct *work)
2614 spin_unlock_irq(&priv->lock); 2612 spin_unlock_irq(&priv->lock);
2615 2613
2616 if (status & UMAC_IRQ_PHY_DET_R && 2614 if (status & UMAC_IRQ_PHY_DET_R &&
2617 priv->dev->phydev->autoneg != AUTONEG_ENABLE) 2615 priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
2618 phy_init_hw(priv->dev->phydev); 2616 phy_init_hw(priv->dev->phydev);
2617 genphy_config_aneg(priv->dev->phydev);
2618 }
2619 2619
2620 /* Link UP/DOWN event */ 2620 /* Link UP/DOWN event */
2621 if (status & UMAC_IRQ_LINK_EVENT) 2621 if (status & UMAC_IRQ_LINK_EVENT)
@@ -2879,12 +2879,6 @@ static int bcmgenet_open(struct net_device *dev)
2879 if (priv->internal_phy) 2879 if (priv->internal_phy)
2880 bcmgenet_power_up(priv, GENET_POWER_PASSIVE); 2880 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
2881 2881
2882 ret = bcmgenet_mii_connect(dev);
2883 if (ret) {
2884 netdev_err(dev, "failed to connect to PHY\n");
2885 goto err_clk_disable;
2886 }
2887
2888 /* take MAC out of reset */ 2882 /* take MAC out of reset */
2889 bcmgenet_umac_reset(priv); 2883 bcmgenet_umac_reset(priv);
2890 2884
@@ -2894,12 +2888,6 @@ static int bcmgenet_open(struct net_device *dev)
2894 reg = bcmgenet_umac_readl(priv, UMAC_CMD); 2888 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
2895 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD); 2889 priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
2896 2890
2897 ret = bcmgenet_mii_config(dev, true);
2898 if (ret) {
2899 netdev_err(dev, "unsupported PHY\n");
2900 goto err_disconnect_phy;
2901 }
2902
2903 bcmgenet_set_hw_addr(priv, dev->dev_addr); 2891 bcmgenet_set_hw_addr(priv, dev->dev_addr);
2904 2892
2905 if (priv->internal_phy) { 2893 if (priv->internal_phy) {
@@ -2915,7 +2903,7 @@ static int bcmgenet_open(struct net_device *dev)
2915 ret = bcmgenet_init_dma(priv); 2903 ret = bcmgenet_init_dma(priv);
2916 if (ret) { 2904 if (ret) {
2917 netdev_err(dev, "failed to initialize DMA\n"); 2905 netdev_err(dev, "failed to initialize DMA\n");
2918 goto err_disconnect_phy; 2906 goto err_clk_disable;
2919 } 2907 }
2920 2908
2921 /* Always enable ring 16 - descriptor ring */ 2909 /* Always enable ring 16 - descriptor ring */
@@ -2938,19 +2926,25 @@ static int bcmgenet_open(struct net_device *dev)
2938 goto err_irq0; 2926 goto err_irq0;
2939 } 2927 }
2940 2928
2929 ret = bcmgenet_mii_probe(dev);
2930 if (ret) {
2931 netdev_err(dev, "failed to connect to PHY\n");
2932 goto err_irq1;
2933 }
2934
2941 bcmgenet_netif_start(dev); 2935 bcmgenet_netif_start(dev);
2942 2936
2943 netif_tx_start_all_queues(dev); 2937 netif_tx_start_all_queues(dev);
2944 2938
2945 return 0; 2939 return 0;
2946 2940
2941err_irq1:
2942 free_irq(priv->irq1, priv);
2947err_irq0: 2943err_irq0:
2948 free_irq(priv->irq0, priv); 2944 free_irq(priv->irq0, priv);
2949err_fini_dma: 2945err_fini_dma:
2950 bcmgenet_dma_teardown(priv); 2946 bcmgenet_dma_teardown(priv);
2951 bcmgenet_fini_dma(priv); 2947 bcmgenet_fini_dma(priv);
2952err_disconnect_phy:
2953 phy_disconnect(dev->phydev);
2954err_clk_disable: 2948err_clk_disable:
2955 if (priv->internal_phy) 2949 if (priv->internal_phy)
2956 bcmgenet_power_down(priv, GENET_POWER_PASSIVE); 2950 bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
@@ -3631,8 +3625,6 @@ static int bcmgenet_resume(struct device *d)
3631 if (priv->internal_phy) 3625 if (priv->internal_phy)
3632 bcmgenet_power_up(priv, GENET_POWER_PASSIVE); 3626 bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
3633 3627
3634 phy_init_hw(dev->phydev);
3635
3636 bcmgenet_umac_reset(priv); 3628 bcmgenet_umac_reset(priv);
3637 3629
3638 init_umac(priv); 3630 init_umac(priv);
@@ -3641,7 +3633,10 @@ static int bcmgenet_resume(struct device *d)
3641 if (priv->wolopts) 3633 if (priv->wolopts)
3642 clk_disable_unprepare(priv->clk_wol); 3634 clk_disable_unprepare(priv->clk_wol);
3643 3635
3636 phy_init_hw(dev->phydev);
3637
3644 /* Speed settings must be restored */ 3638 /* Speed settings must be restored */
3639 genphy_config_aneg(dev->phydev);
3645 bcmgenet_mii_config(priv->dev, false); 3640 bcmgenet_mii_config(priv->dev, false);
3646 3641
3647 bcmgenet_set_hw_addr(priv, dev->dev_addr); 3642 bcmgenet_set_hw_addr(priv, dev->dev_addr);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 7fbf573d8d52..dbc69d8fa05f 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -720,8 +720,8 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
720 720
721/* MDIO routines */ 721/* MDIO routines */
722int bcmgenet_mii_init(struct net_device *dev); 722int bcmgenet_mii_init(struct net_device *dev);
723int bcmgenet_mii_connect(struct net_device *dev);
724int bcmgenet_mii_config(struct net_device *dev, bool init); 723int bcmgenet_mii_config(struct net_device *dev, bool init);
724int bcmgenet_mii_probe(struct net_device *dev);
725void bcmgenet_mii_exit(struct net_device *dev); 725void bcmgenet_mii_exit(struct net_device *dev);
726void bcmgenet_phy_power_set(struct net_device *dev, bool enable); 726void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
727void bcmgenet_mii_setup(struct net_device *dev); 727void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 17bb8d60a157..dbe18cdf6c1b 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -173,46 +173,6 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
173 bcmgenet_fixed_phy_link_update); 173 bcmgenet_fixed_phy_link_update);
174} 174}
175 175
176int bcmgenet_mii_connect(struct net_device *dev)
177{
178 struct bcmgenet_priv *priv = netdev_priv(dev);
179 struct device_node *dn = priv->pdev->dev.of_node;
180 struct phy_device *phydev;
181 u32 phy_flags = 0;
182 int ret;
183
184 /* Communicate the integrated PHY revision */
185 if (priv->internal_phy)
186 phy_flags = priv->gphy_rev;
187
188 /* Initialize link state variables that bcmgenet_mii_setup() uses */
189 priv->old_link = -1;
190 priv->old_speed = -1;
191 priv->old_duplex = -1;
192 priv->old_pause = -1;
193
194 if (dn) {
195 phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
196 phy_flags, priv->phy_interface);
197 if (!phydev) {
198 pr_err("could not attach to PHY\n");
199 return -ENODEV;
200 }
201 } else {
202 phydev = dev->phydev;
203 phydev->dev_flags = phy_flags;
204
205 ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
206 priv->phy_interface);
207 if (ret) {
208 pr_err("could not attach to PHY\n");
209 return -ENODEV;
210 }
211 }
212
213 return 0;
214}
215
216int bcmgenet_mii_config(struct net_device *dev, bool init) 176int bcmgenet_mii_config(struct net_device *dev, bool init)
217{ 177{
218 struct bcmgenet_priv *priv = netdev_priv(dev); 178 struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -221,8 +181,38 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
221 const char *phy_name = NULL; 181 const char *phy_name = NULL;
222 u32 id_mode_dis = 0; 182 u32 id_mode_dis = 0;
223 u32 port_ctrl; 183 u32 port_ctrl;
184 int bmcr = -1;
185 int ret;
224 u32 reg; 186 u32 reg;
225 187
188 /* MAC clocking workaround during reset of umac state machines */
189 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
190 if (reg & CMD_SW_RESET) {
191 /* An MII PHY must be isolated to prevent TXC contention */
192 if (priv->phy_interface == PHY_INTERFACE_MODE_MII) {
193 ret = phy_read(phydev, MII_BMCR);
194 if (ret >= 0) {
195 bmcr = ret;
196 ret = phy_write(phydev, MII_BMCR,
197 bmcr | BMCR_ISOLATE);
198 }
199 if (ret) {
200 netdev_err(dev, "failed to isolate PHY\n");
201 return ret;
202 }
203 }
204 /* Switch MAC clocking to RGMII generated clock */
205 bcmgenet_sys_writel(priv, PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
206 /* Ensure 5 clks with Rx disabled
207 * followed by 5 clks with Reset asserted
208 */
209 udelay(4);
210 reg &= ~(CMD_SW_RESET | CMD_LCL_LOOP_EN);
211 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
212 /* Ensure 5 more clocks before Rx is enabled */
213 udelay(2);
214 }
215
226 priv->ext_phy = !priv->internal_phy && 216 priv->ext_phy = !priv->internal_phy &&
227 (priv->phy_interface != PHY_INTERFACE_MODE_MOCA); 217 (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
228 218
@@ -254,6 +244,9 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
254 phy_set_max_speed(phydev, SPEED_100); 244 phy_set_max_speed(phydev, SPEED_100);
255 bcmgenet_sys_writel(priv, 245 bcmgenet_sys_writel(priv,
256 PORT_MODE_EXT_EPHY, SYS_PORT_CTRL); 246 PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
247 /* Restore the MII PHY after isolation */
248 if (bmcr >= 0)
249 phy_write(phydev, MII_BMCR, bmcr);
257 break; 250 break;
258 251
259 case PHY_INTERFACE_MODE_REVMII: 252 case PHY_INTERFACE_MODE_REVMII:
@@ -306,21 +299,71 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
306 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL); 299 bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
307 } 300 }
308 301
309 if (init) { 302 if (init)
310 linkmode_copy(phydev->advertising, phydev->supported); 303 dev_info(kdev, "configuring instance for %s\n", phy_name);
311 304
312 /* The internal PHY has its link interrupts routed to the 305 return 0;
313 * Ethernet MAC ISRs. On GENETv5 there is a hardware issue 306}
314 * that prevents the signaling of link UP interrupts when
315 * the link operates at 10Mbps, so fallback to polling for
316 * those versions of GENET.
317 */
318 if (priv->internal_phy && !GENET_IS_V5(priv))
319 phydev->irq = PHY_IGNORE_INTERRUPT;
320 307
321 dev_info(kdev, "configuring instance for %s\n", phy_name); 308int bcmgenet_mii_probe(struct net_device *dev)
309{
310 struct bcmgenet_priv *priv = netdev_priv(dev);
311 struct device_node *dn = priv->pdev->dev.of_node;
312 struct phy_device *phydev;
313 u32 phy_flags = 0;
314 int ret;
315
316 /* Communicate the integrated PHY revision */
317 if (priv->internal_phy)
318 phy_flags = priv->gphy_rev;
319
320 /* Initialize link state variables that bcmgenet_mii_setup() uses */
321 priv->old_link = -1;
322 priv->old_speed = -1;
323 priv->old_duplex = -1;
324 priv->old_pause = -1;
325
326 if (dn) {
327 phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
328 phy_flags, priv->phy_interface);
329 if (!phydev) {
330 pr_err("could not attach to PHY\n");
331 return -ENODEV;
332 }
333 } else {
334 phydev = dev->phydev;
335 phydev->dev_flags = phy_flags;
336
337 ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
338 priv->phy_interface);
339 if (ret) {
340 pr_err("could not attach to PHY\n");
341 return -ENODEV;
342 }
322 } 343 }
323 344
345 /* Configure port multiplexer based on what the probed PHY device since
346 * reading the 'max-speed' property determines the maximum supported
347 * PHY speed which is needed for bcmgenet_mii_config() to configure
348 * things appropriately.
349 */
350 ret = bcmgenet_mii_config(dev, true);
351 if (ret) {
352 phy_disconnect(dev->phydev);
353 return ret;
354 }
355
356 linkmode_copy(phydev->advertising, phydev->supported);
357
358 /* The internal PHY has its link interrupts routed to the
359 * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
360 * that prevents the signaling of link UP interrupts when
361 * the link operates at 10Mbps, so fallback to polling for
362 * those versions of GENET.
363 */
364 if (priv->internal_phy && !GENET_IS_V5(priv))
365 dev->phydev->irq = PHY_IGNORE_INTERRUPT;
366
324 return 0; 367 return 0;
325} 368}
326 369
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 0e5de88fd6e8..cdd7e5da4a74 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1499,7 +1499,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
1499 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; 1499 netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1500 1500
1501 netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM; 1501 netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
1502 netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM; 1502 netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
1503 1503
1504 mac = of_get_mac_address(pdev->dev.of_node); 1504 mac = of_get_mac_address(pdev->dev.of_node);
1505 1505
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 22c01b224baa..a9c386b63581 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3645,6 +3645,8 @@ fec_drv_remove(struct platform_device *pdev)
3645 regulator_disable(fep->reg_phy); 3645 regulator_disable(fep->reg_phy);
3646 pm_runtime_put(&pdev->dev); 3646 pm_runtime_put(&pdev->dev);
3647 pm_runtime_disable(&pdev->dev); 3647 pm_runtime_disable(&pdev->dev);
3648 clk_disable_unprepare(fep->clk_ahb);
3649 clk_disable_unprepare(fep->clk_ipg);
3648 if (of_phy_is_fixed_link(np)) 3650 if (of_phy_is_fixed_link(np))
3649 of_phy_deregister_fixed_link(np); 3651 of_phy_deregister_fixed_link(np);
3650 of_node_put(fep->phy_node); 3652 of_node_put(fep->phy_node);
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 6d0457eb4faa..08339278c722 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -199,7 +199,6 @@ hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
199 199
200 ring->q = q; 200 ring->q = q;
201 ring->flags = flags; 201 ring->flags = flags;
202 spin_lock_init(&ring->lock);
203 ring->coal_param = q->handle->coal_param; 202 ring->coal_param = q->handle->coal_param;
204 assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr); 203 assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
205 204
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e9c67c06bfd2..6ab9458302e1 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -274,9 +274,6 @@ struct hnae_ring {
274 /* statistic */ 274 /* statistic */
275 struct ring_stats stats; 275 struct ring_stats stats;
276 276
277 /* ring lock for poll one */
278 spinlock_t lock;
279
280 dma_addr_t desc_dma_addr; 277 dma_addr_t desc_dma_addr;
281 u32 buf_size; /* size for hnae_desc->addr, preset by AE */ 278 u32 buf_size; /* size for hnae_desc->addr, preset by AE */
282 u16 desc_num; /* total number of desc */ 279 u16 desc_num; /* total number of desc */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index a48396dd4ebb..14ab20491fd0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -943,15 +943,6 @@ static int is_valid_clean_head(struct hnae_ring *ring, int h)
943 return u > c ? (h > c && h <= u) : (h > c || h <= u); 943 return u > c ? (h > c && h <= u) : (h > c || h <= u);
944} 944}
945 945
946/* netif_tx_lock will turn down the performance, set only when necessary */
947#ifdef CONFIG_NET_POLL_CONTROLLER
948#define NETIF_TX_LOCK(ring) spin_lock(&(ring)->lock)
949#define NETIF_TX_UNLOCK(ring) spin_unlock(&(ring)->lock)
950#else
951#define NETIF_TX_LOCK(ring)
952#define NETIF_TX_UNLOCK(ring)
953#endif
954
955/* reclaim all desc in one budget 946/* reclaim all desc in one budget
956 * return error or number of desc left 947 * return error or number of desc left
957 */ 948 */
@@ -965,21 +956,16 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
965 int head; 956 int head;
966 int bytes, pkts; 957 int bytes, pkts;
967 958
968 NETIF_TX_LOCK(ring);
969
970 head = readl_relaxed(ring->io_base + RCB_REG_HEAD); 959 head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
971 rmb(); /* make sure head is ready before touch any data */ 960 rmb(); /* make sure head is ready before touch any data */
972 961
973 if (is_ring_empty(ring) || head == ring->next_to_clean) { 962 if (is_ring_empty(ring) || head == ring->next_to_clean)
974 NETIF_TX_UNLOCK(ring);
975 return 0; /* no data to poll */ 963 return 0; /* no data to poll */
976 }
977 964
978 if (!is_valid_clean_head(ring, head)) { 965 if (!is_valid_clean_head(ring, head)) {
979 netdev_err(ndev, "wrong head (%d, %d-%d)\n", head, 966 netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
980 ring->next_to_use, ring->next_to_clean); 967 ring->next_to_use, ring->next_to_clean);
981 ring->stats.io_err_cnt++; 968 ring->stats.io_err_cnt++;
982 NETIF_TX_UNLOCK(ring);
983 return -EIO; 969 return -EIO;
984 } 970 }
985 971
@@ -994,8 +980,6 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
994 ring->stats.tx_pkts += pkts; 980 ring->stats.tx_pkts += pkts;
995 ring->stats.tx_bytes += bytes; 981 ring->stats.tx_bytes += bytes;
996 982
997 NETIF_TX_UNLOCK(ring);
998
999 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); 983 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
1000 netdev_tx_completed_queue(dev_queue, pkts, bytes); 984 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1001 985
@@ -1055,16 +1039,12 @@ static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
1055 int head; 1039 int head;
1056 int bytes, pkts; 1040 int bytes, pkts;
1057 1041
1058 NETIF_TX_LOCK(ring);
1059
1060 head = ring->next_to_use; /* ntu :soft setted ring position*/ 1042 head = ring->next_to_use; /* ntu :soft setted ring position*/
1061 bytes = 0; 1043 bytes = 0;
1062 pkts = 0; 1044 pkts = 0;
1063 while (head != ring->next_to_clean) 1045 while (head != ring->next_to_clean)
1064 hns_nic_reclaim_one_desc(ring, &bytes, &pkts); 1046 hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
1065 1047
1066 NETIF_TX_UNLOCK(ring);
1067
1068 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index); 1048 dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
1069 netdev_tx_reset_queue(dev_queue); 1049 netdev_tx_reset_queue(dev_queue);
1070} 1050}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 75ccc1e7076b..a0998937727d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HNAE3_H 4#ifndef __HNAE3_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 2110fa3b4479..5d468ed404a6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HNS3_ENET_H 4#ifndef __HNS3_ENET_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 4821fe08b5e4..1426eb5ddf3d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HCLGE_CMD_H 4#ifndef __HCLGE_CMD_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
index 278f21e02736..b04702e65689 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HCLGE_DCB_H__ 4#ifndef __HCLGE_DCB_H__
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index e02e01bd9eff..16f7d0e15b4f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3587,12 +3587,28 @@ static int hclge_set_rst_done(struct hclge_dev *hdev)
3587{ 3587{
3588 struct hclge_pf_rst_done_cmd *req; 3588 struct hclge_pf_rst_done_cmd *req;
3589 struct hclge_desc desc; 3589 struct hclge_desc desc;
3590 int ret;
3590 3591
3591 req = (struct hclge_pf_rst_done_cmd *)desc.data; 3592 req = (struct hclge_pf_rst_done_cmd *)desc.data;
3592 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false); 3593 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
3593 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT; 3594 req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
3594 3595
3595 return hclge_cmd_send(&hdev->hw, &desc, 1); 3596 ret = hclge_cmd_send(&hdev->hw, &desc, 1);
3597 /* To be compatible with the old firmware, which does not support
3598 * command HCLGE_OPC_PF_RST_DONE, just print a warning and
3599 * return success
3600 */
3601 if (ret == -EOPNOTSUPP) {
3602 dev_warn(&hdev->pdev->dev,
3603 "current firmware does not support command(0x%x)!\n",
3604 HCLGE_OPC_PF_RST_DONE);
3605 return 0;
3606 } else if (ret) {
3607 dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
3608 ret);
3609 }
3610
3611 return ret;
3596} 3612}
3597 3613
3598static int hclge_reset_prepare_up(struct hclge_dev *hdev) 3614static int hclge_reset_prepare_up(struct hclge_dev *hdev)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index c3d56b872ed7..59b824347ba4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HCLGE_MAIN_H 4#ifndef __HCLGE_MAIN_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
index ef095d9c566f..dd9a1218a7b0 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HCLGE_MDIO_H 4#ifndef __HCLGE_MDIO_H
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 818610988d34..260f22d19d81 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -1,4 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0+ 1/* SPDX-License-Identifier: GPL-2.0+ */
2// Copyright (c) 2016-2017 Hisilicon Limited. 2// Copyright (c) 2016-2017 Hisilicon Limited.
3 3
4#ifndef __HCLGE_TM_H 4#ifndef __HCLGE_TM_H
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 530613f31527..69a2daaca5c5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -20,6 +20,8 @@
20 20
21/* API version 1.7 implements additional link and PHY-specific APIs */ 21/* API version 1.7 implements additional link and PHY-specific APIs */
22#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 22#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
23/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
24#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
23/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */ 25/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
24#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006 26#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
25 27
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index d37c6e0e5f08..7560f06768e0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1876,7 +1876,8 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
1876 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE) 1876 hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
1877 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU; 1877 hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
1878 1878
1879 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) { 1879 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
1880 hw->mac.type != I40E_MAC_X722) {
1880 __le32 tmp; 1881 __le32 tmp;
1881 1882
1882 memcpy(&tmp, resp->link_type, sizeof(tmp)); 1883 memcpy(&tmp, resp->link_type, sizeof(tmp));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index a05dfecdd9b4..d07e1a890428 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -689,8 +689,6 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
689 i40e_xdp_ring_update_tail(xdp_ring); 689 i40e_xdp_ring_update_tail(xdp_ring);
690 690
691 xsk_umem_consume_tx_done(xdp_ring->xsk_umem); 691 xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
692 if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
693 xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
694 } 692 }
695 693
696 return !!budget && work_done; 694 return !!budget && work_done;
@@ -769,12 +767,8 @@ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi,
769 i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); 767 i40e_update_tx_stats(tx_ring, completed_frames, total_bytes);
770 768
771out_xmit: 769out_xmit:
772 if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { 770 if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
773 if (tx_ring->next_to_clean == tx_ring->next_to_use) 771 xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
774 xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
775 else
776 xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
777 }
778 772
779 xmit_done = i40e_xmit_zc(tx_ring, budget); 773 xmit_done = i40e_xmit_zc(tx_ring, budget);
780 774
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 8f310e520b06..821987da5698 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -314,7 +314,7 @@ iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
314 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); 314 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
315 q_vector->ring_mask |= BIT(r_idx); 315 q_vector->ring_mask |= BIT(r_idx);
316 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx), 316 wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
317 q_vector->rx.current_itr); 317 q_vector->rx.current_itr >> 1);
318 q_vector->rx.current_itr = q_vector->rx.target_itr; 318 q_vector->rx.current_itr = q_vector->rx.target_itr;
319} 319}
320 320
@@ -340,7 +340,7 @@ iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
340 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); 340 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
341 q_vector->num_ringpairs++; 341 q_vector->num_ringpairs++;
342 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx), 342 wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
343 q_vector->tx.target_itr); 343 q_vector->tx.target_itr >> 1);
344 q_vector->tx.current_itr = q_vector->tx.target_itr; 344 q_vector->tx.current_itr = q_vector->tx.target_itr;
345} 345}
346 346
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index fc624b73d05d..2fde9653a608 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1036,7 +1036,7 @@ enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw)
1036 struct ice_aqc_query_txsched_res_resp *buf; 1036 struct ice_aqc_query_txsched_res_resp *buf;
1037 enum ice_status status = 0; 1037 enum ice_status status = 0;
1038 __le16 max_sibl; 1038 __le16 max_sibl;
1039 u8 i; 1039 u16 i;
1040 1040
1041 if (hw->layer_info) 1041 if (hw->layer_info)
1042 return status; 1042 return status;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 9148c62d9ac5..ed7e667d7eb2 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -5675,8 +5675,8 @@ static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
5675 * should have been handled by the upper layers. 5675 * should have been handled by the upper layers.
5676 */ 5676 */
5677 if (tx_ring->launchtime_enable) { 5677 if (tx_ring->launchtime_enable) {
5678 ts = ns_to_timespec64(first->skb->tstamp); 5678 ts = ktime_to_timespec64(first->skb->tstamp);
5679 first->skb->tstamp = 0; 5679 first->skb->tstamp = ktime_set(0, 0);
5680 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32); 5680 context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
5681 } else { 5681 } else {
5682 context_desc->seqnum_seed = 0; 5682 context_desc->seqnum_seed = 0;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 8e424dfab12e..24888676f69b 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -824,8 +824,8 @@ static void igc_tx_ctxtdesc(struct igc_ring *tx_ring,
824 * should have been handled by the upper layers. 824 * should have been handled by the upper layers.
825 */ 825 */
826 if (tx_ring->launchtime_enable) { 826 if (tx_ring->launchtime_enable) {
827 ts = ns_to_timespec64(first->skb->tstamp); 827 ts = ktime_to_timespec64(first->skb->tstamp);
828 first->skb->tstamp = 0; 828 first->skb->tstamp = ktime_set(0, 0);
829 context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32); 829 context_desc->launch_time = cpu_to_le32(ts.tv_nsec / 32);
830 } else { 830 } else {
831 context_desc->launch_time = 0; 831 context_desc->launch_time = 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index 100ac89b345d..d6feaacfbf89 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -622,8 +622,6 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
622 if (tx_desc) { 622 if (tx_desc) {
623 ixgbe_xdp_ring_update_tail(xdp_ring); 623 ixgbe_xdp_ring_update_tail(xdp_ring);
624 xsk_umem_consume_tx_done(xdp_ring->xsk_umem); 624 xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
625 if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
626 xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
627 } 625 }
628 626
629 return !!budget && work_done; 627 return !!budget && work_done;
@@ -691,12 +689,8 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
691 if (xsk_frames) 689 if (xsk_frames)
692 xsk_umem_complete_tx(umem, xsk_frames); 690 xsk_umem_complete_tx(umem, xsk_frames);
693 691
694 if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { 692 if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem))
695 if (tx_ring->next_to_clean == tx_ring->next_to_use) 693 xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
696 xsk_set_tx_need_wakeup(tx_ring->xsk_umem);
697 else
698 xsk_clear_tx_need_wakeup(tx_ring->xsk_umem);
699 }
700 694
701 return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); 695 return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
702} 696}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index fce9b3a24347..69bb6bb06e76 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -514,8 +514,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
514 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; 514 dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
515 /* 515 /*
516 * Subtract 1 from the limit because we need to allocate a 516 * Subtract 1 from the limit because we need to allocate a
517 * spare CQE so the HCA HW can tell the difference between an 517 * spare CQE to enable resizing the CQ.
518 * empty CQ and a full CQ.
519 */ 518 */
520 dev->caps.max_cqes = dev_cap->max_cq_sz - 1; 519 dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
521 dev->caps.reserved_cqs = dev_cap->reserved_cqs; 520 dev->caps.reserved_cqs = dev_cap->reserved_cqs;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 369499e88fe8..9004a07e457a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1079,7 +1079,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
1079 MLX5_CAP_GEN(dev, max_flow_counter_15_0); 1079 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
1080 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size); 1080 fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
1081 1081
1082 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(2^%d))\n", 1082 esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d), groups(%d), max flow table size(%d))\n",
1083 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size), 1083 MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
1084 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS, 1084 max_flow_counter, ESW_OFFLOADS_NUM_GROUPS,
1085 fdb_max); 1085 fdb_max);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index 7879e1746297..366bda1bb1c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -183,7 +183,8 @@ static bool mlx5_eswitch_offload_is_uplink_port(const struct mlx5_eswitch *esw,
183 u32 port_mask, port_value; 183 u32 port_mask, port_value;
184 184
185 if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source)) 185 if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
186 return spec->flow_context.flow_source == MLX5_VPORT_UPLINK; 186 return spec->flow_context.flow_source ==
187 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
187 188
188 port_mask = MLX5_GET(fte_match_param, spec->match_criteria, 189 port_mask = MLX5_GET(fte_match_param, spec->match_criteria,
189 misc_parameters.source_port); 190 misc_parameters.source_port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index b74b7d0f6590..004c56c2fc0c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -1577,6 +1577,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action)
1577 break; 1577 break;
1578 case DR_ACTION_TYP_MODIFY_HDR: 1578 case DR_ACTION_TYP_MODIFY_HDR:
1579 mlx5dr_icm_free_chunk(action->rewrite.chunk); 1579 mlx5dr_icm_free_chunk(action->rewrite.chunk);
1580 kfree(action->rewrite.data);
1580 refcount_dec(&action->rewrite.dmn->refcount); 1581 refcount_dec(&action->rewrite.dmn->refcount);
1581 break; 1582 break;
1582 default: 1583 default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index e8b656075c6f..5dcb8baf491a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -1096,6 +1096,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
1096 if (htbl) 1096 if (htbl)
1097 mlx5dr_htbl_put(htbl); 1097 mlx5dr_htbl_put(htbl);
1098 1098
1099 kfree(hw_ste_arr);
1100
1099 return 0; 1101 return 0;
1100 1102
1101free_ste: 1103free_ste:
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index 344539c0d3aa..672ea1342add 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -1680,9 +1680,6 @@ static int ocelot_netdevice_port_event(struct net_device *dev,
1680 struct ocelot_port *ocelot_port = netdev_priv(dev); 1680 struct ocelot_port *ocelot_port = netdev_priv(dev);
1681 int err = 0; 1681 int err = 0;
1682 1682
1683 if (!ocelot_netdevice_dev_check(dev))
1684 return 0;
1685
1686 switch (event) { 1683 switch (event) {
1687 case NETDEV_CHANGEUPPER: 1684 case NETDEV_CHANGEUPPER:
1688 if (netif_is_bridge_master(info->upper_dev)) { 1685 if (netif_is_bridge_master(info->upper_dev)) {
@@ -1719,12 +1716,16 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
1719 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1716 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1720 int ret = 0; 1717 int ret = 0;
1721 1718
1719 if (!ocelot_netdevice_dev_check(dev))
1720 return 0;
1721
1722 if (event == NETDEV_PRECHANGEUPPER && 1722 if (event == NETDEV_PRECHANGEUPPER &&
1723 netif_is_lag_master(info->upper_dev)) { 1723 netif_is_lag_master(info->upper_dev)) {
1724 struct netdev_lag_upper_info *lag_upper_info = info->upper_info; 1724 struct netdev_lag_upper_info *lag_upper_info = info->upper_info;
1725 struct netlink_ext_ack *extack; 1725 struct netlink_ext_ack *extack;
1726 1726
1727 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) { 1727 if (lag_upper_info &&
1728 lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
1728 extack = netdev_notifier_info_to_extack(&info->info); 1729 extack = netdev_notifier_info_to_extack(&info->info);
1729 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type"); 1730 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
1730 1731
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index e40773c01a44..06ac806052bc 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -523,7 +523,7 @@ void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
523#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri)) 523#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
524#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0) 524#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
525 525
526void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 mask, 526void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
527 u32 offset); 527 u32 offset);
528#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri)) 528#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
529#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi)) 529#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 8d1c208f778f..a220cc7c947a 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -1208,8 +1208,16 @@ enum qede_remove_mode {
1208static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode) 1208static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
1209{ 1209{
1210 struct net_device *ndev = pci_get_drvdata(pdev); 1210 struct net_device *ndev = pci_get_drvdata(pdev);
1211 struct qede_dev *edev = netdev_priv(ndev); 1211 struct qede_dev *edev;
1212 struct qed_dev *cdev = edev->cdev; 1212 struct qed_dev *cdev;
1213
1214 if (!ndev) {
1215 dev_info(&pdev->dev, "Device has already been removed\n");
1216 return;
1217 }
1218
1219 edev = netdev_priv(ndev);
1220 cdev = edev->cdev;
1213 1221
1214 DP_INFO(edev, "Starting qede_remove\n"); 1222 DP_INFO(edev, "Starting qede_remove\n");
1215 1223
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 9c54b715228e..06de59521fc4 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -57,10 +57,10 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
57 if (port->nr_rmnet_devs) 57 if (port->nr_rmnet_devs)
58 return -EINVAL; 58 return -EINVAL;
59 59
60 kfree(port);
61
62 netdev_rx_handler_unregister(real_dev); 60 netdev_rx_handler_unregister(real_dev);
63 61
62 kfree(port);
63
64 /* release reference on real_dev */ 64 /* release reference on real_dev */
65 dev_put(real_dev); 65 dev_put(real_dev);
66 66
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 5064c292b873..c4e961ea44d5 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -916,6 +916,9 @@ static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
916 916
917static int r8168g_mdio_read(struct rtl8169_private *tp, int reg) 917static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
918{ 918{
919 if (reg == 0x1f)
920 return tp->ocp_base == OCP_STD_PHY_BASE ? 0 : tp->ocp_base >> 4;
921
919 if (tp->ocp_base != OCP_STD_PHY_BASE) 922 if (tp->ocp_base != OCP_STD_PHY_BASE)
920 reg -= 0x10; 923 reg -= 0x10;
921 924
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 5a7b0aca1d31..66e60c7e9850 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -432,7 +432,7 @@ static void dwmac4_set_filter(struct mac_device_info *hw,
432 * bits used depends on the hardware configuration 432 * bits used depends on the hardware configuration
433 * selected at core configuration time. 433 * selected at core configuration time.
434 */ 434 */
435 int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 435 u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
436 ETH_ALEN)) >> (32 - mcbitslog2); 436 ETH_ALEN)) >> (32 - mcbitslog2);
437 /* The most significant bit determines the register to 437 /* The most significant bit determines the register to
438 * use (H/L) while the other 5 bits determine the bit 438 * use (H/L) while the other 5 bits determine the bit
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index 5031398e612c..070bd7d1ae4c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -224,6 +224,7 @@ static void dwxgmac2_config_cbs(struct mac_device_info *hw,
224 writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue)); 224 writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
225 225
226 value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); 226 value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
227 value &= ~XGMAC_TSA;
227 value |= XGMAC_CC | XGMAC_CBS; 228 value |= XGMAC_CC | XGMAC_CBS;
228 writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue)); 229 writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
229} 230}
@@ -463,7 +464,7 @@ static void dwxgmac2_set_filter(struct mac_device_info *hw,
463 value |= XGMAC_FILTER_HMC; 464 value |= XGMAC_FILTER_HMC;
464 465
465 netdev_for_each_mc_addr(ha, dev) { 466 netdev_for_each_mc_addr(ha, dev) {
466 int nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >> 467 u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
467 (32 - mcbitslog2)); 468 (32 - mcbitslog2));
468 mc_filter[nr >> 5] |= (1 << (nr & 0x1F)); 469 mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
469 } 470 }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index ae48154f933c..bd5838ce1e8a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -288,7 +288,8 @@ static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
288 288
289static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len) 289static int dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
290{ 290{
291 *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL; 291 if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T)
292 *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
292 return 0; 293 return 0;
293} 294}
294 295
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index 965cbe3e6f51..f70ca5300b82 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -369,7 +369,7 @@ static void dwxgmac2_get_hw_feature(void __iomem *ioaddr,
369 dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13; 369 dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
370 dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12; 370 dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
371 dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11; 371 dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
372 dma_cap->av &= !(hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10; 372 dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
373 dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9; 373 dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
374 dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8; 374 dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
375 dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7; 375 dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
@@ -470,6 +470,7 @@ static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
470static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode) 470static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
471{ 471{
472 u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); 472 u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
473 u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
473 474
474 value &= ~XGMAC_TXQEN; 475 value &= ~XGMAC_TXQEN;
475 if (qmode != MTL_QUEUE_AVB) { 476 if (qmode != MTL_QUEUE_AVB) {
@@ -477,6 +478,7 @@ static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
477 writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel)); 478 writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
478 } else { 479 } else {
479 value |= 0x1 << XGMAC_TXQEN_SHIFT; 480 value |= 0x1 << XGMAC_TXQEN_SHIFT;
481 writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
480 } 482 }
481 483
482 writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel)); 484 writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index a223584f5f9a..252cf48c5816 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -176,6 +176,7 @@
176#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c 176#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
177#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230 177#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
178#define MMC_XGMAC_RX_FPE_FRAG 0x234 178#define MMC_XGMAC_RX_FPE_FRAG 0x234
179#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c
179 180
180static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode) 181static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
181{ 182{
@@ -333,8 +334,9 @@ static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
333 334
334static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr) 335static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
335{ 336{
336 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK); 337 writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
337 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK); 338 writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
339 writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
338} 340}
339 341
340static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest) 342static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 4e9c848c67cc..f826365c979d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2996,6 +2996,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
2996 stmmac_set_desc_addr(priv, first, des); 2996 stmmac_set_desc_addr(priv, first, des);
2997 tmp_pay_len = pay_len; 2997 tmp_pay_len = pay_len;
2998 des += proto_hdr_len; 2998 des += proto_hdr_len;
2999 pay_len = 0;
2999 } 3000 }
3000 3001
3001 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); 3002 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
@@ -3023,6 +3024,19 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3023 /* Only the last descriptor gets to point to the skb. */ 3024 /* Only the last descriptor gets to point to the skb. */
3024 tx_q->tx_skbuff[tx_q->cur_tx] = skb; 3025 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
3025 3026
3027 /* Manage tx mitigation */
3028 tx_q->tx_count_frames += nfrags + 1;
3029 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3030 !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3031 priv->hwts_tx_en)) {
3032 stmmac_tx_timer_arm(priv, queue);
3033 } else {
3034 desc = &tx_q->dma_tx[tx_q->cur_tx];
3035 tx_q->tx_count_frames = 0;
3036 stmmac_set_tx_ic(priv, desc);
3037 priv->xstats.tx_set_ic_bit++;
3038 }
3039
3026 /* We've used all descriptors we need for this skb, however, 3040 /* We've used all descriptors we need for this skb, however,
3027 * advance cur_tx so that it references a fresh descriptor. 3041 * advance cur_tx so that it references a fresh descriptor.
3028 * ndo_start_xmit will fill this descriptor the next time it's 3042 * ndo_start_xmit will fill this descriptor the next time it's
@@ -3040,19 +3054,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
3040 priv->xstats.tx_tso_frames++; 3054 priv->xstats.tx_tso_frames++;
3041 priv->xstats.tx_tso_nfrags += nfrags; 3055 priv->xstats.tx_tso_nfrags += nfrags;
3042 3056
3043 /* Manage tx mitigation */
3044 tx_q->tx_count_frames += nfrags + 1;
3045 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3046 !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3047 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3048 priv->hwts_tx_en)) {
3049 stmmac_tx_timer_arm(priv, queue);
3050 } else {
3051 tx_q->tx_count_frames = 0;
3052 stmmac_set_tx_ic(priv, desc);
3053 priv->xstats.tx_set_ic_bit++;
3054 }
3055
3056 if (priv->sarc_type) 3057 if (priv->sarc_type)
3057 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 3058 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3058 3059
@@ -3224,6 +3225,27 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3224 /* Only the last descriptor gets to point to the skb. */ 3225 /* Only the last descriptor gets to point to the skb. */
3225 tx_q->tx_skbuff[entry] = skb; 3226 tx_q->tx_skbuff[entry] = skb;
3226 3227
3228 /* According to the coalesce parameter the IC bit for the latest
3229 * segment is reset and the timer re-started to clean the tx status.
3230 * This approach takes care about the fragments: desc is the first
3231 * element in case of no SG.
3232 */
3233 tx_q->tx_count_frames += nfrags + 1;
3234 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3235 !((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3236 priv->hwts_tx_en)) {
3237 stmmac_tx_timer_arm(priv, queue);
3238 } else {
3239 if (likely(priv->extend_desc))
3240 desc = &tx_q->dma_etx[entry].basic;
3241 else
3242 desc = &tx_q->dma_tx[entry];
3243
3244 tx_q->tx_count_frames = 0;
3245 stmmac_set_tx_ic(priv, desc);
3246 priv->xstats.tx_set_ic_bit++;
3247 }
3248
3227 /* We've used all descriptors we need for this skb, however, 3249 /* We've used all descriptors we need for this skb, however,
3228 * advance cur_tx so that it references a fresh descriptor. 3250 * advance cur_tx so that it references a fresh descriptor.
3229 * ndo_start_xmit will fill this descriptor the next time it's 3251 * ndo_start_xmit will fill this descriptor the next time it's
@@ -3259,23 +3281,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
3259 3281
3260 dev->stats.tx_bytes += skb->len; 3282 dev->stats.tx_bytes += skb->len;
3261 3283
3262 /* According to the coalesce parameter the IC bit for the latest
3263 * segment is reset and the timer re-started to clean the tx status.
3264 * This approach takes care about the fragments: desc is the first
3265 * element in case of no SG.
3266 */
3267 tx_q->tx_count_frames += nfrags + 1;
3268 if (likely(priv->tx_coal_frames > tx_q->tx_count_frames) &&
3269 !(priv->synopsys_id >= DWMAC_CORE_4_00 &&
3270 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
3271 priv->hwts_tx_en)) {
3272 stmmac_tx_timer_arm(priv, queue);
3273 } else {
3274 tx_q->tx_count_frames = 0;
3275 stmmac_set_tx_ic(priv, desc);
3276 priv->xstats.tx_set_ic_bit++;
3277 }
3278
3279 if (priv->sarc_type) 3284 if (priv->sarc_type)
3280 stmmac_set_desc_sarc(priv, first, priv->sarc_type); 3285 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
3281 3286
@@ -3506,8 +3511,6 @@ read_again:
3506 if (unlikely(status & dma_own)) 3511 if (unlikely(status & dma_own))
3507 break; 3512 break;
3508 3513
3509 count++;
3510
3511 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE); 3514 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
3512 next_entry = rx_q->cur_rx; 3515 next_entry = rx_q->cur_rx;
3513 3516
@@ -3534,6 +3537,7 @@ read_again:
3534 goto read_again; 3537 goto read_again;
3535 if (unlikely(error)) { 3538 if (unlikely(error)) {
3536 dev_kfree_skb(skb); 3539 dev_kfree_skb(skb);
3540 count++;
3537 continue; 3541 continue;
3538 } 3542 }
3539 3543
@@ -3573,6 +3577,7 @@ read_again:
3573 skb = napi_alloc_skb(&ch->rx_napi, len); 3577 skb = napi_alloc_skb(&ch->rx_napi, len);
3574 if (!skb) { 3578 if (!skb) {
3575 priv->dev->stats.rx_dropped++; 3579 priv->dev->stats.rx_dropped++;
3580 count++;
3576 continue; 3581 continue;
3577 } 3582 }
3578 3583
@@ -3638,6 +3643,7 @@ read_again:
3638 3643
3639 priv->dev->stats.rx_packets++; 3644 priv->dev->stats.rx_packets++;
3640 priv->dev->stats.rx_bytes += len; 3645 priv->dev->stats.rx_bytes += len;
3646 count++;
3641 } 3647 }
3642 3648
3643 if (status & rx_not_ls) { 3649 if (status & rx_not_ls) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index e4ac3c401432..ac3f658105c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -6,7 +6,9 @@
6 * Author: Jose Abreu <joabreu@synopsys.com> 6 * Author: Jose Abreu <joabreu@synopsys.com>
7 */ 7 */
8 8
9#include <linux/bitrev.h>
9#include <linux/completion.h> 10#include <linux/completion.h>
11#include <linux/crc32.h>
10#include <linux/ethtool.h> 12#include <linux/ethtool.h>
11#include <linux/ip.h> 13#include <linux/ip.h>
12#include <linux/phy.h> 14#include <linux/phy.h>
@@ -485,12 +487,48 @@ static int stmmac_filter_check(struct stmmac_priv *priv)
485 return -EOPNOTSUPP; 487 return -EOPNOTSUPP;
486} 488}
487 489
490static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
491{
492 int mc_offset = 32 - priv->hw->mcast_bits_log2;
493 struct netdev_hw_addr *ha;
494 u32 hash, hash_nr;
495
496 /* First compute the hash for desired addr */
497 hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
498 hash_nr = hash >> 5;
499 hash = 1 << (hash & 0x1f);
500
501 /* Now, check if it collides with any existing one */
502 netdev_for_each_mc_addr(ha, priv->dev) {
503 u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
504 if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
505 return false;
506 }
507
508 /* No collisions, address is good to go */
509 return true;
510}
511
512static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
513{
514 struct netdev_hw_addr *ha;
515
516 /* Check if it collides with any existing one */
517 netdev_for_each_uc_addr(ha, priv->dev) {
518 if (!memcmp(ha->addr, addr, ETH_ALEN))
519 return false;
520 }
521
522 /* No collisions, address is good to go */
523 return true;
524}
525
488static int stmmac_test_hfilt(struct stmmac_priv *priv) 526static int stmmac_test_hfilt(struct stmmac_priv *priv)
489{ 527{
490 unsigned char gd_addr[ETH_ALEN] = {0x01, 0xee, 0xdd, 0xcc, 0xbb, 0xaa}; 528 unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
491 unsigned char bd_addr[ETH_ALEN] = {0x01, 0x01, 0x02, 0x03, 0x04, 0x05}; 529 unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
492 struct stmmac_packet_attrs attr = { }; 530 struct stmmac_packet_attrs attr = { };
493 int ret; 531 int ret, tries = 256;
494 532
495 ret = stmmac_filter_check(priv); 533 ret = stmmac_filter_check(priv);
496 if (ret) 534 if (ret)
@@ -499,6 +537,16 @@ static int stmmac_test_hfilt(struct stmmac_priv *priv)
499 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins) 537 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
500 return -EOPNOTSUPP; 538 return -EOPNOTSUPP;
501 539
540 while (--tries) {
541 /* We only need to check the bd_addr for collisions */
542 bd_addr[ETH_ALEN - 1] = tries;
543 if (stmmac_hash_check(priv, bd_addr))
544 break;
545 }
546
547 if (!tries)
548 return -EOPNOTSUPP;
549
502 ret = dev_mc_add(priv->dev, gd_addr); 550 ret = dev_mc_add(priv->dev, gd_addr);
503 if (ret) 551 if (ret)
504 return ret; 552 return ret;
@@ -523,13 +571,25 @@ cleanup:
523 571
524static int stmmac_test_pfilt(struct stmmac_priv *priv) 572static int stmmac_test_pfilt(struct stmmac_priv *priv)
525{ 573{
526 unsigned char gd_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; 574 unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
527 unsigned char bd_addr[ETH_ALEN] = {0x08, 0x00, 0x22, 0x33, 0x44, 0x55}; 575 unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
528 struct stmmac_packet_attrs attr = { }; 576 struct stmmac_packet_attrs attr = { };
529 int ret; 577 int ret, tries = 256;
530 578
531 if (stmmac_filter_check(priv)) 579 if (stmmac_filter_check(priv))
532 return -EOPNOTSUPP; 580 return -EOPNOTSUPP;
581 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
582 return -EOPNOTSUPP;
583
584 while (--tries) {
585 /* We only need to check the bd_addr for collisions */
586 bd_addr[ETH_ALEN - 1] = tries;
587 if (stmmac_perfect_check(priv, bd_addr))
588 break;
589 }
590
591 if (!tries)
592 return -EOPNOTSUPP;
533 593
534 ret = dev_uc_add(priv->dev, gd_addr); 594 ret = dev_uc_add(priv->dev, gd_addr);
535 if (ret) 595 if (ret)
@@ -553,39 +613,31 @@ cleanup:
553 return ret; 613 return ret;
554} 614}
555 615
556static int stmmac_dummy_sync(struct net_device *netdev, const u8 *addr)
557{
558 return 0;
559}
560
561static void stmmac_test_set_rx_mode(struct net_device *netdev)
562{
563 /* As we are in test mode of ethtool we already own the rtnl lock
564 * so no address will change from user. We can just call the
565 * ndo_set_rx_mode() callback directly */
566 if (netdev->netdev_ops->ndo_set_rx_mode)
567 netdev->netdev_ops->ndo_set_rx_mode(netdev);
568}
569
570static int stmmac_test_mcfilt(struct stmmac_priv *priv) 616static int stmmac_test_mcfilt(struct stmmac_priv *priv)
571{ 617{
572 unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; 618 unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
573 unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77}; 619 unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
574 struct stmmac_packet_attrs attr = { }; 620 struct stmmac_packet_attrs attr = { };
575 int ret; 621 int ret, tries = 256;
576 622
577 if (stmmac_filter_check(priv)) 623 if (stmmac_filter_check(priv))
578 return -EOPNOTSUPP; 624 return -EOPNOTSUPP;
579 if (!priv->hw->multicast_filter_bins) 625 if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
580 return -EOPNOTSUPP; 626 return -EOPNOTSUPP;
581 627
582 /* Remove all MC addresses */ 628 while (--tries) {
583 __dev_mc_unsync(priv->dev, NULL); 629 /* We only need to check the mc_addr for collisions */
584 stmmac_test_set_rx_mode(priv->dev); 630 mc_addr[ETH_ALEN - 1] = tries;
631 if (stmmac_hash_check(priv, mc_addr))
632 break;
633 }
634
635 if (!tries)
636 return -EOPNOTSUPP;
585 637
586 ret = dev_uc_add(priv->dev, uc_addr); 638 ret = dev_uc_add(priv->dev, uc_addr);
587 if (ret) 639 if (ret)
588 goto cleanup; 640 return ret;
589 641
590 attr.dst = uc_addr; 642 attr.dst = uc_addr;
591 643
@@ -602,30 +654,34 @@ static int stmmac_test_mcfilt(struct stmmac_priv *priv)
602 654
603cleanup: 655cleanup:
604 dev_uc_del(priv->dev, uc_addr); 656 dev_uc_del(priv->dev, uc_addr);
605 __dev_mc_sync(priv->dev, stmmac_dummy_sync, NULL);
606 stmmac_test_set_rx_mode(priv->dev);
607 return ret; 657 return ret;
608} 658}
609 659
610static int stmmac_test_ucfilt(struct stmmac_priv *priv) 660static int stmmac_test_ucfilt(struct stmmac_priv *priv)
611{ 661{
612 unsigned char uc_addr[ETH_ALEN] = {0x00, 0x01, 0x44, 0x55, 0x66, 0x77}; 662 unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
613 unsigned char mc_addr[ETH_ALEN] = {0x01, 0x01, 0x44, 0x55, 0x66, 0x77}; 663 unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
614 struct stmmac_packet_attrs attr = { }; 664 struct stmmac_packet_attrs attr = { };
615 int ret; 665 int ret, tries = 256;
616 666
617 if (stmmac_filter_check(priv)) 667 if (stmmac_filter_check(priv))
618 return -EOPNOTSUPP; 668 return -EOPNOTSUPP;
619 if (!priv->hw->multicast_filter_bins) 669 if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
620 return -EOPNOTSUPP; 670 return -EOPNOTSUPP;
621 671
622 /* Remove all UC addresses */ 672 while (--tries) {
623 __dev_uc_unsync(priv->dev, NULL); 673 /* We only need to check the uc_addr for collisions */
624 stmmac_test_set_rx_mode(priv->dev); 674 uc_addr[ETH_ALEN - 1] = tries;
675 if (stmmac_perfect_check(priv, uc_addr))
676 break;
677 }
678
679 if (!tries)
680 return -EOPNOTSUPP;
625 681
626 ret = dev_mc_add(priv->dev, mc_addr); 682 ret = dev_mc_add(priv->dev, mc_addr);
627 if (ret) 683 if (ret)
628 goto cleanup; 684 return ret;
629 685
630 attr.dst = mc_addr; 686 attr.dst = mc_addr;
631 687
@@ -642,8 +698,6 @@ static int stmmac_test_ucfilt(struct stmmac_priv *priv)
642 698
643cleanup: 699cleanup:
644 dev_mc_del(priv->dev, mc_addr); 700 dev_mc_del(priv->dev, mc_addr);
645 __dev_uc_sync(priv->dev, stmmac_dummy_sync, NULL);
646 stmmac_test_set_rx_mode(priv->dev);
647 return ret; 701 return ret;
648} 702}
649 703
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 00cab3f43a4c..a245597a3902 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -578,8 +578,8 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
578 /* read current mtu value from device */ 578 /* read current mtu value from device */
579 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE, 579 err = usbnet_read_cmd(dev, USB_CDC_GET_MAX_DATAGRAM_SIZE,
580 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, 580 USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
581 0, iface_no, &max_datagram_size, 2); 581 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
582 if (err < 0) { 582 if (err < sizeof(max_datagram_size)) {
583 dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n"); 583 dev_dbg(&dev->intf->dev, "GET_MAX_DATAGRAM_SIZE failed\n");
584 goto out; 584 goto out;
585 } 585 }
@@ -590,7 +590,7 @@ static void cdc_ncm_set_dgram_size(struct usbnet *dev, int new_size)
590 max_datagram_size = cpu_to_le16(ctx->max_datagram_size); 590 max_datagram_size = cpu_to_le16(ctx->max_datagram_size);
591 err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE, 591 err = usbnet_write_cmd(dev, USB_CDC_SET_MAX_DATAGRAM_SIZE,
592 USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE, 592 USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE,
593 0, iface_no, &max_datagram_size, 2); 593 0, iface_no, &max_datagram_size, sizeof(max_datagram_size));
594 if (err < 0) 594 if (err < 0)
595 dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n"); 595 dev_dbg(&dev->intf->dev, "SET_MAX_DATAGRAM_SIZE failed\n");
596 596
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 596428ec71df..56d334b9ad45 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1362,6 +1362,7 @@ static const struct usb_device_id products[] = {
1362 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */ 1362 {QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
1363 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */ 1363 {QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
1364 {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */ 1364 {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
1365 {QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
1365 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */ 1366 {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
1366 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */ 1367 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
1367 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ 1368 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 1cd113c8d7cb..ad0abb1f0bae 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -259,7 +259,7 @@ static void fdp_nci_i2c_read_device_properties(struct device *dev,
259 *fw_vsc_cfg, len); 259 *fw_vsc_cfg, len);
260 260
261 if (r) { 261 if (r) {
262 devm_kfree(dev, fw_vsc_cfg); 262 devm_kfree(dev, *fw_vsc_cfg);
263 goto vsc_read_err; 263 goto vsc_read_err;
264 } 264 }
265 } else { 265 } else {
diff --git a/drivers/nfc/st21nfca/core.c b/drivers/nfc/st21nfca/core.c
index f9ac176cf257..2ce17932a073 100644
--- a/drivers/nfc/st21nfca/core.c
+++ b/drivers/nfc/st21nfca/core.c
@@ -708,6 +708,7 @@ static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev,
708 NFC_PROTO_FELICA_MASK; 708 NFC_PROTO_FELICA_MASK;
709 } else { 709 } else {
710 kfree_skb(nfcid_skb); 710 kfree_skb(nfcid_skb);
711 nfcid_skb = NULL;
711 /* P2P in type A */ 712 /* P2P in type A */
712 r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE, 713 r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE,
713 ST21NFCA_RF_READER_F_NFCID1, 714 ST21NFCA_RF_READER_F_NFCID1,
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5b9d22338606..3bf3835d0e86 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -656,11 +656,11 @@ void bpf_map_put_with_uref(struct bpf_map *map);
656void bpf_map_put(struct bpf_map *map); 656void bpf_map_put(struct bpf_map *map);
657int bpf_map_charge_memlock(struct bpf_map *map, u32 pages); 657int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
658void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages); 658void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
659int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size); 659int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
660void bpf_map_charge_finish(struct bpf_map_memory *mem); 660void bpf_map_charge_finish(struct bpf_map_memory *mem);
661void bpf_map_charge_move(struct bpf_map_memory *dst, 661void bpf_map_charge_move(struct bpf_map_memory *dst,
662 struct bpf_map_memory *src); 662 struct bpf_map_memory *src);
663void *bpf_map_area_alloc(size_t size, int numa_node); 663void *bpf_map_area_alloc(u64 size, int numa_node);
664void bpf_map_area_free(void *base); 664void bpf_map_area_free(void *base);
665void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); 665void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
666 666
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index e4b3fb4bb77c..ce7055259877 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -139,6 +139,11 @@ static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
139 } 139 }
140} 140}
141 141
142static inline u32 sk_msg_iter_dist(u32 start, u32 end)
143{
144 return end >= start ? end - start : end + (MAX_MSG_FRAGS - start);
145}
146
142#define sk_msg_iter_var_prev(var) \ 147#define sk_msg_iter_var_prev(var) \
143 do { \ 148 do { \
144 if (var == 0) \ 149 if (var == 0) \
@@ -198,9 +203,7 @@ static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
198 if (sk_msg_full(msg)) 203 if (sk_msg_full(msg))
199 return MAX_MSG_FRAGS; 204 return MAX_MSG_FRAGS;
200 205
201 return msg->sg.end >= msg->sg.start ? 206 return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
202 msg->sg.end - msg->sg.start :
203 msg->sg.end + (MAX_MSG_FRAGS - msg->sg.start);
204} 207}
205 208
206static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which) 209static inline struct scatterlist *sk_msg_elem(struct sk_msg *msg, int which)
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 1afc125014da..3d56b026bb9e 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -159,7 +159,6 @@ struct slave {
159 unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS]; 159 unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
160 s8 link; /* one of BOND_LINK_XXXX */ 160 s8 link; /* one of BOND_LINK_XXXX */
161 s8 link_new_state; /* one of BOND_LINK_XXXX */ 161 s8 link_new_state; /* one of BOND_LINK_XXXX */
162 s8 new_link;
163 u8 backup:1, /* indicates backup slave. Value corresponds with 162 u8 backup:1, /* indicates backup slave. Value corresponds with
164 BOND_STATE_ACTIVE and BOND_STATE_BACKUP */ 163 BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
165 inactive:1, /* indicates inactive slave */ 164 inactive:1, /* indicates inactive slave */
@@ -549,7 +548,7 @@ static inline void bond_propose_link_state(struct slave *slave, int state)
549 548
550static inline void bond_commit_link_state(struct slave *slave, bool notify) 549static inline void bond_commit_link_state(struct slave *slave, bool notify)
551{ 550{
552 if (slave->link == slave->link_new_state) 551 if (slave->link_new_state == BOND_LINK_NOCHANGE)
553 return; 552 return;
554 553
555 slave->link = slave->link_new_state; 554 slave->link = slave->link_new_state;
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h
index 107c0d700ed6..38a9a3d1222b 100644
--- a/include/net/fq_impl.h
+++ b/include/net/fq_impl.h
@@ -313,7 +313,7 @@ static int fq_init(struct fq *fq, int flows_cnt)
313 fq->limit = 8192; 313 fq->limit = 8192;
314 fq->memory_limit = 16 << 20; /* 16 MBytes */ 314 fq->memory_limit = 16 << 20; /* 16 MBytes */
315 315
316 fq->flows = kcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); 316 fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
317 if (!fq->flows) 317 if (!fq->flows)
318 return -ENOMEM; 318 return -ENOMEM;
319 319
@@ -331,7 +331,7 @@ static void fq_reset(struct fq *fq,
331 for (i = 0; i < fq->flows_cnt; i++) 331 for (i = 0; i < fq->flows_cnt; i++)
332 fq_flow_reset(fq, &fq->flows[i], free_func); 332 fq_flow_reset(fq, &fq->flows[i], free_func);
333 333
334 kfree(fq->flows); 334 kvfree(fq->flows);
335 fq->flows = NULL; 335 fq->flows = NULL;
336} 336}
337 337
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 50a67bd6a434..b8452cc0e059 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -439,8 +439,8 @@ static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
439{ 439{
440 unsigned long now = jiffies; 440 unsigned long now = jiffies;
441 441
442 if (neigh->used != now) 442 if (READ_ONCE(neigh->used) != now)
443 neigh->used = now; 443 WRITE_ONCE(neigh->used, now);
444 if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) 444 if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE)))
445 return __neigh_event_send(neigh, skb); 445 return __neigh_event_send(neigh, skb);
446 return 0; 446 return 0;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 001d294edf57..2d0275f13bbf 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -820,7 +820,8 @@ struct nft_expr_ops {
820 */ 820 */
821struct nft_expr { 821struct nft_expr {
822 const struct nft_expr_ops *ops; 822 const struct nft_expr_ops *ops;
823 unsigned char data[]; 823 unsigned char data[]
824 __attribute__((aligned(__alignof__(u64))));
824}; 825};
825 826
826static inline void *nft_expr_priv(const struct nft_expr *expr) 827static inline void *nft_expr_priv(const struct nft_expr *expr)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 637548d54b3e..d80acda231ae 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -15,6 +15,7 @@
15#include <linux/mutex.h> 15#include <linux/mutex.h>
16#include <linux/rwsem.h> 16#include <linux/rwsem.h>
17#include <linux/atomic.h> 17#include <linux/atomic.h>
18#include <linux/hashtable.h>
18#include <net/gen_stats.h> 19#include <net/gen_stats.h>
19#include <net/rtnetlink.h> 20#include <net/rtnetlink.h>
20#include <net/flow_offload.h> 21#include <net/flow_offload.h>
@@ -362,6 +363,7 @@ struct tcf_proto {
362 bool deleting; 363 bool deleting;
363 refcount_t refcnt; 364 refcount_t refcnt;
364 struct rcu_head rcu; 365 struct rcu_head rcu;
366 struct hlist_node destroy_ht_node;
365}; 367};
366 368
367struct qdisc_skb_cb { 369struct qdisc_skb_cb {
@@ -414,6 +416,8 @@ struct tcf_block {
414 struct list_head filter_chain_list; 416 struct list_head filter_chain_list;
415 } chain0; 417 } chain0;
416 struct rcu_head rcu; 418 struct rcu_head rcu;
419 DECLARE_HASHTABLE(proto_destroy_ht, 7);
420 struct mutex proto_destroy_lock; /* Lock for proto_destroy hashtable. */
417}; 421};
418 422
419#ifdef CONFIG_PROVE_LOCKING 423#ifdef CONFIG_PROVE_LOCKING
diff --git a/include/net/sock.h b/include/net/sock.h
index 8f9adcfac41b..718e62fbe869 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2342,7 +2342,7 @@ static inline ktime_t sock_read_timestamp(struct sock *sk)
2342 2342
2343 return kt; 2343 return kt;
2344#else 2344#else
2345 return sk->sk_stamp; 2345 return READ_ONCE(sk->sk_stamp);
2346#endif 2346#endif
2347} 2347}
2348 2348
@@ -2353,7 +2353,7 @@ static inline void sock_write_timestamp(struct sock *sk, ktime_t kt)
2353 sk->sk_stamp = kt; 2353 sk->sk_stamp = kt;
2354 write_sequnlock(&sk->sk_stamp_seq); 2354 write_sequnlock(&sk->sk_stamp_seq);
2355#else 2355#else
2356 sk->sk_stamp = kt; 2356 WRITE_ONCE(sk->sk_stamp, kt);
2357#endif 2357#endif
2358} 2358}
2359 2359
diff --git a/include/net/tls.h b/include/net/tls.h
index c664e6dba0d1..794e297483ea 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -40,6 +40,7 @@
40#include <linux/socket.h> 40#include <linux/socket.h>
41#include <linux/tcp.h> 41#include <linux/tcp.h>
42#include <linux/skmsg.h> 42#include <linux/skmsg.h>
43#include <linux/mutex.h>
43#include <linux/netdevice.h> 44#include <linux/netdevice.h>
44#include <linux/rcupdate.h> 45#include <linux/rcupdate.h>
45 46
@@ -269,6 +270,10 @@ struct tls_context {
269 270
270 bool in_tcp_sendpages; 271 bool in_tcp_sendpages;
271 bool pending_open_record_frags; 272 bool pending_open_record_frags;
273
274 struct mutex tx_lock; /* protects partially_sent_* fields and
275 * per-type TX fields
276 */
272 unsigned long flags; 277 unsigned long flags;
273 278
274 /* cache cold stuff */ 279 /* cache cold stuff */
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index 1e988fdeba34..6a6d2c7655ff 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * linux/can.h 3 * linux/can.h
4 * 4 *
diff --git a/include/uapi/linux/can/bcm.h b/include/uapi/linux/can/bcm.h
index 0fb328d93148..dd2b925b09ac 100644
--- a/include/uapi/linux/can/bcm.h
+++ b/include/uapi/linux/can/bcm.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * linux/can/bcm.h 3 * linux/can/bcm.h
4 * 4 *
diff --git a/include/uapi/linux/can/error.h b/include/uapi/linux/can/error.h
index bfc4b5d22a5e..34633283de64 100644
--- a/include/uapi/linux/can/error.h
+++ b/include/uapi/linux/can/error.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * linux/can/error.h 3 * linux/can/error.h
4 * 4 *
diff --git a/include/uapi/linux/can/gw.h b/include/uapi/linux/can/gw.h
index 3aea5388c8e4..c2190bbe21d8 100644
--- a/include/uapi/linux/can/gw.h
+++ b/include/uapi/linux/can/gw.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * linux/can/gw.h 3 * linux/can/gw.h
4 * 4 *
diff --git a/include/uapi/linux/can/j1939.h b/include/uapi/linux/can/j1939.h
index c32325342d30..df6e821075c1 100644
--- a/include/uapi/linux/can/j1939.h
+++ b/include/uapi/linux/can/j1939.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * j1939.h 3 * j1939.h
4 * 4 *
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 1bc70d3a4d39..6f598b73839e 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2/* 2/*
3 * linux/can/netlink.h 3 * linux/can/netlink.h
4 * 4 *
diff --git a/include/uapi/linux/can/raw.h b/include/uapi/linux/can/raw.h
index be3b36e7ff61..6a11d308eb5c 100644
--- a/include/uapi/linux/can/raw.h
+++ b/include/uapi/linux/can/raw.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ 1/* SPDX-License-Identifier: ((GPL-2.0-only WITH Linux-syscall-note) OR BSD-3-Clause) */
2/* 2/*
3 * linux/can/raw.h 3 * linux/can/raw.h
4 * 4 *
diff --git a/include/uapi/linux/can/vxcan.h b/include/uapi/linux/can/vxcan.h
index 066812d118a2..4fa9d8777a07 100644
--- a/include/uapi/linux/can/vxcan.h
+++ b/include/uapi/linux/can/vxcan.h
@@ -1,4 +1,4 @@
1/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ 1/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
2#ifndef _UAPI_CAN_VXCAN_H 2#ifndef _UAPI_CAN_VXCAN_H
3#define _UAPI_CAN_VXCAN_H 3#define _UAPI_CAN_VXCAN_H
4 4
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index ddd8addcdb5c..a3eaf08e7dd3 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -1311,12 +1311,12 @@ static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
1311 return false; 1311 return false;
1312 1312
1313 switch (off) { 1313 switch (off) {
1314 case offsetof(struct bpf_sysctl, write): 1314 case bpf_ctx_range(struct bpf_sysctl, write):
1315 if (type != BPF_READ) 1315 if (type != BPF_READ)
1316 return false; 1316 return false;
1317 bpf_ctx_record_field_size(info, size_default); 1317 bpf_ctx_record_field_size(info, size_default);
1318 return bpf_ctx_narrow_access_ok(off, size, size_default); 1318 return bpf_ctx_narrow_access_ok(off, size, size_default);
1319 case offsetof(struct bpf_sysctl, file_pos): 1319 case bpf_ctx_range(struct bpf_sysctl, file_pos):
1320 if (type == BPF_READ) { 1320 if (type == BPF_READ) {
1321 bpf_ctx_record_field_size(info, size_default); 1321 bpf_ctx_record_field_size(info, size_default);
1322 return bpf_ctx_narrow_access_ok(off, size, size_default); 1322 return bpf_ctx_narrow_access_ok(off, size, size_default);
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0937719b87e2..ace1cfaa24b6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -126,7 +126,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
126 return map; 126 return map;
127} 127}
128 128
129void *bpf_map_area_alloc(size_t size, int numa_node) 129void *bpf_map_area_alloc(u64 size, int numa_node)
130{ 130{
131 /* We really just want to fail instead of triggering OOM killer 131 /* We really just want to fail instead of triggering OOM killer
132 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc, 132 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -141,6 +141,9 @@ void *bpf_map_area_alloc(size_t size, int numa_node)
141 const gfp_t flags = __GFP_NOWARN | __GFP_ZERO; 141 const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
142 void *area; 142 void *area;
143 143
144 if (size >= SIZE_MAX)
145 return NULL;
146
144 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) { 147 if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
145 area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags, 148 area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
146 numa_node); 149 numa_node);
@@ -197,7 +200,7 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
197 atomic_long_sub(pages, &user->locked_vm); 200 atomic_long_sub(pages, &user->locked_vm);
198} 201}
199 202
200int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size) 203int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
201{ 204{
202 u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT; 205 u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
203 struct user_struct *user; 206 struct user_struct *user;
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index ed91ea31978a..12a4f4d93681 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -20,7 +20,6 @@ static unsigned int
20ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par) 20ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
21{ 21{
22 const struct ebt_nat_info *info = par->targinfo; 22 const struct ebt_nat_info *info = par->targinfo;
23 struct net_device *dev;
24 23
25 if (skb_ensure_writable(skb, ETH_ALEN)) 24 if (skb_ensure_writable(skb, ETH_ALEN))
26 return EBT_DROP; 25 return EBT_DROP;
@@ -33,10 +32,22 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
33 else 32 else
34 skb->pkt_type = PACKET_MULTICAST; 33 skb->pkt_type = PACKET_MULTICAST;
35 } else { 34 } else {
36 if (xt_hooknum(par) != NF_BR_BROUTING) 35 const struct net_device *dev;
37 dev = br_port_get_rcu(xt_in(par))->br->dev; 36
38 else 37 switch (xt_hooknum(par)) {
38 case NF_BR_BROUTING:
39 dev = xt_in(par); 39 dev = xt_in(par);
40 break;
41 case NF_BR_PRE_ROUTING:
42 dev = br_port_get_rcu(xt_in(par))->br->dev;
43 break;
44 default:
45 dev = NULL;
46 break;
47 }
48
49 if (!dev) /* NF_BR_LOCAL_OUT */
50 return info->target;
40 51
41 if (ether_addr_equal(info->mac, dev->dev_addr)) 52 if (ether_addr_equal(info->mac, dev->dev_addr))
42 skb->pkt_type = PACKET_HOST; 53 skb->pkt_type = PACKET_HOST;
diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
index 37c1040bcb9c..4d8ba701e15d 100644
--- a/net/can/j1939/socket.c
+++ b/net/can/j1939/socket.c
@@ -580,6 +580,7 @@ static int j1939_sk_release(struct socket *sock)
580 j1939_netdev_stop(priv); 580 j1939_netdev_stop(priv);
581 } 581 }
582 582
583 kfree(jsk->filters);
583 sock_orphan(sk); 584 sock_orphan(sk);
584 sock->sk = NULL; 585 sock->sk = NULL;
585 586
@@ -909,8 +910,10 @@ void j1939_sk_errqueue(struct j1939_session *session,
909 memset(serr, 0, sizeof(*serr)); 910 memset(serr, 0, sizeof(*serr));
910 switch (type) { 911 switch (type) {
911 case J1939_ERRQUEUE_ACK: 912 case J1939_ERRQUEUE_ACK:
912 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) 913 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) {
914 kfree_skb(skb);
913 return; 915 return;
916 }
914 917
915 serr->ee.ee_errno = ENOMSG; 918 serr->ee.ee_errno = ENOMSG;
916 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 919 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
@@ -918,8 +921,10 @@ void j1939_sk_errqueue(struct j1939_session *session,
918 state = "ACK"; 921 state = "ACK";
919 break; 922 break;
920 case J1939_ERRQUEUE_SCHED: 923 case J1939_ERRQUEUE_SCHED:
921 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) 924 if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) {
925 kfree_skb(skb);
922 return; 926 return;
927 }
923 928
924 serr->ee.ee_errno = ENOMSG; 929 serr->ee.ee_errno = ENOMSG;
925 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; 930 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
index fe000ea757ea..e5f1a56994c6 100644
--- a/net/can/j1939/transport.c
+++ b/net/can/j1939/transport.c
@@ -1273,9 +1273,27 @@ j1939_xtp_rx_abort(struct j1939_priv *priv, struct sk_buff *skb,
1273static void 1273static void
1274j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb) 1274j1939_xtp_rx_eoma_one(struct j1939_session *session, struct sk_buff *skb)
1275{ 1275{
1276 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
1277 const u8 *dat;
1278 int len;
1279
1276 if (j1939_xtp_rx_cmd_bad_pgn(session, skb)) 1280 if (j1939_xtp_rx_cmd_bad_pgn(session, skb))
1277 return; 1281 return;
1278 1282
1283 dat = skb->data;
1284
1285 if (skcb->addr.type == J1939_ETP)
1286 len = j1939_etp_ctl_to_size(dat);
1287 else
1288 len = j1939_tp_ctl_to_size(dat);
1289
1290 if (session->total_message_size != len) {
1291 netdev_warn_once(session->priv->ndev,
1292 "%s: 0x%p: Incorrect size. Expected: %i; got: %i.\n",
1293 __func__, session, session->total_message_size,
1294 len);
1295 }
1296
1279 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session); 1297 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
1280 1298
1281 session->pkt.tx_acked = session->pkt.total; 1299 session->pkt.tx_acked = session->pkt.total;
@@ -1432,7 +1450,7 @@ j1939_session *j1939_session_fresh_new(struct j1939_priv *priv,
1432 skcb = j1939_skb_to_cb(skb); 1450 skcb = j1939_skb_to_cb(skb);
1433 memcpy(skcb, rel_skcb, sizeof(*skcb)); 1451 memcpy(skcb, rel_skcb, sizeof(*skcb));
1434 1452
1435 session = j1939_session_new(priv, skb, skb->len); 1453 session = j1939_session_new(priv, skb, size);
1436 if (!session) { 1454 if (!session) {
1437 kfree_skb(skb); 1455 kfree_skb(skb);
1438 return NULL; 1456 return NULL;
diff --git a/net/core/skmsg.c b/net/core/skmsg.c
index cf390e0aa73d..ad31e4e53d0a 100644
--- a/net/core/skmsg.c
+++ b/net/core/skmsg.c
@@ -270,18 +270,28 @@ void sk_msg_trim(struct sock *sk, struct sk_msg *msg, int len)
270 270
271 msg->sg.data[i].length -= trim; 271 msg->sg.data[i].length -= trim;
272 sk_mem_uncharge(sk, trim); 272 sk_mem_uncharge(sk, trim);
273 /* Adjust copybreak if it falls into the trimmed part of last buf */
274 if (msg->sg.curr == i && msg->sg.copybreak > msg->sg.data[i].length)
275 msg->sg.copybreak = msg->sg.data[i].length;
273out: 276out:
274 /* If we trim data before curr pointer update copybreak and current 277 sk_msg_iter_var_next(i);
275 * so that any future copy operations start at new copy location. 278 msg->sg.end = i;
279
280 /* If we trim data a full sg elem before curr pointer update
281 * copybreak and current so that any future copy operations
282 * start at new copy location.
276 * However trimed data that has not yet been used in a copy op 283 * However trimed data that has not yet been used in a copy op
277 * does not require an update. 284 * does not require an update.
278 */ 285 */
279 if (msg->sg.curr >= i) { 286 if (!msg->sg.size) {
287 msg->sg.curr = msg->sg.start;
288 msg->sg.copybreak = 0;
289 } else if (sk_msg_iter_dist(msg->sg.start, msg->sg.curr) >=
290 sk_msg_iter_dist(msg->sg.start, msg->sg.end)) {
291 sk_msg_iter_var_prev(i);
280 msg->sg.curr = i; 292 msg->sg.curr = i;
281 msg->sg.copybreak = msg->sg.data[i].length; 293 msg->sg.copybreak = msg->sg.data[i].length;
282 } 294 }
283 sk_msg_iter_var_next(i);
284 msg->sg.end = i;
285} 295}
286EXPORT_SYMBOL_GPL(sk_msg_trim); 296EXPORT_SYMBOL_GPL(sk_msg_trim);
287 297
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 0d8f782c25cc..d19557c6d04b 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -416,7 +416,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
416 RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); 416 RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt));
417 newinet->mc_index = inet_iif(skb); 417 newinet->mc_index = inet_iif(skb);
418 newinet->mc_ttl = ip_hdr(skb)->ttl; 418 newinet->mc_ttl = ip_hdr(skb)->ttl;
419 newinet->inet_id = jiffies; 419 newinet->inet_id = prandom_u32();
420 420
421 if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL) 421 if (dst == NULL && (dst = inet_csk_route_child_sock(sk, newsk, req)) == NULL)
422 goto put_and_exit; 422 goto put_and_exit;
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 0913a090b2bf..f1888c683426 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1814,8 +1814,8 @@ int fib_sync_down_addr(struct net_device *dev, __be32 local)
1814 int ret = 0; 1814 int ret = 0;
1815 unsigned int hash = fib_laddr_hashfn(local); 1815 unsigned int hash = fib_laddr_hashfn(local);
1816 struct hlist_head *head = &fib_info_laddrhash[hash]; 1816 struct hlist_head *head = &fib_info_laddrhash[hash];
1817 int tb_id = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
1817 struct net *net = dev_net(dev); 1818 struct net *net = dev_net(dev);
1818 int tb_id = l3mdev_fib_table(dev);
1819 struct fib_info *fi; 1819 struct fib_info *fi;
1820 1820
1821 if (!fib_info_laddrhash || local == 0) 1821 if (!fib_info_laddrhash || local == 0)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index a63ff85fe141..e60bf8e7dd1a 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -621,6 +621,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
621{ 621{
622 struct __rt6_probe_work *work = NULL; 622 struct __rt6_probe_work *work = NULL;
623 const struct in6_addr *nh_gw; 623 const struct in6_addr *nh_gw;
624 unsigned long last_probe;
624 struct neighbour *neigh; 625 struct neighbour *neigh;
625 struct net_device *dev; 626 struct net_device *dev;
626 struct inet6_dev *idev; 627 struct inet6_dev *idev;
@@ -639,6 +640,7 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
639 nh_gw = &fib6_nh->fib_nh_gw6; 640 nh_gw = &fib6_nh->fib_nh_gw6;
640 dev = fib6_nh->fib_nh_dev; 641 dev = fib6_nh->fib_nh_dev;
641 rcu_read_lock_bh(); 642 rcu_read_lock_bh();
643 last_probe = READ_ONCE(fib6_nh->last_probe);
642 idev = __in6_dev_get(dev); 644 idev = __in6_dev_get(dev);
643 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw); 645 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
644 if (neigh) { 646 if (neigh) {
@@ -654,13 +656,15 @@ static void rt6_probe(struct fib6_nh *fib6_nh)
654 __neigh_set_probe_once(neigh); 656 __neigh_set_probe_once(neigh);
655 } 657 }
656 write_unlock(&neigh->lock); 658 write_unlock(&neigh->lock);
657 } else if (time_after(jiffies, fib6_nh->last_probe + 659 } else if (time_after(jiffies, last_probe +
658 idev->cnf.rtr_probe_interval)) { 660 idev->cnf.rtr_probe_interval)) {
659 work = kmalloc(sizeof(*work), GFP_ATOMIC); 661 work = kmalloc(sizeof(*work), GFP_ATOMIC);
660 } 662 }
661 663
662 if (work) { 664 if (!work || cmpxchg(&fib6_nh->last_probe,
663 fib6_nh->last_probe = jiffies; 665 last_probe, jiffies) != last_probe) {
666 kfree(work);
667 } else {
664 INIT_WORK(&work->work, rt6_probe_deferred); 668 INIT_WORK(&work->work, rt6_probe_deferred);
665 work->target = *nh_gw; 669 work->target = *nh_gw;
666 dev_hold(dev); 670 dev_hold(dev);
@@ -3383,6 +3387,9 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3383 int err; 3387 int err;
3384 3388
3385 fib6_nh->fib_nh_family = AF_INET6; 3389 fib6_nh->fib_nh_family = AF_INET6;
3390#ifdef CONFIG_IPV6_ROUTER_PREF
3391 fib6_nh->last_probe = jiffies;
3392#endif
3386 3393
3387 err = -ENODEV; 3394 err = -ENODEV;
3388 if (cfg->fc_ifindex) { 3395 if (cfg->fc_ifindex) {
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index aba094b4ccfc..2d05c4cfaf6d 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -1292,8 +1292,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
1292 ieee80211_remove_interfaces(local); 1292 ieee80211_remove_interfaces(local);
1293 fail_rate: 1293 fail_rate:
1294 rtnl_unlock(); 1294 rtnl_unlock();
1295 ieee80211_led_exit(local);
1296 fail_flows: 1295 fail_flows:
1296 ieee80211_led_exit(local);
1297 destroy_workqueue(local->workqueue); 1297 destroy_workqueue(local->workqueue);
1298 fail_workqueue: 1298 fail_workqueue:
1299 wiphy_unregister(local->hw.wiphy); 1299 wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index bd11fef2139f..8d3a2389b055 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2457,7 +2457,8 @@ unsigned long ieee80211_sta_last_active(struct sta_info *sta)
2457{ 2457{
2458 struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta); 2458 struct ieee80211_sta_rx_stats *stats = sta_get_last_rx_stats(sta);
2459 2459
2460 if (time_after(stats->last_rx, sta->status_stats.last_ack)) 2460 if (!sta->status_stats.last_ack ||
2461 time_after(stats->last_rx, sta->status_stats.last_ack))
2461 return stats->last_rx; 2462 return stats->last_rx;
2462 return sta->status_stats.last_ack; 2463 return sta->status_stats.last_ack;
2463} 2464}
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index e64d5f9a89dd..d73d1828216a 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -296,7 +296,8 @@ ip_set_get_ipaddr4(struct nlattr *nla, __be32 *ipaddr)
296 296
297 if (unlikely(!flag_nested(nla))) 297 if (unlikely(!flag_nested(nla)))
298 return -IPSET_ERR_PROTOCOL; 298 return -IPSET_ERR_PROTOCOL;
299 if (nla_parse_nested_deprecated(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL)) 299 if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
300 ipaddr_policy, NULL))
300 return -IPSET_ERR_PROTOCOL; 301 return -IPSET_ERR_PROTOCOL;
301 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4))) 302 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV4)))
302 return -IPSET_ERR_PROTOCOL; 303 return -IPSET_ERR_PROTOCOL;
@@ -314,7 +315,8 @@ ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr)
314 if (unlikely(!flag_nested(nla))) 315 if (unlikely(!flag_nested(nla)))
315 return -IPSET_ERR_PROTOCOL; 316 return -IPSET_ERR_PROTOCOL;
316 317
317 if (nla_parse_nested_deprecated(tb, IPSET_ATTR_IPADDR_MAX, nla, ipaddr_policy, NULL)) 318 if (nla_parse_nested(tb, IPSET_ATTR_IPADDR_MAX, nla,
319 ipaddr_policy, NULL))
318 return -IPSET_ERR_PROTOCOL; 320 return -IPSET_ERR_PROTOCOL;
319 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6))) 321 if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_IPADDR_IPV6)))
320 return -IPSET_ERR_PROTOCOL; 322 return -IPSET_ERR_PROTOCOL;
@@ -934,7 +936,8 @@ static int ip_set_create(struct net *net, struct sock *ctnl,
934 936
935 /* Without holding any locks, create private part. */ 937 /* Without holding any locks, create private part. */
936 if (attr[IPSET_ATTR_DATA] && 938 if (attr[IPSET_ATTR_DATA] &&
937 nla_parse_nested_deprecated(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA], set->type->create_policy, NULL)) { 939 nla_parse_nested(tb, IPSET_ATTR_CREATE_MAX, attr[IPSET_ATTR_DATA],
940 set->type->create_policy, NULL)) {
938 ret = -IPSET_ERR_PROTOCOL; 941 ret = -IPSET_ERR_PROTOCOL;
939 goto put_out; 942 goto put_out;
940 } 943 }
@@ -1281,6 +1284,14 @@ dump_attrs(struct nlmsghdr *nlh)
1281 } 1284 }
1282} 1285}
1283 1286
1287static const struct nla_policy
1288ip_set_dump_policy[IPSET_ATTR_CMD_MAX + 1] = {
1289 [IPSET_ATTR_PROTOCOL] = { .type = NLA_U8 },
1290 [IPSET_ATTR_SETNAME] = { .type = NLA_NUL_STRING,
1291 .len = IPSET_MAXNAMELEN - 1 },
1292 [IPSET_ATTR_FLAGS] = { .type = NLA_U32 },
1293};
1294
1284static int 1295static int
1285dump_init(struct netlink_callback *cb, struct ip_set_net *inst) 1296dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
1286{ 1297{
@@ -1292,9 +1303,9 @@ dump_init(struct netlink_callback *cb, struct ip_set_net *inst)
1292 ip_set_id_t index; 1303 ip_set_id_t index;
1293 int ret; 1304 int ret;
1294 1305
1295 ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, attr, 1306 ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, attr,
1296 nlh->nlmsg_len - min_len, 1307 nlh->nlmsg_len - min_len,
1297 ip_set_setname_policy, NULL); 1308 ip_set_dump_policy, NULL);
1298 if (ret) 1309 if (ret)
1299 return ret; 1310 return ret;
1300 1311
@@ -1543,9 +1554,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
1543 memcpy(&errmsg->msg, nlh, nlh->nlmsg_len); 1554 memcpy(&errmsg->msg, nlh, nlh->nlmsg_len);
1544 cmdattr = (void *)&errmsg->msg + min_len; 1555 cmdattr = (void *)&errmsg->msg + min_len;
1545 1556
1546 ret = nla_parse_deprecated(cda, IPSET_ATTR_CMD_MAX, cmdattr, 1557 ret = nla_parse(cda, IPSET_ATTR_CMD_MAX, cmdattr,
1547 nlh->nlmsg_len - min_len, 1558 nlh->nlmsg_len - min_len, ip_set_adt_policy,
1548 ip_set_adt_policy, NULL); 1559 NULL);
1549 1560
1550 if (ret) { 1561 if (ret) {
1551 nlmsg_free(skb2); 1562 nlmsg_free(skb2);
@@ -1596,7 +1607,9 @@ static int ip_set_ad(struct net *net, struct sock *ctnl,
1596 1607
1597 use_lineno = !!attr[IPSET_ATTR_LINENO]; 1608 use_lineno = !!attr[IPSET_ATTR_LINENO];
1598 if (attr[IPSET_ATTR_DATA]) { 1609 if (attr[IPSET_ATTR_DATA]) {
1599 if (nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL)) 1610 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX,
1611 attr[IPSET_ATTR_DATA],
1612 set->type->adt_policy, NULL))
1600 return -IPSET_ERR_PROTOCOL; 1613 return -IPSET_ERR_PROTOCOL;
1601 ret = call_ad(ctnl, skb, set, tb, adt, flags, 1614 ret = call_ad(ctnl, skb, set, tb, adt, flags,
1602 use_lineno); 1615 use_lineno);
@@ -1606,7 +1619,8 @@ static int ip_set_ad(struct net *net, struct sock *ctnl,
1606 nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) { 1619 nla_for_each_nested(nla, attr[IPSET_ATTR_ADT], nla_rem) {
1607 if (nla_type(nla) != IPSET_ATTR_DATA || 1620 if (nla_type(nla) != IPSET_ATTR_DATA ||
1608 !flag_nested(nla) || 1621 !flag_nested(nla) ||
1609 nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, nla, set->type->adt_policy, NULL)) 1622 nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, nla,
1623 set->type->adt_policy, NULL))
1610 return -IPSET_ERR_PROTOCOL; 1624 return -IPSET_ERR_PROTOCOL;
1611 ret = call_ad(ctnl, skb, set, tb, adt, 1625 ret = call_ad(ctnl, skb, set, tb, adt,
1612 flags, use_lineno); 1626 flags, use_lineno);
@@ -1655,7 +1669,8 @@ static int ip_set_utest(struct net *net, struct sock *ctnl, struct sk_buff *skb,
1655 if (!set) 1669 if (!set)
1656 return -ENOENT; 1670 return -ENOENT;
1657 1671
1658 if (nla_parse_nested_deprecated(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA], set->type->adt_policy, NULL)) 1672 if (nla_parse_nested(tb, IPSET_ATTR_ADT_MAX, attr[IPSET_ATTR_DATA],
1673 set->type->adt_policy, NULL))
1659 return -IPSET_ERR_PROTOCOL; 1674 return -IPSET_ERR_PROTOCOL;
1660 1675
1661 rcu_read_lock_bh(); 1676 rcu_read_lock_bh();
@@ -1961,7 +1976,7 @@ static const struct nfnl_callback ip_set_netlink_subsys_cb[IPSET_MSG_MAX] = {
1961 [IPSET_CMD_LIST] = { 1976 [IPSET_CMD_LIST] = {
1962 .call = ip_set_dump, 1977 .call = ip_set_dump,
1963 .attr_count = IPSET_ATTR_CMD_MAX, 1978 .attr_count = IPSET_ATTR_CMD_MAX,
1964 .policy = ip_set_setname_policy, 1979 .policy = ip_set_dump_policy,
1965 }, 1980 },
1966 [IPSET_CMD_SAVE] = { 1981 [IPSET_CMD_SAVE] = {
1967 .call = ip_set_dump, 1982 .call = ip_set_dump,
@@ -2069,8 +2084,9 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
2069 } 2084 }
2070 2085
2071 req_version->version = IPSET_PROTOCOL; 2086 req_version->version = IPSET_PROTOCOL;
2072 ret = copy_to_user(user, req_version, 2087 if (copy_to_user(user, req_version,
2073 sizeof(struct ip_set_req_version)); 2088 sizeof(struct ip_set_req_version)))
2089 ret = -EFAULT;
2074 goto done; 2090 goto done;
2075 } 2091 }
2076 case IP_SET_OP_GET_BYNAME: { 2092 case IP_SET_OP_GET_BYNAME: {
@@ -2129,7 +2145,8 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
2129 } /* end of switch(op) */ 2145 } /* end of switch(op) */
2130 2146
2131copy: 2147copy:
2132 ret = copy_to_user(user, data, copylen); 2148 if (copy_to_user(user, data, copylen))
2149 ret = -EFAULT;
2133 2150
2134done: 2151done:
2135 vfree(data); 2152 vfree(data);
diff --git a/net/netfilter/ipset/ip_set_hash_ipmac.c b/net/netfilter/ipset/ip_set_hash_ipmac.c
index 24d8f4df4230..4ce563eb927d 100644
--- a/net/netfilter/ipset/ip_set_hash_ipmac.c
+++ b/net/netfilter/ipset/ip_set_hash_ipmac.c
@@ -209,7 +209,7 @@ hash_ipmac6_kadt(struct ip_set *set, const struct sk_buff *skb,
209 (skb_mac_header(skb) + ETH_HLEN) > skb->data) 209 (skb_mac_header(skb) + ETH_HLEN) > skb->data)
210 return -EINVAL; 210 return -EINVAL;
211 211
212 if (opt->flags & IPSET_DIM_ONE_SRC) 212 if (opt->flags & IPSET_DIM_TWO_SRC)
213 ether_addr_copy(e.ether, eth_hdr(skb)->h_source); 213 ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
214 else 214 else
215 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest); 215 ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index c259cbc3ef45..3d932de0ad29 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -368,6 +368,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
368 [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, 368 [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
369 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 369 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
370 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 370 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
371 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
371 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, 372 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
372 [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, 373 [IPSET_ATTR_BYTES] = { .type = NLA_U64 },
373 [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, 374 [IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index a3ae69bfee66..4398322fad59 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -476,6 +476,7 @@ static struct ip_set_type hash_netnet_type __read_mostly = {
476 [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, 476 [IPSET_ATTR_CIDR] = { .type = NLA_U8 },
477 [IPSET_ATTR_CIDR2] = { .type = NLA_U8 }, 477 [IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
478 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, 478 [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
479 [IPSET_ATTR_LINENO] = { .type = NLA_U32 },
479 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, 480 [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
480 [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, 481 [IPSET_ATTR_BYTES] = { .type = NLA_U64 },
481 [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, 482 [IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index d481f9baca2f..712a428509ad 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1922,6 +1922,7 @@ static int nf_tables_newchain(struct net *net, struct sock *nlsk,
1922 if (nlh->nlmsg_flags & NLM_F_REPLACE) 1922 if (nlh->nlmsg_flags & NLM_F_REPLACE)
1923 return -EOPNOTSUPP; 1923 return -EOPNOTSUPP;
1924 1924
1925 flags |= chain->flags & NFT_BASE_CHAIN;
1925 return nf_tables_updchain(&ctx, genmask, policy, flags); 1926 return nf_tables_updchain(&ctx, genmask, policy, flags);
1926 } 1927 }
1927 1928
@@ -5143,9 +5144,6 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
5143 struct nft_trans *trans; 5144 struct nft_trans *trans;
5144 int err; 5145 int err;
5145 5146
5146 if (!obj->ops->update)
5147 return -EOPNOTSUPP;
5148
5149 trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ, 5147 trans = nft_trans_alloc(ctx, NFT_MSG_NEWOBJ,
5150 sizeof(struct nft_trans_obj)); 5148 sizeof(struct nft_trans_obj));
5151 if (!trans) 5149 if (!trans)
@@ -6499,7 +6497,8 @@ static void nft_obj_commit_update(struct nft_trans *trans)
6499 obj = nft_trans_obj(trans); 6497 obj = nft_trans_obj(trans);
6500 newobj = nft_trans_obj_newobj(trans); 6498 newobj = nft_trans_obj_newobj(trans);
6501 6499
6502 obj->ops->update(obj, newobj); 6500 if (obj->ops->update)
6501 obj->ops->update(obj, newobj);
6503 6502
6504 kfree(newobj); 6503 kfree(newobj);
6505} 6504}
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index ad783f4840ef..e25dab8128db 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -334,7 +334,8 @@ int nft_flow_rule_offload_commit(struct net *net)
334 334
335 switch (trans->msg_type) { 335 switch (trans->msg_type) {
336 case NFT_MSG_NEWCHAIN: 336 case NFT_MSG_NEWCHAIN:
337 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) 337 if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) ||
338 nft_trans_chain_update(trans))
338 continue; 339 continue;
339 340
340 policy = nft_trans_chain_policy(trans); 341 policy = nft_trans_chain_policy(trans);
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 974300178fa9..02afa752dd2e 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -134,12 +134,13 @@ static int nft_bitwise_offload(struct nft_offload_ctx *ctx,
134 const struct nft_expr *expr) 134 const struct nft_expr *expr)
135{ 135{
136 const struct nft_bitwise *priv = nft_expr_priv(expr); 136 const struct nft_bitwise *priv = nft_expr_priv(expr);
137 struct nft_offload_reg *reg = &ctx->regs[priv->dreg];
137 138
138 if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) || 139 if (memcmp(&priv->xor, &zero, sizeof(priv->xor)) ||
139 priv->sreg != priv->dreg) 140 priv->sreg != priv->dreg || priv->len != reg->len)
140 return -EOPNOTSUPP; 141 return -EOPNOTSUPP;
141 142
142 memcpy(&ctx->regs[priv->dreg].mask, &priv->mask, sizeof(priv->mask)); 143 memcpy(&reg->mask, &priv->mask, sizeof(priv->mask));
143 144
144 return 0; 145 return 0;
145} 146}
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index bd173b1824c6..0744b2bb46da 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -116,7 +116,7 @@ static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
116 u8 *mask = (u8 *)&flow->match.mask; 116 u8 *mask = (u8 *)&flow->match.mask;
117 u8 *key = (u8 *)&flow->match.key; 117 u8 *key = (u8 *)&flow->match.key;
118 118
119 if (priv->op != NFT_CMP_EQ) 119 if (priv->op != NFT_CMP_EQ || reg->len != priv->len)
120 return -EOPNOTSUPP; 120 return -EOPNOTSUPP;
121 121
122 memcpy(key + reg->offset, &priv->data, priv->len); 122 memcpy(key + reg->offset, &priv->data, priv->len);
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index 17e6ca62f1be..afde0d763039 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -1099,7 +1099,6 @@ static int nfc_genl_llc_set_params(struct sk_buff *skb, struct genl_info *info)
1099 1099
1100 local = nfc_llcp_find_local(dev); 1100 local = nfc_llcp_find_local(dev);
1101 if (!local) { 1101 if (!local) {
1102 nfc_put_device(dev);
1103 rc = -ENODEV; 1102 rc = -ENODEV;
1104 goto exit; 1103 goto exit;
1105 } 1104 }
@@ -1159,7 +1158,6 @@ static int nfc_genl_llc_sdreq(struct sk_buff *skb, struct genl_info *info)
1159 1158
1160 local = nfc_llcp_find_local(dev); 1159 local = nfc_llcp_find_local(dev);
1161 if (!local) { 1160 if (!local) {
1162 nfc_put_device(dev);
1163 rc = -ENODEV; 1161 rc = -ENODEV;
1164 goto exit; 1162 goto exit;
1165 } 1163 }
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 8717c0b26c90..20d60b8fcb70 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -21,6 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/idr.h> 22#include <linux/idr.h>
23#include <linux/rhashtable.h> 23#include <linux/rhashtable.h>
24#include <linux/jhash.h>
24#include <net/net_namespace.h> 25#include <net/net_namespace.h>
25#include <net/sock.h> 26#include <net/sock.h>
26#include <net/netlink.h> 27#include <net/netlink.h>
@@ -47,6 +48,62 @@ static LIST_HEAD(tcf_proto_base);
47/* Protects list of registered TC modules. It is pure SMP lock. */ 48/* Protects list of registered TC modules. It is pure SMP lock. */
48static DEFINE_RWLOCK(cls_mod_lock); 49static DEFINE_RWLOCK(cls_mod_lock);
49 50
51static u32 destroy_obj_hashfn(const struct tcf_proto *tp)
52{
53 return jhash_3words(tp->chain->index, tp->prio,
54 (__force __u32)tp->protocol, 0);
55}
56
57static void tcf_proto_signal_destroying(struct tcf_chain *chain,
58 struct tcf_proto *tp)
59{
60 struct tcf_block *block = chain->block;
61
62 mutex_lock(&block->proto_destroy_lock);
63 hash_add_rcu(block->proto_destroy_ht, &tp->destroy_ht_node,
64 destroy_obj_hashfn(tp));
65 mutex_unlock(&block->proto_destroy_lock);
66}
67
68static bool tcf_proto_cmp(const struct tcf_proto *tp1,
69 const struct tcf_proto *tp2)
70{
71 return tp1->chain->index == tp2->chain->index &&
72 tp1->prio == tp2->prio &&
73 tp1->protocol == tp2->protocol;
74}
75
76static bool tcf_proto_exists_destroying(struct tcf_chain *chain,
77 struct tcf_proto *tp)
78{
79 u32 hash = destroy_obj_hashfn(tp);
80 struct tcf_proto *iter;
81 bool found = false;
82
83 rcu_read_lock();
84 hash_for_each_possible_rcu(chain->block->proto_destroy_ht, iter,
85 destroy_ht_node, hash) {
86 if (tcf_proto_cmp(tp, iter)) {
87 found = true;
88 break;
89 }
90 }
91 rcu_read_unlock();
92
93 return found;
94}
95
96static void
97tcf_proto_signal_destroyed(struct tcf_chain *chain, struct tcf_proto *tp)
98{
99 struct tcf_block *block = chain->block;
100
101 mutex_lock(&block->proto_destroy_lock);
102 if (hash_hashed(&tp->destroy_ht_node))
103 hash_del_rcu(&tp->destroy_ht_node);
104 mutex_unlock(&block->proto_destroy_lock);
105}
106
50/* Find classifier type by string name */ 107/* Find classifier type by string name */
51 108
52static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind) 109static const struct tcf_proto_ops *__tcf_proto_lookup_ops(const char *kind)
@@ -234,9 +291,11 @@ static void tcf_proto_get(struct tcf_proto *tp)
234static void tcf_chain_put(struct tcf_chain *chain); 291static void tcf_chain_put(struct tcf_chain *chain);
235 292
236static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held, 293static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
237 struct netlink_ext_ack *extack) 294 bool sig_destroy, struct netlink_ext_ack *extack)
238{ 295{
239 tp->ops->destroy(tp, rtnl_held, extack); 296 tp->ops->destroy(tp, rtnl_held, extack);
297 if (sig_destroy)
298 tcf_proto_signal_destroyed(tp->chain, tp);
240 tcf_chain_put(tp->chain); 299 tcf_chain_put(tp->chain);
241 module_put(tp->ops->owner); 300 module_put(tp->ops->owner);
242 kfree_rcu(tp, rcu); 301 kfree_rcu(tp, rcu);
@@ -246,7 +305,7 @@ static void tcf_proto_put(struct tcf_proto *tp, bool rtnl_held,
246 struct netlink_ext_ack *extack) 305 struct netlink_ext_ack *extack)
247{ 306{
248 if (refcount_dec_and_test(&tp->refcnt)) 307 if (refcount_dec_and_test(&tp->refcnt))
249 tcf_proto_destroy(tp, rtnl_held, extack); 308 tcf_proto_destroy(tp, rtnl_held, true, extack);
250} 309}
251 310
252static int walker_check_empty(struct tcf_proto *tp, void *fh, 311static int walker_check_empty(struct tcf_proto *tp, void *fh,
@@ -370,6 +429,7 @@ static bool tcf_chain_detach(struct tcf_chain *chain)
370static void tcf_block_destroy(struct tcf_block *block) 429static void tcf_block_destroy(struct tcf_block *block)
371{ 430{
372 mutex_destroy(&block->lock); 431 mutex_destroy(&block->lock);
432 mutex_destroy(&block->proto_destroy_lock);
373 kfree_rcu(block, rcu); 433 kfree_rcu(block, rcu);
374} 434}
375 435
@@ -545,6 +605,12 @@ static void tcf_chain_flush(struct tcf_chain *chain, bool rtnl_held)
545 605
546 mutex_lock(&chain->filter_chain_lock); 606 mutex_lock(&chain->filter_chain_lock);
547 tp = tcf_chain_dereference(chain->filter_chain, chain); 607 tp = tcf_chain_dereference(chain->filter_chain, chain);
608 while (tp) {
609 tp_next = rcu_dereference_protected(tp->next, 1);
610 tcf_proto_signal_destroying(chain, tp);
611 tp = tp_next;
612 }
613 tp = tcf_chain_dereference(chain->filter_chain, chain);
548 RCU_INIT_POINTER(chain->filter_chain, NULL); 614 RCU_INIT_POINTER(chain->filter_chain, NULL);
549 tcf_chain0_head_change(chain, NULL); 615 tcf_chain0_head_change(chain, NULL);
550 chain->flushing = true; 616 chain->flushing = true;
@@ -844,6 +910,7 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
844 return ERR_PTR(-ENOMEM); 910 return ERR_PTR(-ENOMEM);
845 } 911 }
846 mutex_init(&block->lock); 912 mutex_init(&block->lock);
913 mutex_init(&block->proto_destroy_lock);
847 init_rwsem(&block->cb_lock); 914 init_rwsem(&block->cb_lock);
848 flow_block_init(&block->flow_block); 915 flow_block_init(&block->flow_block);
849 INIT_LIST_HEAD(&block->chain_list); 916 INIT_LIST_HEAD(&block->chain_list);
@@ -1621,6 +1688,12 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1621 1688
1622 mutex_lock(&chain->filter_chain_lock); 1689 mutex_lock(&chain->filter_chain_lock);
1623 1690
1691 if (tcf_proto_exists_destroying(chain, tp_new)) {
1692 mutex_unlock(&chain->filter_chain_lock);
1693 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1694 return ERR_PTR(-EAGAIN);
1695 }
1696
1624 tp = tcf_chain_tp_find(chain, &chain_info, 1697 tp = tcf_chain_tp_find(chain, &chain_info,
1625 protocol, prio, false); 1698 protocol, prio, false);
1626 if (!tp) 1699 if (!tp)
@@ -1628,10 +1701,10 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
1628 mutex_unlock(&chain->filter_chain_lock); 1701 mutex_unlock(&chain->filter_chain_lock);
1629 1702
1630 if (tp) { 1703 if (tp) {
1631 tcf_proto_destroy(tp_new, rtnl_held, NULL); 1704 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1632 tp_new = tp; 1705 tp_new = tp;
1633 } else if (err) { 1706 } else if (err) {
1634 tcf_proto_destroy(tp_new, rtnl_held, NULL); 1707 tcf_proto_destroy(tp_new, rtnl_held, false, NULL);
1635 tp_new = ERR_PTR(err); 1708 tp_new = ERR_PTR(err);
1636 } 1709 }
1637 1710
@@ -1669,6 +1742,7 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
1669 return; 1742 return;
1670 } 1743 }
1671 1744
1745 tcf_proto_signal_destroying(chain, tp);
1672 next = tcf_chain_dereference(chain_info.next, chain); 1746 next = tcf_chain_dereference(chain_info.next, chain);
1673 if (tp == chain->filter_chain) 1747 if (tp == chain->filter_chain)
1674 tcf_chain0_head_change(chain, next); 1748 tcf_chain0_head_change(chain, next);
@@ -2188,6 +2262,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
2188 err = -EINVAL; 2262 err = -EINVAL;
2189 goto errout_locked; 2263 goto errout_locked;
2190 } else if (t->tcm_handle == 0) { 2264 } else if (t->tcm_handle == 0) {
2265 tcf_proto_signal_destroying(chain, tp);
2191 tcf_chain_tp_remove(chain, &chain_info, tp); 2266 tcf_chain_tp_remove(chain, &chain_info, tp);
2192 mutex_unlock(&chain->filter_chain_lock); 2267 mutex_unlock(&chain->filter_chain_lock);
2193 2268
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 2121187229cd..7cd68628c637 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1224,8 +1224,6 @@ static int taprio_enable_offload(struct net_device *dev,
1224 goto done; 1224 goto done;
1225 } 1225 }
1226 1226
1227 taprio_offload_config_changed(q);
1228
1229done: 1227done:
1230 taprio_offload_free(offload); 1228 taprio_offload_free(offload);
1231 1229
@@ -1505,6 +1503,9 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
1505 call_rcu(&admin->rcu, taprio_free_sched_cb); 1503 call_rcu(&admin->rcu, taprio_free_sched_cb);
1506 1504
1507 spin_unlock_irqrestore(&q->current_entry_lock, flags); 1505 spin_unlock_irqrestore(&q->current_entry_lock, flags);
1506
1507 if (FULL_OFFLOAD_IS_ENABLED(taprio_flags))
1508 taprio_offload_config_changed(q);
1508 } 1509 }
1509 1510
1510 new_admin = NULL; 1511 new_admin = NULL;
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 2920b006f65c..571e6d84da3b 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -376,8 +376,6 @@ static int smc_pnet_fill_entry(struct net *net,
376 return 0; 376 return 0;
377 377
378error: 378error:
379 if (pnetelem->ndev)
380 dev_put(pnetelem->ndev);
381 return rc; 379 return rc;
382} 380}
383 381
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index f959487c5cd1..683d00837693 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -523,8 +523,10 @@ last_record:
523int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) 523int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
524{ 524{
525 unsigned char record_type = TLS_RECORD_TYPE_DATA; 525 unsigned char record_type = TLS_RECORD_TYPE_DATA;
526 struct tls_context *tls_ctx = tls_get_ctx(sk);
526 int rc; 527 int rc;
527 528
529 mutex_lock(&tls_ctx->tx_lock);
528 lock_sock(sk); 530 lock_sock(sk);
529 531
530 if (unlikely(msg->msg_controllen)) { 532 if (unlikely(msg->msg_controllen)) {
@@ -538,12 +540,14 @@ int tls_device_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
538 540
539out: 541out:
540 release_sock(sk); 542 release_sock(sk);
543 mutex_unlock(&tls_ctx->tx_lock);
541 return rc; 544 return rc;
542} 545}
543 546
544int tls_device_sendpage(struct sock *sk, struct page *page, 547int tls_device_sendpage(struct sock *sk, struct page *page,
545 int offset, size_t size, int flags) 548 int offset, size_t size, int flags)
546{ 549{
550 struct tls_context *tls_ctx = tls_get_ctx(sk);
547 struct iov_iter msg_iter; 551 struct iov_iter msg_iter;
548 char *kaddr = kmap(page); 552 char *kaddr = kmap(page);
549 struct kvec iov; 553 struct kvec iov;
@@ -552,6 +556,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
552 if (flags & MSG_SENDPAGE_NOTLAST) 556 if (flags & MSG_SENDPAGE_NOTLAST)
553 flags |= MSG_MORE; 557 flags |= MSG_MORE;
554 558
559 mutex_lock(&tls_ctx->tx_lock);
555 lock_sock(sk); 560 lock_sock(sk);
556 561
557 if (flags & MSG_OOB) { 562 if (flags & MSG_OOB) {
@@ -568,6 +573,7 @@ int tls_device_sendpage(struct sock *sk, struct page *page,
568 573
569out: 574out:
570 release_sock(sk); 575 release_sock(sk);
576 mutex_unlock(&tls_ctx->tx_lock);
571 return rc; 577 return rc;
572} 578}
573 579
@@ -623,9 +629,11 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
623 629
624void tls_device_write_space(struct sock *sk, struct tls_context *ctx) 630void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
625{ 631{
626 if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) { 632 if (tls_is_partially_sent_record(ctx)) {
627 gfp_t sk_allocation = sk->sk_allocation; 633 gfp_t sk_allocation = sk->sk_allocation;
628 634
635 WARN_ON_ONCE(sk->sk_write_pending);
636
629 sk->sk_allocation = GFP_ATOMIC; 637 sk->sk_allocation = GFP_ATOMIC;
630 tls_push_partial_record(sk, ctx, 638 tls_push_partial_record(sk, ctx,
631 MSG_DONTWAIT | MSG_NOSIGNAL | 639 MSG_DONTWAIT | MSG_NOSIGNAL |
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index ac88877dcade..0775ae40fcfb 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -267,6 +267,7 @@ void tls_ctx_free(struct sock *sk, struct tls_context *ctx)
267 267
268 memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send)); 268 memzero_explicit(&ctx->crypto_send, sizeof(ctx->crypto_send));
269 memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv)); 269 memzero_explicit(&ctx->crypto_recv, sizeof(ctx->crypto_recv));
270 mutex_destroy(&ctx->tx_lock);
270 271
271 if (sk) 272 if (sk)
272 kfree_rcu(ctx, rcu); 273 kfree_rcu(ctx, rcu);
@@ -612,6 +613,7 @@ static struct tls_context *create_ctx(struct sock *sk)
612 if (!ctx) 613 if (!ctx)
613 return NULL; 614 return NULL;
614 615
616 mutex_init(&ctx->tx_lock);
615 rcu_assign_pointer(icsk->icsk_ulp_data, ctx); 617 rcu_assign_pointer(icsk->icsk_ulp_data, ctx);
616 ctx->sk_proto = sk->sk_prot; 618 ctx->sk_proto = sk->sk_prot;
617 return ctx; 619 return ctx;
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index c2b5e0d2ba1a..446f23c1f3ce 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -897,15 +897,9 @@ int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
897 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) 897 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
898 return -ENOTSUPP; 898 return -ENOTSUPP;
899 899
900 mutex_lock(&tls_ctx->tx_lock);
900 lock_sock(sk); 901 lock_sock(sk);
901 902
902 /* Wait till there is any pending write on socket */
903 if (unlikely(sk->sk_write_pending)) {
904 ret = wait_on_pending_writer(sk, &timeo);
905 if (unlikely(ret))
906 goto send_end;
907 }
908
909 if (unlikely(msg->msg_controllen)) { 903 if (unlikely(msg->msg_controllen)) {
910 ret = tls_proccess_cmsg(sk, msg, &record_type); 904 ret = tls_proccess_cmsg(sk, msg, &record_type);
911 if (ret) { 905 if (ret) {
@@ -1091,6 +1085,7 @@ send_end:
1091 ret = sk_stream_error(sk, msg->msg_flags, ret); 1085 ret = sk_stream_error(sk, msg->msg_flags, ret);
1092 1086
1093 release_sock(sk); 1087 release_sock(sk);
1088 mutex_unlock(&tls_ctx->tx_lock);
1094 return copied ? copied : ret; 1089 return copied ? copied : ret;
1095} 1090}
1096 1091
@@ -1114,13 +1109,6 @@ static int tls_sw_do_sendpage(struct sock *sk, struct page *page,
1114 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST)); 1109 eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST));
1115 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1110 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
1116 1111
1117 /* Wait till there is any pending write on socket */
1118 if (unlikely(sk->sk_write_pending)) {
1119 ret = wait_on_pending_writer(sk, &timeo);
1120 if (unlikely(ret))
1121 goto sendpage_end;
1122 }
1123
1124 /* Call the sk_stream functions to manage the sndbuf mem. */ 1112 /* Call the sk_stream functions to manage the sndbuf mem. */
1125 while (size > 0) { 1113 while (size > 0) {
1126 size_t copy, required_size; 1114 size_t copy, required_size;
@@ -1219,15 +1207,18 @@ sendpage_end:
1219int tls_sw_sendpage(struct sock *sk, struct page *page, 1207int tls_sw_sendpage(struct sock *sk, struct page *page,
1220 int offset, size_t size, int flags) 1208 int offset, size_t size, int flags)
1221{ 1209{
1210 struct tls_context *tls_ctx = tls_get_ctx(sk);
1222 int ret; 1211 int ret;
1223 1212
1224 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL | 1213 if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
1225 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY)) 1214 MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY))
1226 return -ENOTSUPP; 1215 return -ENOTSUPP;
1227 1216
1217 mutex_lock(&tls_ctx->tx_lock);
1228 lock_sock(sk); 1218 lock_sock(sk);
1229 ret = tls_sw_do_sendpage(sk, page, offset, size, flags); 1219 ret = tls_sw_do_sendpage(sk, page, offset, size, flags);
1230 release_sock(sk); 1220 release_sock(sk);
1221 mutex_unlock(&tls_ctx->tx_lock);
1231 return ret; 1222 return ret;
1232} 1223}
1233 1224
@@ -2170,9 +2161,11 @@ static void tx_work_handler(struct work_struct *work)
2170 2161
2171 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask)) 2162 if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
2172 return; 2163 return;
2164 mutex_lock(&tls_ctx->tx_lock);
2173 lock_sock(sk); 2165 lock_sock(sk);
2174 tls_tx_records(sk, -1); 2166 tls_tx_records(sk, -1);
2175 release_sock(sk); 2167 release_sock(sk);
2168 mutex_unlock(&tls_ctx->tx_lock);
2176} 2169}
2177 2170
2178void tls_sw_write_space(struct sock *sk, struct tls_context *ctx) 2171void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
@@ -2180,12 +2173,9 @@ void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
2180 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx); 2173 struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
2181 2174
2182 /* Schedule the transmission if tx list is ready */ 2175 /* Schedule the transmission if tx list is ready */
2183 if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) { 2176 if (is_tx_ready(tx_ctx) &&
2184 /* Schedule the transmission */ 2177 !test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
2185 if (!test_and_set_bit(BIT_TX_SCHEDULED, 2178 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2186 &tx_ctx->tx_bitmask))
2187 schedule_delayed_work(&tx_ctx->tx_work.work, 0);
2188 }
2189} 2179}
2190 2180
2191void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx) 2181void tls_sw_strparser_arm(struct sock *sk, struct tls_context *tls_ctx)
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 481f7f8a1655..fb2060dffb0a 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -947,9 +947,11 @@ virtio_transport_recv_connected(struct sock *sk,
947 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND) 947 if (le32_to_cpu(pkt->hdr.flags) & VIRTIO_VSOCK_SHUTDOWN_SEND)
948 vsk->peer_shutdown |= SEND_SHUTDOWN; 948 vsk->peer_shutdown |= SEND_SHUTDOWN;
949 if (vsk->peer_shutdown == SHUTDOWN_MASK && 949 if (vsk->peer_shutdown == SHUTDOWN_MASK &&
950 vsock_stream_has_data(vsk) <= 0) { 950 vsock_stream_has_data(vsk) <= 0 &&
951 sock_set_flag(sk, SOCK_DONE); 951 !sock_flag(sk, SOCK_DONE)) {
952 sk->sk_state = TCP_CLOSING; 952 (void)virtio_transport_reset(vsk, NULL);
953
954 virtio_transport_do_close(vsk, true);
953 } 955 }
954 if (le32_to_cpu(pkt->hdr.flags)) 956 if (le32_to_cpu(pkt->hdr.flags))
955 sk->sk_state_change(sk); 957 sk->sk_state_change(sk);
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 1d9be26b4edd..42b571cde177 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -176,6 +176,7 @@ KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/bpf/
176KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/ 176KBUILD_HOSTCFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
177KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include 177KBUILD_HOSTCFLAGS += -I$(srctree)/tools/lib/ -I$(srctree)/tools/include
178KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf 178KBUILD_HOSTCFLAGS += -I$(srctree)/tools/perf
179KBUILD_HOSTCFLAGS += -DHAVE_ATTR_TEST=0
179 180
180HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable 181HOSTCFLAGS_bpf_load.o += -I$(objtree)/usr/include -Wno-unused-variable
181 182
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 63e4349a772a..15e458e150bd 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -15,7 +15,9 @@ void test_attr__init(void);
15void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu, 15void test_attr__open(struct perf_event_attr *attr, pid_t pid, int cpu,
16 int fd, int group_fd, unsigned long flags); 16 int fd, int group_fd, unsigned long flags);
17 17
18#define HAVE_ATTR_TEST 18#ifndef HAVE_ATTR_TEST
19#define HAVE_ATTR_TEST 1
20#endif
19 21
20static inline int 22static inline int
21sys_perf_event_open(struct perf_event_attr *attr, 23sys_perf_event_open(struct perf_event_attr *attr,
@@ -27,7 +29,7 @@ sys_perf_event_open(struct perf_event_attr *attr,
27 fd = syscall(__NR_perf_event_open, attr, pid, cpu, 29 fd = syscall(__NR_perf_event_open, attr, pid, cpu,
28 group_fd, flags); 30 group_fd, flags);
29 31
30#ifdef HAVE_ATTR_TEST 32#if HAVE_ATTR_TEST
31 if (unlikely(test_attr__enabled)) 33 if (unlikely(test_attr__enabled))
32 test_attr__open(attr, pid, cpu, fd, group_fd, flags); 34 test_attr__open(attr, pid, cpu, fd, group_fd, flags);
33#endif 35#endif
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c
index a320e3844b17..7c6e5b173f33 100644
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/test_sysctl.c
@@ -161,9 +161,14 @@ static struct sysctl_test tests[] = {
161 .descr = "ctx:file_pos sysctl:read read ok narrow", 161 .descr = "ctx:file_pos sysctl:read read ok narrow",
162 .insns = { 162 .insns = {
163 /* If (file_pos == X) */ 163 /* If (file_pos == X) */
164#if __BYTE_ORDER == __LITTLE_ENDIAN
164 BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1, 165 BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
165 offsetof(struct bpf_sysctl, file_pos)), 166 offsetof(struct bpf_sysctl, file_pos)),
166 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2), 167#else
168 BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1,
169 offsetof(struct bpf_sysctl, file_pos) + 3),
170#endif
171 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 4, 2),
167 172
168 /* return ALLOW; */ 173 /* return ALLOW; */
169 BPF_MOV64_IMM(BPF_REG_0, 1), 174 BPF_MOV64_IMM(BPF_REG_0, 1),
@@ -176,6 +181,7 @@ static struct sysctl_test tests[] = {
176 .attach_type = BPF_CGROUP_SYSCTL, 181 .attach_type = BPF_CGROUP_SYSCTL,
177 .sysctl = "kernel/ostype", 182 .sysctl = "kernel/ostype",
178 .open_flags = O_RDONLY, 183 .open_flags = O_RDONLY,
184 .seek = 4,
179 .result = SUCCESS, 185 .result = SUCCESS,
180 }, 186 },
181 { 187 {
diff --git a/tools/testing/selftests/net/tls.c b/tools/testing/selftests/net/tls.c
index 4c285b6e1db8..1c8f194d6556 100644
--- a/tools/testing/selftests/net/tls.c
+++ b/tools/testing/selftests/net/tls.c
@@ -898,6 +898,114 @@ TEST_F(tls, nonblocking)
898 } 898 }
899} 899}
900 900
901static void
902test_mutliproc(struct __test_metadata *_metadata, struct _test_data_tls *self,
903 bool sendpg, unsigned int n_readers, unsigned int n_writers)
904{
905 const unsigned int n_children = n_readers + n_writers;
906 const size_t data = 6 * 1000 * 1000;
907 const size_t file_sz = data / 100;
908 size_t read_bias, write_bias;
909 int i, fd, child_id;
910 char buf[file_sz];
911 pid_t pid;
912
913 /* Only allow multiples for simplicity */
914 ASSERT_EQ(!(n_readers % n_writers) || !(n_writers % n_readers), true);
915 read_bias = n_writers / n_readers ?: 1;
916 write_bias = n_readers / n_writers ?: 1;
917
918 /* prep a file to send */
919 fd = open("/tmp/", O_TMPFILE | O_RDWR, 0600);
920 ASSERT_GE(fd, 0);
921
922 memset(buf, 0xac, file_sz);
923 ASSERT_EQ(write(fd, buf, file_sz), file_sz);
924
925 /* spawn children */
926 for (child_id = 0; child_id < n_children; child_id++) {
927 pid = fork();
928 ASSERT_NE(pid, -1);
929 if (!pid)
930 break;
931 }
932
933 /* parent waits for all children */
934 if (pid) {
935 for (i = 0; i < n_children; i++) {
936 int status;
937
938 wait(&status);
939 EXPECT_EQ(status, 0);
940 }
941
942 return;
943 }
944
945 /* Split threads for reading and writing */
946 if (child_id < n_readers) {
947 size_t left = data * read_bias;
948 char rb[8001];
949
950 while (left) {
951 int res;
952
953 res = recv(self->cfd, rb,
954 left > sizeof(rb) ? sizeof(rb) : left, 0);
955
956 EXPECT_GE(res, 0);
957 left -= res;
958 }
959 } else {
960 size_t left = data * write_bias;
961
962 while (left) {
963 int res;
964
965 ASSERT_EQ(lseek(fd, 0, SEEK_SET), 0);
966 if (sendpg)
967 res = sendfile(self->fd, fd, NULL,
968 left > file_sz ? file_sz : left);
969 else
970 res = send(self->fd, buf,
971 left > file_sz ? file_sz : left, 0);
972
973 EXPECT_GE(res, 0);
974 left -= res;
975 }
976 }
977}
978
979TEST_F(tls, mutliproc_even)
980{
981 test_mutliproc(_metadata, self, false, 6, 6);
982}
983
984TEST_F(tls, mutliproc_readers)
985{
986 test_mutliproc(_metadata, self, false, 4, 12);
987}
988
989TEST_F(tls, mutliproc_writers)
990{
991 test_mutliproc(_metadata, self, false, 10, 2);
992}
993
994TEST_F(tls, mutliproc_sendpage_even)
995{
996 test_mutliproc(_metadata, self, true, 6, 6);
997}
998
999TEST_F(tls, mutliproc_sendpage_readers)
1000{
1001 test_mutliproc(_metadata, self, true, 4, 12);
1002}
1003
1004TEST_F(tls, mutliproc_sendpage_writers)
1005{
1006 test_mutliproc(_metadata, self, true, 10, 2);
1007}
1008
901TEST_F(tls, control_msg) 1009TEST_F(tls, control_msg)
902{ 1010{
903 if (self->notls) 1011 if (self->notls)