author    Linus Torvalds <torvalds@linux-foundation.org>  2018-02-19 14:58:19 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2018-02-19 14:58:19 -0500
commit    79c0ef3e85c015b0921a8fd5dd539d1480e9cd6c (patch)
tree      84c85e3aa7ff0d25ab62af3d3bfb896888d5219e
parent    91ab883eb21325ad80f3473633f794c78ac87f51 (diff)
parent    506b0a395f26e52b3f18827e0de1be051acb77ab (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Prevent index integer overflow in ptr_ring, from Jason Wang.

 2) Program mvpp2 multicast filter properly, from Mikulas Patocka.

 3) The bridge brport attribute file is write only and doesn't have a
    ->show() method, don't blindly invoke it. From Xin Long.

 4) Inverted mask used in genphy_setup_forced(), from Ingo van Lil.

 5) Fix multiple definition issue with if_ether.h UAPI header, from
    Hauke Mehrtens.

 6) Fix GFP_KERNEL usage in atomic in RDS protocol code, from Sowmini
    Varadhan.

 7) Revert XDP redirect support from thunderx driver, it is not
    implemented properly. From Jesper Dangaard Brouer.

 8) Fix missing RTNL protection across some tipc operations, from
    Ying Xue.

 9) Return the correct IV bytes in the TLS getsockopt code, from Boris
    Pismenny.

10) Take tclassid into consideration properly when doing FIB rule
    matching. From Stefano Brivio.

11) cxgb4 device needs more PCI VPD quirks, from Casey Leedom.

12) TUN driver doesn't align frags properly, and we can end up doing
    unaligned atomics on misaligned metadata. From Eric Dumazet.

13) Fix various crashes found using DEBUG_PREEMPT in rmnet driver, from
    Subash Abhinov Kasiviswanathan.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (56 commits)
  tg3: APE heartbeat changes
  mlxsw: spectrum_router: Do not unconditionally clear route offload indication
  net: qualcomm: rmnet: Fix possible null dereference in command processing
  net: qualcomm: rmnet: Fix warning seen with 64 bit stats
  net: qualcomm: rmnet: Fix crash on real dev unregistration
  sctp: remove the left unnecessary check for chunk in sctp_renege_events
  rxrpc: Work around usercopy check
  tun: fix tun_napi_alloc_frags() frag allocator
  udplite: fix partial checksum initialization
  skbuff: Fix comment mis-spelling.
  decnet: move nf_{get/set}sockopt outside sock lock
  PCI/cxgb4: Extend T3 PCI quirk to T4+ devices
  cxgb4: fix trailing zero in CIM LA dump
  cxgb4: free up resources of pf 0-3
  fib_semantics: Don't match route with mismatching tclassid
  NFC: llcp: Limit size of SDP URI
  tls: getsockopt return record sequence number
  tls: reset the crypto info if copy_from_user fails
  tls: return the correct IV in getsockopt
  docs: segmentation-offloads.txt: add SCTP info
  ...
-rw-r--r--  Documentation/networking/segmentation-offloads.txt  38
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c  35
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h  5
-rw-r--r--  drivers/net/ethernet/cavium/common/cavium_ptp.c  2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c  110
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c  11
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h  4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c  2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c  2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c  25
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c  10
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c  54
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c  35
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c  68
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c  5
-rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c  2
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  6
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c  18
-rw-r--r--  drivers/net/phy/phy_device.c  2
-rw-r--r--  drivers/net/thunderbolt.c  19
-rw-r--r--  drivers/net/tun.c  16
-rw-r--r--  drivers/pci/quirks.c  39
-rw-r--r--  include/linux/ptr_ring.h  2
-rw-r--r--  include/linux/skbuff.h  2
-rw-r--r--  include/net/udplite.h  1
-rw-r--r--  include/uapi/linux/if_ether.h  6
-rw-r--r--  include/uapi/linux/libc-compat.h  6
-rw-r--r--  net/bridge/br_sysfs_if.c  3
-rw-r--r--  net/core/dev.c  11
-rw-r--r--  net/decnet/af_decnet.c  62
-rw-r--r--  net/ipv4/fib_semantics.c  5
-rw-r--r--  net/ipv4/tcp_output.c  25
-rw-r--r--  net/ipv4/udp.c  5
-rw-r--r--  net/ipv6/ip6_checksum.c  5
-rw-r--r--  net/nfc/llcp_commands.c  4
-rw-r--r--  net/nfc/netlink.c  3
-rw-r--r--  net/rds/connection.c  2
-rw-r--r--  net/rxrpc/recvmsg.c  5
-rw-r--r--  net/sched/cls_api.c  26
-rw-r--r--  net/sched/cls_u32.c  24
-rw-r--r--  net/sctp/debug.c  6
-rw-r--r--  net/sctp/input.c  5
-rw-r--r--  net/sctp/stream.c  2
-rw-r--r--  net/sctp/stream_interleave.c  16
-rw-r--r--  net/tipc/bearer.c  82
-rw-r--r--  net/tipc/bearer.h  4
-rw-r--r--  net/tipc/net.c  15
-rw-r--r--  net/tipc/net.h  1
-rw-r--r--  net/tipc/netlink_compat.c  43
-rw-r--r--  net/tls/tls_main.c  7
-rw-r--r--  net/unix/af_unix.c  2
52 files changed, 504 insertions, 395 deletions
diff --git a/Documentation/networking/segmentation-offloads.txt b/Documentation/networking/segmentation-offloads.txt
index 2f09455a993a..d47480b61ac6 100644
--- a/Documentation/networking/segmentation-offloads.txt
+++ b/Documentation/networking/segmentation-offloads.txt
@@ -13,6 +13,7 @@ The following technologies are described:
  * Generic Segmentation Offload - GSO
  * Generic Receive Offload - GRO
  * Partial Generic Segmentation Offload - GSO_PARTIAL
+ * SCTP acceleration with GSO - GSO_BY_FRAGS
 
 TCP Segmentation Offload
 ========================
@@ -49,6 +50,10 @@ datagram into multiple IPv4 fragments. Many of the requirements for UDP
 fragmentation offload are the same as TSO. However the IPv4 ID for
 fragments should not increment as a single IPv4 datagram is fragmented.
 
+UFO is deprecated: modern kernels will no longer generate UFO skbs, but can
+still receive them from tuntap and similar devices. Offload of UDP-based
+tunnel protocols is still supported.
+
 IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
 ========================================================
 
@@ -83,10 +88,10 @@ SKB_GSO_UDP_TUNNEL_CSUM. These two additional tunnel types reflect the
 fact that the outer header also requests to have a non-zero checksum
 included in the outer header.
 
-Finally there is SKB_GSO_REMCSUM which indicates that a given tunnel header
-has requested a remote checksum offload. In this case the inner headers
-will be left with a partial checksum and only the outer header checksum
-will be computed.
+Finally there is SKB_GSO_TUNNEL_REMCSUM which indicates that a given tunnel
+header has requested a remote checksum offload. In this case the inner
+headers will be left with a partial checksum and only the outer header
+checksum will be computed.
 
 Generic Segmentation Offload
 ============================
@@ -128,3 +133,28 @@ values for if the header was simply duplicated. The one exception to this
 is the outer IPv4 ID field. It is up to the device drivers to guarantee
 that the IPv4 ID field is incremented in the case that a given header does
 not have the DF bit set.
+
+SCTP acceleration with GSO
+==========================
+
+SCTP - despite the lack of hardware support - can still take advantage of
+GSO to pass one large packet through the network stack, rather than
+multiple small packets.
+
+This requires a different approach to other offloads, as SCTP packets
+cannot be just segmented to (P)MTU. Rather, the chunks must be contained in
+IP segments, padding respected. So unlike regular GSO, SCTP can't just
+generate a big skb, set gso_size to the fragmentation point and deliver it
+to IP layer.
+
+Instead, the SCTP protocol layer builds an skb with the segments correctly
+padded and stored as chained skbs, and skb_segment() splits based on those.
+To signal this, gso_size is set to the special value GSO_BY_FRAGS.
+
+Therefore, any code in the core networking stack must be aware of the
+possibility that gso_size will be GSO_BY_FRAGS and handle that case
+appropriately. (For size checks, the skb_gso_validate_*_len family of
+helpers do this automatically.)
+
+This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
+set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
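A minimal sketch of what the GSO_BY_FRAGS note above asks of core-stack
code, assuming the usual skbuff APIs. The helper name below is hypothetical
and the check is deliberately simplified; the real skb_gso_validate_*_len
helpers also account for protocol header lengths:

#include <linux/skbuff.h>

/* Hypothetical helper: would every segment of this GSO skb fit in @mtu?
 * A normal gso_size answers directly; GSO_BY_FRAGS is a sentinel, so the
 * segment boundaries must be read from the frag_list chain instead.
 */
static bool sketch_gso_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
{
    const struct skb_shared_info *shinfo = skb_shinfo(skb);
    const struct sk_buff *iter;

    if (shinfo->gso_size != GSO_BY_FRAGS)
        return shinfo->gso_size <= mtu;

    /* Each chained skb is one SCTP-sized segment, padding included. */
    skb_walk_frags(skb, iter) {
        if (skb_headlen(iter) > mtu)
            return false;
    }
    return true;
}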
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index a77ee2f8fb8d..c1841db1b500 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -820,7 +820,7 @@ static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
 
         tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 
-        udelay(10);
+        usleep_range(10, 20);
         timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
     }
 
@@ -922,8 +922,8 @@ static int tg3_ape_send_event(struct tg3 *tp, u32 event)
     if (!(apedata & APE_FW_STATUS_READY))
         return -EAGAIN;
 
-    /* Wait for up to 1 millisecond for APE to service previous event. */
-    err = tg3_ape_event_lock(tp, 1000);
+    /* Wait for up to 20 milliseconds for APE to service previous event. */
+    err = tg3_ape_event_lock(tp, 20000);
     if (err)
         return err;
 
@@ -946,6 +946,7 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
 
     switch (kind) {
     case RESET_KIND_INIT:
+        tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
         tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                         APE_HOST_SEG_SIG_MAGIC);
         tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
@@ -962,13 +963,6 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
         event = APE_EVENT_STATUS_STATE_START;
         break;
     case RESET_KIND_SHUTDOWN:
-        /* With the interface we are currently using,
-         * APE does not track driver state. Wiping
-         * out the HOST SEGMENT SIGNATURE forces
-         * the APE to assume OS absent status.
-         */
-        tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
-
         if (device_may_wakeup(&tp->pdev->dev) &&
             tg3_flag(tp, WOL_ENABLE)) {
             tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
@@ -990,6 +984,18 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
     tg3_ape_send_event(tp, event);
 }
 
+static void tg3_send_ape_heartbeat(struct tg3 *tp,
+                                   unsigned long interval)
+{
+    /* Check if hb interval has exceeded */
+    if (!tg3_flag(tp, ENABLE_APE) ||
+        time_before(jiffies, tp->ape_hb_jiffies + interval))
+        return;
+
+    tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_COUNT, tp->ape_hb++);
+    tp->ape_hb_jiffies = jiffies;
+}
+
 static void tg3_disable_ints(struct tg3 *tp)
 {
     int i;
@@ -7262,6 +7268,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
         }
     }
 
+    tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
     return work_done;
 
 tx_recovery:
@@ -7344,6 +7351,7 @@ static int tg3_poll(struct napi_struct *napi, int budget)
         }
     }
 
+    tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL << 1);
    return work_done;
 
 tx_recovery:
@@ -10732,7 +10740,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
     if (tg3_flag(tp, ENABLE_APE))
         /* Write our heartbeat update interval to APE. */
         tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
-                        APE_HOST_HEARTBEAT_INT_DISABLE);
+                        APE_HOST_HEARTBEAT_INT_5SEC);
 
     tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 
@@ -11077,6 +11085,9 @@ static void tg3_timer(struct timer_list *t)
         tp->asf_counter = tp->asf_multiplier;
     }
 
+    /* Update the APE heartbeat every 5 seconds. */
+    tg3_send_ape_heartbeat(tp, TG3_APE_HB_INTERVAL);
+
     spin_unlock(&tp->lock);
 
 restart_timer:
@@ -16653,6 +16664,8 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
                               pci_state_reg);
 
         tg3_ape_lock_init(tp);
+        tp->ape_hb_interval =
+            msecs_to_jiffies(APE_HOST_HEARTBEAT_INT_5SEC);
     }
 
     /* Set up tp->grc_local_ctrl before calling
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 47f51cc0566d..1d61aa3efda1 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2508,6 +2508,7 @@
 #define TG3_APE_LOCK_PHY3           5
 #define TG3_APE_LOCK_GPIO           7
 
+#define TG3_APE_HB_INTERVAL         (tp->ape_hb_interval)
 #define TG3_EEPROM_SB_F1R2_MBA_OFF  0x10
 
 
@@ -3423,6 +3424,10 @@ struct tg3 {
     struct device       *hwmon_dev;
     bool                link_up;
     bool                pcierr_recovery;
+
+    u32                 ape_hb;
+    unsigned long       ape_hb_interval;
+    unsigned long       ape_hb_jiffies;
 };
 
 /* Accessor macros for chip and asic attributes
diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c
index c87c9c684a33..d59497a7bdce 100644
--- a/drivers/net/ethernet/cavium/common/cavium_ptp.c
+++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c
@@ -75,6 +75,8 @@ EXPORT_SYMBOL(cavium_ptp_get);
 
 void cavium_ptp_put(struct cavium_ptp *ptp)
 {
+    if (!ptp)
+        return;
     pci_dev_put(ptp->pdev);
 }
 EXPORT_SYMBOL(cavium_ptp_put);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index b68cde9f17d2..7d9c5ffbd041 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -67,11 +67,6 @@ module_param(cpi_alg, int, S_IRUGO);
 MODULE_PARM_DESC(cpi_alg,
                  "PFC algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
 
-struct nicvf_xdp_tx {
-    u64 dma_addr;
-    u8 qidx;
-};
-
 static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx)
 {
     if (nic->sqs_mode)
@@ -507,29 +502,14 @@ static int nicvf_init_resources(struct nicvf *nic)
     return 0;
 }
 
-static void nicvf_unmap_page(struct nicvf *nic, struct page *page, u64 dma_addr)
-{
-    /* Check if it's a recycled page, if not unmap the DMA mapping.
-     * Recycled page holds an extra reference.
-     */
-    if (page_ref_count(page) == 1) {
-        dma_addr &= PAGE_MASK;
-        dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
-                             RCV_FRAG_LEN + XDP_HEADROOM,
-                             DMA_FROM_DEVICE,
-                             DMA_ATTR_SKIP_CPU_SYNC);
-    }
-}
-
 static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
                                 struct cqe_rx_t *cqe_rx, struct snd_queue *sq,
                                 struct rcv_queue *rq, struct sk_buff **skb)
 {
     struct xdp_buff xdp;
     struct page *page;
-    struct nicvf_xdp_tx *xdp_tx = NULL;
     u32 action;
-    u16 len, err, offset = 0;
+    u16 len, offset = 0;
     u64 dma_addr, cpu_addr;
     void *orig_data;
 
@@ -543,7 +523,7 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
     cpu_addr = (u64)phys_to_virt(cpu_addr);
     page = virt_to_page((void *)cpu_addr);
 
-    xdp.data_hard_start = page_address(page) + RCV_BUF_HEADROOM;
+    xdp.data_hard_start = page_address(page);
     xdp.data = (void *)cpu_addr;
     xdp_set_data_meta_invalid(&xdp);
     xdp.data_end = xdp.data + len;
@@ -563,7 +543,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
 
     switch (action) {
     case XDP_PASS:
-        nicvf_unmap_page(nic, page, dma_addr);
+        /* Check if it's a recycled page, if not
+         * unmap the DMA mapping.
+         *
+         * Recycled page holds an extra reference.
+         */
+        if (page_ref_count(page) == 1) {
+            dma_addr &= PAGE_MASK;
+            dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+                                 RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+                                 DMA_FROM_DEVICE,
+                                 DMA_ATTR_SKIP_CPU_SYNC);
+        }
 
         /* Build SKB and pass on packet to network stack */
         *skb = build_skb(xdp.data,
@@ -576,20 +567,6 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
     case XDP_TX:
         nicvf_xdp_sq_append_pkt(nic, sq, (u64)xdp.data, dma_addr, len);
         return true;
-    case XDP_REDIRECT:
-        /* Save DMA address for use while transmitting */
-        xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
-        xdp_tx->dma_addr = dma_addr;
-        xdp_tx->qidx = nicvf_netdev_qidx(nic, cqe_rx->rq_idx);
-
-        err = xdp_do_redirect(nic->pnicvf->netdev, &xdp, prog);
-        if (!err)
-            return true;
-
-        /* Free the page on error */
-        nicvf_unmap_page(nic, page, dma_addr);
-        put_page(page);
-        break;
     default:
         bpf_warn_invalid_xdp_action(action);
         /* fall through */
@@ -597,7 +574,18 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog,
         trace_xdp_exception(nic->netdev, prog, action);
         /* fall through */
     case XDP_DROP:
-        nicvf_unmap_page(nic, page, dma_addr);
+        /* Check if it's a recycled page, if not
+         * unmap the DMA mapping.
+         *
+         * Recycled page holds an extra reference.
+         */
+        if (page_ref_count(page) == 1) {
+            dma_addr &= PAGE_MASK;
+            dma_unmap_page_attrs(&nic->pdev->dev, dma_addr,
+                                 RCV_FRAG_LEN + XDP_PACKET_HEADROOM,
+                                 DMA_FROM_DEVICE,
+                                 DMA_ATTR_SKIP_CPU_SYNC);
+        }
         put_page(page);
         return true;
     }
@@ -1864,50 +1852,6 @@ static int nicvf_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
     }
 }
 
-static int nicvf_xdp_xmit(struct net_device *netdev, struct xdp_buff *xdp)
-{
-    struct nicvf *nic = netdev_priv(netdev);
-    struct nicvf *snic = nic;
-    struct nicvf_xdp_tx *xdp_tx;
-    struct snd_queue *sq;
-    struct page *page;
-    int err, qidx;
-
-    if (!netif_running(netdev) || !nic->xdp_prog)
-        return -EINVAL;
-
-    page = virt_to_page(xdp->data);
-    xdp_tx = (struct nicvf_xdp_tx *)page_address(page);
-    qidx = xdp_tx->qidx;
-
-    if (xdp_tx->qidx >= nic->xdp_tx_queues)
-        return -EINVAL;
-
-    /* Get secondary Qset's info */
-    if (xdp_tx->qidx >= MAX_SND_QUEUES_PER_QS) {
-        qidx = xdp_tx->qidx / MAX_SND_QUEUES_PER_QS;
-        snic = (struct nicvf *)nic->snicvf[qidx - 1];
-        if (!snic)
-            return -EINVAL;
-        qidx = xdp_tx->qidx % MAX_SND_QUEUES_PER_QS;
-    }
-
-    sq = &snic->qs->sq[qidx];
-    err = nicvf_xdp_sq_append_pkt(snic, sq, (u64)xdp->data,
-                                  xdp_tx->dma_addr,
-                                  xdp->data_end - xdp->data);
-    if (err)
-        return -ENOMEM;
-
-    nicvf_xdp_sq_doorbell(snic, sq, qidx);
-    return 0;
-}
-
-static void nicvf_xdp_flush(struct net_device *dev)
-{
-    return;
-}
-
 static int nicvf_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
 {
     struct hwtstamp_config config;
@@ -1986,8 +1930,6 @@ static const struct net_device_ops nicvf_netdev_ops = {
     .ndo_fix_features   = nicvf_fix_features,
     .ndo_set_features   = nicvf_set_features,
     .ndo_bpf            = nicvf_xdp,
-    .ndo_xdp_xmit       = nicvf_xdp_xmit,
-    .ndo_xdp_flush      = nicvf_xdp_flush,
     .ndo_do_ioctl       = nicvf_ioctl,
 };
 
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 3eae9ff9b53a..d42704d07484 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -204,7 +204,7 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr,
 
     /* Reserve space for header modifications by BPF program */
     if (rbdr->is_xdp)
-        buf_len += XDP_HEADROOM;
+        buf_len += XDP_PACKET_HEADROOM;
 
     /* Check if it's recycled */
     if (pgcache)
@@ -224,9 +224,8 @@ ret:
             nic->rb_page = NULL;
             return -ENOMEM;
         }
-
         if (pgcache)
-            pgcache->dma_addr = *rbuf + XDP_HEADROOM;
+            pgcache->dma_addr = *rbuf + XDP_PACKET_HEADROOM;
         nic->rb_page_offset += buf_len;
     }
 
@@ -1244,7 +1243,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
     int qentry;
 
     if (subdesc_cnt > sq->xdp_free_cnt)
-        return -1;
+        return 0;
 
     qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
 
@@ -1255,7 +1254,7 @@ int nicvf_xdp_sq_append_pkt(struct nicvf *nic, struct snd_queue *sq,
 
     sq->xdp_desc_cnt += subdesc_cnt;
 
-    return 0;
+    return 1;
 }
 
 /* Calculate no of SQ subdescriptors needed to transmit all
@@ -1656,7 +1655,7 @@ static void nicvf_unmap_rcv_buffer(struct nicvf *nic, u64 dma_addr,
         if (page_ref_count(page) != 1)
             return;
 
-        len += XDP_HEADROOM;
+        len += XDP_PACKET_HEADROOM;
         /* Receive buffers in XDP mode are mapped from page start */
         dma_addr &= PAGE_MASK;
     }
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index ce1eed7a6d63..5e9a03cf1b4d 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -11,7 +11,6 @@
 
 #include <linux/netdevice.h>
 #include <linux/iommu.h>
-#include <linux/bpf.h>
 #include <net/xdp.h>
 #include "q_struct.h"
 
@@ -94,9 +93,6 @@
 #define RCV_FRAG_LEN    (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 
-#define RCV_BUF_HEADROOM    128 /* To store dma address for XDP redirect */
-#define XDP_HEADROOM        (XDP_PACKET_HEADROOM + RCV_BUF_HEADROOM)
-
 #define MAX_CQES_FOR_TX     ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
                              MAX_CQE_PER_PKT_XMIT)
 
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 557fd8bfd54e..00a1d2d13169 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -472,7 +472,7 @@ int cudbg_collect_cim_la(struct cudbg_init *pdbg_init,
 
     if (is_t6(padap->params.chip)) {
         size = padap->params.cim_la_size / 10 + 1;
-        size *= 11 * sizeof(u32);
+        size *= 10 * sizeof(u32);
     } else {
         size = padap->params.cim_la_size / 8;
         size *= 8 * sizeof(u32);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
index 30485f9a598f..143686c60234 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_cudbg.c
@@ -102,7 +102,7 @@ static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
     case CUDBG_CIM_LA:
         if (is_t6(adap->params.chip)) {
             len = adap->params.cim_la_size / 10 + 1;
-            len *= 11 * sizeof(u32);
+            len *= 10 * sizeof(u32);
         } else {
             len = adap->params.cim_la_size / 8;
             len *= 8 * sizeof(u32);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 56bc626ef006..7b452e85de2a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4982,9 +4982,10 @@ static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
 
     pcie_fw = readl(adap->regs + PCIE_FW_A);
     /* Check if cxgb4 is the MASTER and fw is initialized */
-    if (!(pcie_fw & PCIE_FW_INIT_F) ||
+    if (num_vfs &&
+        (!(pcie_fw & PCIE_FW_INIT_F) ||
         !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
-        PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF) {
+        PCIE_FW_MASTER_G(pcie_fw) != CXGB4_UNIFIED_PF)) {
         dev_warn(&pdev->dev,
                  "cxgb4 driver needs to be MASTER to support SRIOV\n");
         return -EOPNOTSUPP;
@@ -5599,24 +5600,24 @@ static void remove_one(struct pci_dev *pdev)
 #if IS_ENABLED(CONFIG_IPV6)
         t4_cleanup_clip_tbl(adapter);
 #endif
-        iounmap(adapter->regs);
         if (!is_t4(adapter->params.chip))
             iounmap(adapter->bar2);
-        pci_disable_pcie_error_reporting(pdev);
-        if ((adapter->flags & DEV_ENABLED)) {
-            pci_disable_device(pdev);
-            adapter->flags &= ~DEV_ENABLED;
-        }
-        pci_release_regions(pdev);
-        kfree(adapter->mbox_log);
-        synchronize_rcu();
-        kfree(adapter);
     }
 #ifdef CONFIG_PCI_IOV
     else {
         cxgb4_iov_configure(adapter->pdev, 0);
     }
 #endif
+    iounmap(adapter->regs);
+    pci_disable_pcie_error_reporting(pdev);
+    if ((adapter->flags & DEV_ENABLED)) {
+        pci_disable_device(pdev);
+        adapter->flags &= ~DEV_ENABLED;
+    }
+    pci_release_regions(pdev);
+    kfree(adapter->mbox_log);
+    synchronize_rcu();
+    kfree(adapter);
 }
 
 /* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 047609ef0515..920bccd6bc40 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -2637,7 +2637,6 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
 }
 
 #define EEPROM_STAT_ADDR   0x7bfc
-#define VPD_SIZE           0x800
 #define VPD_BASE           0x400
 #define VPD_BASE_OLD       0
 #define VPD_LEN            1024
@@ -2704,15 +2703,6 @@ int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
     if (!vpd)
         return -ENOMEM;
 
-    /* We have two VPD data structures stored in the adapter VPD area.
-     * By default, Linux calculates the size of the VPD area by traversing
-     * the first VPD area at offset 0x0, so we need to tell the OS what
-     * our real VPD size is.
-     */
-    ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
-    if (ret < 0)
-        goto out;
-
     /* Card information normally starts at VPD_BASE but early cards had
      * it at 0.
      */
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 27447260215d..996f47568f9e 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -791,6 +791,18 @@ static int ibmvnic_login(struct net_device *netdev)
     return 0;
 }
 
+static void release_login_buffer(struct ibmvnic_adapter *adapter)
+{
+    kfree(adapter->login_buf);
+    adapter->login_buf = NULL;
+}
+
+static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
+{
+    kfree(adapter->login_rsp_buf);
+    adapter->login_rsp_buf = NULL;
+}
+
 static void release_resources(struct ibmvnic_adapter *adapter)
 {
     int i;
@@ -813,6 +825,10 @@ static void release_resources(struct ibmvnic_adapter *adapter)
             }
         }
     }
+    kfree(adapter->napi);
+    adapter->napi = NULL;
+
+    release_login_rsp_buffer(adapter);
 }
 
 static int set_link_state(struct ibmvnic_adapter *adapter, u8 link_state)
@@ -1057,6 +1073,35 @@ static int ibmvnic_open(struct net_device *netdev)
     return rc;
 }
 
+static void clean_rx_pools(struct ibmvnic_adapter *adapter)
+{
+    struct ibmvnic_rx_pool *rx_pool;
+    u64 rx_entries;
+    int rx_scrqs;
+    int i, j;
+
+    if (!adapter->rx_pool)
+        return;
+
+    rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
+    rx_entries = adapter->req_rx_add_entries_per_subcrq;
+
+    /* Free any remaining skbs in the rx buffer pools */
+    for (i = 0; i < rx_scrqs; i++) {
+        rx_pool = &adapter->rx_pool[i];
+        if (!rx_pool)
+            continue;
+
+        netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
+        for (j = 0; j < rx_entries; j++) {
+            if (rx_pool->rx_buff[j].skb) {
+                dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
+                rx_pool->rx_buff[j].skb = NULL;
+            }
+        }
+    }
+}
+
 static void clean_tx_pools(struct ibmvnic_adapter *adapter)
 {
     struct ibmvnic_tx_pool *tx_pool;
@@ -1134,7 +1179,7 @@ static int __ibmvnic_close(struct net_device *netdev)
             }
         }
     }
-
+    clean_rx_pools(adapter);
     clean_tx_pools(adapter);
     adapter->state = VNIC_CLOSED;
     return rc;
@@ -1670,8 +1715,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
         return 0;
     }
 
-    netif_carrier_on(netdev);
-
     /* kick napi */
     for (i = 0; i < adapter->req_rx_queues; i++)
         napi_schedule(&adapter->napi[i]);
@@ -1679,6 +1722,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
     if (adapter->reset_reason != VNIC_RESET_FAILOVER)
         netdev_notify_peers(netdev);
 
+    netif_carrier_on(netdev);
+
     return 0;
 }
 
@@ -1853,6 +1898,7 @@ restart_poll:
                        be16_to_cpu(next->rx_comp.rc));
             /* free the entry */
             next->rx_comp.first = 0;
+            dev_kfree_skb_any(rx_buff->skb);
             remove_buff_from_pool(adapter, rx_buff);
             continue;
         }
@@ -3013,6 +3059,7 @@ static void send_login(struct ibmvnic_adapter *adapter)
     struct vnic_login_client_data *vlcd;
     int i;
 
+    release_login_rsp_buffer(adapter);
     client_data_len = vnic_client_data_len(adapter);
 
     buffer_size =
@@ -3708,6 +3755,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
 
     dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
                      DMA_BIDIRECTIONAL);
+    release_login_buffer(adapter);
     dma_unmap_single(dev, adapter->login_rsp_buf_token,
                      adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
 
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a1d7b88cf083..5a1668cdb461 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -7137,6 +7137,7 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
     int id = port->id;
     bool allmulti = dev->flags & IFF_ALLMULTI;
 
+retry:
     mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
     mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
     mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
@@ -7144,9 +7145,13 @@ static void mvpp2_set_rx_mode(struct net_device *dev)
     /* Remove all port->id's mcast enries */
     mvpp2_prs_mcast_del_all(priv, id);
 
-    if (allmulti && !netdev_mc_empty(dev)) {
-        netdev_for_each_mc_addr(ha, dev)
-            mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
+    if (!allmulti) {
+        netdev_for_each_mc_addr(ha, dev) {
+            if (mvpp2_prs_mac_da_accept(priv, id, ha->addr, true)) {
+                allmulti = true;
+                goto retry;
+            }
+        }
     }
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index f0b25baba09a..f7948e983637 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -788,6 +788,9 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
                                               u32 tb_id,
                                               struct netlink_ext_ack *extack)
 {
+    struct mlxsw_sp_mr_table *mr4_table;
+    struct mlxsw_sp_fib *fib4;
+    struct mlxsw_sp_fib *fib6;
     struct mlxsw_sp_vr *vr;
     int err;
 
@@ -796,29 +799,30 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
         NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
         return ERR_PTR(-EBUSY);
     }
-    vr->fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
-    if (IS_ERR(vr->fib4))
-        return ERR_CAST(vr->fib4);
-    vr->fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
-    if (IS_ERR(vr->fib6)) {
-        err = PTR_ERR(vr->fib6);
+    fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
+    if (IS_ERR(fib4))
+        return ERR_CAST(fib4);
+    fib6 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
+    if (IS_ERR(fib6)) {
+        err = PTR_ERR(fib6);
         goto err_fib6_create;
     }
-    vr->mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
-                                             MLXSW_SP_L3_PROTO_IPV4);
-    if (IS_ERR(vr->mr4_table)) {
-        err = PTR_ERR(vr->mr4_table);
+    mr4_table = mlxsw_sp_mr_table_create(mlxsw_sp, vr->id,
+                                         MLXSW_SP_L3_PROTO_IPV4);
+    if (IS_ERR(mr4_table)) {
+        err = PTR_ERR(mr4_table);
         goto err_mr_table_create;
     }
+    vr->fib4 = fib4;
+    vr->fib6 = fib6;
+    vr->mr4_table = mr4_table;
     vr->tb_id = tb_id;
     return vr;
 
 err_mr_table_create:
-    mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib6);
-    vr->fib6 = NULL;
+    mlxsw_sp_fib_destroy(mlxsw_sp, fib6);
 err_fib6_create:
-    mlxsw_sp_fib_destroy(mlxsw_sp, vr->fib4);
-    vr->fib4 = NULL;
+    mlxsw_sp_fib_destroy(mlxsw_sp, fib4);
     return ERR_PTR(err);
 }
 
@@ -3790,6 +3794,9 @@ mlxsw_sp_fib4_entry_offload_unset(struct mlxsw_sp_fib_entry *fib_entry)
     struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
     int i;
 
+    if (!list_is_singular(&nh_grp->fib_list))
+        return;
+
     for (i = 0; i < nh_grp->count; i++) {
         struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 7e7704daf5f1..c4949183eef3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -43,12 +43,6 @@
 
 /* Local Definitions and Declarations */
 
-struct rmnet_walk_data {
-    struct net_device *real_dev;
-    struct list_head *head;
-    struct rmnet_port *port;
-};
-
 static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
 {
     return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
@@ -112,17 +106,14 @@ static int rmnet_register_real_device(struct net_device *real_dev)
 static void rmnet_unregister_bridge(struct net_device *dev,
                                     struct rmnet_port *port)
 {
-    struct net_device *rmnet_dev, *bridge_dev;
     struct rmnet_port *bridge_port;
+    struct net_device *bridge_dev;
 
     if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
         return;
 
     /* bridge slave handling */
     if (!port->nr_rmnet_devs) {
-        rmnet_dev = netdev_master_upper_dev_get_rcu(dev);
-        netdev_upper_dev_unlink(dev, rmnet_dev);
-
         bridge_dev = port->bridge_ep;
 
         bridge_port = rmnet_get_port_rtnl(bridge_dev);
@@ -132,9 +123,6 @@ static void rmnet_unregister_bridge(struct net_device *dev,
         bridge_dev = port->bridge_ep;
 
         bridge_port = rmnet_get_port_rtnl(bridge_dev);
-        rmnet_dev = netdev_master_upper_dev_get_rcu(bridge_dev);
-        netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
-
         rmnet_unregister_real_device(bridge_dev, bridge_port);
     }
 }
@@ -173,10 +161,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
     if (err)
         goto err1;
 
-    err = netdev_master_upper_dev_link(dev, real_dev, NULL, NULL, extack);
-    if (err)
-        goto err2;
-
     port->rmnet_mode = mode;
 
     hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
@@ -193,8 +177,6 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
 
     return 0;
 
-err2:
-    rmnet_vnd_dellink(mux_id, port, ep);
 err1:
     rmnet_unregister_real_device(real_dev, port);
 err0:
@@ -204,14 +186,13 @@ err0:
 
 static void rmnet_dellink(struct net_device *dev, struct list_head *head)
 {
+    struct rmnet_priv *priv = netdev_priv(dev);
     struct net_device *real_dev;
     struct rmnet_endpoint *ep;
     struct rmnet_port *port;
     u8 mux_id;
 
-    rcu_read_lock();
-    real_dev = netdev_master_upper_dev_get_rcu(dev);
-    rcu_read_unlock();
+    real_dev = priv->real_dev;
 
     if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
         return;
@@ -219,7 +200,6 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
     port = rmnet_get_port_rtnl(real_dev);
 
     mux_id = rmnet_vnd_get_mux(dev);
-    netdev_upper_dev_unlink(dev, real_dev);
 
     ep = rmnet_get_endpoint(port, mux_id);
     if (ep) {
@@ -233,30 +213,13 @@ static void rmnet_dellink(struct net_device *dev, struct list_head *head)
     unregister_netdevice_queue(dev, head);
 }
 
-static int rmnet_dev_walk_unreg(struct net_device *rmnet_dev, void *data)
-{
-    struct rmnet_walk_data *d = data;
-    struct rmnet_endpoint *ep;
-    u8 mux_id;
-
-    mux_id = rmnet_vnd_get_mux(rmnet_dev);
-    ep = rmnet_get_endpoint(d->port, mux_id);
-    if (ep) {
-        hlist_del_init_rcu(&ep->hlnode);
-        rmnet_vnd_dellink(mux_id, d->port, ep);
-        kfree(ep);
-    }
-    netdev_upper_dev_unlink(rmnet_dev, d->real_dev);
-    unregister_netdevice_queue(rmnet_dev, d->head);
-
-    return 0;
-}
-
 static void rmnet_force_unassociate_device(struct net_device *dev)
 {
     struct net_device *real_dev = dev;
-    struct rmnet_walk_data d;
+    struct hlist_node *tmp_ep;
+    struct rmnet_endpoint *ep;
     struct rmnet_port *port;
+    unsigned long bkt_ep;
     LIST_HEAD(list);
 
     if (!rmnet_is_real_dev_registered(real_dev))
@@ -264,16 +227,19 @@ static void rmnet_force_unassociate_device(struct net_device *dev)
 
     ASSERT_RTNL();
 
-    d.real_dev = real_dev;
-    d.head = &list;
-
     port = rmnet_get_port_rtnl(dev);
-    d.port = port;
 
     rcu_read_lock();
     rmnet_unregister_bridge(dev, port);
 
-    netdev_walk_all_lower_dev_rcu(real_dev, rmnet_dev_walk_unreg, &d);
+    hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+        unregister_netdevice_queue(ep->egress_dev, &list);
+        rmnet_vnd_dellink(ep->mux_id, port, ep);
+
+        hlist_del_init_rcu(&ep->hlnode);
+        kfree(ep);
+    }
+
     rcu_read_unlock();
     unregister_netdevice_many(&list);
 
@@ -422,11 +388,6 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
     if (err)
         return -EBUSY;
 
-    err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
-                                       extack);
-    if (err)
-        return -EINVAL;
-
     slave_port = rmnet_get_port(slave_dev);
     slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
     slave_port->bridge_ep = real_dev;
@@ -449,7 +410,6 @@ int rmnet_del_bridge(struct net_device *rmnet_dev,
     port->rmnet_mode = RMNET_EPMODE_VND;
     port->bridge_ep = NULL;
 
-    netdev_upper_dev_unlink(slave_dev, rmnet_dev);
     slave_port = rmnet_get_port(slave_dev);
     rmnet_unregister_real_device(slave_dev, slave_port);
 
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index 6bc328fb88e1..b0dbca070c00 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -38,6 +38,11 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
     }
 
     ep = rmnet_get_endpoint(port, mux_id);
+    if (!ep) {
+        kfree_skb(skb);
+        return RX_HANDLER_CONSUMED;
+    }
+
     vnd = ep->egress_dev;
 
     ip_family = cmd->flow_control.ip_family;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 570a227acdd8..346d310914df 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -121,7 +121,7 @@ static void rmnet_get_stats64(struct net_device *dev,
     memset(&total_stats, 0, sizeof(struct rmnet_vnd_stats));
 
     for_each_possible_cpu(cpu) {
-        pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+        pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
 
         do {
             start = u64_stats_fetch_begin_irq(&pcpu_ptr->syncp);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c87f57ca4437..a95fbd5510d9 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -2255,9 +2255,6 @@ static int ravb_wol_setup(struct net_device *ndev)
     /* Enable MagicPacket */
     ravb_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
 
-    /* Increased clock usage so device won't be suspended */
-    clk_enable(priv->clk);
-
     return enable_irq_wake(priv->emac_irq);
 }
 
@@ -2276,9 +2273,6 @@ static int ravb_wol_restore(struct net_device *ndev)
     if (ret < 0)
         return ret;
 
-    /* Restore clock usage count */
-    clk_disable(priv->clk);
-
     return disable_irq_wake(priv->emac_irq);
 }
 
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a197e11f3a56..92dcf8717fc6 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -40,7 +40,6 @@
 #include <linux/slab.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
-#include <linux/clk.h>
 #include <linux/sh_eth.h>
 #include <linux/of_mdio.h>
 
@@ -2304,7 +2303,7 @@ static void sh_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
     wol->supported = 0;
     wol->wolopts = 0;
 
-    if (mdp->cd->magic && mdp->clk) {
+    if (mdp->cd->magic) {
         wol->supported = WAKE_MAGIC;
         wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
     }
@@ -2314,7 +2313,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
 {
     struct sh_eth_private *mdp = netdev_priv(ndev);
 
-    if (!mdp->cd->magic || !mdp->clk || wol->wolopts & ~WAKE_MAGIC)
+    if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
         return -EOPNOTSUPP;
 
     mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
@@ -3153,11 +3152,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
         goto out_release;
     }
 
-    /* Get clock, if not found that's OK but Wake-On-Lan is unavailable */
-    mdp->clk = devm_clk_get(&pdev->dev, NULL);
-    if (IS_ERR(mdp->clk))
-        mdp->clk = NULL;
-
     ndev->base_addr = res->start;
 
     spin_lock_init(&mdp->lock);
@@ -3278,7 +3272,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
     if (ret)
         goto out_napi_del;
 
-    if (mdp->cd->magic && mdp->clk)
+    if (mdp->cd->magic)
         device_set_wakeup_capable(&pdev->dev, 1);
 
     /* print device information */
@@ -3331,9 +3325,6 @@ static int sh_eth_wol_setup(struct net_device *ndev)
     /* Enable MagicPacket */
     sh_eth_modify(ndev, ECMR, ECMR_MPDE, ECMR_MPDE);
 
-    /* Increased clock usage so device won't be suspended */
-    clk_enable(mdp->clk);
-
     return enable_irq_wake(ndev->irq);
 }
 
@@ -3359,9 +3350,6 @@ static int sh_eth_wol_restore(struct net_device *ndev)
     if (ret < 0)
         return ret;
 
-    /* Restore clock usage count */
-    clk_disable(mdp->clk);
-
     return disable_irq_wake(ndev->irq);
 }
 
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index b13eed21c87d..d39ae77707ef 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1382,7 +1382,7 @@ int genphy_setup_forced(struct phy_device *phydev)
         ctl |= BMCR_FULLDPLX;
 
     return phy_modify(phydev, MII_BMCR,
-                      BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN, ctl);
+                      ~(BMCR_LOOPBACK | BMCR_ISOLATE | BMCR_PDOWN), ctl);
 }
 EXPORT_SYMBOL(genphy_setup_forced);
 
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index ca5e375de27c..e0d6760f3219 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -166,6 +166,8 @@ struct tbnet_ring {
166 * @connected_work: Worker that finalizes the ThunderboltIP connection 166 * @connected_work: Worker that finalizes the ThunderboltIP connection
167 * setup and enables DMA paths for high speed data 167 * setup and enables DMA paths for high speed data
168 * transfers 168 * transfers
169 * @disconnect_work: Worker that handles tearing down the ThunderboltIP
170 * connection
169 * @rx_hdr: Copy of the currently processed Rx frame. Used when a 171 * @rx_hdr: Copy of the currently processed Rx frame. Used when a
170 * network packet consists of multiple Thunderbolt frames. 172 * network packet consists of multiple Thunderbolt frames.
171 * In host byte order. 173 * In host byte order.
@@ -190,6 +192,7 @@ struct tbnet {
190 int login_retries; 192 int login_retries;
191 struct delayed_work login_work; 193 struct delayed_work login_work;
192 struct work_struct connected_work; 194 struct work_struct connected_work;
195 struct work_struct disconnect_work;
193 struct thunderbolt_ip_frame_header rx_hdr; 196 struct thunderbolt_ip_frame_header rx_hdr;
194 struct tbnet_ring rx_ring; 197 struct tbnet_ring rx_ring;
195 atomic_t frame_id; 198 atomic_t frame_id;
@@ -445,7 +448,7 @@ static int tbnet_handle_packet(const void *buf, size_t size, void *data)
445 case TBIP_LOGOUT: 448 case TBIP_LOGOUT:
446 ret = tbnet_logout_response(net, route, sequence, command_id); 449 ret = tbnet_logout_response(net, route, sequence, command_id);
447 if (!ret) 450 if (!ret)
448 tbnet_tear_down(net, false); 451 queue_work(system_long_wq, &net->disconnect_work);
449 break; 452 break;
450 453
451 default: 454 default:
@@ -659,6 +662,13 @@ static void tbnet_login_work(struct work_struct *work)
659 } 662 }
660} 663}
661 664
665static void tbnet_disconnect_work(struct work_struct *work)
666{
667 struct tbnet *net = container_of(work, typeof(*net), disconnect_work);
668
669 tbnet_tear_down(net, false);
670}
671
662static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf, 672static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
663 const struct thunderbolt_ip_frame_header *hdr) 673 const struct thunderbolt_ip_frame_header *hdr)
664{ 674{
@@ -881,6 +891,7 @@ static int tbnet_stop(struct net_device *dev)
881 891
882 napi_disable(&net->napi); 892 napi_disable(&net->napi);
883 893
894 cancel_work_sync(&net->disconnect_work);
884 tbnet_tear_down(net, true); 895 tbnet_tear_down(net, true);
885 896
886 tb_ring_free(net->rx_ring.ring); 897 tb_ring_free(net->rx_ring.ring);
@@ -1195,6 +1206,7 @@ static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
1195 net = netdev_priv(dev); 1206 net = netdev_priv(dev);
1196 INIT_DELAYED_WORK(&net->login_work, tbnet_login_work); 1207 INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
1197 INIT_WORK(&net->connected_work, tbnet_connected_work); 1208 INIT_WORK(&net->connected_work, tbnet_connected_work);
1209 INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
1198 mutex_init(&net->connection_lock); 1210 mutex_init(&net->connection_lock);
1199 atomic_set(&net->command_id, 0); 1211 atomic_set(&net->command_id, 0);
1200 atomic_set(&net->frame_id, 0); 1212 atomic_set(&net->frame_id, 0);
@@ -1270,10 +1282,7 @@ static int __maybe_unused tbnet_suspend(struct device *dev)
1270 stop_login(net); 1282 stop_login(net);
1271 if (netif_running(net->dev)) { 1283 if (netif_running(net->dev)) {
1272 netif_device_detach(net->dev); 1284 netif_device_detach(net->dev);
1273 tb_ring_stop(net->rx_ring.ring); 1285 tbnet_tear_down(net, true);
1274 tb_ring_stop(net->tx_ring.ring);
1275 tbnet_free_buffers(&net->rx_ring);
1276 tbnet_free_buffers(&net->tx_ring);
1277 } 1286 }
1278 1287
1279 return 0; 1288 return 0;
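
The shape of the change: tearing the connection down synchronously from the control-channel callback that handles TBIP_LOGOUT is avoided by deferring the work to process context, and the work item is flushed before the final tear-down in tbnet_stop(). The generic deferral pattern, sketched with hypothetical names:

    #include <linux/workqueue.h>

    struct conn {
            struct work_struct disconnect_work;
    };

    static void conn_disconnect_work(struct work_struct *work)
    {
            struct conn *c = container_of(work, struct conn, disconnect_work);

            /* process context: may sleep, take locks, free resources */
            (void)c;
    }

    /* probe:  INIT_WORK(&c->disconnect_work, conn_disconnect_work);
     * event:  queue_work(system_long_wq, &c->disconnect_work);
     * stop:   cancel_work_sync(&c->disconnect_work); -- guarantees the
     *         worker is idle before the synchronous tear-down runs
     */
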
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 81e6cc951e7f..b52258c327d2 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1489,27 +1489,23 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
1489 skb->truesize += skb->data_len; 1489 skb->truesize += skb->data_len;
1490 1490
1491 for (i = 1; i < it->nr_segs; i++) { 1491 for (i = 1; i < it->nr_segs; i++) {
1492 struct page_frag *pfrag = &current->task_frag;
1492 size_t fragsz = it->iov[i].iov_len; 1493 size_t fragsz = it->iov[i].iov_len;
1493 unsigned long offset;
1494 struct page *page;
1495 void *data;
1496 1494
1497 if (fragsz == 0 || fragsz > PAGE_SIZE) { 1495 if (fragsz == 0 || fragsz > PAGE_SIZE) {
1498 err = -EINVAL; 1496 err = -EINVAL;
1499 goto free; 1497 goto free;
1500 } 1498 }
1501 1499
1502 local_bh_disable(); 1500 if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) {
1503 data = napi_alloc_frag(fragsz);
1504 local_bh_enable();
1505 if (!data) {
1506 err = -ENOMEM; 1501 err = -ENOMEM;
1507 goto free; 1502 goto free;
1508 } 1503 }
1509 1504
1510 page = virt_to_head_page(data); 1505 skb_fill_page_desc(skb, i - 1, pfrag->page,
1511 offset = data - page_address(page); 1506 pfrag->offset, fragsz);
1512 skb_fill_page_desc(skb, i - 1, page, offset, fragsz); 1507 page_ref_inc(pfrag->page);
1508 pfrag->offset += fragsz;
1513 } 1509 }
1514 1510
1515 return skb; 1511 return skb;
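
Context from the pull summary: the old napi_alloc_frag() path could leave allocations misaligned, ending in unaligned atomics on misaligned metadata. The replacement draws frags from the task's page_frag instead; its consumer contract, sketched for a generic caller (not tun specifically):

    struct page_frag *pfrag = &current->task_frag;

    if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL))
            return -ENOMEM; /* pfrag now has >= fragsz bytes available */

    /* attach the slice, take our own page reference, consume the space */
    skb_fill_page_desc(skb, i, pfrag->page, pfrag->offset, fragsz);
    page_ref_inc(pfrag->page);
    pfrag->offset += fragsz;
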
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index fc734014206f..8b14bd326d4a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3419,22 +3419,29 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PORT_RIDGE,
3419 3419
3420static void quirk_chelsio_extend_vpd(struct pci_dev *dev) 3420static void quirk_chelsio_extend_vpd(struct pci_dev *dev)
3421{ 3421{
3422 pci_set_vpd_size(dev, 8192); 3422 int chip = (dev->device & 0xf000) >> 12;
3423} 3423 int func = (dev->device & 0x0f00) >> 8;
3424 3424 int prod = (dev->device & 0x00ff) >> 0;
3425DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x20, quirk_chelsio_extend_vpd); 3425
3426DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x21, quirk_chelsio_extend_vpd); 3426 /*
3427DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x22, quirk_chelsio_extend_vpd); 3427 * If this is a T3-based adapter, there's a 1KB VPD area at offset
3428DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x23, quirk_chelsio_extend_vpd); 3428 * 0xc00 which contains the preferred VPD values. If this is a T4 or
3429DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x24, quirk_chelsio_extend_vpd); 3429 * later based adapter, the special VPD is at offset 0x400 for the
3430DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x25, quirk_chelsio_extend_vpd); 3430 * Physical Functions (the SR-IOV Virtual Functions have no VPD
3431DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x26, quirk_chelsio_extend_vpd); 3431 * Capabilities). The PCI VPD Access core routines will normally
3432DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x30, quirk_chelsio_extend_vpd); 3432 * compute the size of the VPD by parsing the VPD Data Structure at
3433DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x31, quirk_chelsio_extend_vpd); 3433 * offset 0x000. This will result in silent failures when attempting
3434DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x32, quirk_chelsio_extend_vpd); 3434 * to access these other VPD areas which are beyond those computed
3435DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x35, quirk_chelsio_extend_vpd); 3435 * limits.
3436DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x36, quirk_chelsio_extend_vpd); 3436 */
3437DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, 0x37, quirk_chelsio_extend_vpd); 3437 if (chip == 0x0 && prod >= 0x20)
3438 pci_set_vpd_size(dev, 8192);
3439 else if (chip >= 0x4 && func < 0x8)
3440 pci_set_vpd_size(dev, 2048);
3441}
3442
3443DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID,
3444 quirk_chelsio_extend_vpd);
3438 3445
3439#ifdef CONFIG_ACPI 3446#ifdef CONFIG_ACPI
3440/* 3447/*
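
A worked decode of the device-ID nibbles above, using a hypothetical T5 Physical Function ID of 0x5401:

    /* dev->device = 0x5401:
     *   chip = (0x5401 & 0xf000) >> 12 = 0x5   (T5, so chip >= 0x4)
     *   func = (0x5401 & 0x0f00) >>  8 = 0x4   (a PF, so func < 0x8)
     *   prod = (0x5401 & 0x00ff)       = 0x01
     * -> VPD window extended to 2048 bytes. A T3 part (chip == 0x0 and
     * prod >= 0x20, subsuming the old explicit 0x20..0x37 ID list) takes
     * the 8192-byte branch instead.
     */
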
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index b884b7794187..e6335227b844 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -469,7 +469,7 @@ static inline int ptr_ring_consume_batched_bh(struct ptr_ring *r,
469 */ 469 */
470static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) 470static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp)
471{ 471{
472 if (size * sizeof(void *) > KMALLOC_MAX_SIZE) 472 if (size > KMALLOC_MAX_SIZE / sizeof(void *))
473 return NULL; 473 return NULL;
474 return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); 474 return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO);
475} 475}
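
Why the check was reordered, as a worked overflow case (illustrating with a 32-bit size_t; for a 32-bit size the product cannot wrap on 64-bit):

    /* size = 0x40000001, sizeof(void *) == 4:
     *   old test: size * sizeof(void *) = 0x100000004, which truncates
     *             to 4 in 32 bits -> "<= KMALLOC_MAX_SIZE", check passes
     *   new test: size > KMALLOC_MAX_SIZE / sizeof(void *) compares two
     *             in-range values and can never wrap
     */
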
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5ebc0f869720..c1e66bdcf583 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -3646,7 +3646,7 @@ static inline bool __skb_checksum_validate_needed(struct sk_buff *skb,
3646 return true; 3646 return true;
3647} 3647}
3648 3648
3649/* For small packets <= CHECKSUM_BREAK peform checksum complete directly 3649/* For small packets <= CHECKSUM_BREAK perform checksum complete directly
3650 * in checksum_init. 3650 * in checksum_init.
3651 */ 3651 */
3652#define CHECKSUM_BREAK 76 3652#define CHECKSUM_BREAK 76
diff --git a/include/net/udplite.h b/include/net/udplite.h
index 81bdbf97319b..9185e45b997f 100644
--- a/include/net/udplite.h
+++ b/include/net/udplite.h
@@ -64,6 +64,7 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh)
64 UDP_SKB_CB(skb)->cscov = cscov; 64 UDP_SKB_CB(skb)->cscov = cscov;
65 if (skb->ip_summed == CHECKSUM_COMPLETE) 65 if (skb->ip_summed == CHECKSUM_COMPLETE)
66 skb->ip_summed = CHECKSUM_NONE; 66 skb->ip_summed = CHECKSUM_NONE;
67 skb->csum_valid = 0;
67 } 68 }
68 69
69 return 0; 70 return 0;
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index f8cb5760ea4f..8bbbcb5cd94b 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -23,7 +23,6 @@
23#define _UAPI_LINUX_IF_ETHER_H 23#define _UAPI_LINUX_IF_ETHER_H
24 24
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/libc-compat.h>
27 26
28/* 27/*
29 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble 28 * IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
@@ -151,6 +150,11 @@
151 * This is an Ethernet frame header. 150 * This is an Ethernet frame header.
152 */ 151 */
153 152
153/* allow libcs like musl to deactivate this; glibc does not implement this. */
154#ifndef __UAPI_DEF_ETHHDR
155#define __UAPI_DEF_ETHHDR 1
156#endif
157
154#if __UAPI_DEF_ETHHDR 158#if __UAPI_DEF_ETHHDR
155struct ethhdr { 159struct ethhdr {
156 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ 160 unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
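
With the default now defined next to its only user, if_ether.h no longer needs libc-compat.h at all (the hunk below removes the old definition). A sketch of how a libc that ships its own struct ethhdr is expected to use the guard:

    /* in the libc's own ethernet header, before the kernel header: */
    #define __UAPI_DEF_ETHHDR 0
    #include <linux/if_ether.h>     /* ETH_* constants only, no struct */
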
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index fc29efaa918c..8254c937c9f4 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -264,10 +264,4 @@
264 264
265#endif /* __GLIBC__ */ 265#endif /* __GLIBC__ */
266 266
267/* Definitions for if_ether.h */
268/* allow libcs like musl to deactivate this, glibc does not implement this. */
269#ifndef __UAPI_DEF_ETHHDR
270#define __UAPI_DEF_ETHHDR 1
271#endif
272
273#endif /* _UAPI_LIBC_COMPAT_H */ 267#endif /* _UAPI_LIBC_COMPAT_H */
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 0254c35b2bf0..126a8ea73c96 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -255,6 +255,9 @@ static ssize_t brport_show(struct kobject *kobj,
255 struct brport_attribute *brport_attr = to_brport_attr(attr); 255 struct brport_attribute *brport_attr = to_brport_attr(attr);
256 struct net_bridge_port *p = to_brport(kobj); 256 struct net_bridge_port *p = to_brport(kobj);
257 257
258 if (!brport_attr->show)
259 return -EINVAL;
260
258 return brport_attr->show(p, buf); 261 return brport_attr->show(p, buf);
259} 262}
260 263
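
The case being guarded: brport attributes may be write-only, leaving ->show NULL; the in-tree flush attribute (defined elsewhere in br_sysfs_if.c) has exactly this shape, so a read on it used to call through a NULL pointer:

    /* write-only brport attribute: no show method, store only */
    static BRPORT_ATTR(flush, S_IWUSR, NULL, store_flush);
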
diff --git a/net/core/dev.c b/net/core/dev.c
index dda9d7b9a840..d4362befe7e2 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2382,8 +2382,11 @@ EXPORT_SYMBOL(netdev_set_num_tc);
2382 */ 2382 */
2383int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) 2383int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2384{ 2384{
2385 bool disabling;
2385 int rc; 2386 int rc;
2386 2387
2388 disabling = txq < dev->real_num_tx_queues;
2389
2387 if (txq < 1 || txq > dev->num_tx_queues) 2390 if (txq < 1 || txq > dev->num_tx_queues)
2388 return -EINVAL; 2391 return -EINVAL;
2389 2392
@@ -2399,15 +2402,19 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2399 if (dev->num_tc) 2402 if (dev->num_tc)
2400 netif_setup_tc(dev, txq); 2403 netif_setup_tc(dev, txq);
2401 2404
2402 if (txq < dev->real_num_tx_queues) { 2405 dev->real_num_tx_queues = txq;
2406
2407 if (disabling) {
2408 synchronize_net();
2403 qdisc_reset_all_tx_gt(dev, txq); 2409 qdisc_reset_all_tx_gt(dev, txq);
2404#ifdef CONFIG_XPS 2410#ifdef CONFIG_XPS
2405 netif_reset_xps_queues_gt(dev, txq); 2411 netif_reset_xps_queues_gt(dev, txq);
2406#endif 2412#endif
2407 } 2413 }
2414 } else {
2415 dev->real_num_tx_queues = txq;
2408 } 2416 }
2409 2417
2410 dev->real_num_tx_queues = txq;
2411 return 0; 2418 return 0;
2412} 2419}
2413EXPORT_SYMBOL(netif_set_real_num_tx_queues); 2420EXPORT_SYMBOL(netif_set_real_num_tx_queues);
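
The point of the reshuffle: readers pick a tx queue under RCU, bounded by dev->real_num_tx_queues, so when shrinking the count the new, smaller bound must be published before synchronize_net(); only after the grace period is it safe to reset the now-unreachable qdiscs. Sketched:

    /* writer (shrinking to txq)          reader (rcu_read_lock() held)
     * --------------------------         ------------------------------
     * dev->real_num_tx_queues = txq;     idx = pick % real_num_tx_queues;
     * synchronize_net();                 ...transmits on queue idx...
     * qdisc_reset_all_tx_gt(dev, txq);
     *
     * after synchronize_net() returns, no reader can still hold an
     * idx >= txq, so the resets cannot race with queue selection.
     */
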
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 91dd09f79808..791aff68af88 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1338,6 +1338,12 @@ static int dn_setsockopt(struct socket *sock, int level, int optname, char __use
1338 lock_sock(sk); 1338 lock_sock(sk);
1339 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0); 1339 err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
1340 release_sock(sk); 1340 release_sock(sk);
1341#ifdef CONFIG_NETFILTER
1342 /* we need to exclude all possible ENOPROTOOPTs except default case */
1343 if (err == -ENOPROTOOPT && optname != DSO_LINKINFO &&
1344 optname != DSO_STREAM && optname != DSO_SEQPACKET)
1345 err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1346#endif
1341 1347
1342 return err; 1348 return err;
1343} 1349}
@@ -1445,15 +1451,6 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
1445 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation); 1451 dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
1446 break; 1452 break;
1447 1453
1448 default:
1449#ifdef CONFIG_NETFILTER
1450 return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
1451#endif
1452 case DSO_LINKINFO:
1453 case DSO_STREAM:
1454 case DSO_SEQPACKET:
1455 return -ENOPROTOOPT;
1456
1457 case DSO_MAXWINDOW: 1454 case DSO_MAXWINDOW:
1458 if (optlen != sizeof(unsigned long)) 1455 if (optlen != sizeof(unsigned long))
1459 return -EINVAL; 1456 return -EINVAL;
@@ -1501,6 +1498,12 @@ static int __dn_setsockopt(struct socket *sock, int level,int optname, char __us
1501 return -EINVAL; 1498 return -EINVAL;
1502 scp->info_loc = u.info; 1499 scp->info_loc = u.info;
1503 break; 1500 break;
1501
1502 case DSO_LINKINFO:
1503 case DSO_STREAM:
1504 case DSO_SEQPACKET:
1505 default:
1506 return -ENOPROTOOPT;
1504 } 1507 }
1505 1508
1506 return 0; 1509 return 0;
@@ -1514,6 +1517,20 @@ static int dn_getsockopt(struct socket *sock, int level, int optname, char __use
1514 lock_sock(sk); 1517 lock_sock(sk);
1515 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0); 1518 err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
1516 release_sock(sk); 1519 release_sock(sk);
1520#ifdef CONFIG_NETFILTER
1521 if (err == -ENOPROTOOPT && optname != DSO_STREAM &&
1522 optname != DSO_SEQPACKET && optname != DSO_CONACCEPT &&
1523 optname != DSO_CONREJECT) {
1524 int len;
1525
1526 if (get_user(len, optlen))
1527 return -EFAULT;
1528
1529 err = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
1530 if (err >= 0)
1531 err = put_user(len, optlen);
1532 }
1533#endif
1517 1534
1518 return err; 1535 return err;
1519} 1536}
@@ -1579,26 +1596,6 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1579 r_data = &link; 1596 r_data = &link;
1580 break; 1597 break;
1581 1598
1582 default:
1583#ifdef CONFIG_NETFILTER
1584 {
1585 int ret, len;
1586
1587 if (get_user(len, optlen))
1588 return -EFAULT;
1589
1590 ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
1591 if (ret >= 0)
1592 ret = put_user(len, optlen);
1593 return ret;
1594 }
1595#endif
1596 case DSO_STREAM:
1597 case DSO_SEQPACKET:
1598 case DSO_CONACCEPT:
1599 case DSO_CONREJECT:
1600 return -ENOPROTOOPT;
1601
1602 case DSO_MAXWINDOW: 1599 case DSO_MAXWINDOW:
1603 if (r_len > sizeof(unsigned long)) 1600 if (r_len > sizeof(unsigned long))
1604 r_len = sizeof(unsigned long); 1601 r_len = sizeof(unsigned long);
@@ -1630,6 +1627,13 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
1630 r_len = sizeof(unsigned char); 1627 r_len = sizeof(unsigned char);
1631 r_data = &scp->info_rem; 1628 r_data = &scp->info_rem;
1632 break; 1629 break;
1630
1631 case DSO_STREAM:
1632 case DSO_SEQPACKET:
1633 case DSO_CONACCEPT:
1634 case DSO_CONREJECT:
1635 default:
1636 return -ENOPROTOOPT;
1633 } 1637 }
1634 1638
1635 if (r_data) { 1639 if (r_data) {
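
The resulting dispatch shape for both the set and get paths: the core handler runs under lock_sock() and signals unrecognised options with -ENOPROTOOPT; only then, with the socket lock released, is netfilter consulted, and the options that must always fail are filtered out explicitly. Sketched (dn_opt_is_core() is a hypothetical predicate standing in for the open-coded optname checks):

    lock_sock(sk);
    err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
    release_sock(sk);
    #ifdef CONFIG_NETFILTER
    if (err == -ENOPROTOOPT && !dn_opt_is_core(optname))
            err = nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
    #endif
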
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c586597da20d..7d36a950d961 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -646,6 +646,11 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
646 fi->fib_nh, cfg, extack)) 646 fi->fib_nh, cfg, extack))
647 return 1; 647 return 1;
648 } 648 }
649#ifdef CONFIG_IP_ROUTE_CLASSID
650 if (cfg->fc_flow &&
651 cfg->fc_flow != fi->fib_nh->nh_tclassid)
652 return 1;
653#endif
649 if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) && 654 if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
650 (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw)) 655 (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
651 return 0; 656 return 0;
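
What the added classid test changes in practice, a worked example (realms are only compiled in with CONFIG_IP_ROUTE_CLASSID; addresses are illustrative):

    /* installed:  ip route add 10.0.0.0/24 via 192.0.2.1 realm 5
     *             -> fi->fib_nh->nh_tclassid == 5
     * requested:  ip route del 10.0.0.0/24 via 192.0.2.1 realm 6
     *             -> cfg->fc_flow == 6
     * new check:  fc_flow set and != nh_tclassid -> return 1 (no match),
     * so a delete naming the wrong realm no longer removes the route.
     */
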
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index e9f985e42405..b2bca373f8be 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2027,6 +2027,24 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
2027 } 2027 }
2028} 2028}
2029 2029
2030static bool tcp_can_coalesce_send_queue_head(struct sock *sk, int len)
2031{
2032 struct sk_buff *skb, *next;
2033
2034 skb = tcp_send_head(sk);
2035 tcp_for_write_queue_from_safe(skb, next, sk) {
2036 if (len <= skb->len)
2037 break;
2038
2039 if (unlikely(TCP_SKB_CB(skb)->eor))
2040 return false;
2041
2042 len -= skb->len;
2043 }
2044
2045 return true;
2046}
2047
2030/* Create a new MTU probe if we are ready. 2048/* Create a new MTU probe if we are ready.
2031 * MTU probe is regularly attempting to increase the path MTU by 2049 * MTU probe is regularly attempting to increase the path MTU by
2032 * deliberately sending larger packets. This discovers routing 2050 * deliberately sending larger packets. This discovers routing
@@ -2099,6 +2117,9 @@ static int tcp_mtu_probe(struct sock *sk)
2099 return 0; 2117 return 0;
2100 } 2118 }
2101 2119
2120 if (!tcp_can_coalesce_send_queue_head(sk, probe_size))
2121 return -1;
2122
2102 /* We're allowed to probe. Build it now. */ 2123 /* We're allowed to probe. Build it now. */
2103 nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false); 2124 nskb = sk_stream_alloc_skb(sk, probe_size, GFP_ATOMIC, false);
2104 if (!nskb) 2125 if (!nskb)
@@ -2134,6 +2155,10 @@ static int tcp_mtu_probe(struct sock *sk)
2134 /* We've eaten all the data from this skb. 2155 /* We've eaten all the data from this skb.
2135 * Throw it away. */ 2156 * Throw it away. */
2136 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; 2157 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
2158 /* If this is the last SKB we copy and eor is set
2159 * we need to propagate it to the new skb.
2160 */
2161 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor;
2137 tcp_unlink_write_queue(skb, sk); 2162 tcp_unlink_write_queue(skb, sk);
2138 sk_wmem_free_skb(sk, skb); 2163 sk_wmem_free_skb(sk, skb);
2139 } else { 2164 } else {
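
A worked pass of the new helper, showing why an EOR boundary aborts the MTU probe:

    /* probe_size = 1000; queue head: skb A (len 512, eor), skb B (len 600)
     *
     *   tcp_can_coalesce_send_queue_head(sk, 1000):
     *     skb = A: 1000 > 512 and A->eor is set -> return false
     *
     * so tcp_mtu_probe() returns -1 instead of merging A and B into one
     * probe skb and silently dropping the MSG_EOR record boundary. When
     * the last merged skb is consumed whole, the second hunk propagates
     * its eor bit to the probe skb instead.
     */
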
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index bfaefe560b5c..e5ef7c38c934 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2024,6 +2024,11 @@ static inline int udp4_csum_init(struct sk_buff *skb, struct udphdr *uh,
2024 err = udplite_checksum_init(skb, uh); 2024 err = udplite_checksum_init(skb, uh);
2025 if (err) 2025 if (err)
2026 return err; 2026 return err;
2027
2028 if (UDP_SKB_CB(skb)->partial_cov) {
2029 skb->csum = inet_compute_pseudo(skb, proto);
2030 return 0;
2031 }
2027 } 2032 }
2028 2033
2029 /* Note, we are only interested in != 0 or == 0, thus the 2034 /* Note, we are only interested in != 0 or == 0, thus the
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index ec43d18b5ff9..547515e8450a 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -73,6 +73,11 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
73 err = udplite_checksum_init(skb, uh); 73 err = udplite_checksum_init(skb, uh);
74 if (err) 74 if (err)
75 return err; 75 return err;
76
77 if (UDP_SKB_CB(skb)->partial_cov) {
78 skb->csum = ip6_compute_pseudo(skb, proto);
79 return 0;
80 }
76 } 81 }
77 82
78 /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels) 83 /* To support RFC 6936 (allow zero checksum in UDP/IPV6 for tunnels)
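
The reasoning shared by the udplite.h, udp.c and ip6_checksum.c hunks: with partial coverage only the first cscov bytes are checksummed, so a device-provided sum over the whole packet proves nothing (hence csum_valid = 0 alongside the CHECKSUM_NONE downgrade), and the early return seeds skb->csum with the pseudo-header sum so software validation covers exactly the protected range. Sketched:

    /* UDP-Lite receive, cscov < packet length:
     *   udplite_checksum_init():  partial_cov set, cscov recorded,
     *                             CHECKSUM_COMPLETE -> CHECKSUM_NONE,
     *                             csum_valid = 0
     *   udp{4,6}_csum_init():     skb->csum = pseudo-header sum;
     *                             return 0 (skip the full-packet logic)
     *   later validation:         fold(skb->csum + sum of first cscov
     *                             bytes) must be zero
     */
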
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 367d8c027101..2ceefa183cee 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -149,6 +149,10 @@ struct nfc_llcp_sdp_tlv *nfc_llcp_build_sdreq_tlv(u8 tid, char *uri,
149 149
150 pr_debug("uri: %s, len: %zu\n", uri, uri_len); 150 pr_debug("uri: %s, len: %zu\n", uri, uri_len);
151 151
152 /* sdreq->tlv_len is u8, takes uri_len, + 3 for header, + 1 for NULL */
153 if (WARN_ON_ONCE(uri_len > U8_MAX - 4))
154 return NULL;
155
152 sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL); 156 sdreq = kzalloc(sizeof(struct nfc_llcp_sdp_tlv), GFP_KERNEL);
153 if (sdreq == NULL) 157 if (sdreq == NULL)
154 return NULL; 158 return NULL;
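
The arithmetic behind U8_MAX - 4: tlv_len is a u8 and must hold uri_len plus 3 bytes of TLV header plus a terminating NUL, so oversized URIs wrap:

    /* uri_len = 252: 252 + 4 = 256 = 0x100 -> tlv_len truncates to 0
     * uri_len = 255: 255 + 4 = 0x103       -> tlv_len truncates to 3
     * cap: uri_len <= U8_MAX - 4 = 251 keeps tlv_len representable; the
     * NLA_STRING .len in the netlink.c hunk below rejects oversized URIs
     * at the netlink boundary before they ever reach this builder.
     */
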
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index c0b83dc9d993..f018eafc2a0d 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -61,7 +61,8 @@ static const struct nla_policy nfc_genl_policy[NFC_ATTR_MAX + 1] = {
61}; 61};
62 62
63static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = { 63static const struct nla_policy nfc_sdp_genl_policy[NFC_SDP_ATTR_MAX + 1] = {
64 [NFC_SDP_ATTR_URI] = { .type = NLA_STRING }, 64 [NFC_SDP_ATTR_URI] = { .type = NLA_STRING,
65 .len = U8_MAX - 4 },
65 [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 }, 66 [NFC_SDP_ATTR_SAP] = { .type = NLA_U8 },
66}; 67};
67 68
diff --git a/net/rds/connection.c b/net/rds/connection.c
index 94e190febfdd..2da3176bf792 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -224,7 +224,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
224 if (rds_destroy_pending(conn)) 224 if (rds_destroy_pending(conn))
225 ret = -ENETDOWN; 225 ret = -ENETDOWN;
226 else 226 else
227 ret = trans->conn_alloc(conn, gfp); 227 ret = trans->conn_alloc(conn, GFP_ATOMIC);
228 if (ret) { 228 if (ret) {
229 rcu_read_unlock(); 229 rcu_read_unlock();
230 kfree(conn->c_path); 230 kfree(conn->c_path);
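
Context for the GFP_ATOMIC switch: as the hunk shows, conn_alloc() is invoked inside an RCU read-side critical section (note the rcu_read_unlock() in the error path), where sleeping is forbidden, so a caller-supplied gfp that may be GFP_KERNEL cannot be passed through. The constraint, sketched:

    rcu_read_lock();
    /* no sleeping until rcu_read_unlock(): GFP_KERNEL may sleep under
     * memory pressure, GFP_ATOMIC may not
     */
    obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
    rcu_read_unlock();
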
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index cc21e8db25b0..9d45d8b56744 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -517,9 +517,10 @@ try_again:
517 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, 517 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
518 sizeof(unsigned int), &id32); 518 sizeof(unsigned int), &id32);
519 } else { 519 } else {
520 unsigned long idl = call->user_call_ID;
521
520 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID, 522 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_USER_CALL_ID,
521 sizeof(unsigned long), 523 sizeof(unsigned long), &idl);
522 &call->user_call_ID);
523 } 524 }
524 if (ret < 0) 525 if (ret < 0)
525 goto error_unlock_call; 526 goto error_unlock_call;
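
The workaround's mechanism: with CONFIG_HARDENED_USERCOPY, copying to user space from the middle of a slab object (here, the user_call_ID field inside struct rxrpc_call) can trip the usercopy check, so the value is bounced through a stack variable first. The generic pattern, sketched with placeholder names:

    /* instead of copy_to_user(uptr, &obj->field, sizeof(obj->field)): */
    unsigned long tmp = obj->field;     /* stack copy */

    if (copy_to_user(uptr, &tmp, sizeof(tmp)))
            return -EFAULT;
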
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2bc1bc23d42e..a7dc7271042a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -376,17 +376,12 @@ struct tcf_net {
376static unsigned int tcf_net_id; 376static unsigned int tcf_net_id;
377 377
378static int tcf_block_insert(struct tcf_block *block, struct net *net, 378static int tcf_block_insert(struct tcf_block *block, struct net *net,
379 u32 block_index, struct netlink_ext_ack *extack) 379 struct netlink_ext_ack *extack)
380{ 380{
381 struct tcf_net *tn = net_generic(net, tcf_net_id); 381 struct tcf_net *tn = net_generic(net, tcf_net_id);
382 int err;
383 382
384 err = idr_alloc_u32(&tn->idr, block, &block_index, block_index, 383 return idr_alloc_u32(&tn->idr, block, &block->index, block->index,
385 GFP_KERNEL); 384 GFP_KERNEL);
386 if (err)
387 return err;
388 block->index = block_index;
389 return 0;
390} 385}
391 386
392static void tcf_block_remove(struct tcf_block *block, struct net *net) 387static void tcf_block_remove(struct tcf_block *block, struct net *net)
@@ -397,6 +392,7 @@ static void tcf_block_remove(struct tcf_block *block, struct net *net)
397} 392}
398 393
399static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q, 394static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
395 u32 block_index,
400 struct netlink_ext_ack *extack) 396 struct netlink_ext_ack *extack)
401{ 397{
402 struct tcf_block *block; 398 struct tcf_block *block;
@@ -419,10 +415,13 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
419 err = -ENOMEM; 415 err = -ENOMEM;
420 goto err_chain_create; 416 goto err_chain_create;
421 } 417 }
422 block->net = qdisc_net(q);
423 block->refcnt = 1; 418 block->refcnt = 1;
424 block->net = net; 419 block->net = net;
425 block->q = q; 420 block->index = block_index;
421
422 /* Don't store q pointer for blocks which are shared */
423 if (!tcf_block_shared(block))
424 block->q = q;
426 return block; 425 return block;
427 426
428err_chain_create: 427err_chain_create:
@@ -518,13 +517,12 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
518 } 517 }
519 518
520 if (!block) { 519 if (!block) {
521 block = tcf_block_create(net, q, extack); 520 block = tcf_block_create(net, q, ei->block_index, extack);
522 if (IS_ERR(block)) 521 if (IS_ERR(block))
523 return PTR_ERR(block); 522 return PTR_ERR(block);
524 created = true; 523 created = true;
525 if (ei->block_index) { 524 if (tcf_block_shared(block)) {
526 err = tcf_block_insert(block, net, 525 err = tcf_block_insert(block, net, extack);
527 ei->block_index, extack);
528 if (err) 526 if (err)
529 goto err_block_insert; 527 goto err_block_insert;
530 } 528 }
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 6c7601a530e3..ed8b6a24b9e9 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -96,7 +96,7 @@ struct tc_u_hnode {
96 96
97struct tc_u_common { 97struct tc_u_common {
98 struct tc_u_hnode __rcu *hlist; 98 struct tc_u_hnode __rcu *hlist;
99 struct tcf_block *block; 99 void *ptr;
100 int refcnt; 100 int refcnt;
101 struct idr handle_idr; 101 struct idr handle_idr;
102 struct hlist_node hnode; 102 struct hlist_node hnode;
@@ -330,9 +330,25 @@ static struct hlist_head *tc_u_common_hash;
330#define U32_HASH_SHIFT 10 330#define U32_HASH_SHIFT 10
331#define U32_HASH_SIZE (1 << U32_HASH_SHIFT) 331#define U32_HASH_SIZE (1 << U32_HASH_SHIFT)
332 332
333static void *tc_u_common_ptr(const struct tcf_proto *tp)
334{
335 struct tcf_block *block = tp->chain->block;
336
337 /* The block sharing is currently supported only
338 * for classless qdiscs. In that case we use block
339 * for tc_u_common identification. In case the
340 * block is not shared, block->q is a valid pointer
341 * and we can use that. That works for classful qdiscs.
342 */
343 if (tcf_block_shared(block))
344 return block;
345 else
346 return block->q;
347}
348
333static unsigned int tc_u_hash(const struct tcf_proto *tp) 349static unsigned int tc_u_hash(const struct tcf_proto *tp)
334{ 350{
335 return hash_ptr(tp->chain->block, U32_HASH_SHIFT); 351 return hash_ptr(tc_u_common_ptr(tp), U32_HASH_SHIFT);
336} 352}
337 353
338static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp) 354static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
@@ -342,7 +358,7 @@ static struct tc_u_common *tc_u_common_find(const struct tcf_proto *tp)
342 358
343 h = tc_u_hash(tp); 359 h = tc_u_hash(tp);
344 hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) { 360 hlist_for_each_entry(tc, &tc_u_common_hash[h], hnode) {
345 if (tc->block == tp->chain->block) 361 if (tc->ptr == tc_u_common_ptr(tp))
346 return tc; 362 return tc;
347 } 363 }
348 return NULL; 364 return NULL;
@@ -371,7 +387,7 @@ static int u32_init(struct tcf_proto *tp)
371 kfree(root_ht); 387 kfree(root_ht);
372 return -ENOBUFS; 388 return -ENOBUFS;
373 } 389 }
374 tp_c->block = tp->chain->block; 390 tp_c->ptr = tc_u_common_ptr(tp);
375 INIT_HLIST_NODE(&tp_c->hnode); 391 INIT_HLIST_NODE(&tp_c->hnode);
376 idr_init(&tp_c->handle_idr); 392 idr_init(&tp_c->handle_idr);
377 393
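
The identity rule the cls_api.c and cls_u32.c hunks agree on: a shared block keeps block->q NULL and is identified by the block pointer itself, while an unshared block is identified by its qdisc. Summarised:

    /* tcf_block_shared(block) true:   block->q == NULL, key = block
     * tcf_block_shared(block) false:  block->q == q,    key = block->q
     *
     * tc_u_common_ptr() returns that key, so two u32 classifiers share a
     * tc_u_common exactly when they resolve to the same stable pointer,
     * for classless (shared-capable) and classful qdiscs alike.
     */
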
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 291c97b07058..8f6c2e8c0953 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -81,6 +81,12 @@ const char *sctp_cname(const union sctp_subtype cid)
81 case SCTP_CID_RECONF: 81 case SCTP_CID_RECONF:
82 return "RECONF"; 82 return "RECONF";
83 83
84 case SCTP_CID_I_DATA:
85 return "I_DATA";
86
87 case SCTP_CID_I_FWD_TSN:
88 return "I_FWD_TSN";
89
84 default: 90 default:
85 break; 91 break;
86 } 92 }
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 141c9c466ec1..0247cc432e02 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -897,15 +897,12 @@ int sctp_hash_transport(struct sctp_transport *t)
897 rhl_for_each_entry_rcu(transport, tmp, list, node) 897 rhl_for_each_entry_rcu(transport, tmp, list, node)
898 if (transport->asoc->ep == t->asoc->ep) { 898 if (transport->asoc->ep == t->asoc->ep) {
899 rcu_read_unlock(); 899 rcu_read_unlock();
900 err = -EEXIST; 900 return -EEXIST;
901 goto out;
902 } 901 }
903 rcu_read_unlock(); 902 rcu_read_unlock();
904 903
905 err = rhltable_insert_key(&sctp_transport_hashtable, &arg, 904 err = rhltable_insert_key(&sctp_transport_hashtable, &arg,
906 &t->node, sctp_hash_params); 905 &t->node, sctp_hash_params);
907
908out:
909 if (err) 906 if (err)
910 pr_err_once("insert transport fail, errno %d\n", err); 907 pr_err_once("insert transport fail, errno %d\n", err);
911 908
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index cedf672487f9..f799043abec9 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * This file is part of the SCTP kernel implementation 7 * This file is part of the SCTP kernel implementation
8 * 8 *
9 * These functions manipulate sctp tsn mapping array. 9 * This file contains sctp stream manipulation primitives and helpers.
10 * 10 *
11 * This SCTP implementation is free software; 11 * This SCTP implementation is free software;
12 * you can redistribute it and/or modify it under the terms of 12 * you can redistribute it and/or modify it under the terms of
diff --git a/net/sctp/stream_interleave.c b/net/sctp/stream_interleave.c
index 8c7cf8f08711..d3764c181299 100644
--- a/net/sctp/stream_interleave.c
+++ b/net/sctp/stream_interleave.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * This file is part of the SCTP kernel implementation 4 * This file is part of the SCTP kernel implementation
5 * 5 *
6 * These functions manipulate sctp stream queue/scheduling. 6 * These functions implement sctp stream message interleaving, mostly
7 * including I-DATA and I-FORWARD-TSN chunk processing.
7 * 8 *
8 * This SCTP implementation is free software; 9 * This SCTP implementation is free software;
9 * you can redistribute it and/or modify it under the terms of 10 * you can redistribute it and/or modify it under the terms of
@@ -954,12 +955,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
954 __u32 freed = 0; 955 __u32 freed = 0;
955 __u16 needed; 956 __u16 needed;
956 957
957 if (chunk) { 958 needed = ntohs(chunk->chunk_hdr->length) -
958 needed = ntohs(chunk->chunk_hdr->length); 959 sizeof(struct sctp_idata_chunk);
959 needed -= sizeof(struct sctp_idata_chunk);
960 } else {
961 needed = SCTP_DEFAULT_MAXWINDOW;
962 }
963 960
964 if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { 961 if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
965 freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed); 962 freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
@@ -971,9 +968,8 @@ static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
971 needed); 968 needed);
972 } 969 }
973 970
974 if (chunk && freed >= needed) 971 if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
975 if (sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0) 972 sctp_intl_start_pd(ulpq, gfp);
976 sctp_intl_start_pd(ulpq, gfp);
977 973
978 sk_mem_reclaim(asoc->base.sk); 974 sk_mem_reclaim(asoc->base.sk);
979} 975}
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index c8001471da6c..3e3dce3d4c63 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -813,7 +813,7 @@ err_out:
813 return err; 813 return err;
814} 814}
815 815
816int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info) 816int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
817{ 817{
818 int err; 818 int err;
819 char *name; 819 char *name;
@@ -835,20 +835,27 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
835 835
836 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); 836 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
837 837
838 rtnl_lock();
839 bearer = tipc_bearer_find(net, name); 838 bearer = tipc_bearer_find(net, name);
840 if (!bearer) { 839 if (!bearer)
841 rtnl_unlock();
842 return -EINVAL; 840 return -EINVAL;
843 }
844 841
845 bearer_disable(net, bearer); 842 bearer_disable(net, bearer);
846 rtnl_unlock();
847 843
848 return 0; 844 return 0;
849} 845}
850 846
851int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info) 847int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
848{
849 int err;
850
851 rtnl_lock();
852 err = __tipc_nl_bearer_disable(skb, info);
853 rtnl_unlock();
854
855 return err;
856}
857
858int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
852{ 859{
853 int err; 860 int err;
854 char *bearer; 861 char *bearer;
@@ -890,15 +897,18 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
890 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); 897 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
891 } 898 }
892 899
900 return tipc_enable_bearer(net, bearer, domain, prio, attrs);
901}
902
903int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
904{
905 int err;
906
893 rtnl_lock(); 907 rtnl_lock();
894 err = tipc_enable_bearer(net, bearer, domain, prio, attrs); 908 err = __tipc_nl_bearer_enable(skb, info);
895 if (err) {
896 rtnl_unlock();
897 return err;
898 }
899 rtnl_unlock(); 909 rtnl_unlock();
900 910
901 return 0; 911 return err;
902} 912}
903 913
904int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info) 914int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
@@ -944,7 +954,7 @@ int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info)
944 return 0; 954 return 0;
945} 955}
946 956
947int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info) 957int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
948{ 958{
949 int err; 959 int err;
950 char *name; 960 char *name;
@@ -965,22 +975,17 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
965 return -EINVAL; 975 return -EINVAL;
966 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); 976 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
967 977
968 rtnl_lock();
969 b = tipc_bearer_find(net, name); 978 b = tipc_bearer_find(net, name);
970 if (!b) { 979 if (!b)
971 rtnl_unlock();
972 return -EINVAL; 980 return -EINVAL;
973 }
974 981
975 if (attrs[TIPC_NLA_BEARER_PROP]) { 982 if (attrs[TIPC_NLA_BEARER_PROP]) {
976 struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; 983 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
977 984
978 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP], 985 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
979 props); 986 props);
980 if (err) { 987 if (err)
981 rtnl_unlock();
982 return err; 988 return err;
983 }
984 989
985 if (props[TIPC_NLA_PROP_TOL]) 990 if (props[TIPC_NLA_PROP_TOL])
986 b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 991 b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
@@ -989,11 +994,21 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
989 if (props[TIPC_NLA_PROP_WIN]) 994 if (props[TIPC_NLA_PROP_WIN])
990 b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 995 b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
991 } 996 }
992 rtnl_unlock();
993 997
994 return 0; 998 return 0;
995} 999}
996 1000
1001int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
1002{
1003 int err;
1004
1005 rtnl_lock();
1006 err = __tipc_nl_bearer_set(skb, info);
1007 rtnl_unlock();
1008
1009 return err;
1010}
1011
997static int __tipc_nl_add_media(struct tipc_nl_msg *msg, 1012static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
998 struct tipc_media *media, int nlflags) 1013 struct tipc_media *media, int nlflags)
999{ 1014{
@@ -1115,7 +1130,7 @@ err_out:
1115 return err; 1130 return err;
1116} 1131}
1117 1132
1118int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info) 1133int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
1119{ 1134{
1120 int err; 1135 int err;
1121 char *name; 1136 char *name;
@@ -1133,22 +1148,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
1133 return -EINVAL; 1148 return -EINVAL;
1134 name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]); 1149 name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
1135 1150
1136 rtnl_lock();
1137 m = tipc_media_find(name); 1151 m = tipc_media_find(name);
1138 if (!m) { 1152 if (!m)
1139 rtnl_unlock();
1140 return -EINVAL; 1153 return -EINVAL;
1141 }
1142 1154
1143 if (attrs[TIPC_NLA_MEDIA_PROP]) { 1155 if (attrs[TIPC_NLA_MEDIA_PROP]) {
1144 struct nlattr *props[TIPC_NLA_PROP_MAX + 1]; 1156 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1145 1157
1146 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP], 1158 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP],
1147 props); 1159 props);
1148 if (err) { 1160 if (err)
1149 rtnl_unlock();
1150 return err; 1161 return err;
1151 }
1152 1162
1153 if (props[TIPC_NLA_PROP_TOL]) 1163 if (props[TIPC_NLA_PROP_TOL])
1154 m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]); 1164 m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
@@ -1157,7 +1167,17 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
1157 if (props[TIPC_NLA_PROP_WIN]) 1167 if (props[TIPC_NLA_PROP_WIN])
1158 m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]); 1168 m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1159 } 1169 }
1160 rtnl_unlock();
1161 1170
1162 return 0; 1171 return 0;
1163} 1172}
1173
1174int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
1175{
1176 int err;
1177
1178 rtnl_lock();
1179 err = __tipc_nl_media_set(skb, info);
1180 rtnl_unlock();
1181
1182 return err;
1183}
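
The refactor repeated across the TIPC hunks: each doit handler splits into a lockless __variant plus a thin public wrapper that takes RTNL, so the compat path (which, per the netlink_compat.c hunk below, now holds rtnl_lock() around the whole transcode-parse-doit transaction) can call the __variant directly without deadlocking. The pattern, sketched with a placeholder name:

    int __tipc_nl_foo_set(struct sk_buff *skb, struct genl_info *info)
    {
            /* caller holds rtnl_lock(); do the real work here */
            return 0;
    }

    int tipc_nl_foo_set(struct sk_buff *skb, struct genl_info *info)
    {
            int err;

            rtnl_lock();
            err = __tipc_nl_foo_set(skb, info);
            rtnl_unlock();

            return err;
    }
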
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 42d6eeeb646d..a53613d95bc9 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -188,15 +188,19 @@ extern struct tipc_media udp_media_info;
188#endif 188#endif
189 189
190int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info); 190int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
191int __tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
191int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info); 192int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
193int __tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
192int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb); 194int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
193int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info); 195int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info);
194int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info); 196int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
197int __tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
195int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info); 198int tipc_nl_bearer_add(struct sk_buff *skb, struct genl_info *info);
196 199
197int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb); 200int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
198int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info); 201int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info);
199int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info); 202int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
203int __tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
200 204
201int tipc_media_set_priority(const char *name, u32 new_value); 205int tipc_media_set_priority(const char *name, u32 new_value);
202int tipc_media_set_window(const char *name, u32 new_value); 206int tipc_media_set_window(const char *name, u32 new_value);
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 719c5924b638..1a2fde0d6f61 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -200,7 +200,7 @@ out:
200 return skb->len; 200 return skb->len;
201} 201}
202 202
203int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) 203int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
204{ 204{
205 struct net *net = sock_net(skb->sk); 205 struct net *net = sock_net(skb->sk);
206 struct tipc_net *tn = net_generic(net, tipc_net_id); 206 struct tipc_net *tn = net_generic(net, tipc_net_id);
@@ -241,10 +241,19 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
241 if (!tipc_addr_node_valid(addr)) 241 if (!tipc_addr_node_valid(addr))
242 return -EINVAL; 242 return -EINVAL;
243 243
244 rtnl_lock();
245 tipc_net_start(net, addr); 244 tipc_net_start(net, addr);
246 rtnl_unlock();
247 } 245 }
248 246
249 return 0; 247 return 0;
250} 248}
249
250int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
251{
252 int err;
253
254 rtnl_lock();
255 err = __tipc_nl_net_set(skb, info);
256 rtnl_unlock();
257
258 return err;
259}
diff --git a/net/tipc/net.h b/net/tipc/net.h
index c7c254902873..c0306aa2374b 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -47,5 +47,6 @@ void tipc_net_stop(struct net *net);
47 47
48int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); 48int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
49int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); 49int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
50int __tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
50 51
51#endif 52#endif
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index e48f0b2c01b9..4492cda45566 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -285,10 +285,6 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
285 if (!trans_buf) 285 if (!trans_buf)
286 return -ENOMEM; 286 return -ENOMEM;
287 287
288 err = (*cmd->transcode)(cmd, trans_buf, msg);
289 if (err)
290 goto trans_out;
291
292 attrbuf = kmalloc((tipc_genl_family.maxattr + 1) * 288 attrbuf = kmalloc((tipc_genl_family.maxattr + 1) *
293 sizeof(struct nlattr *), GFP_KERNEL); 289 sizeof(struct nlattr *), GFP_KERNEL);
294 if (!attrbuf) { 290 if (!attrbuf) {
@@ -296,27 +292,34 @@ static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
296 goto trans_out; 292 goto trans_out;
297 } 293 }
298 294
299 err = nla_parse(attrbuf, tipc_genl_family.maxattr,
300 (const struct nlattr *)trans_buf->data,
301 trans_buf->len, NULL, NULL);
302 if (err)
303 goto parse_out;
304
305 doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL); 295 doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
306 if (!doit_buf) { 296 if (!doit_buf) {
307 err = -ENOMEM; 297 err = -ENOMEM;
308 goto parse_out; 298 goto attrbuf_out;
309 } 299 }
310 300
311 doit_buf->sk = msg->dst_sk;
312
313 memset(&info, 0, sizeof(info)); 301 memset(&info, 0, sizeof(info));
314 info.attrs = attrbuf; 302 info.attrs = attrbuf;
315 303
304 rtnl_lock();
305 err = (*cmd->transcode)(cmd, trans_buf, msg);
306 if (err)
307 goto doit_out;
308
309 err = nla_parse(attrbuf, tipc_genl_family.maxattr,
310 (const struct nlattr *)trans_buf->data,
311 trans_buf->len, NULL, NULL);
312 if (err)
313 goto doit_out;
314
315 doit_buf->sk = msg->dst_sk;
316
316 err = (*cmd->doit)(doit_buf, &info); 317 err = (*cmd->doit)(doit_buf, &info);
318doit_out:
319 rtnl_unlock();
317 320
318 kfree_skb(doit_buf); 321 kfree_skb(doit_buf);
319parse_out: 322attrbuf_out:
320 kfree(attrbuf); 323 kfree(attrbuf);
321trans_out: 324trans_out:
322 kfree_skb(trans_buf); 325 kfree_skb(trans_buf);
@@ -722,13 +725,13 @@ static int tipc_nl_compat_link_set(struct tipc_nl_compat_cmd_doit *cmd,
722 725
723 media = tipc_media_find(lc->name); 726 media = tipc_media_find(lc->name);
724 if (media) { 727 if (media) {
725 cmd->doit = &tipc_nl_media_set; 728 cmd->doit = &__tipc_nl_media_set;
726 return tipc_nl_compat_media_set(skb, msg); 729 return tipc_nl_compat_media_set(skb, msg);
727 } 730 }
728 731
729 bearer = tipc_bearer_find(msg->net, lc->name); 732 bearer = tipc_bearer_find(msg->net, lc->name);
730 if (bearer) { 733 if (bearer) {
731 cmd->doit = &tipc_nl_bearer_set; 734 cmd->doit = &__tipc_nl_bearer_set;
732 return tipc_nl_compat_bearer_set(skb, msg); 735 return tipc_nl_compat_bearer_set(skb, msg);
733 } 736 }
734 737
@@ -1089,12 +1092,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
1089 return tipc_nl_compat_dumpit(&dump, msg); 1092 return tipc_nl_compat_dumpit(&dump, msg);
1090 case TIPC_CMD_ENABLE_BEARER: 1093 case TIPC_CMD_ENABLE_BEARER:
1091 msg->req_type = TIPC_TLV_BEARER_CONFIG; 1094 msg->req_type = TIPC_TLV_BEARER_CONFIG;
1092 doit.doit = tipc_nl_bearer_enable; 1095 doit.doit = __tipc_nl_bearer_enable;
1093 doit.transcode = tipc_nl_compat_bearer_enable; 1096 doit.transcode = tipc_nl_compat_bearer_enable;
1094 return tipc_nl_compat_doit(&doit, msg); 1097 return tipc_nl_compat_doit(&doit, msg);
1095 case TIPC_CMD_DISABLE_BEARER: 1098 case TIPC_CMD_DISABLE_BEARER:
1096 msg->req_type = TIPC_TLV_BEARER_NAME; 1099 msg->req_type = TIPC_TLV_BEARER_NAME;
1097 doit.doit = tipc_nl_bearer_disable; 1100 doit.doit = __tipc_nl_bearer_disable;
1098 doit.transcode = tipc_nl_compat_bearer_disable; 1101 doit.transcode = tipc_nl_compat_bearer_disable;
1099 return tipc_nl_compat_doit(&doit, msg); 1102 return tipc_nl_compat_doit(&doit, msg);
1100 case TIPC_CMD_SHOW_LINK_STATS: 1103 case TIPC_CMD_SHOW_LINK_STATS:
@@ -1148,12 +1151,12 @@ static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
1148 return tipc_nl_compat_dumpit(&dump, msg); 1151 return tipc_nl_compat_dumpit(&dump, msg);
1149 case TIPC_CMD_SET_NODE_ADDR: 1152 case TIPC_CMD_SET_NODE_ADDR:
1150 msg->req_type = TIPC_TLV_NET_ADDR; 1153 msg->req_type = TIPC_TLV_NET_ADDR;
1151 doit.doit = tipc_nl_net_set; 1154 doit.doit = __tipc_nl_net_set;
1152 doit.transcode = tipc_nl_compat_net_set; 1155 doit.transcode = tipc_nl_compat_net_set;
1153 return tipc_nl_compat_doit(&doit, msg); 1156 return tipc_nl_compat_doit(&doit, msg);
1154 case TIPC_CMD_SET_NETID: 1157 case TIPC_CMD_SET_NETID:
1155 msg->req_type = TIPC_TLV_UNSIGNED; 1158 msg->req_type = TIPC_TLV_UNSIGNED;
1156 doit.doit = tipc_nl_net_set; 1159 doit.doit = __tipc_nl_net_set;
1157 doit.transcode = tipc_nl_compat_net_set; 1160 doit.transcode = tipc_nl_compat_net_set;
1158 return tipc_nl_compat_doit(&doit, msg); 1161 return tipc_nl_compat_doit(&doit, msg);
1159 case TIPC_CMD_GET_NETID: 1162 case TIPC_CMD_GET_NETID:
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index b0d5fcea47e7..e9b4b53ab53e 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -308,8 +308,11 @@ static int do_tls_getsockopt_tx(struct sock *sk, char __user *optval,
308 goto out; 308 goto out;
309 } 309 }
310 lock_sock(sk); 310 lock_sock(sk);
311 memcpy(crypto_info_aes_gcm_128->iv, ctx->iv, 311 memcpy(crypto_info_aes_gcm_128->iv,
312 ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE,
312 TLS_CIPHER_AES_GCM_128_IV_SIZE); 313 TLS_CIPHER_AES_GCM_128_IV_SIZE);
314 memcpy(crypto_info_aes_gcm_128->rec_seq, ctx->rec_seq,
315 TLS_CIPHER_AES_GCM_128_REC_SEQ_SIZE);
313 release_sock(sk); 316 release_sock(sk);
314 if (copy_to_user(optval, 317 if (copy_to_user(optval,
315 crypto_info_aes_gcm_128, 318 crypto_info_aes_gcm_128,
@@ -375,7 +378,7 @@ static int do_tls_setsockopt_tx(struct sock *sk, char __user *optval,
375 rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info)); 378 rc = copy_from_user(crypto_info, optval, sizeof(*crypto_info));
376 if (rc) { 379 if (rc) {
377 rc = -EFAULT; 380 rc = -EFAULT;
378 goto out; 381 goto err_crypto_info;
379 } 382 }
380 383
381 /* check version */ 384 /* check version */
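
The layout the getsockopt fix relies on: for AES-GCM-128 the kernel stores the 4-byte salt and the 8-byte per-record IV back to back in ctx->iv, while userspace expects only the IV, so the copy must start past the salt; the record sequence number is now returned as well:

    /* ctx->iv layout (AES-GCM-128):
     *
     *   byte 0          3   4                         11
     *   +-----------------+-----------------------------+
     *   | salt (4 bytes)  | per-record IV (8 bytes)     |
     *   +-----------------+-----------------------------+
     *
     * user-visible IV   = ctx->iv + TLS_CIPHER_AES_GCM_128_SALT_SIZE
     * rec_seq (8 bytes) lives separately in ctx->rec_seq
     */
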
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index d545e1d0dea2..2d465bdeccbc 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1825,7 +1825,7 @@ out:
1825} 1825}
1826 1826
1827/* We use paged skbs for stream sockets, and limit occupancy to 32768 1827/* We use paged skbs for stream sockets, and limit occupancy to 32768
1828 * bytes, and a minimun of a full page. 1828 * bytes, and a minimum of a full page.
1829 */ 1829 */
1830#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768)) 1830#define UNIX_SKB_FRAGS_SZ (PAGE_SIZE << get_order(32768))
1831 1831