author    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-29 16:10:25 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-29 16:10:25 -0500
commit    96c22a49ac125bc4ceddc0817dfb9ff3de8aea7d
tree      ac87dea252f3bc8ede59be14e43cac05df90b0ca
parent    ef0010a30935de4e0211cbc7bdffc30446cdee9b
parent    f6454f80e8a965fca203dab28723f68ec78db608
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) The forcedeth conversion from pci_*() DMA interfaces to dma_*() ones
    missed one spot. From Zhu Yanjun.

 2) Missing CRYPTO_SHA256 Kconfig dep in cfg80211, from Johannes Berg.

 3) Fix checksum offloading in thunderx driver, from Sunil Goutham.

 4) Add SPDX to vm_sockets_diag.h, from Stephen Hemminger.

 5) Fix use after free of packet headers in TIPC, from Jon Maloy.

 6) "sizeof(ptr)" vs "sizeof(*ptr)" bug in i40e, from Gustavo A R Silva.

 7) Tunneling fixes in mlxsw driver, from Petr Machata.

 8) Fix crash in fanout_demux_rollover() of AF_PACKET, from Mike Maloney.

 9) Fix race in AF_PACKET bind() vs. NETDEV_UP notifier, from Eric
    Dumazet.

10) Fix regression in sch_sfq.c due to one of the timer_setup()
    conversions. From Paolo Abeni.

11) SCTP does list_for_each_entry() using wrong struct member, fix from
    Xin Long.

12) Don't use big endian netlink attribute read for
    IFLA_BOND_AD_ACTOR_SYSTEM, it is in cpu endianness. Also from Xin
    Long.

13) Fix mis-initialization of q->link.clock in CBQ scheduler, preventing
    adding filters there. From Jiri Pirko.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (67 commits)
  ethernet: dwmac-stm32: Fix copyright
  net: via: via-rhine: use %p to format void * address instead of %x
  net: ethernet: xilinx: Mark XILINX_LL_TEMAC broken on 64-bit
  myri10ge: Update MAINTAINERS
  net: sched: cbq: create block for q->link.block
  atm: suni: remove extraneous space to fix indentation
  atm: lanai: use %p to format kernel addresses instead of %x
  VSOCK: Don't set sk_state to TCP_CLOSE before testing it
  atm: fore200e: use %pK to format kernel addresses instead of %x
  ambassador: fix incorrect indentation of assignment statement
  vxlan: use __be32 type for the param vni in __vxlan_fdb_delete
  bonding: use nla_get_u64 to extract the value for IFLA_BOND_AD_ACTOR_SYSTEM
  sctp: use right member as the param of list_for_each_entry
  sch_sfq: fix null pointer dereference at timer expiration
  cls_bpf: don't decrement net's refcount when offload fails
  net/packet: fix a race in packet_bind() and packet_notifier()
  packet: fix crash in fanout_demux_rollover()
  sctp: remove extern from stream sched
  sctp: force the params with right types for sctp csum apis
  sctp: force SCTP_ERROR_INV_STRM with __u32 when calling sctp_chunk_fail
  ...
-rw-r--r--  MAINTAINERS | 4
-rw-r--r--  drivers/atm/ambassador.c | 2
-rw-r--r--  drivers/atm/fore200e.c | 4
-rw-r--r--  drivers/atm/lanai.c | 8
-rw-r--r--  drivers/atm/suni.c | 2
-rw-r--r--  drivers/net/bonding/bond_netlink.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 1
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_hw.c | 6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 3
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 9
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 109
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 4
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 4
-rw-r--r--  drivers/net/ethernet/xilinx/Kconfig | 1
-rw-r--r--  drivers/net/phy/marvell10g.c | 5
-rw-r--r--  drivers/net/thunderbolt.c | 57
-rw-r--r--  drivers/net/vxlan.c | 4
-rw-r--r--  drivers/net/wan/lmc/lmc_main.c | 13
-rw-r--r--  drivers/net/wireless/ath/ath9k/channel.c | 2
-rw-r--r--  drivers/net/wireless/st/cw1200/sta.c | 4
-rw-r--r--  drivers/net/wireless/ti/wl1251/main.c | 2
-rw-r--r--  drivers/net/wireless/ti/wlcore/cmd.c | 5
-rw-r--r--  drivers/net/xen-netfront.c | 18
-rw-r--r--  include/net/mac80211.h | 8
-rw-r--r--  include/net/sctp/checksum.h | 13
-rw-r--r--  include/net/sctp/sctp.h | 5
-rw-r--r--  include/net/sctp/stream_sched.h | 5
-rw-r--r--  include/trace/events/rxrpc.h | 86
-rw-r--r--  include/uapi/linux/rxrpc.h | 1
-rw-r--r--  include/uapi/linux/vm_sockets_diag.h | 1
-rw-r--r--  net/dsa/dsa2.c | 25
-rw-r--r--  net/mac80211/ht.c | 4
-rw-r--r--  net/mac80211/mesh_hwmp.c | 15
-rw-r--r--  net/mac80211/mlme.c | 2
-rw-r--r--  net/mac80211/tx.c | 29
-rw-r--r--  net/openvswitch/datapath.c | 2
-rw-r--r--  net/openvswitch/flow_netlink.c | 16
-rw-r--r--  net/packet/af_packet.c | 37
-rw-r--r--  net/packet/internal.h | 1
-rw-r--r--  net/rxrpc/af_rxrpc.c | 23
-rw-r--r--  net/rxrpc/ar-internal.h | 103
-rw-r--r--  net/rxrpc/call_accept.c | 2
-rw-r--r--  net/rxrpc/call_event.c | 229
-rw-r--r--  net/rxrpc/call_object.c | 62
-rw-r--r--  net/rxrpc/conn_client.c | 54
-rw-r--r--  net/rxrpc/conn_event.c | 74
-rw-r--r--  net/rxrpc/conn_object.c | 76
-rw-r--r--  net/rxrpc/input.c | 74
-rw-r--r--  net/rxrpc/misc.c | 19
-rw-r--r--  net/rxrpc/net_ns.c | 33
-rw-r--r--  net/rxrpc/output.c | 43
-rw-r--r--  net/rxrpc/recvmsg.c | 12
-rw-r--r--  net/rxrpc/sendmsg.c | 126
-rw-r--r--  net/rxrpc/sysctl.c | 60
-rw-r--r--  net/sched/cls_api.c | 17
-rw-r--r--  net/sched/cls_bpf.c | 23
-rw-r--r--  net/sched/sch_cbq.c | 9
-rw-r--r--  net/sched/sch_sfq.c | 1
-rw-r--r--  net/sctp/protocol.c | 1
-rw-r--r--  net/sctp/socket.c | 6
-rw-r--r--  net/sctp/stream.c | 79
-rw-r--r--  net/sctp/stream_sched.c | 25
-rw-r--r--  net/sctp/stream_sched_prio.c | 7
-rw-r--r--  net/sctp/stream_sched_rr.c | 7
-rw-r--r--  net/tipc/group.c | 2
-rw-r--r--  net/vmw_vsock/vmci_transport.c | 14
-rw-r--r--  net/wireless/Kconfig | 7
72 files changed, 1182 insertions, 582 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index aa71ab52fd76..77d819b458a9 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -9331,9 +9331,9 @@ F: drivers/gpu/drm/mxsfb/
 F:      Documentation/devicetree/bindings/display/mxsfb-drm.txt
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M:      Hyong-Youb Kim <hykim@myri.com>
+M:      Chris Lee <christopher.lee@cspi.com>
 L:      netdev@vger.kernel.org
-W:      https://www.myricom.com/support/downloads/myri10ge.html
+W:      https://www.cspi.com/ethernet-products/support/downloads/
 S:      Supported
 F:      drivers/net/ethernet/myricom/myri10ge/
 
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index dd286ad404f8..9287ec958b70 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -2258,7 +2258,7 @@ static int amb_probe(struct pci_dev *pci_dev,
 
         PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
                 dev->atm_dev->number, dev, dev->atm_dev);
-  dev->atm_dev->dev_data = (void *) dev;
+        dev->atm_dev->dev_data = (void *) dev;
 
         // register our address
         amb_esi (dev, dev->atm_dev->esi);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index 126855e6cb7d..6ebc4e4820fc 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -3083,8 +3083,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
     ASSERT(fore200e_vcc);
 
     len = sprintf(page,
-                  " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
-                  (u32)(unsigned long)vcc,
+                  " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
+                  vcc,
                   vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
                   fore200e_vcc->tx_pdu,
                   fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index 6664aa50789e..5f8e009b2da1 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -1586,8 +1586,8 @@ static int service_buffer_allocate(struct lanai_dev *lanai)
                                             lanai->pci);
         if (unlikely(lanai->service.start == NULL))
                 return -ENOMEM;
-        DPRINTK("allocated service buffer at 0x%08lX, size %zu(%d)\n",
-            (unsigned long) lanai->service.start,
+        DPRINTK("allocated service buffer at %p, size %zu(%d)\n",
+            lanai->service.start,
             lanai_buf_size(&lanai->service),
             lanai_buf_size_cardorder(&lanai->service));
         /* Clear ServWrite register to be safe */
@@ -2218,9 +2218,9 @@ static int lanai_dev_open(struct atm_dev *atmdev)
 #endif
         memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
         lanai_timed_poll_start(lanai);
-        printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
+        printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=%p, irq=%u "
                 "(%pMF)\n", lanai->number, (int) lanai->pci->revision,
-                (unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
+                lanai->base, lanai->pci->irq, atmdev->esi);
         printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
                 "board_rev=%d\n", lanai->number,
                 lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
index b8825f2d79e0..4b044710a8cf 100644
--- a/drivers/atm/suni.c
+++ b/drivers/atm/suni.c
@@ -177,7 +177,7 @@ static int set_loopback(struct atm_dev *dev,int mode)
         default:
                 return -EINVAL;
         }
-         dev->ops->phy_put(dev, control, reg);
+        dev->ops->phy_put(dev, control, reg);
         PRIV(dev)->loop_mode = mode;
         return 0;
 }
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index a1b33aa6054a..9697977b80f0 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -423,7 +423,7 @@ static int bond_changelink(struct net_device *bond_dev, struct nlattr *tb[],
                         return -EINVAL;
 
                 bond_opt_initval(&newval,
-                                 nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+                                 nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
                 err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
                 if (err)
                         return err;
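The bug fixed above is an endianness one: IFLA_BOND_AD_ACTOR_SYSTEM carries a MAC address packed into 64 bits in CPU byte order, so reading it with nla_get_be64() byte-swapped the value on little-endian hosts. A minimal sketch of the distinction (the helper name is hypothetical, kernel context assumed):

#include <net/netlink.h>

/* Hypothetical helper: the attribute payload was written in CPU
 * endianness, so it must be read back with the plain u64 accessor.
 * nla_get_be64() would additionally swap bytes on little-endian
 * hosts and mangle the AD actor-system address.
 */
static u64 demo_get_actor_system(const struct nlattr *attr)
{
        return nla_get_u64(attr);
}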
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7ce1d4b7e67d..b13ce5ebde8d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2136,8 +2136,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
         /* Read A2 portion of the EEPROM */
         if (length) {
                 start -= ETH_MODULE_SFF_8436_LEN;
-                bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
-                                                 length, data);
+                rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+                                                      start, length, data);
         }
         return rc;
 }
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d4496e9afcdf..8b2c31e2a2b0 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1355,7 +1355,6 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
 
         /* Offload checksum calculation to HW */
         if (skb->ip_summed == CHECKSUM_PARTIAL) {
-                hdr->csum_l3 = 1; /* Enable IP csum calculation */
                 hdr->l3_offset = skb_network_offset(skb);
                 hdr->l4_offset = skb_transport_offset(skb);
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index 8172cf08cc33..3bac9df1c099 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4307,8 +4307,10 @@ static void e1000_init_rx_addrs(struct e1000_hw *hw)
 
         rar_num = E1000_RAR_ENTRIES;
 
-        /* Zero out the other 15 receive addresses. */
-        e_dbg("Clearing RAR[1-15]\n");
+        /* Zero out the following 14 receive addresses. RAR[15] is for
+         * manageability
+         */
+        e_dbg("Clearing RAR[1-14]\n");
         for (i = 1; i < rar_num; i++) {
                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
                 E1000_WRITE_FLUSH();
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 67163ca898ba..00a36df02a3f 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -113,7 +113,8 @@
 #define NVM_SIZE_MULTIPLIER 4096        /*multiplier for NVMS field */
 #define E1000_FLASH_BASE_ADDR 0xE000    /*offset of NVM access regs */
 #define E1000_CTRL_EXT_NVMVS 0x3        /*NVM valid sector */
-#define E1000_TARC0_CB_MULTIQ_3_REQ     (1 << 28 | 1 << 29)
+#define E1000_TARC0_CB_MULTIQ_3_REQ     0x30000000
+#define E1000_TARC0_CB_MULTIQ_2_REQ     0x20000000
 #define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
 
 #define E1000_ICH_RAR_ENTRIES   7
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index f2f49239b015..9f18d39bdc8f 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -3034,9 +3034,12 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
                 ew32(IOSFPC, reg_val);
 
                 reg_val = er32(TARC(0));
-                /* SPT and KBL Si errata workaround to avoid Tx hang */
-                reg_val &= ~BIT(28);
-                reg_val |= BIT(29);
+                /* SPT and KBL Si errata workaround to avoid Tx hang.
+                 * Dropping the number of outstanding requests from
+                 * 3 to 2 in order to avoid a buffer overrun.
+                 */
+                reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
+                reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
                 ew32(TARC(0), reg_val);
         }
 }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 4c08cc86463e..321d8be80871 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -7401,7 +7401,6 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
                 dev_err(&pf->pdev->dev,
                         "Failed to add cloud filter, err %s\n",
                         i40e_stat_str(&pf->hw, err));
-                err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
                 goto err;
         }
 
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index a3dc9b932946..36cb8e068e85 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2086,7 +2086,7 @@ static int i40e_vc_request_queues_msg(struct i40e_vf *vf, u8 *msg, int msglen)
         }
 
         return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
-                                      (u8 *)vfres, sizeof(vfres));
+                                      (u8 *)vfres, sizeof(*vfres));
 }
 
 /**
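The one-character bug here is easy to miss: sizeof(vfres) measures the pointer (8 bytes on 64-bit), so only the first bytes of the reply were sent, while sizeof(*vfres) measures the whole pointed-to structure. A stand-alone illustration (the struct is a hypothetical stand-in):

#include <stdio.h>
#include <stdlib.h>

struct vf_res_reply {           /* hypothetical stand-in for the VF reply */
        char payload[128];
};

int main(void)
{
        struct vf_res_reply *vfres = malloc(sizeof(*vfres));

        if (!vfres)
                return 1;
        /* sizeof(vfres) is the pointer size and silently truncates the
         * message; sizeof(*vfres) is the full structure size. */
        printf("sizeof(vfres)=%zu sizeof(*vfres)=%zu\n",
               sizeof(vfres), sizeof(*vfres));
        free(vfres);
        return 0;
}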
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 6c20e811f973..d83a78be98a2 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -4629,11 +4629,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
                       MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
                 val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
                 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-                val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-                val |= MVPP2_GMAC_DISABLE_PADDING;
-                val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
-                writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
         } else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
                 val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
                 val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
@@ -4641,10 +4636,6 @@ static void mvpp2_port_mii_gmac_configure_mode(struct mvpp2_port *port)
                        MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
                 val &= ~MVPP22_CTRL4_DP_CLK_SEL;
                 writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
-                val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
-                val &= ~MVPP2_GMAC_DISABLE_PADDING;
-                writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
         }
 
         /* The port is connected to a copper PHY */
@@ -5805,7 +5796,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                                            sizeof(*txq_pcpu->buffs),
                                            GFP_KERNEL);
                 if (!txq_pcpu->buffs)
-                        goto cleanup;
+                        return -ENOMEM;
 
                 txq_pcpu->count = 0;
                 txq_pcpu->reserved_num = 0;
@@ -5821,26 +5812,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                                                    &txq_pcpu->tso_headers_dma,
                                                    GFP_KERNEL);
                 if (!txq_pcpu->tso_headers)
-                        goto cleanup;
+                        return -ENOMEM;
         }
 
         return 0;
-cleanup:
-        for_each_present_cpu(cpu) {
-                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
-                kfree(txq_pcpu->buffs);
-
-                dma_free_coherent(port->dev->dev.parent,
-                                  txq_pcpu->size * TSO_HEADER_SIZE,
-                                  txq_pcpu->tso_headers,
-                                  txq_pcpu->tso_headers_dma);
-        }
-
-        dma_free_coherent(port->dev->dev.parent,
-                          txq->size * MVPP2_DESC_ALIGNED_SIZE,
-                          txq->descs, txq->descs_dma);
-
-        return -ENOMEM;
 }
 
 /* Free allocated TXQ resources */
@@ -6867,6 +6842,12 @@ static int mvpp2_check_ringparam_valid(struct net_device *dev,
         else if (!IS_ALIGNED(ring->tx_pending, 32))
                 new_tx_pending = ALIGN(ring->tx_pending, 32);
 
+        /* The Tx ring size cannot be smaller than the minimum number of
+         * descriptors needed for TSO.
+         */
+        if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
+                new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+
         if (ring->rx_pending != new_rx_pending) {
                 netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
                             ring->rx_pending, new_rx_pending);
@@ -8345,7 +8326,7 @@ static int mvpp2_probe(struct platform_device *pdev)
         for_each_available_child_of_node(dn, port_node) {
                 err = mvpp2_port_probe(pdev, port_node, priv, i);
                 if (err < 0)
-                        goto err_mg_clk;
+                        goto err_port_probe;
                 i++;
         }
 
@@ -8361,12 +8342,19 @@ static int mvpp2_probe(struct platform_device *pdev)
         priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
         if (!priv->stats_queue) {
                 err = -ENOMEM;
-                goto err_mg_clk;
+                goto err_port_probe;
         }
 
         platform_set_drvdata(pdev, priv);
         return 0;
 
+err_port_probe:
+        i = 0;
+        for_each_available_child_of_node(dn, port_node) {
+                if (priv->port_list[i])
+                        mvpp2_port_remove(priv->port_list[i]);
+                i++;
+        }
 err_mg_clk:
         clk_disable_unprepare(priv->axi_clk);
         if (priv->hw_version == MVPP22)
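The new err_port_probe label follows the usual probe-unwind idiom: each failure jumps to a label that tears down exactly what had been set up, and the labels cascade into the earlier ones. A self-contained sketch of the pattern (port_probe/port_remove are made-up stand-ins, not the driver's functions):

#include <stdio.h>

#define NPORTS 4

static int port_probe(int i)   { return i == 2 ? -1 : 0; } /* fail on port 2 */
static void port_remove(int i) { printf("removing port %d\n", i); }

static int demo_probe(void)
{
        int i, err;

        for (i = 0; i < NPORTS; i++) {
                err = port_probe(i);
                if (err)
                        goto err_port_probe;
        }
        return 0;

err_port_probe:
        /* Unwind only the ports that were successfully probed. */
        while (i--)
                port_remove(i);
        return err;
}

int main(void)
{
        return demo_probe() ? 1 : 0;
}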
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 632c7b229054..72ef4f8025f0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1370,8 +1370,9 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
         mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
 }
 
-static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
-                                        struct mlxsw_sp_rif *rif);
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+                                         struct mlxsw_sp_rif *old_rif,
+                                         struct mlxsw_sp_rif *new_rif);
 static int
 mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
                                  struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -1389,17 +1390,18 @@ mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
                 return PTR_ERR(new_lb_rif);
         ipip_entry->ol_lb = new_lb_rif;
 
-        if (keep_encap) {
-                list_splice_init(&old_lb_rif->common.nexthop_list,
-                                 &new_lb_rif->common.nexthop_list);
-                mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
-        }
+        if (keep_encap)
+                mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
+                                             &new_lb_rif->common);
 
         mlxsw_sp_rif_destroy(&old_lb_rif->common);
 
         return 0;
 }
 
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+                                        struct mlxsw_sp_rif *rif);
+
 /**
  * Update the offload related to an IPIP entry. This always updates decap, and
  * in addition to that it also:
@@ -1449,9 +1451,27 @@ static int mlxsw_sp_netdevice_ipip_ol_vrf_event(struct mlxsw_sp *mlxsw_sp,
 {
         struct mlxsw_sp_ipip_entry *ipip_entry =
                 mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+        enum mlxsw_sp_l3proto ul_proto;
+        union mlxsw_sp_l3addr saddr;
+        u32 ul_tb_id;
 
         if (!ipip_entry)
                 return 0;
+
+        /* For flat configuration cases, moving overlay to a different VRF might
+         * cause local address conflict, and the conflicting tunnels need to be
+         * demoted.
+         */
+        ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
+        ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
+        saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
+        if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+                                                 saddr, ul_tb_id,
+                                                 ipip_entry)) {
+                mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+                return 0;
+        }
+
         return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
                                                    true, false, false, extack);
 }
@@ -3343,22 +3363,19 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
         return ul_dev ? (ul_dev->flags & IFF_UP) : true;
 }
 
-static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
-                                      struct mlxsw_sp_nexthop *nh,
-                                      struct net_device *ol_dev)
+static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
+                                       struct mlxsw_sp_nexthop *nh,
+                                       struct mlxsw_sp_ipip_entry *ipip_entry)
 {
         bool removing;
 
         if (!nh->nh_grp->gateway || nh->ipip_entry)
-                return 0;
-
-        nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
-        if (!nh->ipip_entry)
-                return -ENOENT;
+                return;
 
-        removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev);
+        nh->ipip_entry = ipip_entry;
+        removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
         __mlxsw_sp_nexthop_neigh_update(nh, removing);
-        return 0;
+        mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
 }
 
 static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
@@ -3403,21 +3420,21 @@ static int mlxsw_sp_nexthop4_type_init(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_nexthop *nh,
                                        struct fib_nh *fib_nh)
 {
-        struct mlxsw_sp_router *router = mlxsw_sp->router;
+        const struct mlxsw_sp_ipip_ops *ipip_ops;
         struct net_device *dev = fib_nh->nh_dev;
-        enum mlxsw_sp_ipip_type ipipt;
+        struct mlxsw_sp_ipip_entry *ipip_entry;
         struct mlxsw_sp_rif *rif;
         int err;
 
-        if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
-            router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
-                                                     MLXSW_SP_L3_PROTO_IPV4)) {
-                nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-                err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
-                if (err)
-                        return err;
-                mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
-                return 0;
+        ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+        if (ipip_entry) {
+                ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+                if (ipip_ops->can_offload(mlxsw_sp, dev,
+                                          MLXSW_SP_L3_PROTO_IPV4)) {
+                        nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+                        mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+                        return 0;
+                }
         }
 
         nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
@@ -3545,6 +3562,18 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
         }
 }
 
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+                                         struct mlxsw_sp_rif *old_rif,
+                                         struct mlxsw_sp_rif *new_rif)
+{
+        struct mlxsw_sp_nexthop *nh;
+
+        list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
+        list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
+                nh->rif = new_rif;
+        mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
+}
+
 static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
                                            struct mlxsw_sp_rif *rif)
 {
@@ -3996,7 +4025,7 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
         case RTN_LOCAL:
                 ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
                                                  MLXSW_SP_L3_PROTO_IPV4, dip);
-                if (ipip_entry) {
+                if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
                         fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
                         return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
                                                              fib_entry,
@@ -4694,21 +4723,21 @@ static int mlxsw_sp_nexthop6_type_init(struct mlxsw_sp *mlxsw_sp,
                                        struct mlxsw_sp_nexthop *nh,
                                        const struct rt6_info *rt)
 {
-        struct mlxsw_sp_router *router = mlxsw_sp->router;
+        const struct mlxsw_sp_ipip_ops *ipip_ops;
+        struct mlxsw_sp_ipip_entry *ipip_entry;
         struct net_device *dev = rt->dst.dev;
-        enum mlxsw_sp_ipip_type ipipt;
         struct mlxsw_sp_rif *rif;
         int err;
 
-        if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
-            router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
-                                                     MLXSW_SP_L3_PROTO_IPV6)) {
-                nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
-                err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
-                if (err)
-                        return err;
-                mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
-                return 0;
+        ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+        if (ipip_entry) {
+                ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+                if (ipip_ops->can_offload(mlxsw_sp, dev,
+                                          MLXSW_SP_L3_PROTO_IPV6)) {
+                        nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+                        mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+                        return 0;
+                }
         }
 
         nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ac8439ceea10..481876b5424c 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -1986,9 +1986,9 @@ static void nv_unmap_txskb(struct fe_priv *np, struct nv_skb_map *tx_skb)
                                          tx_skb->dma_len,
                                          DMA_TO_DEVICE);
                 else
-                        pci_unmap_page(np->pci_dev, tx_skb->dma,
+                        dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
                                        tx_skb->dma_len,
-                                       PCI_DMA_TODEVICE);
+                                       DMA_TO_DEVICE);
                 tx_skb->dma = 0;
         }
 }
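The conversion is mechanical: the legacy pci_* DMA wrappers take a pci_dev and PCI_DMA_* direction flags, while the generic DMA API takes the embedded struct device and DMA_* directions. A sketch of the one call forcedeth had missed (pdev, addr and len are assumed inputs):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Sketch, kernel context assumed: the legacy call
 *      pci_unmap_page(pdev, addr, len, PCI_DMA_TODEVICE);
 * becomes the generic-API equivalent below. */
static void demo_unmap_tx(struct pci_dev *pdev, dma_addr_t addr, size_t len)
{
        dma_unmap_page(&pdev->dev, addr, len, DMA_TO_DEVICE);
}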
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 61cb24810d10..9e6db16af663 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -1,8 +1,8 @@
 /*
  * dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
  *
- * Copyright (C) Alexandre Torgue 2015
- * Author: Alexandre Torgue <alexandre.torgue@gmail.com>
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
  * License terms: GNU General Public License (GPL), version 2
  *
  */
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 83e6f76eb965..33949248c829 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -995,8 +995,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
         else
                 name = "Rhine III";
 
-        netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
-                    name, (long)ioaddr, dev->dev_addr, rp->irq);
+        netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
+                    name, ioaddr, dev->dev_addr, rp->irq);
 
         dev_set_drvdata(hwdev, dev);
 
diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
index 6d68c8a8f4f2..da4ec575ccf9 100644
--- a/drivers/net/ethernet/xilinx/Kconfig
+++ b/drivers/net/ethernet/xilinx/Kconfig
@@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
 config XILINX_LL_TEMAC
         tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
         depends on (PPC || MICROBLAZE)
+        depends on !64BIT || BROKEN
         select PHYLIB
         ---help---
           This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index aebc08beceba..21b3f36e023a 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -16,6 +16,7 @@
  * link takes priority and the other port is completely locked out.
  */
 #include <linux/phy.h>
+#include <linux/marvell_phy.h>
 
 enum {
         MV_PCS_BASE_T = 0x0000,
@@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev)
 static struct phy_driver mv3310_drivers[] = {
         {
                 .phy_id = 0x002b09aa,
-                .phy_id_mask = 0xffffffff,
+                .phy_id_mask = MARVELL_PHY_ID_MASK,
                 .name = "mv88x3310",
                 .features = SUPPORTED_10baseT_Full |
                             SUPPORTED_100baseT_Full |
@@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = {
 module_phy_driver(mv3310_drivers);
 
 static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
-        { 0x002b09aa, 0xffffffff },
+        { 0x002b09aa, MARVELL_PHY_ID_MASK },
         { },
 };
 MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index 228d4aa6d9ae..ca5e375de27c 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -335,7 +335,7 @@ static void tbnet_free_buffers(struct tbnet_ring *ring)
         if (ring->ring->is_tx) {
                 dir = DMA_TO_DEVICE;
                 order = 0;
-                size = tbnet_frame_size(tf);
+                size = TBNET_FRAME_SIZE;
         } else {
                 dir = DMA_FROM_DEVICE;
                 order = TBNET_RX_PAGE_ORDER;
@@ -512,6 +512,7 @@ err_free:
 static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
 {
         struct tbnet_ring *ring = &net->tx_ring;
+        struct device *dma_dev = tb_ring_dma_device(ring->ring);
         struct tbnet_frame *tf;
         unsigned int index;
 
@@ -522,7 +523,9 @@ static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
 
         tf = &ring->frames[index];
         tf->frame.size = 0;
-        tf->frame.buffer_phy = 0;
+
+        dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
+                                tbnet_frame_size(tf), DMA_TO_DEVICE);
 
         return tf;
 }
@@ -531,13 +534,8 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
                               bool canceled)
 {
         struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
-        struct device *dma_dev = tb_ring_dma_device(ring);
         struct tbnet *net = netdev_priv(tf->dev);
 
-        dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
-                       DMA_TO_DEVICE);
-        tf->frame.buffer_phy = 0;
-
         /* Return buffer to the ring */
         net->tx_ring.prod++;
 
@@ -548,10 +546,12 @@ static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
 static int tbnet_alloc_tx_buffers(struct tbnet *net)
 {
         struct tbnet_ring *ring = &net->tx_ring;
+        struct device *dma_dev = tb_ring_dma_device(ring->ring);
         unsigned int i;
 
         for (i = 0; i < TBNET_RING_SIZE; i++) {
                 struct tbnet_frame *tf = &ring->frames[i];
+                dma_addr_t dma_addr;
 
                 tf->page = alloc_page(GFP_KERNEL);
                 if (!tf->page) {
@@ -559,7 +559,17 @@ static int tbnet_alloc_tx_buffers(struct tbnet *net)
                         return -ENOMEM;
                 }
 
+                dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
+                                        DMA_TO_DEVICE);
+                if (dma_mapping_error(dma_dev, dma_addr)) {
+                        __free_page(tf->page);
+                        tf->page = NULL;
+                        tbnet_free_buffers(ring);
+                        return -ENOMEM;
+                }
+
                 tf->dev = net->dev;
+                tf->frame.buffer_phy = dma_addr;
                 tf->frame.callback = tbnet_tx_callback;
                 tf->frame.sof = TBIP_PDF_FRAME_START;
                 tf->frame.eof = TBIP_PDF_FRAME_END;
@@ -881,19 +891,6 @@ static int tbnet_stop(struct net_device *dev)
         return 0;
 }
 
-static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
-{
-        dma_addr_t dma_addr;
-
-        dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
-                                DMA_TO_DEVICE);
-        if (dma_mapping_error(dma_dev, dma_addr))
-                return false;
-
-        tf->frame.buffer_phy = dma_addr;
-        return true;
-}
-
 static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
                                     struct tbnet_frame **frames, u32 frame_count)
 {
@@ -908,13 +905,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
 
         if (skb->ip_summed != CHECKSUM_PARTIAL) {
                 /* No need to calculate checksum so we just update the
-                 * total frame count and map the frames for DMA.
+                 * total frame count and sync the frames for DMA.
                  */
                 for (i = 0; i < frame_count; i++) {
                         hdr = page_address(frames[i]->page);
                         hdr->frame_count = cpu_to_le32(frame_count);
-                        if (!tbnet_xmit_map(dma_dev, frames[i]))
-                                goto err_unmap;
+                        dma_sync_single_for_device(dma_dev,
+                                frames[i]->frame.buffer_phy,
+                                tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
                 }
 
                 return true;
@@ -983,21 +981,14 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
         *tucso = csum_fold(wsum);
 
         /* Checksum is finally calculated and we don't touch the memory
-         * anymore, so DMA map the frames now.
+         * anymore, so DMA sync the frames now.
          */
         for (i = 0; i < frame_count; i++) {
-                if (!tbnet_xmit_map(dma_dev, frames[i]))
-                        goto err_unmap;
+                dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
+                        tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
         }
 
         return true;
-
-err_unmap:
-        while (i--)
-                dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
-                               tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
-
-        return false;
 }
 
 static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
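The net effect of these thunderbolt hunks: instead of mapping and unmapping every Tx page per packet, the driver maps each page once at ring allocation and afterwards only syncs ownership back and forth, which is considerably cheaper. A generic sketch of the map-once/sync-per-use pattern (dev, page and size are assumed inputs, not this driver's names):

#include <linux/dma-mapping.h>

/* Sketch, kernel context assumed. Map once when the buffer is created. */
static dma_addr_t demo_map_once(struct device *dev, struct page *page,
                                size_t size)
{
        dma_addr_t addr = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, addr))
                return 0;       /* caller frees the page */
        return addr;
}

/* Per transmit: hand the buffer to the CPU, fill it, hand it back. */
static void demo_reuse(struct device *dev, dma_addr_t addr, size_t size)
{
        dma_sync_single_for_cpu(dev, addr, size, DMA_TO_DEVICE);
        /* ... CPU writes the frame contents here ... */
        dma_sync_single_for_device(dev, addr, size, DMA_TO_DEVICE);
}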
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 7ac487031b4b..19b9cc51079e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -874,8 +874,8 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 
 static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
                               const unsigned char *addr, union vxlan_addr ip,
-                              __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
-                              u16 vid)
+                              __be16 port, __be32 src_vni, __be32 vni,
+                              u32 ifindex, u16 vid)
 {
         struct vxlan_fdb *f;
         struct vxlan_rdst *rd = NULL;
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 37b1e0d03e31..90a4ad9a2d08 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -494,18 +494,11 @@ int lmc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) /*fold00*/
                         break;
                 }
 
-                data = kmalloc(xc.len, GFP_KERNEL);
-                if (!data) {
-                        ret = -ENOMEM;
+                data = memdup_user(xc.data, xc.len);
+                if (IS_ERR(data)) {
+                        ret = PTR_ERR(data);
                         break;
                 }
-
-                if(copy_from_user(data, xc.data, xc.len))
-                {
-                        kfree(data);
-                        ret = -ENOMEM;
-                        break;
-                }
 
                 printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
 
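memdup_user() collapses the kmalloc()/copy_from_user() pair into one call and reports failure through ERR_PTR(), which also corrects the original code's -ENOMEM return for what is really a copy fault. A minimal sketch of the consuming pattern (the function name and ubuf are hypothetical):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_load_fw(const void __user *ubuf, size_t len)
{
        void *data = memdup_user(ubuf, len);

        if (IS_ERR(data))
                return PTR_ERR(data);   /* -ENOMEM or -EFAULT */
        /* ... use data ... */
        kfree(data);
        return 0;
}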
diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
index dfb26f03c1a2..1b05b5d7a038 100644
--- a/drivers/net/wireless/ath/ath9k/channel.c
+++ b/drivers/net/wireless/ath/ath9k/channel.c
@@ -1113,7 +1113,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
         if (!avp->assoc)
                 return false;
 
-        skb = ieee80211_nullfunc_get(sc->hw, vif);
+        skb = ieee80211_nullfunc_get(sc->hw, vif, false);
         if (!skb)
                 return false;
 
diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
index 03687a80d6e9..38678e9a0562 100644
--- a/drivers/net/wireless/st/cw1200/sta.c
+++ b/drivers/net/wireless/st/cw1200/sta.c
@@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
 
         priv->bss_loss_state++;
 
-        skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+        skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
         WARN_ON(!skb);
         if (skb)
                 cw1200_tx(priv->hw, NULL, skb);
@@ -2265,7 +2265,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
                 .rate = 0xFF,
         };
 
-        frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+        frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
         if (!frame.skb)
                 return -ENOMEM;
 
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 9915d83a4a30..6d02c660b4ab 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
                 size = sizeof(struct wl12xx_null_data_template);
                 ptr = NULL;
         } else {
-                skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+                skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
                 if (!skb)
                         goto out;
                 size = skb->len;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 2bfc12fdc929..761cf8573a80 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
                 ptr = NULL;
         } else {
                 skb = ieee80211_nullfunc_get(wl->hw,
-                                             wl12xx_wlvif_to_vif(wlvif));
+                                             wl12xx_wlvif_to_vif(wlvif),
+                                             false);
                 if (!skb)
                         goto out;
                 size = skb->len;
@@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
         struct sk_buff *skb = NULL;
         int ret = -ENOMEM;
 
-        skb = ieee80211_nullfunc_get(wl->hw, vif);
+        skb = ieee80211_nullfunc_get(wl->hw, vif, false);
         if (!skb)
                 goto out;
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 18c85e55e76a..c5a34671abda 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -87,6 +87,8 @@ struct netfront_cb {
 /* IRQ name is queue name with "-tx" or "-rx" appended */
 #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
 
+static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+
 struct netfront_stats {
         u64 packets;
         u64 bytes;
@@ -2020,10 +2022,12 @@ static void netback_changed(struct xenbus_device *dev,
                 break;
 
         case XenbusStateClosed:
+                wake_up_all(&module_unload_q);
                 if (dev->state == XenbusStateClosed)
                         break;
                 /* Missed the backend's CLOSING state -- fallthrough */
         case XenbusStateClosing:
+                wake_up_all(&module_unload_q);
                 xenbus_frontend_closed(dev);
                 break;
         }
@@ -2129,6 +2133,20 @@ static int xennet_remove(struct xenbus_device *dev)
 
         dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
+        if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
+                xenbus_switch_state(dev, XenbusStateClosing);
+                wait_event(module_unload_q,
+                           xenbus_read_driver_state(dev->otherend) ==
+                           XenbusStateClosing);
+
+                xenbus_switch_state(dev, XenbusStateClosed);
+                wait_event(module_unload_q,
+                           xenbus_read_driver_state(dev->otherend) ==
+                           XenbusStateClosed ||
+                           xenbus_read_driver_state(dev->otherend) ==
+                           XenbusStateUnknown);
+        }
+
         xennet_disconnect_backend(info);
 
         unregister_netdev(info->netdev);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index cc9073e45be9..eec143cca1c0 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -4470,18 +4470,24 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
  * ieee80211_nullfunc_get - retrieve a nullfunc template
  * @hw: pointer obtained from ieee80211_alloc_hw().
  * @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @qos_ok: QoS NDP is acceptable to the caller, this should be set
+ *      if at all possible
  *
  * Creates a Nullfunc template which can, for example, uploaded to
  * hardware. The template must be updated after association so that correct
  * BSSID and address is used.
  *
+ * If @qos_ndp is set and the association is to an AP with QoS/WMM, the
+ * returned packet will be QoS NDP.
+ *
  * Note: Caller (or hardware) is responsible for setting the
  * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
  *
  * Return: The nullfunc template. %NULL on error.
  */
 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
-                                       struct ieee80211_vif *vif);
+                                       struct ieee80211_vif *vif,
+                                       bool qos_ok);
 
 /**
  * ieee80211_probereq_get - retrieve a Probe Request template
diff --git a/include/net/sctp/checksum.h b/include/net/sctp/checksum.h
index 4a5b9a306c69..32ee65a30aff 100644
--- a/include/net/sctp/checksum.h
+++ b/include/net/sctp/checksum.h
@@ -48,31 +48,32 @@ static inline __wsum sctp_csum_update(const void *buff, int len, __wsum sum)
         /* This uses the crypto implementation of crc32c, which is either
          * implemented w/ hardware support or resolves to __crc32c_le().
          */
-        return crc32c(sum, buff, len);
+        return (__force __wsum)crc32c((__force __u32)sum, buff, len);
 }
 
 static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
                                        int offset, int len)
 {
-        return __crc32c_le_combine(csum, csum2, len);
+        return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
+                                                   (__force __u32)csum2, len);
 }
 
 static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
                                         unsigned int offset)
 {
         struct sctphdr *sh = sctp_hdr(skb);
-        __le32 ret, old = sh->checksum;
         const struct skb_checksum_ops ops = {
                 .update  = sctp_csum_update,
                 .combine = sctp_csum_combine,
         };
+        __le32 old = sh->checksum;
+        __wsum new;
 
         sh->checksum = 0;
-        ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset,
-                                          ~(__u32)0, &ops));
+        new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0, &ops);
         sh->checksum = old;
 
-        return ret;
+        return cpu_to_le32((__force __u32)new);
 }
 
 #endif /* __sctp_checksum_h__ */
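The __force casts here exist purely for sparse: CRC32c is not a ones-complement sum, so storing it in the __wsum/__le32 checksum types is a deliberate type pun that the annotations make explicit instead of warning about. A sketch mirroring the update step above (kernel context assumed):

#include <linux/crc32c.h>
#include <linux/types.h>

/* Sketch: sparse-clean CRC32c folding into a __wsum slot, as in
 * sctp_csum_update() above. Without the __force casts, sparse warns
 * about mixing a raw CRC value with the checksum-annotated types. */
static inline __wsum demo_crc32c_step(const void *buff, int len, __wsum sum)
{
        return (__force __wsum)crc32c((__force __u32)sum, buff, len);
}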
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 749a42882437..906a9c0efa71 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -195,6 +195,11 @@ void sctp_remaddr_proc_exit(struct net *net);
 int sctp_offload_init(void);
 
 /*
+ * sctp/stream_sched.c
+ */
+void sctp_sched_ops_init(void);
+
+/*
  * sctp/stream.c
  */
 int sctp_send_reset_streams(struct sctp_association *asoc,
diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h
index c676550a4c7d..5c5da48f65e7 100644
--- a/include/net/sctp/stream_sched.h
+++ b/include/net/sctp/stream_sched.h
@@ -69,4 +69,9 @@ void sctp_sched_dequeue_common(struct sctp_outq *q, struct sctp_chunk *ch);
 int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
 struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
 
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+                             struct sctp_sched_ops *sched_ops);
+void sctp_sched_ops_prio_init(void);
+void sctp_sched_ops_rr_init(void);
+
 #endif /* __sctp_stream_sched_h__ */
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index ebe96796027a..36cb50c111a6 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -49,6 +49,7 @@ enum rxrpc_conn_trace {
         rxrpc_conn_put_client,
         rxrpc_conn_put_service,
         rxrpc_conn_queued,
+        rxrpc_conn_reap_service,
         rxrpc_conn_seen,
 };
 
@@ -138,10 +139,24 @@ enum rxrpc_rtt_rx_trace {
 
 enum rxrpc_timer_trace {
         rxrpc_timer_begin,
+        rxrpc_timer_exp_ack,
+        rxrpc_timer_exp_hard,
+        rxrpc_timer_exp_idle,
+        rxrpc_timer_exp_keepalive,
+        rxrpc_timer_exp_lost_ack,
+        rxrpc_timer_exp_normal,
+        rxrpc_timer_exp_ping,
+        rxrpc_timer_exp_resend,
         rxrpc_timer_expired,
         rxrpc_timer_init_for_reply,
         rxrpc_timer_init_for_send_reply,
+        rxrpc_timer_restart,
         rxrpc_timer_set_for_ack,
+        rxrpc_timer_set_for_hard,
+        rxrpc_timer_set_for_idle,
+        rxrpc_timer_set_for_keepalive,
+        rxrpc_timer_set_for_lost_ack,
+        rxrpc_timer_set_for_normal,
         rxrpc_timer_set_for_ping,
         rxrpc_timer_set_for_resend,
         rxrpc_timer_set_for_send,
150enum rxrpc_propose_ack_trace { 165enum rxrpc_propose_ack_trace {
151 rxrpc_propose_ack_client_tx_end, 166 rxrpc_propose_ack_client_tx_end,
152 rxrpc_propose_ack_input_data, 167 rxrpc_propose_ack_input_data,
168 rxrpc_propose_ack_ping_for_keepalive,
153 rxrpc_propose_ack_ping_for_lost_ack, 169 rxrpc_propose_ack_ping_for_lost_ack,
154 rxrpc_propose_ack_ping_for_lost_reply, 170 rxrpc_propose_ack_ping_for_lost_reply,
155 rxrpc_propose_ack_ping_for_params, 171 rxrpc_propose_ack_ping_for_params,
@@ -206,6 +222,7 @@ enum rxrpc_congest_change {
         EM(rxrpc_conn_put_client,               "PTc") \
         EM(rxrpc_conn_put_service,              "PTs") \
         EM(rxrpc_conn_queued,                   "QUE") \
+        EM(rxrpc_conn_reap_service,             "RPs") \
         E_(rxrpc_conn_seen,                     "SEE")
 
 #define rxrpc_client_traces \
@@ -296,16 +313,31 @@ enum rxrpc_congest_change {
 #define rxrpc_timer_traces \
         EM(rxrpc_timer_begin,                   "Begin ") \
         EM(rxrpc_timer_expired,                 "*EXPR*") \
+        EM(rxrpc_timer_exp_ack,                 "ExpAck") \
+        EM(rxrpc_timer_exp_hard,                "ExpHrd") \
+        EM(rxrpc_timer_exp_idle,                "ExpIdl") \
+        EM(rxrpc_timer_exp_keepalive,           "ExpKA ") \
+        EM(rxrpc_timer_exp_lost_ack,            "ExpLoA") \
+        EM(rxrpc_timer_exp_normal,              "ExpNml") \
+        EM(rxrpc_timer_exp_ping,                "ExpPng") \
+        EM(rxrpc_timer_exp_resend,              "ExpRsn") \
         EM(rxrpc_timer_init_for_reply,          "IniRpl") \
         EM(rxrpc_timer_init_for_send_reply,     "SndRpl") \
+        EM(rxrpc_timer_restart,                 "Restrt") \
         EM(rxrpc_timer_set_for_ack,             "SetAck") \
+        EM(rxrpc_timer_set_for_hard,            "SetHrd") \
+        EM(rxrpc_timer_set_for_idle,            "SetIdl") \
+        EM(rxrpc_timer_set_for_keepalive,       "KeepAl") \
+        EM(rxrpc_timer_set_for_lost_ack,        "SetLoA") \
+        EM(rxrpc_timer_set_for_normal,          "SetNml") \
         EM(rxrpc_timer_set_for_ping,            "SetPng") \
         EM(rxrpc_timer_set_for_resend,          "SetRTx") \
-        E_(rxrpc_timer_set_for_send,            "SetTx ")
+        E_(rxrpc_timer_set_for_send,            "SetSnd")
 
 #define rxrpc_propose_ack_traces \
         EM(rxrpc_propose_ack_client_tx_end,     "ClTxEnd") \
         EM(rxrpc_propose_ack_input_data,        "DataIn ") \
+        EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
         EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
         EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
         EM(rxrpc_propose_ack_ping_for_params,   "Params ") \
@@ -932,39 +964,47 @@ TRACE_EVENT(rxrpc_rtt_rx,
932 964
933TRACE_EVENT(rxrpc_timer, 965TRACE_EVENT(rxrpc_timer,
934 TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why, 966 TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
935 ktime_t now, unsigned long now_j), 967 unsigned long now),
936 968
937 TP_ARGS(call, why, now, now_j), 969 TP_ARGS(call, why, now),
938 970
939 TP_STRUCT__entry( 971 TP_STRUCT__entry(
940 __field(struct rxrpc_call *, call ) 972 __field(struct rxrpc_call *, call )
941 __field(enum rxrpc_timer_trace, why ) 973 __field(enum rxrpc_timer_trace, why )
942 __field_struct(ktime_t, now ) 974 __field(long, now )
943 __field_struct(ktime_t, expire_at ) 975 __field(long, ack_at )
944 __field_struct(ktime_t, ack_at ) 976 __field(long, ack_lost_at )
945 __field_struct(ktime_t, resend_at ) 977 __field(long, resend_at )
946 __field(unsigned long, now_j ) 978 __field(long, ping_at )
947 __field(unsigned long, timer ) 979 __field(long, expect_rx_by )
980 __field(long, expect_req_by )
981 __field(long, expect_term_by )
982 __field(long, timer )
948 ), 983 ),
949 984
950 TP_fast_assign( 985 TP_fast_assign(
951 __entry->call = call; 986 __entry->call = call;
952 __entry->why = why; 987 __entry->why = why;
953 __entry->now = now; 988 __entry->now = now;
954 __entry->expire_at = call->expire_at; 989 __entry->ack_at = call->ack_at;
955 __entry->ack_at = call->ack_at; 990 __entry->ack_lost_at = call->ack_lost_at;
956 __entry->resend_at = call->resend_at; 991 __entry->resend_at = call->resend_at;
957 __entry->now_j = now_j; 992 __entry->expect_rx_by = call->expect_rx_by;
958 __entry->timer = call->timer.expires; 993 __entry->expect_req_by = call->expect_req_by;
994 __entry->expect_term_by = call->expect_term_by;
995 __entry->timer = call->timer.expires;
959 ), 996 ),
960 997
961 TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld", 998 TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
962 __entry->call, 999 __entry->call,
963 __print_symbolic(__entry->why, rxrpc_timer_traces), 1000 __print_symbolic(__entry->why, rxrpc_timer_traces),
964 ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)), 1001 __entry->ack_at - __entry->now,
965 ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)), 1002 __entry->ack_lost_at - __entry->now,
966 ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)), 1003 __entry->resend_at - __entry->now,
967 __entry->timer - __entry->now_j) 1004 __entry->expect_rx_by - __entry->now,
1005 __entry->expect_req_by - __entry->now,
1006 __entry->expect_term_by - __entry->now,
1007 __entry->timer - __entry->now)
968 ); 1008 );
969 1009
970TRACE_EVENT(rxrpc_rx_lose, 1010TRACE_EVENT(rxrpc_rx_lose,
@@ -1080,7 +1120,7 @@ TRACE_EVENT(rxrpc_congest,
1080 memcpy(&__entry->sum, summary, sizeof(__entry->sum)); 1120 memcpy(&__entry->sum, summary, sizeof(__entry->sum));
1081 ), 1121 ),
1082 1122
1083 TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s", 1123 TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
1084 __entry->call, 1124 __entry->call,
1085 __entry->ack_serial, 1125 __entry->ack_serial,
1086 __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names), 1126 __print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
diff --git a/include/uapi/linux/rxrpc.h b/include/uapi/linux/rxrpc.h
index 9d4afea308a4..9335d92c14a4 100644
--- a/include/uapi/linux/rxrpc.h
+++ b/include/uapi/linux/rxrpc.h
@@ -59,6 +59,7 @@ enum rxrpc_cmsg_type {
 	RXRPC_EXCLUSIVE_CALL = 10,	/* s-: Call should be on exclusive connection */
 	RXRPC_UPGRADE_SERVICE = 11,	/* s-: Request service upgrade for client call */
 	RXRPC_TX_LENGTH = 12,		/* s-: Total length of Tx data */
+	RXRPC_SET_CALL_TIMEOUT = 13,	/* s-: Set one or more call timeouts */
 	RXRPC__SUPPORTED
 };
 
diff --git a/include/uapi/linux/vm_sockets_diag.h b/include/uapi/linux/vm_sockets_diag.h
index 14cd7dc5a187..0b4dd54f3d1e 100644
--- a/include/uapi/linux/vm_sockets_diag.h
+++ b/include/uapi/linux/vm_sockets_diag.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /* AF_VSOCK sock_diag(7) interface for querying open sockets */
 
 #ifndef _UAPI__VM_SOCKETS_DIAG_H__
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 44e3fb7dec8c..1e287420ff49 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -51,9 +51,7 @@ static struct dsa_switch_tree *dsa_tree_alloc(int index)
 	INIT_LIST_HEAD(&dst->list);
 	list_add_tail(&dsa_tree_list, &dst->list);
 
-	/* Initialize the reference counter to the number of switches, not 1 */
 	kref_init(&dst->refcount);
-	refcount_set(&dst->refcount.refcount, 0);
 
 	return dst;
 }
@@ -64,20 +62,23 @@ static void dsa_tree_free(struct dsa_switch_tree *dst)
 	kfree(dst);
 }
 
-static struct dsa_switch_tree *dsa_tree_touch(int index)
+static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
 {
-	struct dsa_switch_tree *dst;
-
-	dst = dsa_tree_find(index);
-	if (!dst)
-		dst = dsa_tree_alloc(index);
+	if (dst)
+		kref_get(&dst->refcount);
 
 	return dst;
 }
 
-static void dsa_tree_get(struct dsa_switch_tree *dst)
+static struct dsa_switch_tree *dsa_tree_touch(int index)
 {
-	kref_get(&dst->refcount);
+	struct dsa_switch_tree *dst;
+
+	dst = dsa_tree_find(index);
+	if (dst)
+		return dsa_tree_get(dst);
+	else
+		return dsa_tree_alloc(index);
 }
 
 static void dsa_tree_release(struct kref *ref)
@@ -91,7 +92,8 @@ static void dsa_tree_release(struct kref *ref)
 
 static void dsa_tree_put(struct dsa_switch_tree *dst)
 {
-	kref_put(&dst->refcount, dsa_tree_release);
+	if (dst)
+		kref_put(&dst->refcount, dsa_tree_release);
 }
 
 static bool dsa_port_is_dsa(struct dsa_port *port)
@@ -765,6 +767,7 @@ int dsa_register_switch(struct dsa_switch *ds)
 
 	mutex_lock(&dsa2_mutex);
 	err = dsa_switch_probe(ds);
+	dsa_tree_put(ds->dst);
 	mutex_unlock(&dsa2_mutex);
 
 	return err;
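
The dsa2.c rework above folds find-or-alloc into dsa_tree_touch() and makes dsa_tree_get()/dsa_tree_put() tolerate NULL, so callers can put unconditionally on exit paths. A minimal userspace sketch of the same shape, with a plain counter standing in for kref:

#include <stdio.h>
#include <stdlib.h>

struct obj { int refcount; };

static struct obj *obj_get(struct obj *o)
{
	if (o)
		o->refcount++;	/* kref_get() analogue */
	return o;		/* returning it lets callers chain */
}

static void obj_put(struct obj *o)
{
	if (o && --o->refcount == 0)	/* kref_put() analogue */
		free(o);
}

static struct obj *obj_touch(struct obj *existing)
{
	struct obj *o;

	if (existing)
		return obj_get(existing);	/* reuse, take a ref */
	o = calloc(1, sizeof(*o));
	if (o)
		o->refcount = 1;		/* kref_init() analogue */
	return o;
}

int main(void)
{
	struct obj *a = obj_touch(NULL);	/* allocated, ref 1 */
	struct obj *b = obj_touch(a);		/* reused, ref 2 */

	obj_put(b);
	obj_put(a);				/* freed here */
	obj_put(NULL);				/* safe no-op, like dsa_tree_put */
	return 0;
}
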
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 41f5e48f8021..167f83b853e6 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -292,7 +292,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 
 	mutex_lock(&sta->ampdu_mlme.mtx);
 	for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
-		___ieee80211_stop_tx_ba_session(sta, i, reason);
 		___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
 						WLAN_REASON_QSTA_LEAVE_QBSS,
 						reason != AGG_STOP_DESTROY_STA &&
@@ -300,6 +299,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
 	}
 	mutex_unlock(&sta->ampdu_mlme.mtx);
 
+	for (i = 0; i < IEEE80211_NUM_TIDS; i++)
+		___ieee80211_stop_tx_ba_session(sta, i, reason);
+
 	/* stopping might queue the work again - so cancel only afterwards */
 	cancel_work_sync(&sta->ampdu_mlme.work);
 
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 4f7826d7b47c..4394463a0c2e 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -797,7 +797,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	struct mesh_path *mpath;
 	u8 ttl, flags, hopcount;
 	const u8 *orig_addr;
-	u32 orig_sn, metric, metric_txsta, interval;
+	u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
 	bool root_is_gate;
 
 	ttl = rann->rann_ttl;
@@ -808,7 +808,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	interval = le32_to_cpu(rann->rann_interval);
 	hopcount = rann->rann_hopcount;
 	hopcount++;
-	metric = le32_to_cpu(rann->rann_metric);
+	orig_metric = le32_to_cpu(rann->rann_metric);
 
 	/* Ignore our own RANNs */
 	if (ether_addr_equal(orig_addr, sdata->vif.addr))
@@ -825,7 +825,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 		return;
 	}
 
-	metric_txsta = airtime_link_metric_get(local, sta);
+	last_hop_metric = airtime_link_metric_get(local, sta);
+	new_metric = orig_metric + last_hop_metric;
+	if (new_metric < orig_metric)
+		new_metric = MAX_METRIC;
 
 	mpath = mesh_path_lookup(sdata, orig_addr);
 	if (!mpath) {
@@ -838,7 +841,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	}
 
 	if (!(SN_LT(mpath->sn, orig_sn)) &&
-	    !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
+	    !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
 		rcu_read_unlock();
 		return;
 	}
@@ -856,7 +859,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 	}
 
 	mpath->sn = orig_sn;
-	mpath->rann_metric = metric + metric_txsta;
+	mpath->rann_metric = new_metric;
 	mpath->is_root = true;
 	/* Recording RANNs sender address to send individually
 	 * addressed PREQs destined for root mesh STA */
@@ -876,7 +879,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
 		mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
 				       orig_sn, 0, NULL, 0, broadcast_addr,
 				       hopcount, ttl, interval,
-				       metric + metric_txsta, 0, sdata);
+				       new_metric, 0, sdata);
 	}
 
 	rcu_read_unlock();
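
The mesh_hwmp.c fix computes the accumulated RANN path metric once and clamps it at MAX_METRIC instead of letting the u32 sum wrap. The guard relies on unsigned wraparound - if the sum comes out smaller than an operand, it overflowed. In miniature (MAX_METRIC mirrors the driver's all-ones value, but treat the constant as illustrative):

#include <stdio.h>
#include <stdint.h>

#define MAX_METRIC 0xffffffffu

static uint32_t metric_add(uint32_t orig, uint32_t last_hop)
{
	uint32_t sum = orig + last_hop;

	if (sum < orig)		/* wrapped past 2^32 */
		sum = MAX_METRIC;
	return sum;
}

int main(void)
{
	printf("%u\n", metric_add(10, 20));		/* 30 */
	printf("%u\n", metric_add(0xfffffff0u, 0x100));	/* clamped */
	return 0;
}
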
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 04460440d731..c244691deab9 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -895,7 +895,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
 	struct ieee80211_hdr_3addr *nullfunc;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 
-	skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
+	skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true);
 	if (!skb)
 		return;
 
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 7b8154474b9e..3160954fc406 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -4438,13 +4438,15 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
 EXPORT_SYMBOL(ieee80211_pspoll_get);
 
 struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
-				       struct ieee80211_vif *vif)
+				       struct ieee80211_vif *vif,
+				       bool qos_ok)
 {
 	struct ieee80211_hdr_3addr *nullfunc;
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_if_managed *ifmgd;
 	struct ieee80211_local *local;
 	struct sk_buff *skb;
+	bool qos = false;
 
 	if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
 		return NULL;
@@ -4453,7 +4455,17 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
 	ifmgd = &sdata->u.mgd;
 	local = sdata->local;
 
-	skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
+	if (qos_ok) {
+		struct sta_info *sta;
+
+		rcu_read_lock();
+		sta = sta_info_get(sdata, ifmgd->bssid);
+		qos = sta && sta->sta.wme;
+		rcu_read_unlock();
+	}
+
+	skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+			    sizeof(*nullfunc) + 2);
 	if (!skb)
 		return NULL;
 
@@ -4463,6 +4475,19 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
 	nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
 					      IEEE80211_STYPE_NULLFUNC |
 					      IEEE80211_FCTL_TODS);
+	if (qos) {
+		__le16 qos = cpu_to_le16(7);
+
+		BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
+			      IEEE80211_STYPE_NULLFUNC) !=
+			     IEEE80211_STYPE_QOS_NULLFUNC);
+		nullfunc->frame_control |=
+			cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
+		skb->priority = 7;
+		skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+		skb_put_data(skb, &qos, sizeof(qos));
+	}
+
 	memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
 	memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
 	memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 99cfafc2a139..ef38e5aecd28 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -308,7 +308,7 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
 			     const struct dp_upcall_info *upcall_info,
 			     uint32_t cutlen)
 {
-	unsigned short gso_type = skb_shinfo(skb)->gso_type;
+	unsigned int gso_type = skb_shinfo(skb)->gso_type;
 	struct sw_flow_key later_key;
 	struct sk_buff *segs, *nskb;
 	int err;
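
gso_type is widened from unsigned short to unsigned int because the SKB_GSO_* flag space has outgrown 16 bits; a narrowing copy silently drops the high flags. In miniature (the flag value is a hypothetical stand-in):

#include <stdio.h>

int main(void)
{
	unsigned int flags = 1u << 16;	/* a hypothetical high GSO flag */
	unsigned short narrow = flags;	/* truncates to 0 */

	printf("wide=%#x narrow=%#x\n", flags, (unsigned int)narrow);
	return 0;
}
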
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index dc424798ba6f..624ea74353dd 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2241,14 +2241,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
 
 #define MAX_ACTIONS_BUFSIZE	(32 * 1024)
 
-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
+static struct sw_flow_actions *nla_alloc_flow_actions(int size)
 {
 	struct sw_flow_actions *sfa;
 
-	if (size > MAX_ACTIONS_BUFSIZE) {
-		OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
-		return ERR_PTR(-EINVAL);
-	}
+	WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
 
 	sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
 	if (!sfa)
@@ -2321,12 +2318,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
 		new_acts_size = ksize(*sfa) * 2;
 
 	if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
-		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+		if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+			OVS_NLERR(log, "Flow action size exceeds max %u",
+				  MAX_ACTIONS_BUFSIZE);
 			return ERR_PTR(-EMSGSIZE);
+		}
 		new_acts_size = MAX_ACTIONS_BUFSIZE;
 	}
 
-	acts = nla_alloc_flow_actions(new_acts_size, log);
+	acts = nla_alloc_flow_actions(new_acts_size);
 	if (IS_ERR(acts))
 		return (void *)acts;
 
@@ -3059,7 +3059,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
 {
 	int err;
 
-	*sfa = nla_alloc_flow_actions(nla_len(attr), log);
+	*sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
 	if (IS_ERR(*sfa))
 		return PTR_ERR(*sfa);
 
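
The flow_netlink.c change moves size validation out of the allocator: the caller clamps the request with min() (or reports via OVS_NLERR at the reserve step), and nla_alloc_flow_actions() merely asserts the invariant. A sketch of that division of responsibility, with assert() standing in for WARN_ON_ONCE() and illustrative names throughout:

#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#define MAX_BUFSIZE (32 * 1024)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void *alloc_actions(int size)
{
	assert(size <= MAX_BUFSIZE);	/* invariant check, not validation */
	return malloc(size);
}

int main(void)
{
	int requested = 100000;		/* e.g. attacker-controlled length */
	int clamped = MIN(requested, MAX_BUFSIZE);
	void *buf = alloc_actions(clamped);	/* caller did the clamping */

	printf("allocated %d bytes\n", clamped);
	free(buf);
	return 0;
}
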
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 737092ca9b4e..da215e5c1399 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1687,7 +1687,6 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		atomic_long_set(&rollover->num, 0);
 		atomic_long_set(&rollover->num_huge, 0);
 		atomic_long_set(&rollover->num_failed, 0);
-		po->rollover = rollover;
 	}
 
 	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
@@ -1745,6 +1744,8 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
 			__dev_remove_pack(&po->prot_hook);
 			po->fanout = match;
+			po->rollover = rollover;
+			rollover = NULL;
 			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
 			__fanout_link(sk, po);
 			err = 0;
@@ -1758,10 +1759,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
 	}
 
 out:
-	if (err && rollover) {
-		kfree_rcu(rollover, rcu);
-		po->rollover = NULL;
-	}
+	kfree(rollover);
 	mutex_unlock(&fanout_mutex);
 	return err;
 }
@@ -1785,11 +1783,6 @@ static struct packet_fanout *fanout_release(struct sock *sk)
 			list_del(&f->list);
 		else
 			f = NULL;
-
-		if (po->rollover) {
-			kfree_rcu(po->rollover, rcu);
-			po->rollover = NULL;
-		}
 	}
 	mutex_unlock(&fanout_mutex);
 
@@ -3029,6 +3022,7 @@ static int packet_release(struct socket *sock)
 	synchronize_net();
 
 	if (f) {
+		kfree(po->rollover);
 		fanout_release_data(f);
 		kfree(f);
 	}
@@ -3097,6 +3091,10 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
 	if (need_rehook) {
 		if (po->running) {
 			rcu_read_unlock();
+			/* prevents packet_notifier() from calling
+			 * register_prot_hook()
+			 */
+			po->num = 0;
 			__unregister_prot_hook(sk, true);
 			rcu_read_lock();
 			dev_curr = po->prot_hook.dev;
@@ -3105,6 +3103,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
 				 dev->ifindex);
 	}
 
+	BUG_ON(po->running);
 	po->num = proto;
 	po->prot_hook.type = proto;
 
@@ -3843,7 +3842,6 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 	void *data = &val;
 	union tpacket_stats_u st;
 	struct tpacket_rollover_stats rstats;
-	struct packet_rollover *rollover;
 
 	if (level != SOL_PACKET)
 		return -ENOPROTOOPT;
@@ -3922,18 +3920,13 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
 		       0);
 		break;
 	case PACKET_ROLLOVER_STATS:
-		rcu_read_lock();
-		rollover = rcu_dereference(po->rollover);
-		if (rollover) {
-			rstats.tp_all = atomic_long_read(&rollover->num);
-			rstats.tp_huge = atomic_long_read(&rollover->num_huge);
-			rstats.tp_failed = atomic_long_read(&rollover->num_failed);
-			data = &rstats;
-			lv = sizeof(rstats);
-		}
-		rcu_read_unlock();
-		if (!rollover)
+		if (!po->rollover)
 			return -EINVAL;
+		rstats.tp_all = atomic_long_read(&po->rollover->num);
+		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+		data = &rstats;
+		lv = sizeof(rstats);
 		break;
 	case PACKET_TX_HAS_OFF:
 		val = po->tp_tx_has_off;
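
The af_packet.c crash fix makes po->rollover plainly owned by the socket: it is assigned only once fanout_add() has actually succeeded, the local pointer is NULLed to mark the handover, and whatever the local still holds is kfree()'d on exit (kfree(NULL), like free(NULL), is a no-op). The idiom in miniature, with userspace stand-ins:

#include <stdio.h>
#include <stdlib.h>

struct rollover { long num; };
struct sock { struct rollover *rollover; };

static int attach(struct sock *po, int should_succeed)
{
	struct rollover *ro = calloc(1, sizeof(*ro));
	int err = -1;

	if (!ro)
		return -1;
	if (should_succeed) {
		po->rollover = ro;	/* ownership transferred... */
		ro = NULL;		/* ...so don't free it below */
		err = 0;
	}
	free(ro);			/* no-op on the success path */
	return err;
}

int main(void)
{
	struct sock po = { 0 };

	attach(&po, 0);
	printf("after failure: %p\n", (void *)po.rollover);	/* (nil) */
	attach(&po, 1);
	printf("after success: %p\n", (void *)po.rollover);
	free(po.rollover);	/* mirrors the kfree in packet_release() */
	return 0;
}
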
diff --git a/net/packet/internal.h b/net/packet/internal.h
index 562fbc155006..a1d2b2319ae9 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -95,7 +95,6 @@ struct packet_fanout {
 
 struct packet_rollover {
 	int			sock;
-	struct rcu_head		rcu;
 	atomic_long_t		num;
 	atomic_long_t		num_huge;
 	atomic_long_t		num_failed;
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 9b5c46b052fd..8f7cf4c042be 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -285,6 +285,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 					   bool upgrade)
 {
 	struct rxrpc_conn_parameters cp;
+	struct rxrpc_call_params p;
 	struct rxrpc_call *call;
 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
 	int ret;
@@ -302,6 +303,10 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 	if (key && !key->payload.data[0])
 		key = NULL; /* a no-security key */
 
+	memset(&p, 0, sizeof(p));
+	p.user_call_ID = user_call_ID;
+	p.tx_total_len = tx_total_len;
+
 	memset(&cp, 0, sizeof(cp));
 	cp.local = rx->local;
 	cp.key = key;
@@ -309,8 +314,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
 	cp.exclusive = false;
 	cp.upgrade = upgrade;
 	cp.service_id = srx->srx_service;
-	call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
-				     gfp);
+	call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp);
 	/* The socket has been unlocked. */
 	if (!IS_ERR(call)) {
 		call->notify_rx = notify_rx;
@@ -863,6 +867,19 @@ static int rxrpc_release_sock(struct sock *sk)
 	sock_orphan(sk);
 	sk->sk_shutdown = SHUTDOWN_MASK;
 
+	/* We want to kill off all connections from a service socket
+	 * as fast as possible because we can't share these; client
+	 * sockets, on the other hand, can share an endpoint.
+	 */
+	switch (sk->sk_state) {
+	case RXRPC_SERVER_BOUND:
+	case RXRPC_SERVER_BOUND2:
+	case RXRPC_SERVER_LISTENING:
+	case RXRPC_SERVER_LISTEN_DISABLED:
+		rx->local->service_closed = true;
+		break;
+	}
+
 	spin_lock_bh(&sk->sk_receive_queue.lock);
 	sk->sk_state = RXRPC_CLOSE;
 	spin_unlock_bh(&sk->sk_receive_queue.lock);
@@ -878,6 +895,8 @@ static int rxrpc_release_sock(struct sock *sk)
 	rxrpc_release_calls_on_socket(rx);
 	flush_workqueue(rxrpc_workqueue);
 	rxrpc_purge_queue(&sk->sk_receive_queue);
+	rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper);
+	rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper);
 
 	rxrpc_put_local(rx->local);
 	rx->local = NULL;
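
rxrpc_new_client_call() now takes a struct rxrpc_call_params instead of a growing argument list, which is what lets the new timeout fields ride along without touching every caller. A small sketch of that refactor shape (field names here are simplified stand-ins, not the kernel's struct):

#include <stdio.h>
#include <string.h>

struct call_params {
	long long tx_total_len;
	unsigned long user_call_id;
	unsigned int hard_timeout;	/* a later addition costs no caller churn */
};

static void new_call(const struct call_params *p)
{
	printf("id=%lu len=%lld hard=%u\n",
	       p->user_call_id, p->tx_total_len, p->hard_timeout);
}

int main(void)
{
	struct call_params p;

	memset(&p, 0, sizeof(p));	/* unset fields default to zero */
	p.user_call_id = 42;
	p.tx_total_len = -1;
	new_call(&p);
	return 0;
}
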
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index b2151993d384..416688381eb7 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -79,17 +79,20 @@ struct rxrpc_net {
 	struct list_head	conn_proc_list;	/* List of conns in this namespace for proc */
 	struct list_head	service_conns;	/* Service conns in this namespace */
 	rwlock_t		conn_lock;	/* Lock for ->conn_proc_list, ->service_conns */
-	struct delayed_work	service_conn_reaper;
+	struct work_struct	service_conn_reaper;
+	struct timer_list	service_conn_reap_timer;
 
 	unsigned int		nr_client_conns;
 	unsigned int		nr_active_client_conns;
 	bool			kill_all_client_conns;
+	bool			live;
 	spinlock_t		client_conn_cache_lock; /* Lock for ->*_client_conns */
 	spinlock_t		client_conn_discard_lock; /* Prevent multiple discarders */
 	struct list_head	waiting_client_conns;
 	struct list_head	active_client_conns;
 	struct list_head	idle_client_conns;
-	struct delayed_work	client_conn_reaper;
+	struct work_struct	client_conn_reaper;
+	struct timer_list	client_conn_reap_timer;
 
 	struct list_head	local_endpoints;
 	struct mutex		local_mutex;	/* Lock for ->local_endpoints */
@@ -265,6 +268,7 @@ struct rxrpc_local {
 	rwlock_t		services_lock;	/* lock for services list */
 	int			debug_id;	/* debug ID for printks */
 	bool			dead;
+	bool			service_closed;	/* Service socket closed */
 	struct sockaddr_rxrpc	srx;		/* local address */
 };
 
@@ -338,8 +342,17 @@ enum rxrpc_conn_flag {
 	RXRPC_CONN_DONT_REUSE,		/* Don't reuse this connection */
 	RXRPC_CONN_COUNTED,		/* Counted by rxrpc_nr_client_conns */
 	RXRPC_CONN_PROBING_FOR_UPGRADE,	/* Probing for service upgrade */
+	RXRPC_CONN_FINAL_ACK_0,		/* Need final ACK for channel 0 */
+	RXRPC_CONN_FINAL_ACK_1,		/* Need final ACK for channel 1 */
+	RXRPC_CONN_FINAL_ACK_2,		/* Need final ACK for channel 2 */
+	RXRPC_CONN_FINAL_ACK_3,		/* Need final ACK for channel 3 */
 };
 
+#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) |	\
+				   (1UL << RXRPC_CONN_FINAL_ACK_1) |	\
+				   (1UL << RXRPC_CONN_FINAL_ACK_2) |	\
+				   (1UL << RXRPC_CONN_FINAL_ACK_3))
+
 /*
  * Events that can be raised upon a connection.
  */
@@ -393,6 +406,7 @@ struct rxrpc_connection {
 #define RXRPC_ACTIVE_CHANS_MASK	((1 << RXRPC_MAXCALLS) - 1)
 	struct list_head	waiting_calls;	/* Calls waiting for channels */
 	struct rxrpc_channel {
+		unsigned long	final_ack_at;	/* Time at which to issue final ACK */
 		struct rxrpc_call __rcu	*call;	/* Active call */
 		u32		call_id;	/* ID of current call */
 		u32		call_counter;	/* Call ID counter */
@@ -404,6 +418,7 @@ struct rxrpc_connection {
 		};
 	} channels[RXRPC_MAXCALLS];
 
+	struct timer_list	timer;		/* Conn event timer */
 	struct work_struct	processor;	/* connection event processor */
 	union {
 		struct rb_node	client_node;	/* Node in local->client_conns */
@@ -457,9 +472,10 @@ enum rxrpc_call_flag {
 enum rxrpc_call_event {
 	RXRPC_CALL_EV_ACK,		/* need to generate ACK */
 	RXRPC_CALL_EV_ABORT,		/* need to generate abort */
-	RXRPC_CALL_EV_TIMER,		/* Timer expired */
 	RXRPC_CALL_EV_RESEND,		/* Tx resend required */
 	RXRPC_CALL_EV_PING,		/* Ping send required */
+	RXRPC_CALL_EV_EXPIRED,		/* Expiry occurred */
+	RXRPC_CALL_EV_ACK_LOST,		/* ACK may be lost, send ping */
 };
 
 /*
@@ -503,10 +519,16 @@ struct rxrpc_call {
 	struct rxrpc_peer	*peer;		/* Peer record for remote address */
 	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
 	struct mutex		user_mutex;	/* User access mutex */
-	ktime_t			ack_at;		/* When deferred ACK needs to happen */
-	ktime_t			resend_at;	/* When next resend needs to happen */
-	ktime_t			ping_at;	/* When next to send a ping */
-	ktime_t			expire_at;	/* When the call times out */
+	unsigned long		ack_at;		/* When deferred ACK needs to happen */
+	unsigned long		ack_lost_at;	/* When ACK is figured as lost */
+	unsigned long		resend_at;	/* When next resend needs to happen */
+	unsigned long		ping_at;	/* When next to send a ping */
+	unsigned long		keepalive_at;	/* When next to send a keepalive ping */
+	unsigned long		expect_rx_by;	/* When we expect to get a packet by */
+	unsigned long		expect_req_by;	/* When we expect to get a request DATA packet by */
+	unsigned long		expect_term_by;	/* When we expect call termination by */
+	u32			next_rx_timo;	/* Timeout for next Rx packet (jif) */
+	u32			next_req_timo;	/* Timeout for next Rx request packet (jif) */
 	struct timer_list	timer;		/* Combined event timer */
 	struct work_struct	processor;	/* Event processor */
 	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
@@ -609,6 +631,8 @@ struct rxrpc_call {
 	ktime_t			acks_latest_ts;	/* Timestamp of latest ACK received */
 	rxrpc_serial_t		acks_latest;	/* serial number of latest ACK received */
 	rxrpc_seq_t		acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
+	rxrpc_seq_t		acks_lost_top;	/* tx_top at the time lost-ack ping sent */
+	rxrpc_serial_t		acks_lost_ping;	/* Serial number of probe ACK */
 };
 
 /*
@@ -632,6 +656,35 @@ struct rxrpc_ack_summary {
 	u8			cumulative_acks;
 };
 
+/*
+ * sendmsg() cmsg-specified parameters.
+ */
+enum rxrpc_command {
+	RXRPC_CMD_SEND_DATA,		/* send data message */
+	RXRPC_CMD_SEND_ABORT,		/* request abort generation */
+	RXRPC_CMD_ACCEPT,		/* [server] accept incoming call */
+	RXRPC_CMD_REJECT_BUSY,		/* [server] reject a call as busy */
+};
+
+struct rxrpc_call_params {
+	s64			tx_total_len;	/* Total Tx data length (if send data) */
+	unsigned long		user_call_ID;	/* User's call ID */
+	struct {
+		u32		hard;		/* Maximum lifetime (sec) */
+		u32		idle;		/* Max time since last data packet (msec) */
+		u32		normal;		/* Max time since last call packet (msec) */
+	} timeouts;
+	u8			nr_timeouts;	/* Number of timeouts specified */
+};
+
+struct rxrpc_send_params {
+	struct rxrpc_call_params call;
+	u32			abort_code;	/* Abort code to Tx (if abort) */
+	enum rxrpc_command	command : 8;	/* The command to implement */
+	bool			exclusive;	/* Shared or exclusive call */
+	bool			upgrade;	/* If the connection is upgradeable */
+};
+
 #include <trace/events/rxrpc.h>
 
 /*
@@ -657,12 +710,19 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * call_event.c
  */
-void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
 		       enum rxrpc_propose_ack_trace);
 void rxrpc_process_call(struct work_struct *);
 
+static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+					   unsigned long expire_at,
+					   unsigned long now,
+					   enum rxrpc_timer_trace why)
+{
+	trace_rxrpc_timer(call, why, now);
+	timer_reduce(&call->timer, expire_at);
+}
+
 /*
  * call_object.c
  */
@@ -672,11 +732,11 @@ extern unsigned int rxrpc_max_call_lifetime;
 extern struct kmem_cache *rxrpc_call_jar;
 
 struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
-struct rxrpc_call *rxrpc_alloc_call(gfp_t);
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
 					 struct rxrpc_conn_parameters *,
 					 struct sockaddr_rxrpc *,
-					 unsigned long, s64, gfp_t);
+					 struct rxrpc_call_params *, gfp_t);
 int rxrpc_retry_client_call(struct rxrpc_sock *,
 			    struct rxrpc_call *,
 			    struct rxrpc_conn_parameters *,
@@ -803,8 +863,8 @@ static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call,
  */
 extern unsigned int rxrpc_max_client_connections;
 extern unsigned int rxrpc_reap_client_connections;
-extern unsigned int rxrpc_conn_idle_client_expiry;
-extern unsigned int rxrpc_conn_idle_client_fast_expiry;
+extern unsigned long rxrpc_conn_idle_client_expiry;
+extern unsigned long rxrpc_conn_idle_client_fast_expiry;
 extern struct idr rxrpc_client_conn_ids;
 
 void rxrpc_destroy_client_conn_ids(void);
@@ -825,6 +885,7 @@ void rxrpc_process_connection(struct work_struct *);
  * conn_object.c
  */
 extern unsigned int rxrpc_connection_expiry;
+extern unsigned int rxrpc_closed_conn_expiry;
 
 struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
 struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
@@ -861,6 +922,12 @@ static inline void rxrpc_put_connection(struct rxrpc_connection *conn)
 		rxrpc_put_service_conn(conn);
 }
 
+static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
+					   unsigned long expire_at)
+{
+	timer_reduce(&conn->timer, expire_at);
+}
+
 /*
  * conn_service.c
  */
@@ -930,13 +997,13 @@ static inline void rxrpc_queue_local(struct rxrpc_local *local)
 * misc.c
 */
 extern unsigned int rxrpc_max_backlog __read_mostly;
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned long rxrpc_requested_ack_delay;
+extern unsigned long rxrpc_soft_ack_delay;
+extern unsigned long rxrpc_idle_ack_delay;
 extern unsigned int rxrpc_rx_window_size;
 extern unsigned int rxrpc_rx_mtu;
 extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned int rxrpc_resend_timeout;
+extern unsigned long rxrpc_resend_timeout;
 
 extern const s8 rxrpc_ack_priority[];
 
@@ -954,7 +1021,7 @@ static inline struct rxrpc_net *rxrpc_net(struct net *net)
 /*
  * output.c
 */
-int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
+int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
 int rxrpc_send_abort_packet(struct rxrpc_call *);
 int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
 void rxrpc_reject_packets(struct rxrpc_local *);
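
rxrpc_reduce_call_timer() and rxrpc_reduce_conn_timer() lean on timer_reduce(), which only ever moves a pending timer earlier; concurrent updaters can therefore shorten a deadline without a lock around the comparison. Its semantics in miniature - a plain variable stands in for the kernel timer, so this is an illustration, not the kernel implementation:

#include <stdio.h>

static unsigned long timer_expires = ~0ul;	/* "not pending" far future */

static void timer_reduce_sketch(unsigned long expire_at)
{
	if (expire_at < timer_expires)	/* only ever shorten */
		timer_expires = expire_at;
}

int main(void)
{
	timer_reduce_sketch(1000);
	timer_reduce_sketch(5000);	/* ignored: later than current */
	timer_reduce_sketch(200);	/* taken: earlier */
	printf("expires at %lu\n", timer_expires);	/* 200 */
	return 0;
}
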
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
index cbd1701e813a..3028298ca561 100644
--- a/net/rxrpc/call_accept.c
+++ b/net/rxrpc/call_accept.c
@@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
 	/* Now it gets complicated, because calls get registered with the
 	 * socket here, particularly if a user ID is preassigned by the user.
 	 */
-	call = rxrpc_alloc_call(gfp);
+	call = rxrpc_alloc_call(rx, gfp);
 	if (!call)
 		return -ENOMEM;
 	call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 3574508baf9a..bda952ffe6a6 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -22,80 +22,6 @@
22#include "ar-internal.h" 22#include "ar-internal.h"
23 23
24/* 24/*
25 * Set the timer
26 */
27void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
28 ktime_t now)
29{
30 unsigned long t_j, now_j = jiffies;
31 ktime_t t;
32 bool queue = false;
33
34 if (call->state < RXRPC_CALL_COMPLETE) {
35 t = call->expire_at;
36 if (!ktime_after(t, now)) {
37 trace_rxrpc_timer(call, why, now, now_j);
38 queue = true;
39 goto out;
40 }
41
42 if (!ktime_after(call->resend_at, now)) {
43 call->resend_at = call->expire_at;
44 if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
45 queue = true;
46 } else if (ktime_before(call->resend_at, t)) {
47 t = call->resend_at;
48 }
49
50 if (!ktime_after(call->ack_at, now)) {
51 call->ack_at = call->expire_at;
52 if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
53 queue = true;
54 } else if (ktime_before(call->ack_at, t)) {
55 t = call->ack_at;
56 }
57
58 if (!ktime_after(call->ping_at, now)) {
59 call->ping_at = call->expire_at;
60 if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
61 queue = true;
62 } else if (ktime_before(call->ping_at, t)) {
63 t = call->ping_at;
64 }
65
66 t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
67 t_j += jiffies;
68
69 /* We have to make sure that the calculated jiffies value falls
70 * at or after the nsec value, or we may loop ceaselessly
71 * because the timer times out, but we haven't reached the nsec
72 * timeout yet.
73 */
74 t_j++;
75
76 if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
77 mod_timer(&call->timer, t_j);
78 trace_rxrpc_timer(call, why, now, now_j);
79 }
80 }
81
82out:
83 if (queue)
84 rxrpc_queue_call(call);
85}
86
87/*
88 * Set the timer
89 */
90void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
91 ktime_t now)
92{
93 read_lock_bh(&call->state_lock);
94 __rxrpc_set_timer(call, why, now);
95 read_unlock_bh(&call->state_lock);
96}
97
98/*
99 * Propose a PING ACK be sent. 25 * Propose a PING ACK be sent.
100 */ 26 */
101static void rxrpc_propose_ping(struct rxrpc_call *call, 27static void rxrpc_propose_ping(struct rxrpc_call *call,
@@ -106,12 +32,13 @@ static void rxrpc_propose_ping(struct rxrpc_call *call,
106 !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events)) 32 !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
107 rxrpc_queue_call(call); 33 rxrpc_queue_call(call);
108 } else { 34 } else {
109 ktime_t now = ktime_get_real(); 35 unsigned long now = jiffies;
110 ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay); 36 unsigned long ping_at = now + rxrpc_idle_ack_delay;
111 37
112 if (ktime_before(ping_at, call->ping_at)) { 38 if (time_before(ping_at, call->ping_at)) {
113 call->ping_at = ping_at; 39 WRITE_ONCE(call->ping_at, ping_at);
114 rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now); 40 rxrpc_reduce_call_timer(call, ping_at, now,
41 rxrpc_timer_set_for_ping);
115 } 42 }
116 } 43 }
117} 44}
@@ -125,8 +52,7 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
125 enum rxrpc_propose_ack_trace why) 52 enum rxrpc_propose_ack_trace why)
126{ 53{
127 enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use; 54 enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
128 unsigned int expiry = rxrpc_soft_ack_delay; 55 unsigned long expiry = rxrpc_soft_ack_delay;
129 ktime_t now, ack_at;
130 s8 prior = rxrpc_ack_priority[ack_reason]; 56 s8 prior = rxrpc_ack_priority[ack_reason];
131 57
132 /* Pings are handled specially because we don't want to accidentally 58 /* Pings are handled specially because we don't want to accidentally
@@ -190,11 +116,18 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
190 background) 116 background)
191 rxrpc_queue_call(call); 117 rxrpc_queue_call(call);
192 } else { 118 } else {
193 now = ktime_get_real(); 119 unsigned long now = jiffies, ack_at;
194 ack_at = ktime_add_ms(now, expiry); 120
195 if (ktime_before(ack_at, call->ack_at)) { 121 if (call->peer->rtt_usage > 0)
196 call->ack_at = ack_at; 122 ack_at = nsecs_to_jiffies(call->peer->rtt);
197 rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now); 123 else
124 ack_at = expiry;
125
126 ack_at = jiffies + expiry;
127 if (time_before(ack_at, call->ack_at)) {
128 WRITE_ONCE(call->ack_at, ack_at);
129 rxrpc_reduce_call_timer(call, ack_at, now,
130 rxrpc_timer_set_for_ack);
198 } 131 }
199 } 132 }
200 133
@@ -227,18 +160,28 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
227/* 160/*
228 * Perform retransmission of NAK'd and unack'd packets. 161 * Perform retransmission of NAK'd and unack'd packets.
229 */ 162 */
230static void rxrpc_resend(struct rxrpc_call *call, ktime_t now) 163static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
231{ 164{
232 struct rxrpc_skb_priv *sp; 165 struct rxrpc_skb_priv *sp;
233 struct sk_buff *skb; 166 struct sk_buff *skb;
167 unsigned long resend_at;
234 rxrpc_seq_t cursor, seq, top; 168 rxrpc_seq_t cursor, seq, top;
235 ktime_t max_age, oldest, ack_ts; 169 ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
236 int ix; 170 int ix;
237 u8 annotation, anno_type, retrans = 0, unacked = 0; 171 u8 annotation, anno_type, retrans = 0, unacked = 0;
238 172
239 _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); 173 _enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
240 174
241 max_age = ktime_sub_ms(now, rxrpc_resend_timeout); 175 if (call->peer->rtt_usage > 1)
176 timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
177 else
178 timeout = ms_to_ktime(rxrpc_resend_timeout);
179 min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
180 if (ktime_before(timeout, min_timeo))
181 timeout = min_timeo;
182
183 now = ktime_get_real();
184 max_age = ktime_sub(now, timeout);
242 185
243 spin_lock_bh(&call->lock); 186 spin_lock_bh(&call->lock);
244 187
@@ -282,7 +225,9 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
282 ktime_to_ns(ktime_sub(skb->tstamp, max_age))); 225 ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
283 } 226 }
284 227
285 call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout); 228 resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(oldest, now)));
229 resend_at += jiffies + rxrpc_resend_timeout;
230 WRITE_ONCE(call->resend_at, resend_at);
286 231
287 if (unacked) 232 if (unacked)
288 rxrpc_congestion_timeout(call); 233 rxrpc_congestion_timeout(call);
@@ -292,14 +237,15 @@ static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
292 * retransmitting data. 237 * retransmitting data.
293 */ 238 */
294 if (!retrans) { 239 if (!retrans) {
295 rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now); 240 rxrpc_reduce_call_timer(call, resend_at, now,
241 rxrpc_timer_set_for_resend);
296 spin_unlock_bh(&call->lock); 242 spin_unlock_bh(&call->lock);
297 ack_ts = ktime_sub(now, call->acks_latest_ts); 243 ack_ts = ktime_sub(now, call->acks_latest_ts);
298 if (ktime_to_ns(ack_ts) < call->peer->rtt) 244 if (ktime_to_ns(ack_ts) < call->peer->rtt)
299 goto out; 245 goto out;
300 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false, 246 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
301 rxrpc_propose_ack_ping_for_lost_ack); 247 rxrpc_propose_ack_ping_for_lost_ack);
302 rxrpc_send_ack_packet(call, true); 248 rxrpc_send_ack_packet(call, true, NULL);
303 goto out; 249 goto out;
304 } 250 }
305 251
@@ -364,7 +310,8 @@ void rxrpc_process_call(struct work_struct *work)
364{ 310{
365 struct rxrpc_call *call = 311 struct rxrpc_call *call =
366 container_of(work, struct rxrpc_call, processor); 312 container_of(work, struct rxrpc_call, processor);
367 ktime_t now; 313 rxrpc_serial_t *send_ack;
314 unsigned long now, next, t;
368 315
369 rxrpc_see_call(call); 316 rxrpc_see_call(call);
370 317
@@ -384,22 +331,89 @@ recheck_state:
384 goto out_put; 331 goto out_put;
385 } 332 }
386 333
387 now = ktime_get_real(); 334 /* Work out if any timeouts tripped */
388 if (ktime_before(call->expire_at, now)) { 335 now = jiffies;
336 t = READ_ONCE(call->expect_rx_by);
337 if (time_after_eq(now, t)) {
338 trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
339 set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
340 }
341
342 t = READ_ONCE(call->expect_req_by);
343 if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
344 time_after_eq(now, t)) {
345 trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
346 set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
347 }
348
349 t = READ_ONCE(call->expect_term_by);
350 if (time_after_eq(now, t)) {
351 trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
352 set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
353 }
354
355 t = READ_ONCE(call->ack_at);
356 if (time_after_eq(now, t)) {
357 trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
358 cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
359 set_bit(RXRPC_CALL_EV_ACK, &call->events);
360 }
361
362 t = READ_ONCE(call->ack_lost_at);
363 if (time_after_eq(now, t)) {
364 trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
365 cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
366 set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
367 }
368
369 t = READ_ONCE(call->keepalive_at);
370 if (time_after_eq(now, t)) {
371 trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
372 cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
373 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
374 rxrpc_propose_ack_ping_for_keepalive);
375 set_bit(RXRPC_CALL_EV_PING, &call->events);
376 }
377
378 t = READ_ONCE(call->ping_at);
379 if (time_after_eq(now, t)) {
380 trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
381 cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
382 set_bit(RXRPC_CALL_EV_PING, &call->events);
383 }
384
385 t = READ_ONCE(call->resend_at);
386 if (time_after_eq(now, t)) {
387 trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
388 cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
389 set_bit(RXRPC_CALL_EV_RESEND, &call->events);
390 }
391
392 /* Process events */
393 if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
389 rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME); 394 rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
390 set_bit(RXRPC_CALL_EV_ABORT, &call->events); 395 set_bit(RXRPC_CALL_EV_ABORT, &call->events);
391 goto recheck_state; 396 goto recheck_state;
392 } 397 }
393 398
394 if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) { 399 send_ack = NULL;
400 if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
401 call->acks_lost_top = call->tx_top;
402 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
403 rxrpc_propose_ack_ping_for_lost_ack);
404 send_ack = &call->acks_lost_ping;
405 }
406
407 if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
408 send_ack) {
395 if (call->ackr_reason) { 409 if (call->ackr_reason) {
396 rxrpc_send_ack_packet(call, false); 410 rxrpc_send_ack_packet(call, false, send_ack);
397 goto recheck_state; 411 goto recheck_state;
398 } 412 }
399 } 413 }
400 414
401 if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) { 415 if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
402 rxrpc_send_ack_packet(call, true); 416 rxrpc_send_ack_packet(call, true, NULL);
403 goto recheck_state; 417 goto recheck_state;
404 } 418 }
405 419
@@ -408,7 +422,24 @@ recheck_state:
408 goto recheck_state; 422 goto recheck_state;
409 } 423 }
410 424
411 rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now); 425 /* Make sure the timer is restarted */
426 next = call->expect_rx_by;
427
428#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
429
430 set(call->expect_req_by);
431 set(call->expect_term_by);
432 set(call->ack_at);
433 set(call->ack_lost_at);
434 set(call->resend_at);
435 set(call->keepalive_at);
436 set(call->ping_at);
437
438 now = jiffies;
439 if (time_after_eq(now, next))
440 goto recheck_state;
441
442 rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
412 443
413 /* other events may have been raised since we started checking */ 444 /* other events may have been raised since we started checking */
414 if (call->events && call->state < RXRPC_CALL_COMPLETE) { 445 if (call->events && call->state < RXRPC_CALL_COMPLETE) {
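
Each deadline in rxrpc_process_call() is checked with the wrap-safe time_after_eq() and then disarmed via cmpxchg() to now + MAX_JIFFY_OFFSET, so exactly one pass claims a given expiry even if the work item races with itself. A userspace sketch of that claim pattern using C11 atomics (MAX_OFFSET and the names are illustrative stand-ins):

#include <stdio.h>
#include <stdatomic.h>

#define MAX_OFFSET ((~0ul >> 1) - 1)	/* MAX_JIFFY_OFFSET analogue */

/* time_after_eq(a, b): true if a is at-or-past b, tolerant of wrap */
static int time_after_eq_sketch(unsigned long a, unsigned long b)
{
	return (long)(a - b) >= 0;
}

int main(void)
{
	_Atomic unsigned long deadline = 100;
	unsigned long now = 150, t = deadline;

	if (time_after_eq_sketch(now, t) &&
	    atomic_compare_exchange_strong(&deadline, &t, now + MAX_OFFSET))
		printf("claimed expiry at %lu\n", now);	/* exactly one winner */
	else
		printf("someone else claimed it\n");
	return 0;
}
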
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 994dc2df57e4..0b2db38dd32d 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -51,10 +51,14 @@ static void rxrpc_call_timer_expired(struct timer_list *t)
 
 	_enter("%d", call->debug_id);
 
-	if (call->state < RXRPC_CALL_COMPLETE)
-		rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+	if (call->state < RXRPC_CALL_COMPLETE) {
+		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+		rxrpc_queue_call(call);
+	}
 }
 
+static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
+
 /*
  * find an extant server call
  * - called in process context with IRQs enabled
@@ -95,7 +99,7 @@ found_extant_call:
 /*
  * allocate a new call
  */
-struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
 {
 	struct rxrpc_call *call;
 
@@ -114,6 +118,14 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
 		goto nomem_2;
 
 	mutex_init(&call->user_mutex);
+
+	/* Prevent lockdep reporting a deadlock false positive between the afs
+	 * filesystem and sys_sendmsg() via the mmap sem.
+	 */
+	if (rx->sk.sk_kern_sock)
+		lockdep_set_class(&call->user_mutex,
+				  &rxrpc_call_user_mutex_lock_class_key);
+
 	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
 	INIT_WORK(&call->processor, &rxrpc_process_call);
 	INIT_LIST_HEAD(&call->link);
@@ -128,6 +140,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
 	atomic_set(&call->usage, 1);
 	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
 	call->tx_total_len = -1;
+	call->next_rx_timo = 20 * HZ;
+	call->next_req_timo = 1 * HZ;
 
 	memset(&call->sock_node, 0xed, sizeof(call->sock_node));
 
@@ -150,7 +164,8 @@ nomem:
 /*
  * Allocate a new client call.
  */
-static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
+static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
+						  struct sockaddr_rxrpc *srx,
 						  gfp_t gfp)
 {
 	struct rxrpc_call *call;
@@ -158,7 +173,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
 
 	_enter("");
 
-	call = rxrpc_alloc_call(gfp);
+	call = rxrpc_alloc_call(rx, gfp);
 	if (!call)
 		return ERR_PTR(-ENOMEM);
 	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
@@ -177,15 +192,17 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
 */
 static void rxrpc_start_call_timer(struct rxrpc_call *call)
 {
-	ktime_t now = ktime_get_real(), expire_at;
-
-	expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
-	call->expire_at = expire_at;
-	call->ack_at = expire_at;
-	call->ping_at = expire_at;
-	call->resend_at = expire_at;
-	call->timer.expires = jiffies + LONG_MAX / 2;
-	rxrpc_set_timer(call, rxrpc_timer_begin, now);
+	unsigned long now = jiffies;
+	unsigned long j = now + MAX_JIFFY_OFFSET;
+
+	call->ack_at = j;
+	call->ack_lost_at = j;
+	call->resend_at = j;
+	call->ping_at = j;
+	call->expect_rx_by = j;
+	call->expect_req_by = j;
+	call->expect_term_by = j;
+	call->timer.expires = now;
 }
 
 /*
@@ -196,8 +213,7 @@ static void rxrpc_start_call_timer(struct rxrpc_call *call)
 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 					 struct rxrpc_conn_parameters *cp,
 					 struct sockaddr_rxrpc *srx,
-					 unsigned long user_call_ID,
-					 s64 tx_total_len,
+					 struct rxrpc_call_params *p,
 					 gfp_t gfp)
 	__releases(&rx->sk.sk_lock.slock)
 {
@@ -207,18 +223,18 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
 	const void *here = __builtin_return_address(0);
 	int ret;
 
-	_enter("%p,%lx", rx, user_call_ID);
+	_enter("%p,%lx", rx, p->user_call_ID);
 
-	call = rxrpc_alloc_client_call(srx, gfp);
+	call = rxrpc_alloc_client_call(rx, srx, gfp);
 	if (IS_ERR(call)) {
 		release_sock(&rx->sk);
 		_leave(" = %ld", PTR_ERR(call));
 		return call;
 	}
 
-	call->tx_total_len = tx_total_len;
+	call->tx_total_len = p->tx_total_len;
 	trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
-			 here, (const void *)user_call_ID);
+			 here, (const void *)p->user_call_ID);
 
 	/* We need to protect a partially set up call against the user as we
 	 * will be acting outside the socket lock.
 		parent = *pp;
234 parent = *pp; 250 parent = *pp;
235 xcall = rb_entry(parent, struct rxrpc_call, sock_node); 251 xcall = rb_entry(parent, struct rxrpc_call, sock_node);
236 252
237 if (user_call_ID < xcall->user_call_ID) 253 if (p->user_call_ID < xcall->user_call_ID)
238 pp = &(*pp)->rb_left; 254 pp = &(*pp)->rb_left;
239 else if (user_call_ID > xcall->user_call_ID) 255 else if (p->user_call_ID > xcall->user_call_ID)
240 pp = &(*pp)->rb_right; 256 pp = &(*pp)->rb_right;
241 else 257 else
242 goto error_dup_user_ID; 258 goto error_dup_user_ID;
243 } 259 }
244 260
245 rcu_assign_pointer(call->socket, rx); 261 rcu_assign_pointer(call->socket, rx);
246 call->user_call_ID = user_call_ID; 262 call->user_call_ID = p->user_call_ID;
247 __set_bit(RXRPC_CALL_HAS_USERID, &call->flags); 263 __set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
248 rxrpc_get_call(call, rxrpc_call_got_userid); 264 rxrpc_get_call(call, rxrpc_call_got_userid);
249 rb_link_node(&call->sock_node, parent, pp); 265 rb_link_node(&call->sock_node, parent, pp);
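rxrpc_start_call_timer() above primes every per-event deadline to now + MAX_JIFFY_OFFSET, the farthest future a wrap-safe comparison can represent, so a deadline only becomes live once some path explicitly pulls it closer. A throwaway demonstration of why that sentinel works (the constant mirrors the LONG_MAX-based definition in linux/jiffies.h):

    #include <limits.h>
    #include <stdio.h>

    #define MAX_JIFFY_OFFSET ((LONG_MAX >> 1) - 1)   /* as in linux/jiffies.h */
    #define time_before(a, b) ((long)((a) - (b)) < 0)

    int main(void)
    {
        unsigned long now = 1234567;
        unsigned long never = now + MAX_JIFFY_OFFSET; /* "no deadline" sentinel */
        unsigned long deadline = now + 20 * 250;      /* e.g. 20s at HZ=250 */

        /* The sentinel stays in the future for any realistic advance... */
        printf("%d\n", time_before(now, never));           /* 1 */
        printf("%d\n", time_before(now + 1000000, never)); /* 1 */

        /* ...so any real deadline that gets set wins the min-selection. */
        printf("%d\n", time_before(deadline, never));      /* 1 */
        return 0;
    }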
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 5f9624bd311c..7f74ca3059f8 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -85,8 +85,8 @@
85 85
86__read_mostly unsigned int rxrpc_max_client_connections = 1000; 86__read_mostly unsigned int rxrpc_max_client_connections = 1000;
87__read_mostly unsigned int rxrpc_reap_client_connections = 900; 87__read_mostly unsigned int rxrpc_reap_client_connections = 900;
88__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; 88__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
89__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ; 89__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
90 90
91/* 91/*
92 * We use machine-unique IDs for our client connections. 92 * We use machine-unique IDs for our client connections.
@@ -554,6 +554,11 @@ static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
554 554
555 trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate); 555 trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
556 556
557 /* Cancel the final ACK on the previous call if it hasn't been sent yet
558 * as the DATA packet will implicitly ACK it.
559 */
560 clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
561
557 write_lock_bh(&call->state_lock); 562 write_lock_bh(&call->state_lock);
558 if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags)) 563 if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
559 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; 564 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
@@ -686,7 +691,7 @@ int rxrpc_connect_call(struct rxrpc_call *call,
686 691
687 _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 692 _enter("{%d,%lx},", call->debug_id, call->user_call_ID);
688 693
689 rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work); 694 rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
690 rxrpc_cull_active_client_conns(rxnet); 695 rxrpc_cull_active_client_conns(rxnet);
691 696
692 ret = rxrpc_get_client_conn(call, cp, srx, gfp); 697 ret = rxrpc_get_client_conn(call, cp, srx, gfp);
@@ -752,6 +757,18 @@ void rxrpc_expose_client_call(struct rxrpc_call *call)
752} 757}
753 758
754/* 759/*
760 * Set the reap timer.
761 */
762static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
763{
764 unsigned long now = jiffies;
765 unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
766
767 if (rxnet->live)
768 timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
769}
770
771/*
755 * Disconnect a client call. 772 * Disconnect a client call.
756 */ 773 */
757void rxrpc_disconnect_client_call(struct rxrpc_call *call) 774void rxrpc_disconnect_client_call(struct rxrpc_call *call)
@@ -813,6 +830,19 @@ void rxrpc_disconnect_client_call(struct rxrpc_call *call)
813 goto out_2; 830 goto out_2;
814 } 831 }
815 832
833 /* Schedule the final ACK to be transmitted in a short while so that it
834 * can be skipped if we find a follow-on call. The first DATA packet
835 * of the follow on call will implicitly ACK this call.
836 */
837 if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
838 unsigned long final_ack_at = jiffies + 2;
839
840 WRITE_ONCE(chan->final_ack_at, final_ack_at);
841 smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
842 set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
843 rxrpc_reduce_conn_timer(conn, final_ack_at);
844 }
845
816 /* Things are more complex and we need the cache lock. We might be 846 /* Things are more complex and we need the cache lock. We might be
817 * able to simply idle the conn or it might now be lurking on the wait 847 * able to simply idle the conn or it might now be lurking on the wait
818 * list. It might even get moved back to the active list whilst we're 848 * list. It might even get moved back to the active list whilst we're
@@ -878,9 +908,7 @@ idle_connection:
878 list_move_tail(&conn->cache_link, &rxnet->idle_client_conns); 908 list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
879 if (rxnet->idle_client_conns.next == &conn->cache_link && 909 if (rxnet->idle_client_conns.next == &conn->cache_link &&
880 !rxnet->kill_all_client_conns) 910 !rxnet->kill_all_client_conns)
881 queue_delayed_work(rxrpc_workqueue, 911 rxrpc_set_client_reap_timer(rxnet);
882 &rxnet->client_conn_reaper,
883 rxrpc_conn_idle_client_expiry);
884 } else { 912 } else {
885 trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive); 913 trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
886 conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; 914 conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
@@ -1018,8 +1046,7 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
1018{ 1046{
1019 struct rxrpc_connection *conn; 1047 struct rxrpc_connection *conn;
1020 struct rxrpc_net *rxnet = 1048 struct rxrpc_net *rxnet =
1021 container_of(to_delayed_work(work), 1049 container_of(work, struct rxrpc_net, client_conn_reaper);
1022 struct rxrpc_net, client_conn_reaper);
1023 unsigned long expiry, conn_expires_at, now; 1050 unsigned long expiry, conn_expires_at, now;
1024 unsigned int nr_conns; 1051 unsigned int nr_conns;
1025 bool did_discard = false; 1052 bool did_discard = false;
@@ -1061,6 +1088,8 @@ next:
1061 expiry = rxrpc_conn_idle_client_expiry; 1088 expiry = rxrpc_conn_idle_client_expiry;
1062 if (nr_conns > rxrpc_reap_client_connections) 1089 if (nr_conns > rxrpc_reap_client_connections)
1063 expiry = rxrpc_conn_idle_client_fast_expiry; 1090 expiry = rxrpc_conn_idle_client_fast_expiry;
1091 if (conn->params.local->service_closed)
1092 expiry = rxrpc_closed_conn_expiry * HZ;
1064 1093
1065 conn_expires_at = conn->idle_timestamp + expiry; 1094 conn_expires_at = conn->idle_timestamp + expiry;
1066 1095
@@ -1096,9 +1125,8 @@ not_yet_expired:
1096 */ 1125 */
1097 _debug("not yet"); 1126 _debug("not yet");
1098 if (!rxnet->kill_all_client_conns) 1127 if (!rxnet->kill_all_client_conns)
1099 queue_delayed_work(rxrpc_workqueue, 1128 timer_reduce(&rxnet->client_conn_reap_timer,
1100 &rxnet->client_conn_reaper, 1129 conn_expires_at);
1101 conn_expires_at - now);
1102 1130
1103out: 1131out:
1104 spin_unlock(&rxnet->client_conn_cache_lock); 1132 spin_unlock(&rxnet->client_conn_cache_lock);
@@ -1118,9 +1146,9 @@ void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
1118 rxnet->kill_all_client_conns = true; 1146 rxnet->kill_all_client_conns = true;
1119 spin_unlock(&rxnet->client_conn_cache_lock); 1147 spin_unlock(&rxnet->client_conn_cache_lock);
1120 1148
1121 cancel_delayed_work(&rxnet->client_conn_reaper); 1149 del_timer_sync(&rxnet->client_conn_reap_timer);
1122 1150
1123 if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0)) 1151 if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
1124 _debug("destroy: queue failed"); 1152 _debug("destroy: queue failed");
1125 1153
1126 _leave(""); 1154 _leave("");
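The new rxrpc_set_client_reap_timer() leans on timer_reduce(), which re-arms a timer only if the proposed expiry is earlier than the one already pending, so many call sites can each propose a deadline without ever pushing the timer back. A userspace sketch of that "only move earlier" rule (struct my_timer and my_timer_reduce() are stand-ins, not the kernel API):

    #include <stdio.h>

    #define time_before(a, b) ((long)((a) - (b)) < 0)

    struct my_timer {
        int pending;
        unsigned long expires;
    };

    /* Mirror timer_reduce(): arm if idle, otherwise only pull earlier. */
    static void my_timer_reduce(struct my_timer *t, unsigned long expires)
    {
        if (!t->pending || time_before(expires, t->expires)) {
            t->expires = expires;
            t->pending = 1;
        }
    }

    int main(void)
    {
        struct my_timer t = { 0, 0 };

        my_timer_reduce(&t, 500);   /* arms at 500 */
        my_timer_reduce(&t, 800);   /* ignored: later than 500 */
        my_timer_reduce(&t, 300);   /* pulled earlier, to 300 */
        printf("expires=%lu\n", t.expires); /* 300 */
        return 0;
    }

These semantics are what let the reap and final-ACK paths call the helper from hot paths without worrying about cancelling a later expiry someone else still needs.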
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 59a51a56e7c8..9e9a8db1bc9c 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -24,9 +24,10 @@
24 * Retransmit terminal ACK or ABORT of the previous call. 24 * Retransmit terminal ACK or ABORT of the previous call.
25 */ 25 */
26static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, 26static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
27 struct sk_buff *skb) 27 struct sk_buff *skb,
28 unsigned int channel)
28{ 29{
29 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 30 struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
30 struct rxrpc_channel *chan; 31 struct rxrpc_channel *chan;
31 struct msghdr msg; 32 struct msghdr msg;
32 struct kvec iov; 33 struct kvec iov;
@@ -48,7 +49,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
48 49
49 _enter("%d", conn->debug_id); 50 _enter("%d", conn->debug_id);
50 51
51 chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK]; 52 chan = &conn->channels[channel];
52 53
53 /* If the last call got moved on whilst we were waiting to run, just 54 /* If the last call got moved on whilst we were waiting to run, just
54 * ignore this packet. 55 * ignore this packet.
@@ -56,7 +57,7 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
56 call_id = READ_ONCE(chan->last_call); 57 call_id = READ_ONCE(chan->last_call);
57 /* Sync with __rxrpc_disconnect_call() */ 58 /* Sync with __rxrpc_disconnect_call() */
58 smp_rmb(); 59 smp_rmb();
59 if (call_id != sp->hdr.callNumber) 60 if (skb && call_id != sp->hdr.callNumber)
60 return; 61 return;
61 62
62 msg.msg_name = &conn->params.peer->srx.transport; 63 msg.msg_name = &conn->params.peer->srx.transport;
@@ -65,9 +66,9 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
65 msg.msg_controllen = 0; 66 msg.msg_controllen = 0;
66 msg.msg_flags = 0; 67 msg.msg_flags = 0;
67 68
68 pkt.whdr.epoch = htonl(sp->hdr.epoch); 69 pkt.whdr.epoch = htonl(conn->proto.epoch);
69 pkt.whdr.cid = htonl(sp->hdr.cid); 70 pkt.whdr.cid = htonl(conn->proto.cid);
70 pkt.whdr.callNumber = htonl(sp->hdr.callNumber); 71 pkt.whdr.callNumber = htonl(call_id);
71 pkt.whdr.seq = 0; 72 pkt.whdr.seq = 0;
72 pkt.whdr.type = chan->last_type; 73 pkt.whdr.type = chan->last_type;
73 pkt.whdr.flags = conn->out_clientflag; 74 pkt.whdr.flags = conn->out_clientflag;
@@ -87,11 +88,11 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
87 mtu = conn->params.peer->if_mtu; 88 mtu = conn->params.peer->if_mtu;
88 mtu -= conn->params.peer->hdrsize; 89 mtu -= conn->params.peer->hdrsize;
89 pkt.ack.bufferSpace = 0; 90 pkt.ack.bufferSpace = 0;
90 pkt.ack.maxSkew = htons(skb->priority); 91 pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
91 pkt.ack.firstPacket = htonl(chan->last_seq); 92 pkt.ack.firstPacket = htonl(chan->last_seq + 1);
92 pkt.ack.previousPacket = htonl(chan->last_seq - 1); 93 pkt.ack.previousPacket = htonl(chan->last_seq);
93 pkt.ack.serial = htonl(sp->hdr.serial); 94 pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
94 pkt.ack.reason = RXRPC_ACK_DUPLICATE; 95 pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
95 pkt.ack.nAcks = 0; 96 pkt.ack.nAcks = 0;
96 pkt.info.rxMTU = htonl(rxrpc_rx_mtu); 97 pkt.info.rxMTU = htonl(rxrpc_rx_mtu);
97 pkt.info.maxMTU = htonl(mtu); 98 pkt.info.maxMTU = htonl(mtu);
@@ -272,7 +273,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
272 switch (sp->hdr.type) { 273 switch (sp->hdr.type) {
273 case RXRPC_PACKET_TYPE_DATA: 274 case RXRPC_PACKET_TYPE_DATA:
274 case RXRPC_PACKET_TYPE_ACK: 275 case RXRPC_PACKET_TYPE_ACK:
275 rxrpc_conn_retransmit_call(conn, skb); 276 rxrpc_conn_retransmit_call(conn, skb,
277 sp->hdr.cid & RXRPC_CHANNELMASK);
276 return 0; 278 return 0;
277 279
278 case RXRPC_PACKET_TYPE_BUSY: 280 case RXRPC_PACKET_TYPE_BUSY:
@@ -379,6 +381,48 @@ abort:
379} 381}
380 382
381/* 383/*
384 * Process delayed final ACKs that we haven't subsumed into a subsequent call.
385 */
386static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
387{
388 unsigned long j = jiffies, next_j;
389 unsigned int channel;
390 bool set;
391
392again:
393 next_j = j + LONG_MAX;
394 set = false;
395 for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
396 struct rxrpc_channel *chan = &conn->channels[channel];
397 unsigned long ack_at;
398
399 if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
400 continue;
401
402 smp_rmb(); /* vs rxrpc_disconnect_client_call */
403 ack_at = READ_ONCE(chan->final_ack_at);
404
405 if (time_before(j, ack_at)) {
406 if (time_before(ack_at, next_j)) {
407 next_j = ack_at;
408 set = true;
409 }
410 continue;
411 }
412
413 if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
414 &conn->flags))
415 rxrpc_conn_retransmit_call(conn, NULL, channel);
416 }
417
418 j = jiffies;
419 if (time_before_eq(next_j, j))
420 goto again;
421 if (set)
422 rxrpc_reduce_conn_timer(conn, next_j);
423}
424
425/*
382 * connection-level event processor 426 * connection-level event processor
383 */ 427 */
384void rxrpc_process_connection(struct work_struct *work) 428void rxrpc_process_connection(struct work_struct *work)
@@ -394,6 +438,10 @@ void rxrpc_process_connection(struct work_struct *work)
394 if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events)) 438 if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
395 rxrpc_secure_connection(conn); 439 rxrpc_secure_connection(conn);
396 440
441 /* Process delayed ACKs whose time has come. */
442 if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
443 rxrpc_process_delayed_final_acks(conn);
444
397 /* go through the conn-level event packets, releasing the ref on this 445 /* go through the conn-level event packets, releasing the ref on this
398 * connection that each one has when we've finished with it */ 446 * connection that each one has when we've finished with it */
399 while ((skb = skb_dequeue(&conn->rx_queue))) { 447 while ((skb = skb_dequeue(&conn->rx_queue))) {
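rxrpc_process_delayed_final_acks() above walks the four channel slots, transmits any final ACK whose deadline has passed, remembers the earliest deadline still pending, and loops if jiffies moved past that deadline during the scan. A compact userspace model of the same scan (fire() is a stand-in for rxrpc_conn_retransmit_call(); the flag bits and the j + LONG_MAX sentinel mirror the patch):

    #include <limits.h>
    #include <stdio.h>

    #define time_before(a, b)    ((long)((a) - (b)) < 0)
    #define time_before_eq(a, b) ((long)((a) - (b)) <= 0)
    #define NCHAN 4

    static unsigned long flags;               /* one FINAL_ACK bit per channel */
    static unsigned long final_ack_at[NCHAN]; /* per-channel deadline */

    static void fire(int ch) { printf("final ACK on channel %d\n", ch); }

    static void process_delayed_final_acks(const unsigned long *jiffies)
    {
        unsigned long j = *jiffies, next_j;
        int ch, set;

    again:
        next_j = j + LONG_MAX;
        set = 0;
        for (ch = 0; ch < NCHAN; ch++) {
            if (!(flags & (1UL << ch)))
                continue;
            if (time_before(j, final_ack_at[ch])) {
                if (time_before(final_ack_at[ch], next_j)) {
                    next_j = final_ack_at[ch];
                    set = 1;
                }
                continue;
            }
            flags &= ~(1UL << ch);
            fire(ch);
        }
        j = *jiffies;          /* time may have advanced while scanning */
        if (time_before_eq(next_j, j))
            goto again;
        if (set)
            printf("re-arm conn timer for %lu\n", next_j);
    }

    int main(void)
    {
        unsigned long now = 100;

        flags = 0x5;           /* channels 0 and 2 pending */
        final_ack_at[0] = 90;  /* overdue */
        final_ack_at[2] = 150; /* still in the future */
        process_delayed_final_acks(&now);
        return 0;
    }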
diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
index fe575798592f..1aad04a32d5e 100644
--- a/net/rxrpc/conn_object.c
+++ b/net/rxrpc/conn_object.c
@@ -20,10 +20,19 @@
20/* 20/*
21 * Time till a connection expires after last use (in seconds). 21 * Time till a connection expires after last use (in seconds).
22 */ 22 */
23unsigned int rxrpc_connection_expiry = 10 * 60; 23unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
24unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
24 25
25static void rxrpc_destroy_connection(struct rcu_head *); 26static void rxrpc_destroy_connection(struct rcu_head *);
26 27
28static void rxrpc_connection_timer(struct timer_list *timer)
29{
30 struct rxrpc_connection *conn =
31 container_of(timer, struct rxrpc_connection, timer);
32
33 rxrpc_queue_conn(conn);
34}
35
27/* 36/*
28 * allocate a new connection 37 * allocate a new connection
29 */ 38 */
@@ -38,6 +47,7 @@ struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
38 INIT_LIST_HEAD(&conn->cache_link); 47 INIT_LIST_HEAD(&conn->cache_link);
39 spin_lock_init(&conn->channel_lock); 48 spin_lock_init(&conn->channel_lock);
40 INIT_LIST_HEAD(&conn->waiting_calls); 49 INIT_LIST_HEAD(&conn->waiting_calls);
50 timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
41 INIT_WORK(&conn->processor, &rxrpc_process_connection); 51 INIT_WORK(&conn->processor, &rxrpc_process_connection);
42 INIT_LIST_HEAD(&conn->proc_link); 52 INIT_LIST_HEAD(&conn->proc_link);
43 INIT_LIST_HEAD(&conn->link); 53 INIT_LIST_HEAD(&conn->link);
@@ -301,21 +311,29 @@ rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
301} 311}
302 312
303/* 313/*
314 * Set the service connection reap timer.
315 */
316static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
317 unsigned long reap_at)
318{
319 if (rxnet->live)
320 timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
321}
322
323/*
304 * Release a service connection 324 * Release a service connection
305 */ 325 */
306void rxrpc_put_service_conn(struct rxrpc_connection *conn) 326void rxrpc_put_service_conn(struct rxrpc_connection *conn)
307{ 327{
308 struct rxrpc_net *rxnet;
309 const void *here = __builtin_return_address(0); 328 const void *here = __builtin_return_address(0);
310 int n; 329 int n;
311 330
312 n = atomic_dec_return(&conn->usage); 331 n = atomic_dec_return(&conn->usage);
313 trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here); 332 trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
314 ASSERTCMP(n, >=, 0); 333 ASSERTCMP(n, >=, 0);
315 if (n == 0) { 334 if (n == 1)
316 rxnet = conn->params.local->rxnet; 335 rxrpc_set_service_reap_timer(conn->params.local->rxnet,
317 rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0); 336 jiffies + rxrpc_connection_expiry);
318 }
319} 337}
320 338
321/* 339/*
@@ -332,6 +350,7 @@ static void rxrpc_destroy_connection(struct rcu_head *rcu)
332 350
333 _net("DESTROY CONN %d", conn->debug_id); 351 _net("DESTROY CONN %d", conn->debug_id);
334 352
353 del_timer_sync(&conn->timer);
335 rxrpc_purge_queue(&conn->rx_queue); 354 rxrpc_purge_queue(&conn->rx_queue);
336 355
337 conn->security->clear(conn); 356 conn->security->clear(conn);
@@ -351,17 +370,15 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
351{ 370{
352 struct rxrpc_connection *conn, *_p; 371 struct rxrpc_connection *conn, *_p;
353 struct rxrpc_net *rxnet = 372 struct rxrpc_net *rxnet =
354 container_of(to_delayed_work(work), 373 container_of(work, struct rxrpc_net, service_conn_reaper);
355 struct rxrpc_net, service_conn_reaper); 374 unsigned long expire_at, earliest, idle_timestamp, now;
356 unsigned long reap_older_than, earliest, idle_timestamp, now;
357 375
358 LIST_HEAD(graveyard); 376 LIST_HEAD(graveyard);
359 377
360 _enter(""); 378 _enter("");
361 379
362 now = jiffies; 380 now = jiffies;
363 reap_older_than = now - rxrpc_connection_expiry * HZ; 381 earliest = now + MAX_JIFFY_OFFSET;
364 earliest = ULONG_MAX;
365 382
366 write_lock(&rxnet->conn_lock); 383 write_lock(&rxnet->conn_lock);
367 list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) { 384 list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
@@ -371,15 +388,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
371 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) 388 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
372 continue; 389 continue;
373 390
374 idle_timestamp = READ_ONCE(conn->idle_timestamp); 391 if (rxnet->live) {
375 _debug("reap CONN %d { u=%d,t=%ld }", 392 idle_timestamp = READ_ONCE(conn->idle_timestamp);
376 conn->debug_id, atomic_read(&conn->usage), 393 expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
377 (long)reap_older_than - (long)idle_timestamp); 394 if (conn->params.local->service_closed)
378 395 expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
379 if (time_after(idle_timestamp, reap_older_than)) { 396
380 if (time_before(idle_timestamp, earliest)) 397 _debug("reap CONN %d { u=%d,t=%ld }",
381 earliest = idle_timestamp; 398 conn->debug_id, atomic_read(&conn->usage),
382 continue; 399 (long)expire_at - (long)now);
400
401 if (time_before(now, expire_at)) {
402 if (time_before(expire_at, earliest))
403 earliest = expire_at;
404 continue;
405 }
383 } 406 }
384 407
385 /* The usage count sits at 1 whilst the object is unused on the 408 /* The usage count sits at 1 whilst the object is unused on the
@@ -387,6 +410,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
387 */ 410 */
388 if (atomic_cmpxchg(&conn->usage, 1, 0) != 1) 411 if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
389 continue; 412 continue;
413 trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
390 414
391 if (rxrpc_conn_is_client(conn)) 415 if (rxrpc_conn_is_client(conn))
392 BUG(); 416 BUG();
@@ -397,11 +421,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
397 } 421 }
398 write_unlock(&rxnet->conn_lock); 422 write_unlock(&rxnet->conn_lock);
399 423
400 if (earliest != ULONG_MAX) { 424 if (earliest != now + MAX_JIFFY_OFFSET) {
401 _debug("reschedule reaper %ld", (long) earliest - now); 425 _debug("reschedule reaper %ld", (long)earliest - (long)now);
402 ASSERT(time_after(earliest, now)); 426 ASSERT(time_after(earliest, now));
403 rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 427 rxrpc_set_service_reap_timer(rxnet, earliest);
404 earliest - now);
405 } 428 }
406 429
407 while (!list_empty(&graveyard)) { 430 while (!list_empty(&graveyard)) {
@@ -429,9 +452,8 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
429 452
430 rxrpc_destroy_all_client_connections(rxnet); 453 rxrpc_destroy_all_client_connections(rxnet);
431 454
432 rxrpc_connection_expiry = 0; 455 del_timer_sync(&rxnet->service_conn_reap_timer);
433 cancel_delayed_work(&rxnet->client_conn_reaper); 456 rxrpc_queue_work(&rxnet->service_conn_reaper);
434 rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0);
435 flush_workqueue(rxrpc_workqueue); 457 flush_workqueue(rxrpc_workqueue);
436 458
437 write_lock(&rxnet->conn_lock); 459 write_lock(&rxnet->conn_lock);
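The reworked service reaper computes each connection's absolute expiry from its idle timestamp, with a much shorter grace period once the local service has been closed, reaps what is due, and re-arms the reap timer for the earliest survivor, using now + MAX_JIFFY_OFFSET as the "nothing pending" sentinel. A small model of that bookkeeping (a fixed array stands in for the connection list):

    #include <limits.h>
    #include <stdio.h>

    #define MAX_JIFFY_OFFSET ((LONG_MAX >> 1) - 1)
    #define time_before(a, b) ((long)((a) - (b)) < 0)

    struct conn {
        unsigned long idle_timestamp;
        int service_closed;
        int reaped;
    };

    int main(void)
    {
        struct conn conns[] = {
            { .idle_timestamp = 100 },
            { .idle_timestamp = 900 },
            { .idle_timestamp = 100, .service_closed = 1 },
        };
        unsigned long now = 700, expiry_norm = 600, expiry_closed = 10;
        unsigned long earliest = now + MAX_JIFFY_OFFSET;
        unsigned int i;

        for (i = 0; i < 3; i++) {
            unsigned long expire_at = conns[i].idle_timestamp +
                (conns[i].service_closed ? expiry_closed : expiry_norm);

            if (time_before(now, expire_at)) {
                if (time_before(expire_at, earliest))
                    earliest = expire_at;   /* keep; remember its deadline */
                continue;
            }
            conns[i].reaped = 1;            /* overdue: reap it */
        }

        if (earliest != now + MAX_JIFFY_OFFSET)
            printf("re-arm reap timer for %lu\n", earliest);
        for (i = 0; i < 3; i++)
            printf("conn %u %s\n", i, conns[i].reaped ? "reaped" : "kept");
        return 0;
    }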
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 1b592073ec96..23a5e61d8f79 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -318,16 +318,18 @@ bad_state:
318static bool rxrpc_receiving_reply(struct rxrpc_call *call) 318static bool rxrpc_receiving_reply(struct rxrpc_call *call)
319{ 319{
320 struct rxrpc_ack_summary summary = { 0 }; 320 struct rxrpc_ack_summary summary = { 0 };
321 unsigned long now, timo;
321 rxrpc_seq_t top = READ_ONCE(call->tx_top); 322 rxrpc_seq_t top = READ_ONCE(call->tx_top);
322 323
323 if (call->ackr_reason) { 324 if (call->ackr_reason) {
324 spin_lock_bh(&call->lock); 325 spin_lock_bh(&call->lock);
325 call->ackr_reason = 0; 326 call->ackr_reason = 0;
326 call->resend_at = call->expire_at;
327 call->ack_at = call->expire_at;
328 spin_unlock_bh(&call->lock); 327 spin_unlock_bh(&call->lock);
329 rxrpc_set_timer(call, rxrpc_timer_init_for_reply, 328 now = jiffies;
330 ktime_get_real()); 329 timo = now + MAX_JIFFY_OFFSET;
330 WRITE_ONCE(call->resend_at, timo);
331 WRITE_ONCE(call->ack_at, timo);
332 trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
331 } 333 }
332 334
333 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) 335 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
@@ -437,6 +439,19 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
437 if (state >= RXRPC_CALL_COMPLETE) 439 if (state >= RXRPC_CALL_COMPLETE)
438 return; 440 return;
439 441
442 if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
443 unsigned long timo = READ_ONCE(call->next_req_timo);
444 unsigned long now, expect_req_by;
445
446 if (timo) {
447 now = jiffies;
448 expect_req_by = now + timo;
449 WRITE_ONCE(call->expect_req_by, expect_req_by);
450 rxrpc_reduce_call_timer(call, expect_req_by, now,
451 rxrpc_timer_set_for_idle);
452 }
453 }
454
440 /* Received data implicitly ACKs all of the request packets we sent 455 /* Received data implicitly ACKs all of the request packets we sent
441 * when we're acting as a client. 456 * when we're acting as a client.
442 */ 457 */
@@ -616,6 +631,43 @@ found:
616} 631}
617 632
618/* 633/*
634 * Process the response to a ping that we sent to find out if we lost an ACK.
635 *
636 * If we got back a ping response that indicates a lower tx_top than what we
637 * had at the time of the ping transmission, we adjudge all the DATA packets
638 * sent between the response tx_top and the ping-time tx_top to have been lost.
639 */
640static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
641{
642 rxrpc_seq_t top, bottom, seq;
643 bool resend = false;
644
645 spin_lock_bh(&call->lock);
646
647 bottom = call->tx_hard_ack + 1;
648 top = call->acks_lost_top;
649 if (before(bottom, top)) {
650 for (seq = bottom; before_eq(seq, top); seq++) {
651 int ix = seq & RXRPC_RXTX_BUFF_MASK;
652 u8 annotation = call->rxtx_annotations[ix];
653 u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;
654
655 if (anno_type != RXRPC_TX_ANNO_UNACK)
656 continue;
657 annotation &= ~RXRPC_TX_ANNO_MASK;
658 annotation |= RXRPC_TX_ANNO_RETRANS;
659 call->rxtx_annotations[ix] = annotation;
660 resend = true;
661 }
662 }
663
664 spin_unlock_bh(&call->lock);
665
666 if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
667 rxrpc_queue_call(call);
668}
669
670/*
619 * Process a ping response. 671 * Process a ping response.
620 */ 672 */
621static void rxrpc_input_ping_response(struct rxrpc_call *call, 673static void rxrpc_input_ping_response(struct rxrpc_call *call,
@@ -630,6 +682,9 @@ static void rxrpc_input_ping_response(struct rxrpc_call *call,
630 smp_rmb(); 682 smp_rmb();
631 ping_serial = call->ping_serial; 683 ping_serial = call->ping_serial;
632 684
685 if (orig_serial == call->acks_lost_ping)
686 rxrpc_input_check_for_lost_ack(call);
687
633 if (!test_bit(RXRPC_CALL_PINGING, &call->flags) || 688 if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
634 before(orig_serial, ping_serial)) 689 before(orig_serial, ping_serial))
635 return; 690 return;
@@ -908,9 +963,20 @@ static void rxrpc_input_call_packet(struct rxrpc_call *call,
908 struct sk_buff *skb, u16 skew) 963 struct sk_buff *skb, u16 skew)
909{ 964{
910 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 965 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
966 unsigned long timo;
911 967
912 _enter("%p,%p", call, skb); 968 _enter("%p,%p", call, skb);
913 969
970 timo = READ_ONCE(call->next_rx_timo);
971 if (timo) {
972 unsigned long now = jiffies, expect_rx_by;
973
974 expect_rx_by = jiffies + timo;
975 WRITE_ONCE(call->expect_rx_by, expect_rx_by);
976 rxrpc_reduce_call_timer(call, expect_rx_by, now,
977 rxrpc_timer_set_for_normal);
978 }
979
914 switch (sp->hdr.type) { 980 switch (sp->hdr.type) {
915 case RXRPC_PACKET_TYPE_DATA: 981 case RXRPC_PACKET_TYPE_DATA:
916 rxrpc_input_data(call, skb, skew); 982 rxrpc_input_data(call, skb, skew);
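Every incoming call packet now pushes the receive-inactivity deadline out by next_rx_timo and proposes it to the timer, so expect_rx_by only ever fires if the peer actually goes quiet. A sketch of that sliding-deadline idea (reduce_timer() follows the same "only move earlier" rule as timer_reduce(); the names are stand-ins):

    #include <stdio.h>

    #define time_before(a, b) ((long)((a) - (b)) < 0)

    static unsigned long timer_expires = ~0UL >> 1;  /* far future */

    static void reduce_timer(unsigned long expires)
    {
        if (time_before(expires, timer_expires))
            timer_expires = expires;
    }

    struct call {
        unsigned long next_rx_timo;  /* relative, e.g. 20 * HZ */
        unsigned long expect_rx_by;  /* absolute deadline */
    };

    /* Called for every packet that arrives on the call. */
    static void on_packet(struct call *call, unsigned long now)
    {
        if (call->next_rx_timo) {
            call->expect_rx_by = now + call->next_rx_timo;
            reduce_timer(call->expect_rx_by);
        }
    }

    int main(void)
    {
        struct call c = { .next_rx_timo = 200 };

        on_packet(&c, 100);  /* deadline 300 */
        on_packet(&c, 250);  /* deadline slides to 450 */

        /* The armed timer may still fire at 300; that's harmless, because
         * the expiry handler re-reads the stored deadline and just re-arms. */
        printf("expect_rx_by=%lu timer=%lu\n", c.expect_rx_by, timer_expires);
        return 0;
    }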
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 1a2d4b112064..c1d9e7fd7448 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -21,33 +21,28 @@
21unsigned int rxrpc_max_backlog __read_mostly = 10; 21unsigned int rxrpc_max_backlog __read_mostly = 10;
22 22
23/* 23/*
24 * Maximum lifetime of a call (in ms).
25 */
26unsigned int rxrpc_max_call_lifetime = 60 * 1000;
27
28/*
29 * How long to wait before scheduling ACK generation after seeing a 24 * How long to wait before scheduling ACK generation after seeing a
30 * packet with RXRPC_REQUEST_ACK set (in ms). 25 * packet with RXRPC_REQUEST_ACK set (in jiffies).
31 */ 26 */
32unsigned int rxrpc_requested_ack_delay = 1; 27unsigned long rxrpc_requested_ack_delay = 1;
33 28
34/* 29/*
35 * How long to wait before scheduling an ACK with subtype DELAY (in ms). 30 * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
36 * 31 *
37 * We use this when we've received new data packets. If those packets aren't 32 * We use this when we've received new data packets. If those packets aren't
38 * all consumed within this time we will send a DELAY ACK if an ACK was not 33 * all consumed within this time we will send a DELAY ACK if an ACK was not
39 * requested to let the sender know it doesn't need to resend. 34 * requested to let the sender know it doesn't need to resend.
40 */ 35 */
41unsigned int rxrpc_soft_ack_delay = 1 * 1000; 36unsigned long rxrpc_soft_ack_delay = HZ;
42 37
43/* 38/*
44 * How long to wait before scheduling an ACK with subtype IDLE (in ms). 39 * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
45 * 40 *
46 * We use this when we've consumed some previously soft-ACK'd packets when 41 * We use this when we've consumed some previously soft-ACK'd packets when
47 * further packets aren't immediately received to decide when to send an IDLE 42 * further packets aren't immediately received to decide when to send an IDLE
48 * ACK to let the other end know that it can free up its Tx buffer space. 43
49 */ 44 */
50unsigned int rxrpc_idle_ack_delay = 0.5 * 1000; 45unsigned long rxrpc_idle_ack_delay = HZ / 2;
51 46
52/* 47/*
53 * Receive window size in packets. This indicates the maximum number of 48 * Receive window size in packets. This indicates the maximum number of
@@ -75,7 +70,7 @@ unsigned int rxrpc_rx_jumbo_max = 4;
75/* 70/*
76 * Time till packet resend (in milliseconds). 71 * Time till packet resend (in milliseconds).
77 */ 72 */
78unsigned int rxrpc_resend_timeout = 4 * 1000; 73unsigned long rxrpc_resend_timeout = 4 * HZ;
79 74
80const s8 rxrpc_ack_priority[] = { 75const s8 rxrpc_ack_priority[] = {
81 [0] = 0, 76 [0] = 0,
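With these tunables now held in jiffies, the defaults are written in terms of HZ rather than millisecond counts, since HZ jiffies equal one second regardless of the configured tick rate. A throwaway demo of the conversion the new defaults encode (msecs_to_jiffies_demo is a simplified stand-in for the kernel helper, HZ fixed at 250 purely for the demo):

    #include <stdio.h>

    #define HZ 250  /* demo value; real kernels configure 100..1000 */

    static unsigned long msecs_to_jiffies_demo(unsigned long ms)
    {
        return (ms * HZ + 999) / 1000;  /* round up, as the kernel helper does */
    }

    int main(void)
    {
        printf("1000 ms -> %lu jiffies (HZ)\n",   msecs_to_jiffies_demo(1000));
        printf(" 500 ms -> %lu jiffies (HZ/2)\n", msecs_to_jiffies_demo(500));
        printf("4000 ms -> %lu jiffies (4*HZ)\n", msecs_to_jiffies_demo(4000));
        return 0;
    }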
diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
index 7edceb8522f5..f18c9248e0d4 100644
--- a/net/rxrpc/net_ns.c
+++ b/net/rxrpc/net_ns.c
@@ -14,6 +14,24 @@
14 14
15unsigned int rxrpc_net_id; 15unsigned int rxrpc_net_id;
16 16
17static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
18{
19 struct rxrpc_net *rxnet =
20 container_of(timer, struct rxrpc_net, client_conn_reap_timer);
21
22 if (rxnet->live)
23 rxrpc_queue_work(&rxnet->client_conn_reaper);
24}
25
26static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
27{
28 struct rxrpc_net *rxnet =
29 container_of(timer, struct rxrpc_net, service_conn_reap_timer);
30
31 if (rxnet->live)
32 rxrpc_queue_work(&rxnet->service_conn_reaper);
33}
34
17/* 35/*
18 * Initialise a per-network namespace record. 36 * Initialise a per-network namespace record.
19 */ 37 */
@@ -22,6 +40,7 @@ static __net_init int rxrpc_init_net(struct net *net)
22 struct rxrpc_net *rxnet = rxrpc_net(net); 40 struct rxrpc_net *rxnet = rxrpc_net(net);
23 int ret; 41 int ret;
24 42
43 rxnet->live = true;
25 get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch)); 44 get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
26 rxnet->epoch |= RXRPC_RANDOM_EPOCH; 45 rxnet->epoch |= RXRPC_RANDOM_EPOCH;
27 46
@@ -31,8 +50,10 @@ static __net_init int rxrpc_init_net(struct net *net)
31 INIT_LIST_HEAD(&rxnet->conn_proc_list); 50 INIT_LIST_HEAD(&rxnet->conn_proc_list);
32 INIT_LIST_HEAD(&rxnet->service_conns); 51 INIT_LIST_HEAD(&rxnet->service_conns);
33 rwlock_init(&rxnet->conn_lock); 52 rwlock_init(&rxnet->conn_lock);
34 INIT_DELAYED_WORK(&rxnet->service_conn_reaper, 53 INIT_WORK(&rxnet->service_conn_reaper,
35 rxrpc_service_connection_reaper); 54 rxrpc_service_connection_reaper);
55 timer_setup(&rxnet->service_conn_reap_timer,
56 rxrpc_service_conn_reap_timeout, 0);
36 57
37 rxnet->nr_client_conns = 0; 58 rxnet->nr_client_conns = 0;
38 rxnet->nr_active_client_conns = 0; 59 rxnet->nr_active_client_conns = 0;
@@ -42,8 +63,10 @@ static __net_init int rxrpc_init_net(struct net *net)
42 INIT_LIST_HEAD(&rxnet->waiting_client_conns); 63 INIT_LIST_HEAD(&rxnet->waiting_client_conns);
43 INIT_LIST_HEAD(&rxnet->active_client_conns); 64 INIT_LIST_HEAD(&rxnet->active_client_conns);
44 INIT_LIST_HEAD(&rxnet->idle_client_conns); 65 INIT_LIST_HEAD(&rxnet->idle_client_conns);
45 INIT_DELAYED_WORK(&rxnet->client_conn_reaper, 66 INIT_WORK(&rxnet->client_conn_reaper,
46 rxrpc_discard_expired_client_conns); 67 rxrpc_discard_expired_client_conns);
68 timer_setup(&rxnet->client_conn_reap_timer,
69 rxrpc_client_conn_reap_timeout, 0);
47 70
48 INIT_LIST_HEAD(&rxnet->local_endpoints); 71 INIT_LIST_HEAD(&rxnet->local_endpoints);
49 mutex_init(&rxnet->local_mutex); 72 mutex_init(&rxnet->local_mutex);
@@ -60,6 +83,7 @@ static __net_init int rxrpc_init_net(struct net *net)
60 return 0; 83 return 0;
61 84
62err_proc: 85err_proc:
86 rxnet->live = false;
63 return ret; 87 return ret;
64} 88}
65 89
@@ -70,6 +94,7 @@ static __net_exit void rxrpc_exit_net(struct net *net)
70{ 94{
71 struct rxrpc_net *rxnet = rxrpc_net(net); 95 struct rxrpc_net *rxnet = rxrpc_net(net);
72 96
97 rxnet->live = false;
73 rxrpc_destroy_all_calls(rxnet); 98 rxrpc_destroy_all_calls(rxnet);
74 rxrpc_destroy_all_connections(rxnet); 99 rxrpc_destroy_all_connections(rxnet);
75 rxrpc_destroy_all_locals(rxnet); 100 rxrpc_destroy_all_locals(rxnet);
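The per-namespace reapers are now driven by a bare timer whose handler only queues the work item, and both the handler and the arming paths check rxnet->live so a dying namespace cannot re-queue work after del_timer_sync(). A userspace sketch of the container_of() trampoline shape (queue_reap_work() fakes queue_work() with a printf):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer { unsigned long expires; };

    struct rxnet {
        int live;
        struct timer client_conn_reap_timer;
    };

    static void queue_reap_work(struct rxnet *rxnet)
    {
        printf("queue client conn reaper (live=%d)\n", rxnet->live);
    }

    /* Timer handler: recover the owning rxnet, punt the real work elsewhere. */
    static void client_conn_reap_timeout(struct timer *t)
    {
        struct rxnet *rxnet = container_of(t, struct rxnet,
                                           client_conn_reap_timer);

        if (rxnet->live)
            queue_reap_work(rxnet);
    }

    int main(void)
    {
        struct rxnet net = { .live = 1 };

        client_conn_reap_timeout(&net.client_conn_reap_timer); /* queues */
        net.live = 0;
        client_conn_reap_timeout(&net.client_conn_reap_timer); /* no-op */
        return 0;
    }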
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c
index f47659c7b224..42410e910aff 100644
--- a/net/rxrpc/output.c
+++ b/net/rxrpc/output.c
@@ -33,6 +33,24 @@ struct rxrpc_abort_buffer {
33}; 33};
34 34
35/* 35/*
36 * Arrange for a keepalive ping a certain time after we last transmitted. This
37 * lets the far side know we're still interested in this call and helps keep
38 * the route through any intervening firewall open.
39 *
40 * Receiving a response to the ping will prevent the ->expect_rx_by timer from
41 * expiring.
42 */
43static void rxrpc_set_keepalive(struct rxrpc_call *call)
44{
45 unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
46
47 keepalive_at += now;
48 WRITE_ONCE(call->keepalive_at, keepalive_at);
49 rxrpc_reduce_call_timer(call, keepalive_at, now,
50 rxrpc_timer_set_for_keepalive);
51}
52
53/*
36 * Fill out an ACK packet. 54 * Fill out an ACK packet.
37 */ 55 */
38static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn, 56static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
@@ -95,7 +113,8 @@ static size_t rxrpc_fill_out_ack(struct rxrpc_connection *conn,
95/* 113/*
96 * Send an ACK call packet. 114 * Send an ACK call packet.
97 */ 115 */
98int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping) 116int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
117 rxrpc_serial_t *_serial)
99{ 118{
100 struct rxrpc_connection *conn = NULL; 119 struct rxrpc_connection *conn = NULL;
101 struct rxrpc_ack_buffer *pkt; 120 struct rxrpc_ack_buffer *pkt;
@@ -165,6 +184,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
165 ntohl(pkt->ack.firstPacket), 184 ntohl(pkt->ack.firstPacket),
166 ntohl(pkt->ack.serial), 185 ntohl(pkt->ack.serial),
167 pkt->ack.reason, pkt->ack.nAcks); 186 pkt->ack.reason, pkt->ack.nAcks);
187 if (_serial)
188 *_serial = serial;
168 189
169 if (ping) { 190 if (ping) {
170 call->ping_serial = serial; 191 call->ping_serial = serial;
@@ -202,6 +223,8 @@ int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
202 call->ackr_seen = top; 223 call->ackr_seen = top;
203 spin_unlock_bh(&call->lock); 224 spin_unlock_bh(&call->lock);
204 } 225 }
226
227 rxrpc_set_keepalive(call);
205 } 228 }
206 229
207out: 230out:
@@ -323,7 +346,8 @@ int rxrpc_send_data_packet(struct rxrpc_call *call, struct sk_buff *skb,
323 * ACKs if a DATA packet appears to have been lost. 346 * ACKs if a DATA packet appears to have been lost.
324 */ 347 */
325 if (!(sp->hdr.flags & RXRPC_LAST_PACKET) && 348 if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
326 (retrans || 349 (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
350 retrans ||
327 call->cong_mode == RXRPC_CALL_SLOW_START || 351 call->cong_mode == RXRPC_CALL_SLOW_START ||
328 (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) || 352 (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
329 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), 353 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
@@ -370,8 +394,23 @@ done:
370 if (whdr.flags & RXRPC_REQUEST_ACK) { 394 if (whdr.flags & RXRPC_REQUEST_ACK) {
371 call->peer->rtt_last_req = now; 395 call->peer->rtt_last_req = now;
372 trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); 396 trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
397 if (call->peer->rtt_usage > 1) {
398 unsigned long nowj = jiffies, ack_lost_at;
399
400 ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt);
401 if (ack_lost_at < 1)
402 ack_lost_at = 1;
403
404 ack_lost_at += nowj;
405 WRITE_ONCE(call->ack_lost_at, ack_lost_at);
406 rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
407 rxrpc_timer_set_for_lost_ack);
408 }
373 } 409 }
374 } 410 }
411
412 rxrpc_set_keepalive(call);
413
375 _leave(" = %d [%u]", ret, call->peer->maxdata); 414 _leave(" = %d [%u]", ret, call->peer->maxdata);
376 return ret; 415 return ret;
377 416
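rxrpc_set_keepalive() arms a ping at one sixth of the receive-inactivity timeout after every transmission, so several keepalives can be attempted before expect_rx_by would expire; rxrpc_send_data_packet() similarly proposes an ACK-lost check at roughly twice the measured RTT via nsecs_to_jiffies(). A tiny sketch of the keepalive arithmetic (all values illustrative, HZ assumed 250):

    #include <stdio.h>

    int main(void)
    {
        unsigned long next_rx_timo = 20 * 250;  /* 20 s at HZ=250 */
        unsigned long now = 1000;
        unsigned long keepalive_at = now + next_rx_timo / 6;

        /* Up to ~6 pings fit before the peer would be declared unresponsive. */
        printf("tx at %lu, ping at %lu, expect_rx_by %lu\n",
               now, keepalive_at, now + next_rx_timo);
        return 0;
    }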
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 8510a98b87e1..cc21e8db25b0 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -144,11 +144,13 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
144 trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top); 144 trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
145 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top); 145 ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
146 146
147#if 0 // TODO: May want to transmit final ACK under some circumstances anyway
147 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) { 148 if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
148 rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false, 149 rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
149 rxrpc_propose_ack_terminal_ack); 150 rxrpc_propose_ack_terminal_ack);
150 rxrpc_send_ack_packet(call, false); 151 rxrpc_send_ack_packet(call, false, NULL);
151 } 152 }
153#endif
152 154
153 write_lock_bh(&call->state_lock); 155 write_lock_bh(&call->state_lock);
154 156
@@ -161,7 +163,7 @@ static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
161 case RXRPC_CALL_SERVER_RECV_REQUEST: 163 case RXRPC_CALL_SERVER_RECV_REQUEST:
162 call->tx_phase = true; 164 call->tx_phase = true;
163 call->state = RXRPC_CALL_SERVER_ACK_REQUEST; 165 call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
164 call->ack_at = call->expire_at; 166 call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
165 write_unlock_bh(&call->state_lock); 167 write_unlock_bh(&call->state_lock);
166 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true, 168 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
167 rxrpc_propose_ack_processing_op); 169 rxrpc_propose_ack_processing_op);
@@ -217,10 +219,10 @@ static void rxrpc_rotate_rx_window(struct rxrpc_call *call)
217 after_eq(top, call->ackr_seen + 2) || 219 after_eq(top, call->ackr_seen + 2) ||
218 (hard_ack == top && after(hard_ack, call->ackr_consumed))) 220 (hard_ack == top && after(hard_ack, call->ackr_consumed)))
219 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, 221 rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
220 true, false, 222 true, true,
221 rxrpc_propose_ack_rotate_rx); 223 rxrpc_propose_ack_rotate_rx);
222 if (call->ackr_reason) 224 if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
223 rxrpc_send_ack_packet(call, false); 225 rxrpc_send_ack_packet(call, false, NULL);
224 } 226 }
225} 227}
226 228
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 7d2595582c09..a1c53ac066a1 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -21,22 +21,6 @@
21#include <net/af_rxrpc.h> 21#include <net/af_rxrpc.h>
22#include "ar-internal.h" 22#include "ar-internal.h"
23 23
24enum rxrpc_command {
25 RXRPC_CMD_SEND_DATA, /* send data message */
26 RXRPC_CMD_SEND_ABORT, /* request abort generation */
27 RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
28 RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
29};
30
31struct rxrpc_send_params {
32 s64 tx_total_len; /* Total Tx data length (if send data) */
33 unsigned long user_call_ID; /* User's call ID */
34 u32 abort_code; /* Abort code to Tx (if abort) */
35 enum rxrpc_command command : 8; /* The command to implement */
36 bool exclusive; /* Shared or exclusive call */
37 bool upgrade; /* If the connection is upgradeable */
38};
39
40/* 24/*
41 * Wait for space to appear in the Tx queue or a signal to occur. 25 * Wait for space to appear in the Tx queue or a signal to occur.
42 */ 26 */
@@ -174,6 +158,7 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
174 rxrpc_notify_end_tx_t notify_end_tx) 158 rxrpc_notify_end_tx_t notify_end_tx)
175{ 159{
176 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 160 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
161 unsigned long now;
177 rxrpc_seq_t seq = sp->hdr.seq; 162 rxrpc_seq_t seq = sp->hdr.seq;
178 int ret, ix; 163 int ret, ix;
179 u8 annotation = RXRPC_TX_ANNO_UNACK; 164 u8 annotation = RXRPC_TX_ANNO_UNACK;
@@ -213,11 +198,11 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
213 break; 198 break;
214 case RXRPC_CALL_SERVER_ACK_REQUEST: 199 case RXRPC_CALL_SERVER_ACK_REQUEST:
215 call->state = RXRPC_CALL_SERVER_SEND_REPLY; 200 call->state = RXRPC_CALL_SERVER_SEND_REPLY;
216 call->ack_at = call->expire_at; 201 now = jiffies;
202 WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
217 if (call->ackr_reason == RXRPC_ACK_DELAY) 203 if (call->ackr_reason == RXRPC_ACK_DELAY)
218 call->ackr_reason = 0; 204 call->ackr_reason = 0;
219 __rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply, 205 trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
220 ktime_get_real());
221 if (!last) 206 if (!last)
222 break; 207 break;
223 /* Fall through */ 208 /* Fall through */
@@ -239,14 +224,19 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
239 _debug("need instant resend %d", ret); 224 _debug("need instant resend %d", ret);
240 rxrpc_instant_resend(call, ix); 225 rxrpc_instant_resend(call, ix);
241 } else { 226 } else {
242 ktime_t now = ktime_get_real(), resend_at; 227 unsigned long now = jiffies, resend_at;
243 228
244 resend_at = ktime_add_ms(now, rxrpc_resend_timeout); 229 if (call->peer->rtt_usage > 1)
245 230 resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2);
246 if (ktime_before(resend_at, call->resend_at)) { 231 else
247 call->resend_at = resend_at; 232 resend_at = rxrpc_resend_timeout;
248 rxrpc_set_timer(call, rxrpc_timer_set_for_send, now); 233 if (resend_at < 1)
249 } 234 resend_at = 1;
235
236 resend_at += now;
237 WRITE_ONCE(call->resend_at, resend_at);
238 rxrpc_reduce_call_timer(call, resend_at, now,
239 rxrpc_timer_set_for_send);
250 } 240 }
251 241
252 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 242 rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
@@ -295,7 +285,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
295 do { 285 do {
296 /* Check to see if there's a ping ACK to reply to. */ 286 /* Check to see if there's a ping ACK to reply to. */
297 if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE) 287 if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
298 rxrpc_send_ack_packet(call, false); 288 rxrpc_send_ack_packet(call, false, NULL);
299 289
300 if (!skb) { 290 if (!skb) {
301 size_t size, chunk, max, space; 291 size_t size, chunk, max, space;
@@ -480,11 +470,11 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
480 if (msg->msg_flags & MSG_CMSG_COMPAT) { 470 if (msg->msg_flags & MSG_CMSG_COMPAT) {
481 if (len != sizeof(u32)) 471 if (len != sizeof(u32))
482 return -EINVAL; 472 return -EINVAL;
483 p->user_call_ID = *(u32 *)CMSG_DATA(cmsg); 473 p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
484 } else { 474 } else {
485 if (len != sizeof(unsigned long)) 475 if (len != sizeof(unsigned long))
486 return -EINVAL; 476 return -EINVAL;
487 p->user_call_ID = *(unsigned long *) 477 p->call.user_call_ID = *(unsigned long *)
488 CMSG_DATA(cmsg); 478 CMSG_DATA(cmsg);
489 } 479 }
490 got_user_ID = true; 480 got_user_ID = true;
@@ -522,11 +512,24 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
522 break; 512 break;
523 513
524 case RXRPC_TX_LENGTH: 514 case RXRPC_TX_LENGTH:
525 if (p->tx_total_len != -1 || len != sizeof(__s64)) 515 if (p->call.tx_total_len != -1 || len != sizeof(__s64))
516 return -EINVAL;
517 p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
518 if (p->call.tx_total_len < 0)
526 return -EINVAL; 519 return -EINVAL;
527 p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg); 520 break;
528 if (p->tx_total_len < 0) 521
522 case RXRPC_SET_CALL_TIMEOUT:
523 if (len & 3 || len < 4 || len > 12)
529 return -EINVAL; 524 return -EINVAL;
525 memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
526 p->call.nr_timeouts = len / 4;
527 if (p->call.timeouts.hard > INT_MAX / HZ)
528 return -ERANGE;
529 if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
530 return -ERANGE;
531 if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
532 return -ERANGE;
530 break; 533 break;
531 534
532 default: 535 default:
@@ -536,7 +539,7 @@ static int rxrpc_sendmsg_cmsg(struct msghdr *msg, struct rxrpc_send_params *p)
536 539
537 if (!got_user_ID) 540 if (!got_user_ID)
538 return -EINVAL; 541 return -EINVAL;
539 if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA) 542 if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
540 return -EINVAL; 543 return -EINVAL;
541 _leave(" = 0"); 544 _leave(" = 0");
542 return 0; 545 return 0;
@@ -576,8 +579,7 @@ rxrpc_new_client_call_for_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg,
576 cp.exclusive = rx->exclusive | p->exclusive; 579 cp.exclusive = rx->exclusive | p->exclusive;
577 cp.upgrade = p->upgrade; 580 cp.upgrade = p->upgrade;
578 cp.service_id = srx->srx_service; 581 cp.service_id = srx->srx_service;
579 call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID, 582 call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL);
580 p->tx_total_len, GFP_KERNEL);
581 /* The socket is now unlocked */ 583 /* The socket is now unlocked */
582 584
583 _leave(" = %p\n", call); 585 _leave(" = %p\n", call);
@@ -594,15 +596,17 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
594{ 596{
595 enum rxrpc_call_state state; 597 enum rxrpc_call_state state;
596 struct rxrpc_call *call; 598 struct rxrpc_call *call;
599 unsigned long now, j;
597 int ret; 600 int ret;
598 601
599 struct rxrpc_send_params p = { 602 struct rxrpc_send_params p = {
600 .tx_total_len = -1, 603 .call.tx_total_len = -1,
601 .user_call_ID = 0, 604 .call.user_call_ID = 0,
602 .abort_code = 0, 605 .call.nr_timeouts = 0,
603 .command = RXRPC_CMD_SEND_DATA, 606 .abort_code = 0,
604 .exclusive = false, 607 .command = RXRPC_CMD_SEND_DATA,
605 .upgrade = true, 608 .exclusive = false,
609 .upgrade = false,
606 }; 610 };
607 611
608 _enter(""); 612 _enter("");
@@ -615,15 +619,15 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
615 ret = -EINVAL; 619 ret = -EINVAL;
616 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) 620 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
617 goto error_release_sock; 621 goto error_release_sock;
618 call = rxrpc_accept_call(rx, p.user_call_ID, NULL); 622 call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
619 /* The socket is now unlocked. */ 623 /* The socket is now unlocked. */
620 if (IS_ERR(call)) 624 if (IS_ERR(call))
621 return PTR_ERR(call); 625 return PTR_ERR(call);
622 rxrpc_put_call(call, rxrpc_call_put); 626 ret = 0;
623 return 0; 627 goto out_put_unlock;
624 } 628 }
625 629
626 call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID); 630 call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
627 if (!call) { 631 if (!call) {
628 ret = -EBADSLT; 632 ret = -EBADSLT;
629 if (p.command != RXRPC_CMD_SEND_DATA) 633 if (p.command != RXRPC_CMD_SEND_DATA)
@@ -653,14 +657,39 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
653 goto error_put; 657 goto error_put;
654 } 658 }
655 659
656 if (p.tx_total_len != -1) { 660 if (p.call.tx_total_len != -1) {
657 ret = -EINVAL; 661 ret = -EINVAL;
658 if (call->tx_total_len != -1 || 662 if (call->tx_total_len != -1 ||
659 call->tx_pending || 663 call->tx_pending ||
660 call->tx_top != 0) 664 call->tx_top != 0)
661 goto error_put; 665 goto error_put;
662 call->tx_total_len = p.tx_total_len; 666 call->tx_total_len = p.call.tx_total_len;
667 }
668 }
669
670 switch (p.call.nr_timeouts) {
671 case 3:
672 j = msecs_to_jiffies(p.call.timeouts.normal);
673 if (p.call.timeouts.normal > 0 && j == 0)
674 j = 1;
675 WRITE_ONCE(call->next_rx_timo, j);
676 /* Fall through */
677 case 2:
678 j = msecs_to_jiffies(p.call.timeouts.idle);
679 if (p.call.timeouts.idle > 0 && j == 0)
680 j = 1;
681 WRITE_ONCE(call->next_req_timo, j);
682 /* Fall through */
683 case 1:
684 if (p.call.timeouts.hard > 0) {
685 j = msecs_to_jiffies(p.call.timeouts.hard);
686 now = jiffies;
687 j += now;
688 WRITE_ONCE(call->expect_term_by, j);
689 rxrpc_reduce_call_timer(call, j, now,
690 rxrpc_timer_set_for_hard);
663 } 691 }
692 break;
664 } 693 }
665 694
666 state = READ_ONCE(call->state); 695 state = READ_ONCE(call->state);
@@ -689,6 +718,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
689 ret = rxrpc_send_data(rx, call, msg, len, NULL); 718 ret = rxrpc_send_data(rx, call, msg, len, NULL);
690 } 719 }
691 720
721out_put_unlock:
692 mutex_unlock(&call->user_mutex); 722 mutex_unlock(&call->user_mutex);
693error_put: 723error_put:
694 rxrpc_put_call(call, rxrpc_call_put); 724 rxrpc_put_call(call, rxrpc_call_put);
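The new RXRPC_SET_CALL_TIMEOUT control message carries one to three 32-bit millisecond values, and the switch in rxrpc_do_sendmsg() applies them with deliberate fall-through so that supplying three values also sets the first two. A userspace model of that parse-and-apply shape (the struct layout follows the patch's checks: hard, then idle, then normal, all in ms; msecs_to_jiffies_demo is the simplified stand-in again, HZ=250):

    #include <stdio.h>

    #define HZ 250
    static unsigned long msecs_to_jiffies_demo(unsigned long ms)
    {
        return (ms * HZ + 999) / 1000;
    }

    struct call_timeouts { unsigned int hard, idle, normal; }; /* in ms */

    struct call {
        unsigned long next_rx_timo, next_req_timo, expect_term_by;
    };

    static void apply_timeouts(struct call *call, const struct call_timeouts *t,
                               int nr, unsigned long now)
    {
        unsigned long j;

        switch (nr) {
        case 3:
            j = msecs_to_jiffies_demo(t->normal);
            if (t->normal > 0 && j == 0)
                j = 1;            /* never round a nonzero timeout to 0 */
            call->next_rx_timo = j;
            /* fall through */
        case 2:
            j = msecs_to_jiffies_demo(t->idle);
            if (t->idle > 0 && j == 0)
                j = 1;
            call->next_req_timo = j;
            /* fall through */
        case 1:
            if (t->hard > 0)
                call->expect_term_by = now + msecs_to_jiffies_demo(t->hard);
            break;
        }
    }

    int main(void)
    {
        struct call c = { 0 };
        struct call_timeouts t = { .hard = 60000, .idle = 1000, .normal = 20000 };

        apply_timeouts(&c, &t, 3, 5000);
        printf("rx=%lu req=%lu term_by=%lu\n",
               c.next_rx_timo, c.next_req_timo, c.expect_term_by);
        return 0;
    }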
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index 34c706d2f79c..4a7af7aff37d 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -21,6 +21,8 @@ static const unsigned int four = 4;
21static const unsigned int thirtytwo = 32; 21static const unsigned int thirtytwo = 32;
22static const unsigned int n_65535 = 65535; 22static const unsigned int n_65535 = 65535;
23static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1; 23static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
24static const unsigned long one_jiffy = 1;
25static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
24 26
25/* 27/*
26 * RxRPC operating parameters. 28 * RxRPC operating parameters.
@@ -29,64 +31,60 @@ static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
29 * information on the individual parameters. 31 * information on the individual parameters.
30 */ 32 */
31static struct ctl_table rxrpc_sysctl_table[] = { 33static struct ctl_table rxrpc_sysctl_table[] = {
32 /* Values measured in milliseconds */ 34 /* Values measured in milliseconds but used in jiffies */
33 { 35 {
34 .procname = "req_ack_delay", 36 .procname = "req_ack_delay",
35 .data = &rxrpc_requested_ack_delay, 37 .data = &rxrpc_requested_ack_delay,
36 .maxlen = sizeof(unsigned int), 38 .maxlen = sizeof(unsigned long),
37 .mode = 0644, 39 .mode = 0644,
38 .proc_handler = proc_dointvec, 40 .proc_handler = proc_doulongvec_ms_jiffies_minmax,
39 .extra1 = (void *)&zero, 41 .extra1 = (void *)&one_jiffy,
42 .extra2 = (void *)&max_jiffies,
40 }, 43 },
41 { 44 {
42 .procname = "soft_ack_delay", 45 .procname = "soft_ack_delay",
43 .data = &rxrpc_soft_ack_delay, 46 .data = &rxrpc_soft_ack_delay,
44 .maxlen = sizeof(unsigned int), 47 .maxlen = sizeof(unsigned long),
45 .mode = 0644, 48 .mode = 0644,
46 .proc_handler = proc_dointvec, 49 .proc_handler = proc_doulongvec_ms_jiffies_minmax,
47 .extra1 = (void *)&one, 50 .extra1 = (void *)&one_jiffy,
51 .extra2 = (void *)&max_jiffies,
48 }, 52 },
49 { 53 {
50 .procname = "idle_ack_delay", 54 .procname = "idle_ack_delay",
51 .data = &rxrpc_idle_ack_delay, 55 .data = &rxrpc_idle_ack_delay,
52 .maxlen = sizeof(unsigned int), 56 .maxlen = sizeof(unsigned long),
53 .mode = 0644, 57 .mode = 0644,
54 .proc_handler = proc_dointvec, 58 .proc_handler = proc_doulongvec_ms_jiffies_minmax,
55 .extra1 = (void *)&one, 59 .extra1 = (void *)&one_jiffy,
56 }, 60 .extra2 = (void *)&max_jiffies,
57 {
58 .procname = "resend_timeout",
59 .data = &rxrpc_resend_timeout,
60 .maxlen = sizeof(unsigned int),
61 .mode = 0644,
62 .proc_handler = proc_dointvec,
63 .extra1 = (void *)&one,
64 }, 61 },
65 { 62 {
66 .procname = "idle_conn_expiry", 63 .procname = "idle_conn_expiry",
67 .data = &rxrpc_conn_idle_client_expiry, 64 .data = &rxrpc_conn_idle_client_expiry,
68 .maxlen = sizeof(unsigned int), 65 .maxlen = sizeof(unsigned long),
69 .mode = 0644, 66 .mode = 0644,
70 .proc_handler = proc_dointvec_ms_jiffies, 67 .proc_handler = proc_doulongvec_ms_jiffies_minmax,
71 .extra1 = (void *)&one, 68 .extra1 = (void *)&one_jiffy,
69 .extra2 = (void *)&max_jiffies,
72 }, 70 },
73 { 71 {
74 .procname = "idle_conn_fast_expiry", 72 .procname = "idle_conn_fast_expiry",
75 .data = &rxrpc_conn_idle_client_fast_expiry, 73 .data = &rxrpc_conn_idle_client_fast_expiry,
76 .maxlen = sizeof(unsigned int), 74 .maxlen = sizeof(unsigned long),
77 .mode = 0644, 75 .mode = 0644,
78 .proc_handler = proc_dointvec_ms_jiffies, 76 .proc_handler = proc_doulongvec_ms_jiffies_minmax,
79 .extra1 = (void *)&one, 77 .extra1 = (void *)&one_jiffy,
78 .extra2 = (void *)&max_jiffies,
80 }, 79 },
81
82 /* Values measured in seconds but used in jiffies */
83 { 80 {
84 .procname = "max_call_lifetime", 81 .procname = "resend_timeout",
85 .data = &rxrpc_max_call_lifetime, 82 .data = &rxrpc_resend_timeout,
86 .maxlen = sizeof(unsigned int), 83 .maxlen = sizeof(unsigned long),
87 .mode = 0644, 84 .mode = 0644,
88 .proc_handler = proc_dointvec, 85 .proc_handler = proc_doulongvec_ms_jiffies_minmax,
89 .extra1 = (void *)&one, 86 .extra1 = (void *)&one_jiffy,
87 .extra2 = (void *)&max_jiffies,
90 }, 88 },
91 89
92 /* Non-time values */ 90 /* Non-time values */
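The rewritten entries all use proc_doulongvec_ms_jiffies_minmax, which converts the user-supplied millisecond value into jiffies and rejects anything outside the [extra1, extra2] window of one jiffy to MAX_JIFFY_OFFSET; the .maxlen bump to sizeof(unsigned long) matches the handler, which reads and writes a full long through .data. A userspace sketch of the convert-and-clamp behaviour (write_tunable() is a stand-in for the proc handler, HZ=250 for the demo):

    #include <limits.h>
    #include <stdio.h>

    #define HZ 250
    #define MAX_JIFFY_OFFSET ((LONG_MAX >> 1) - 1)

    static const unsigned long one_jiffy = 1;
    static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;

    /* Convert ms to jiffies and apply the minmax bounds, like the handler. */
    static int write_tunable(unsigned long *data, unsigned long ms)
    {
        unsigned long j = (ms * HZ + 999) / 1000;

        if (j < one_jiffy || j > max_jiffies)
            return -1;              /* -EINVAL in the kernel */
        *data = j;
        return 0;
    }

    int main(void)
    {
        unsigned long soft_ack_delay = HZ;

        printf("%d\n", write_tunable(&soft_ack_delay, 1000)); /* ok: HZ */
        printf("%d\n", write_tunable(&soft_ack_delay, 0));    /* rejected */
        printf("soft_ack_delay=%lu\n", soft_ack_delay);
        return 0;
    }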
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 7d97f612c9b9..ddcf04b4ab43 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -336,7 +336,8 @@ static void tcf_block_put_final(struct work_struct *work)
 	struct tcf_chain *chain, *tmp;
 
 	rtnl_lock();
-	/* Only chain 0 should be still here. */
+
+	/* At this point, all the chains should have refcnt == 1. */
 	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
 		tcf_chain_put(chain);
 	rtnl_unlock();
@@ -344,15 +345,21 @@ static void tcf_block_put_final(struct work_struct *work)
 }
 
 /* XXX: Standalone actions are not allowed to jump to any chain, and bound
- * actions should be all removed after flushing. However, filters are now
- * destroyed in tc filter workqueue with RTNL lock, they can not race here.
+ * actions should be all removed after flushing.
  */
 void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 		       struct tcf_block_ext_info *ei)
 {
-	struct tcf_chain *chain, *tmp;
+	struct tcf_chain *chain;
 
-	list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+	/* Hold a refcnt for all chains, except 0, so that they don't disappear
+	 * while we are iterating.
+	 */
+	list_for_each_entry(chain, &block->chain_list, list)
+		if (chain->index)
+			tcf_chain_hold(chain);
+
+	list_for_each_entry(chain, &block->chain_list, list)
 		tcf_chain_flush(chain);
 
 	tcf_block_offload_unbind(block, q, ei);
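The rewritten tcf_block_put_ext() illustrates a general rule: flushing one chain may drop the last reference of another chain on the same list, so even a list_for_each_entry_safe() walk is not enough; the entries must be pinned first. A reduced sketch of that pin-then-operate shape, with hypothetical item/operate names:

    #include <linux/list.h>
    #include <linux/refcount.h>

    struct item {
        struct list_head list;
        refcount_t refcnt;
    };

    static void operate(struct item *it);   /* may drop other items' refs */

    static void pin_then_operate(struct list_head *head)
    {
        struct item *it;

        /* Pass 1: take a reference on everything so nothing can be freed. */
        list_for_each_entry(it, head, list)
            refcount_inc(&it->refcnt);

        /* Pass 2: now safe even if operate() releases references. */
        list_for_each_entry(it, head, list)
            operate(it);
    }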
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index a9f3e317055c..6fe798c2df1a 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -258,11 +258,8 @@ static int cls_bpf_init(struct tcf_proto *tp)
 	return 0;
 }
 
-static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
 {
-	tcf_exts_destroy(&prog->exts);
-	tcf_exts_put_net(&prog->exts);
-
 	if (cls_bpf_is_ebpf(prog))
 		bpf_prog_put(prog->filter);
 	else
@@ -270,6 +267,14 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
 
 	kfree(prog->bpf_name);
 	kfree(prog->bpf_ops);
+}
+
+static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+{
+	tcf_exts_destroy(&prog->exts);
+	tcf_exts_put_net(&prog->exts);
+
+	cls_bpf_free_parms(prog);
 	kfree(prog);
 }
 
@@ -514,12 +519,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 		goto errout_idr;
 
 	ret = cls_bpf_offload(tp, prog, oldprog);
-	if (ret) {
-		if (!oldprog)
-			idr_remove_ext(&head->handle_idr, prog->handle);
-		__cls_bpf_delete_prog(prog);
-		return ret;
-	}
+	if (ret)
+		goto errout_parms;
 
 	if (!tc_in_hw(prog->gen_flags))
 		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
@@ -537,6 +538,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
 	*arg = prog;
 	return 0;
 
+errout_parms:
+	cls_bpf_free_parms(prog);
 errout_idr:
 	if (!oldprog)
 		idr_remove_ext(&head->handle_idr, prog->handle);
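The cls_bpf change fixes the offload error path by routing it through the same teardown helper the destructor uses, instead of calling __cls_bpf_delete_prog(), which also dropped a net refcount the caller still held. The structure, reduced to a sketch with hypothetical acquire/offload/free names:

    struct obj;

    static int acquire_parms(struct obj *o);
    static void free_parms(struct obj *o);  /* shared with the destructor */
    static int offload(struct obj *o);

    static int change(struct obj *o)
    {
        int err;

        err = acquire_parms(o);
        if (err)
            return err;

        err = offload(o);
        if (err)
            goto errout_parms;

        return 0;

    errout_parms:
        free_parms(o);  /* undoes exactly what acquire_parms() did */
        return err;
    }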
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 6361be7881f1..525eb3a6d625 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1158,9 +1158,13 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
 		return -EINVAL;
 
+	err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+	if (err)
+		goto put_rtab;
+
 	err = qdisc_class_hash_init(&q->clhash);
 	if (err < 0)
-		goto put_rtab;
+		goto put_block;
 
 	q->link.sibling = &q->link;
 	q->link.common.classid = sch->handle;
@@ -1194,6 +1198,9 @@ static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
 	cbq_addprio(q, &q->link);
 	return 0;
 
+put_block:
+	tcf_block_put(q->link.block);
+
 put_rtab:
 	qdisc_put_rtab(q->link.R_tab);
 	return err;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 890f4a4564e7..09c1203c1711 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -724,6 +724,7 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
 	int i;
 	int err;
 
+	q->sch = sch;
 	timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
 
 	err = tcf_block_get(&q->block, &q->filter_list, sch);
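The one-line sch_sfq fix addresses a consequence of the timer_setup() conversion: the callback now receives only a timer_list pointer, so any extra context, here the owning Qdisc, must be reachable from the structure that embeds the timer. Sketch of the pattern with illustrative type names:

    #include <linux/timer.h>
    #include <net/sch_generic.h>

    struct my_sched_data {
        struct timer_list perturb_timer;
        struct Qdisc *sch;      /* back-pointer filled in by init */
    };

    static void my_perturbation(struct timer_list *t)
    {
        /* from_timer() is container_of() keyed on the timer member. */
        struct my_sched_data *q = from_timer(q, t, perturb_timer);
        struct Qdisc *sch = q->sch;     /* valid only because init set it */

        /* ... take the qdisc lock via sch, reshuffle, re-arm the timer ... */
    }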
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index f5172c21349b..6a38c2503649 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1499,6 +1499,7 @@ static __init int sctp_init(void)
 	INIT_LIST_HEAD(&sctp_address_families);
 	sctp_v4_pf_init();
 	sctp_v6_pf_init();
+	sctp_sched_ops_init();
 
 	status = register_pernet_subsys(&sctp_defaults_ops);
 	if (status)
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 3204a9b29407..014847e25648 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -188,13 +188,13 @@ static void sctp_for_each_tx_datachunk(struct sctp_association *asoc,
 		list_for_each_entry(chunk, &t->transmitted, transmitted_list)
 			cb(chunk);
 
-	list_for_each_entry(chunk, &q->retransmit, list)
+	list_for_each_entry(chunk, &q->retransmit, transmitted_list)
 		cb(chunk);
 
-	list_for_each_entry(chunk, &q->sacked, list)
+	list_for_each_entry(chunk, &q->sacked, transmitted_list)
 		cb(chunk);
 
-	list_for_each_entry(chunk, &q->abandoned, list)
+	list_for_each_entry(chunk, &q->abandoned, transmitted_list)
 		cb(chunk);
 
 	list_for_each_entry(chunk, &q->out_chunk_list, list)
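The sctp_for_each_tx_datachunk() fix matters because a chunk can sit on different queues through different list_head members, and list_for_each_entry() resolves entries with container_of() on the member it is given; naming the wrong member yields pointers skewed by the distance between the two fields. Reduced illustration:

    #include <linux/list.h>

    struct chunk {
        struct list_head list;              /* links out_chunk_list */
        struct list_head transmitted_list;  /* links retransmit, sacked, ... */
    };

    static void use(struct chunk *c);

    static void walk_retransmit(struct list_head *retransmit)
    {
        struct chunk *c;

        /* The member must be the one the chunk was queued with. Writing
         * "list" here instead would shift every recovered pointer by
         * offsetof(transmitted_list) - offsetof(list), i.e. garbage.
         */
        list_for_each_entry(c, retransmit, transmitted_list)
            use(c);
    }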
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index a11db21dc8a0..76ea66be0bbe 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -64,7 +64,7 @@ static void sctp_stream_outq_migrate(struct sctp_stream *stream,
 		 */
 
 		/* Mark as failed send. */
-		sctp_chunk_fail(ch, SCTP_ERROR_INV_STRM);
+		sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM);
 		if (asoc->peer.prsctp_capable &&
 		    SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
 			asoc->sent_cnt_removable--;
@@ -254,6 +254,30 @@ static int sctp_send_reconf(struct sctp_association *asoc,
 	return retval;
 }
 
+static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
+				      __u16 str_nums, __be16 *str_list)
+{
+	struct sctp_association *asoc;
+	__u16 i;
+
+	asoc = container_of(stream, struct sctp_association, stream);
+	if (!asoc->outqueue.out_qlen)
+		return true;
+
+	if (!str_nums)
+		return false;
+
+	for (i = 0; i < str_nums; i++) {
+		__u16 sid = ntohs(str_list[i]);
+
+		if (stream->out[sid].ext &&
+		    !list_empty(&stream->out[sid].ext->outq))
+			return false;
+	}
+
+	return true;
+}
+
 int sctp_send_reset_streams(struct sctp_association *asoc,
 			    struct sctp_reset_streams *params)
 {
@@ -317,6 +341,11 @@ int sctp_send_reset_streams(struct sctp_association *asoc,
 	for (i = 0; i < str_nums; i++)
 		nstr_list[i] = htons(str_list[i]);
 
+	if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+		retval = -EAGAIN;
+		goto out;
+	}
+
 	chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
 
 	kfree(nstr_list);
@@ -377,6 +406,9 @@ int sctp_send_reset_assoc(struct sctp_association *asoc)
 	if (asoc->strreset_outstanding)
 		return -EINPROGRESS;
 
+	if (!sctp_outq_is_empty(&asoc->outqueue))
+		return -EAGAIN;
+
 	chunk = sctp_make_strreset_tsnreq(asoc);
 	if (!chunk)
 		return -ENOMEM;
@@ -563,7 +595,7 @@ struct sctp_chunk *sctp_process_strreset_outreq(
 		flags = SCTP_STREAM_RESET_INCOMING_SSN;
 	}
 
-	nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2;
+	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
 	if (nums) {
 		str_p = outreq->list_of_streams;
 		for (i = 0; i < nums; i++) {
@@ -627,7 +659,7 @@ struct sctp_chunk *sctp_process_strreset_inreq(
 		goto out;
 	}
 
-	nums = (ntohs(param.p->length) - sizeof(*inreq)) / 2;
+	nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
 	str_p = inreq->list_of_streams;
 	for (i = 0; i < nums; i++) {
 		if (ntohs(str_p[i]) >= stream->outcnt) {
@@ -636,6 +668,12 @@ struct sctp_chunk *sctp_process_strreset_inreq(
 		}
 	}
 
+	if (!sctp_stream_outq_is_empty(stream, nums, str_p)) {
+		result = SCTP_STRRESET_IN_PROGRESS;
+		asoc->strreset_inseq--;
+		goto err;
+	}
+
 	chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0);
 	if (!chunk)
 		goto out;
@@ -687,12 +725,18 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
 		i = asoc->strreset_inseq - request_seq - 1;
 		result = asoc->strreset_result[i];
 		if (result == SCTP_STRRESET_PERFORMED) {
-			next_tsn = asoc->next_tsn;
+			next_tsn = asoc->ctsn_ack_point + 1;
 			init_tsn =
 				sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
 		}
 		goto err;
 	}
+
+	if (!sctp_outq_is_empty(&asoc->outqueue)) {
+		result = SCTP_STRRESET_IN_PROGRESS;
+		goto err;
+	}
+
 	asoc->strreset_inseq++;
 
 	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
@@ -703,9 +747,10 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
 		goto out;
 	}
 
-	/* G3: The same processing as though a SACK chunk with no gap report
-	 * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
-	 * received MUST be performed.
+	/* G4: The same processing as though a FWD-TSN chunk (as defined in
+	 * [RFC3758]) with all streams affected and a new cumulative TSN
+	 * ACK of the Receiver's Next TSN minus 1 were received MUST be
+	 * performed.
 	 */
 	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
 	sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
@@ -720,10 +765,9 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
 			 init_tsn, GFP_ATOMIC);
 
-	/* G4: The same processing as though a FWD-TSN chunk (as defined in
-	 * [RFC3758]) with all streams affected and a new cumulative TSN
-	 * ACK of the Receiver's Next TSN minus 1 were received MUST be
-	 * performed.
+	/* G3: The same processing as though a SACK chunk with no gap report
+	 * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
+	 * received MUST be performed.
 	 */
 	sctp_outq_free(&asoc->outqueue);
 
@@ -927,7 +971,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
 		outreq = (struct sctp_strreset_outreq *)req;
 		str_p = outreq->list_of_streams;
-		nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2;
+		nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) /
+		       sizeof(__u16);
 
 		if (result == SCTP_STRRESET_PERFORMED) {
 			if (nums) {
@@ -956,7 +1001,8 @@ struct sctp_chunk *sctp_process_strreset_resp(
 
 		inreq = (struct sctp_strreset_inreq *)req;
 		str_p = inreq->list_of_streams;
-		nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2;
+		nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
+		       sizeof(__u16);
 
 		*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
 			nums, str_p, GFP_ATOMIC);
@@ -975,6 +1021,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
 		if (result == SCTP_STRRESET_PERFORMED) {
 			__u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
 						&asoc->peer.tsn_map);
+			LIST_HEAD(temp);
 
 			sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
 			sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
@@ -983,7 +1030,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
 					 SCTP_TSN_MAP_INITIAL,
 					 stsn, GFP_ATOMIC);
 
+			/* Clean up sacked and abandoned queues only. As the
+			 * out_chunk_list may not be empty, splice it to temp,
+			 * then get it back after sctp_outq_free is done.
+			 */
+			list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
 			sctp_outq_free(&asoc->outqueue);
+			list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
 
 			asoc->next_tsn = rtsn;
 			asoc->ctsn_ack_point = asoc->next_tsn - 1;
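The LIST_HEAD(temp) splice in sctp_process_strreset_resp() is a self-contained trick: it hides one sublist from a teardown routine that frees every queue it can see, then restores it afterwards. In isolation, with a hypothetical destroy_queues():

    #include <linux/list.h>

    static void destroy_queues(void);   /* frees whatever queues it finds */

    static void teardown_keeping_pending(struct list_head *pending)
    {
        LIST_HEAD(temp);

        list_splice_init(pending, &temp);   /* hide: pending is now empty */
        destroy_queues();
        list_splice_init(&temp, pending);   /* put the survivors back */
    }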
diff --git a/net/sctp/stream_sched.c b/net/sctp/stream_sched.c
index 0b83ec51e43b..d8c162a4089c 100644
--- a/net/sctp/stream_sched.c
+++ b/net/sctp/stream_sched.c
@@ -119,16 +119,27 @@ static struct sctp_sched_ops sctp_sched_fcfs = {
 	.unsched_all = sctp_sched_fcfs_unsched_all,
 };
 
+static void sctp_sched_ops_fcfs_init(void)
+{
+	sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
+}
+
 /* API to other parts of the stack */
 
-extern struct sctp_sched_ops sctp_sched_prio;
-extern struct sctp_sched_ops sctp_sched_rr;
+static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];
 
-static struct sctp_sched_ops *sctp_sched_ops[] = {
-	&sctp_sched_fcfs,
-	&sctp_sched_prio,
-	&sctp_sched_rr,
-};
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+			     struct sctp_sched_ops *sched_ops)
+{
+	sctp_sched_ops[sched] = sched_ops;
+}
+
+void sctp_sched_ops_init(void)
+{
+	sctp_sched_ops_fcfs_init();
+	sctp_sched_ops_prio_init();
+	sctp_sched_ops_rr_init();
+}
 
 int sctp_sched_set_sched(struct sctp_association *asoc,
 			 enum sctp_sched_type sched)
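Replacing the extern array with sctp_sched_ops_register() lets each scheduler keep its ops table static to its own file while the core indexes them by enum, filled in once at init. The shape, reduced with illustrative names:

    enum sched_type { SS_FCFS, SS_PRIO, SS_RR, SS_MAX = SS_RR };

    struct sched_ops;   /* opaque here; each backend defines its own table */

    static struct sched_ops *ops_table[SS_MAX + 1];

    void sched_ops_register(enum sched_type type, struct sched_ops *ops)
    {
        ops_table[type] = ops;
    }

    /* Each backend file keeps its table static and, from an init hook that
     * runs before any lookup can happen, calls e.g.
     * sched_ops_register(SS_PRIO, &sched_prio).
     */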
diff --git a/net/sctp/stream_sched_prio.c b/net/sctp/stream_sched_prio.c
index 384dbf3c8760..7997d35dd0fd 100644
--- a/net/sctp/stream_sched_prio.c
+++ b/net/sctp/stream_sched_prio.c
@@ -333,7 +333,7 @@ static void sctp_sched_prio_unsched_all(struct sctp_stream *stream)
 		sctp_sched_prio_unsched(soute);
 }
 
-struct sctp_sched_ops sctp_sched_prio = {
+static struct sctp_sched_ops sctp_sched_prio = {
 	.set = sctp_sched_prio_set,
 	.get = sctp_sched_prio_get,
 	.init = sctp_sched_prio_init,
@@ -345,3 +345,8 @@ struct sctp_sched_ops sctp_sched_prio = {
 	.sched_all = sctp_sched_prio_sched_all,
 	.unsched_all = sctp_sched_prio_unsched_all,
 };
+
+void sctp_sched_ops_prio_init(void)
+{
+	sctp_sched_ops_register(SCTP_SS_PRIO, &sctp_sched_prio);
+}
diff --git a/net/sctp/stream_sched_rr.c b/net/sctp/stream_sched_rr.c
index 7612a438c5b9..1155692448f1 100644
--- a/net/sctp/stream_sched_rr.c
+++ b/net/sctp/stream_sched_rr.c
@@ -187,7 +187,7 @@ static void sctp_sched_rr_unsched_all(struct sctp_stream *stream)
 		sctp_sched_rr_unsched(stream, soute);
 }
 
-struct sctp_sched_ops sctp_sched_rr = {
+static struct sctp_sched_ops sctp_sched_rr = {
 	.set = sctp_sched_rr_set,
 	.get = sctp_sched_rr_get,
 	.init = sctp_sched_rr_init,
@@ -199,3 +199,8 @@ struct sctp_sched_ops sctp_sched_rr = {
 	.sched_all = sctp_sched_rr_sched_all,
 	.unsched_all = sctp_sched_rr_unsched_all,
 };
+
+void sctp_sched_ops_rr_init(void)
+{
+	sctp_sched_ops_register(SCTP_SS_RR, &sctp_sched_rr);
+}
diff --git a/net/tipc/group.c b/net/tipc/group.c
index 12777cac638a..95fec2c057d6 100644
--- a/net/tipc/group.c
+++ b/net/tipc/group.c
@@ -497,6 +497,7 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
 	while ((skb = skb_peek(defq))) {
 		hdr = buf_msg(skb);
 		mtyp = msg_type(hdr);
+		blks = msg_blocks(hdr);
 		deliver = true;
 		ack = false;
 		update = false;
@@ -546,7 +547,6 @@ void tipc_group_filter_msg(struct tipc_group *grp, struct sk_buff_head *inputq,
 		if (!update)
 			continue;
 
-		blks = msg_blocks(hdr);
 		tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
 	}
 	return;
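The tipc_group_filter_msg() fix moves the msg_blocks() read up to where the skb is still owned by this function: the delivery branches between the two points may queue the buffer to the receiver, after which it can be consumed and freed, making the old late hdr dereference a use-after-free. Generic shape of the fix, with hypothetical read_blocks_field()/update_window() helpers:

    #include <linux/skbuff.h>

    static u16 read_blocks_field(struct sk_buff *skb);
    static void update_window(u16 blks);

    static void filter_one(struct sk_buff *skb, struct sk_buff_head *inputq)
    {
        /* Snapshot header fields while the skb is still ours. */
        u16 blks = read_blocks_field(skb);

        __skb_queue_tail(inputq, skb);  /* consumer may free it from here on */

        update_window(blks);            /* safe: uses the snapshot, not skb */
    }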
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 391775e3575c..a7a73ffe675b 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -797,11 +797,13 @@ static void vmci_transport_handle_detach(struct sock *sk)
 
 		/* We should not be sending anymore since the peer won't be
 		 * there to receive, but we can still receive if there is data
-		 * left in our consume queue.
+		 * left in our consume queue. If the local endpoint is a host,
+		 * we can't call vsock_stream_has_data, since that may block,
+		 * but a host endpoint can't read data once the VM has
+		 * detached, so there is no available data in that case.
 		 */
-		if (vsock_stream_has_data(vsk) <= 0) {
-			sk->sk_state = TCP_CLOSE;
-
+		if (vsk->local_addr.svm_cid == VMADDR_CID_HOST ||
+		    vsock_stream_has_data(vsk) <= 0) {
 			if (sk->sk_state == TCP_SYN_SENT) {
 				/* The peer may detach from a queue pair while
 				 * we are still in the connecting state, i.e.,
@@ -811,10 +813,12 @@ static void vmci_transport_handle_detach(struct sock *sk)
 				 * event like a reset.
 				 */
 
+				sk->sk_state = TCP_CLOSE;
 				sk->sk_err = ECONNRESET;
 				sk->sk_error_report(sk);
 				return;
 			}
+			sk->sk_state = TCP_CLOSE;
 		}
 		sk->sk_state_change(sk);
 	}
@@ -2144,7 +2148,7 @@ module_exit(vmci_transport_exit);
 
 MODULE_AUTHOR("VMware, Inc.");
 MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.4.0-k");
+MODULE_VERSION("1.0.5.0-k");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("vmware_vsock");
 MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index da91bb547db3..1abcc4fc4df1 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -20,6 +20,10 @@ config CFG80211
 	tristate "cfg80211 - wireless configuration API"
 	depends on RFKILL || !RFKILL
 	select FW_LOADER
+	# may need to update this when certificates are changed and are
+	# using a different algorithm, though right now they shouldn't
+	# (this is here rather than below to allow it to be a module)
+	select CRYPTO_SHA256 if CFG80211_USE_KERNEL_REGDB_KEYS
 	---help---
 	  cfg80211 is the Linux wireless LAN (802.11) configuration API.
 	  Enable this if you have a wireless device.
@@ -113,6 +117,9 @@ config CFG80211_EXTRA_REGDB_KEYDIR
 	  certificates like in the kernel sources (net/wireless/certs/)
 	  that shall be accepted for a signed regulatory database.
 
+	  Note that you need to also select the correct CRYPTO_<hash> modules
+	  for your certificates, and if cfg80211 is built-in they also must be.
+
 config CFG80211_REG_CELLULAR_HINTS
 	bool "cfg80211 regulatory support for cellular base station hints"
 	depends on CFG80211_CERTIFICATION_ONUS