Diffstat (limited to 'net')
-rw-r--r--  net/atm/lec.c | 6
-rw-r--r--  net/bluetooth/sco.c | 4
-rw-r--r--  net/bridge/br_input.c | 23
-rw-r--r--  net/bridge/br_multicast.c | 4
-rw-r--r--  net/bridge/br_netlink.c | 2
-rw-r--r--  net/core/dev.c | 16
-rw-r--r--  net/core/failover.c | 6
-rw-r--r--  net/core/filter.c | 2
-rw-r--r--  net/core/net-sysfs.c | 14
-rw-r--r--  net/core/ptp_classifier.c | 7
-rw-r--r--  net/core/rtnetlink.c | 2
-rw-r--r--  net/core/skbuff.c | 10
-rw-r--r--  net/core/sock.c | 4
-rw-r--r--  net/ipv4/fou.c | 4
-rw-r--r--  net/ipv4/route.c | 16
-rw-r--r--  net/ipv4/tcp_dctcp.c | 45
-rw-r--r--  net/ipv4/tcp_input.c | 10
-rw-r--r--  net/ipv6/route.c | 4
-rw-r--r--  net/ipv6/udp.c | 2
-rw-r--r--  net/llc/af_llc.c | 3
-rw-r--r--  net/mac80211/driver-ops.h | 3
-rw-r--r--  net/mac80211/key.c | 9
-rw-r--r--  net/mac80211/mesh_pathtbl.c | 2
-rw-r--r--  net/mac80211/rx.c | 10
-rw-r--r--  net/mac80211/trace_msg.h | 7
-rw-r--r--  net/mac80211/tx.c | 53
-rw-r--r--  net/netlink/af_netlink.c | 3
-rw-r--r--  net/netrom/af_netrom.c | 76
-rw-r--r--  net/netrom/nr_loopback.c | 2
-rw-r--r--  net/netrom/nr_route.c | 2
-rw-r--r--  net/netrom/sysctl_net_netrom.c | 5
-rw-r--r--  net/rds/af_rds.c | 3
-rw-r--r--  net/rds/bind.c | 2
-rw-r--r--  net/rxrpc/af_rxrpc.c | 17
-rw-r--r--  net/rxrpc/ar-internal.h | 1
-rw-r--r--  net/rxrpc/conn_event.c | 11
-rw-r--r--  net/rxrpc/input.c | 18
-rw-r--r--  net/rxrpc/peer_event.c | 5
-rw-r--r--  net/rxrpc/sendmsg.c | 21
-rw-r--r--  net/sctp/socket.c | 3
-rw-r--r--  net/smc/af_smc.c | 58
-rw-r--r--  net/smc/smc_close.c | 25
-rw-r--r--  net/smc/smc_close.h | 1
-rw-r--r--  net/smc/smc_ism.c | 5
-rw-r--r--  net/smc/smc_pnet.c | 3
-rw-r--r--  net/strparser/strparser.c | 12
-rw-r--r--  net/tipc/link.c | 2
-rw-r--r--  net/tipc/name_table.c | 3
-rw-r--r--  net/tipc/sysctl.c | 8
-rw-r--r--  net/tls/tls_device.c | 12
-rw-r--r--  net/tls/tls_main.c | 24
-rw-r--r--  net/tls/tls_sw.c | 15
-rw-r--r--  net/wireless/nl80211.c | 18
-rw-r--r--  net/wireless/reg.c | 39
-rw-r--r--  net/wireless/scan.c | 3
-rw-r--r--  net/wireless/util.c | 6
56 files changed, 451 insertions, 220 deletions
diff --git a/net/atm/lec.c b/net/atm/lec.c
index d7f5cf5b7594..ad4f829193f0 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -710,7 +710,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
 
 static int lec_mcast_attach(struct atm_vcc *vcc, int arg)
 {
-	if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
+	if (arg < 0 || arg >= MAX_LEC_ITF)
+		return -EINVAL;
+	arg = array_index_nospec(arg, MAX_LEC_ITF);
+	if (!dev_lec[arg])
 		return -EINVAL;
 	vcc->proto_data = dev_lec[arg];
 	return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
@@ -728,6 +731,7 @@ static int lecd_attach(struct atm_vcc *vcc, int arg)
 	i = arg;
 	if (arg >= MAX_LEC_ITF)
 		return -EINVAL;
+	i = array_index_nospec(arg, MAX_LEC_ITF);
 	if (!dev_lec[i]) {
 		int size;
 
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 9a580999ca57..d892b7c3cc42 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -523,12 +523,12 @@ static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
 	struct sock *sk = sock->sk;
 	int err = 0;
 
-	BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
-
 	if (!addr || addr_len < sizeof(struct sockaddr_sco) ||
 	    addr->sa_family != AF_BLUETOOTH)
 		return -EINVAL;
 
+	BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+
 	lock_sock(sk);
 
 	if (sk->sk_state != BT_OPEN) {
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5ea7e56119c1..ba303ee99b9b 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -197,13 +197,10 @@ static void __br_handle_local_finish(struct sk_buff *skb)
 /* note: already called with rcu_read_lock */
 static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
-
 	__br_handle_local_finish(skb);
 
-	BR_INPUT_SKB_CB(skb)->brdev = p->br->dev;
-	br_pass_frame_up(skb);
-	return 0;
+	/* return 1 to signal the okfn() was called so it's ok to use the skb */
+	return 1;
 }
 
 /*
@@ -280,10 +277,18 @@ rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 			goto forward;
 		}
 
-		/* Deliver packet to local host only */
-		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, dev_net(skb->dev),
-			NULL, skb, skb->dev, NULL, br_handle_local_finish);
-		return RX_HANDLER_CONSUMED;
+		/* The else clause should be hit when nf_hook():
+		 *   - returns < 0 (drop/error)
+		 *   - returns = 0 (stolen/nf_queue)
+		 * Thus return 1 from the okfn() to signal the skb is ok to pass
+		 */
+		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
+			    dev_net(skb->dev), NULL, skb, skb->dev, NULL,
+			    br_handle_local_finish) == 1) {
+			return RX_HANDLER_PASS;
+		} else {
+			return RX_HANDLER_CONSUMED;
+		}
 	}
 
 forward:
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 02da21d771c9..45e7f4173bba 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -2031,7 +2031,8 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
 	__br_multicast_open(br, query);
 
-	list_for_each_entry(port, &br->port_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(port, &br->port_list, list) {
 		if (port->state == BR_STATE_DISABLED ||
 		    port->state == BR_STATE_BLOCKING)
 			continue;
@@ -2043,6 +2044,7 @@ static void br_multicast_start_querier(struct net_bridge *br,
 			br_multicast_enable(&port->ip6_own_query);
 #endif
 	}
+	rcu_read_unlock();
 }
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 9c07591b0232..7104cf13da84 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -1441,7 +1441,7 @@ static int br_fill_info(struct sk_buff *skb, const struct net_device *brdev)
 	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_ENABLED,
 		       br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) ||
 	    nla_put_u8(skb, IFLA_BR_VLAN_STATS_PER_PORT,
-		       br_opt_get(br, IFLA_BR_VLAN_STATS_PER_PORT)))
+		       br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)))
 		return -EMSGSIZE;
 #endif
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
diff --git a/net/core/dev.c b/net/core/dev.c
index fdcff29df915..f409406254dd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1184,7 +1184,21 @@ int dev_change_name(struct net_device *dev, const char *newname)
 	BUG_ON(!dev_net(dev));
 
 	net = dev_net(dev);
-	if (dev->flags & IFF_UP)
+
+	/* Some auto-enslaved devices e.g. failover slaves are
+	 * special, as userspace might rename the device after
+	 * the interface had been brought up and running since
+	 * the point kernel initiated auto-enslavement. Allow
+	 * live name change even when these slave devices are
+	 * up and running.
+	 *
+	 * Typically, users of these auto-enslaving devices
+	 * don't actually care about slave name change, as
+	 * they are supposed to operate on master interface
+	 * directly.
+	 */
+	if (dev->flags & IFF_UP &&
+	    likely(!(dev->priv_flags & IFF_LIVE_RENAME_OK)))
 		return -EBUSY;
 
 	write_seqcount_begin(&devnet_rename_seq);
diff --git a/net/core/failover.c b/net/core/failover.c
index 4a92a98ccce9..b5cd3c727285 100644
--- a/net/core/failover.c
+++ b/net/core/failover.c
@@ -80,14 +80,14 @@ static int failover_slave_register(struct net_device *slave_dev)
 		goto err_upper_link;
 	}
 
-	slave_dev->priv_flags |= IFF_FAILOVER_SLAVE;
+	slave_dev->priv_flags |= (IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
 	if (fops && fops->slave_register &&
 	    !fops->slave_register(slave_dev, failover_dev))
 		return NOTIFY_OK;
 
 	netdev_upper_dev_unlink(slave_dev, failover_dev);
-	slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+	slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 err_upper_link:
 	netdev_rx_handler_unregister(slave_dev);
 done:
@@ -121,7 +121,7 @@ int failover_slave_unregister(struct net_device *slave_dev)
 
 	netdev_rx_handler_unregister(slave_dev);
 	netdev_upper_dev_unlink(slave_dev, failover_dev);
-	slave_dev->priv_flags &= ~IFF_FAILOVER_SLAVE;
+	slave_dev->priv_flags &= ~(IFF_FAILOVER_SLAVE | IFF_LIVE_RENAME_OK);
 
 	if (fops && fops->slave_unregister &&
 	    !fops->slave_unregister(slave_dev, failover_dev))
diff --git a/net/core/filter.c b/net/core/filter.c
index fc92ebc4e200..27e61ffd9039 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -4383,6 +4383,8 @@ BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
 	 *  Only binding to IP is supported.
 	 */
 	err = -EINVAL;
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		return err;
 	if (addr->sa_family == AF_INET) {
 		if (addr_len < sizeof(struct sockaddr_in))
 			return err;
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index f8f94303a1f5..8f8b7b6c2945 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1747,20 +1747,16 @@ int netdev_register_kobject(struct net_device *ndev)
 
 	error = device_add(dev);
 	if (error)
-		goto error_put_device;
+		return error;
 
 	error = register_queue_kobjects(ndev);
-	if (error)
-		goto error_device_del;
+	if (error) {
+		device_del(dev);
+		return error;
+	}
 
 	pm_runtime_set_memalloc_noio(dev, true);
 
-	return 0;
-
-error_device_del:
-	device_del(dev);
-error_put_device:
-	put_device(dev);
 	return error;
 }
 
diff --git a/net/core/ptp_classifier.c b/net/core/ptp_classifier.c
index 703cf76aa7c2..7109c168b5e0 100644
--- a/net/core/ptp_classifier.c
+++ b/net/core/ptp_classifier.c
@@ -185,9 +185,10 @@ void __init ptp_classifier_init(void)
 	{ 0x16,  0,  0, 0x00000000 },
 	{ 0x06,  0,  0, 0x00000000 },
 	};
-	struct sock_fprog_kern ptp_prog = {
-		.len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
-	};
+	struct sock_fprog_kern ptp_prog;
+
+	ptp_prog.len = ARRAY_SIZE(ptp_filter);
+	ptp_prog.filter = ptp_filter;
 
 	BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a51cab95ba64..220c56e93659 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -4948,7 +4948,7 @@ static int rtnl_valid_stats_req(const struct nlmsghdr *nlh, bool strict_check,
 {
 	struct if_stats_msg *ifsm;
 
-	if (nlh->nlmsg_len < sizeof(*ifsm)) {
+	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifsm))) {
 		NL_SET_ERR_MSG(extack, "Invalid header for stats dump");
 		return -EINVAL;
 	}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ef2cd5712098..40796b8bf820 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -5083,7 +5083,8 @@ EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
 
 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 {
-	int mac_len;
+	int mac_len, meta_len;
+	void *meta;
 
 	if (skb_cow(skb, skb_headroom(skb)) < 0) {
 		kfree_skb(skb);
@@ -5095,6 +5096,13 @@ static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
 		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
 			mac_len - VLAN_HLEN - ETH_TLEN);
 	}
+
+	meta_len = skb_metadata_len(skb);
+	if (meta_len) {
+		meta = skb_metadata_end(skb) - meta_len;
+		memmove(meta + VLAN_HLEN, meta, meta_len);
+	}
+
 	skb->mac_header += VLAN_HLEN;
 	return skb;
 }
diff --git a/net/core/sock.c b/net/core/sock.c
index 782343bb925b..067878a1e4c5 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -348,7 +348,7 @@ static int sock_get_timeout(long timeo, void *optval, bool old_timeval)
 		tv.tv_usec = ((timeo % HZ) * USEC_PER_SEC) / HZ;
 	}
 
-	if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
 		struct old_timeval32 tv32 = { tv.tv_sec, tv.tv_usec };
 		*(struct old_timeval32 *)optval = tv32;
 		return sizeof(tv32);
@@ -372,7 +372,7 @@ static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen, bool
 {
 	struct __kernel_sock_timeval tv;
 
-	if (in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
+	if (old_timeval && in_compat_syscall() && !COMPAT_USE_64BIT_TIME) {
 		struct old_timeval32 tv32;
 
 		if (optlen < sizeof(tv32))
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 79e98e21cdd7..12ce6c526d72 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -121,6 +121,7 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 	struct guehdr *guehdr;
 	void *data;
 	u16 doffset = 0;
+	u8 proto_ctype;
 
 	if (!fou)
 		return 1;
@@ -212,13 +213,14 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
 	if (unlikely(guehdr->control))
 		return gue_control_message(skb, guehdr);
 
+	proto_ctype = guehdr->proto_ctype;
 	__skb_pull(skb, sizeof(struct udphdr) + hdrlen);
 	skb_reset_transport_header(skb);
 
 	if (iptunnel_pull_offloads(skb))
 		goto drop;
 
-	return -guehdr->proto_ctype;
+	return -proto_ctype;
 
 drop:
 	kfree_skb(skb);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a5da63e5faa2..88ce038dd495 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1185,9 +1185,23 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 
 static void ipv4_link_failure(struct sk_buff *skb)
 {
+	struct ip_options opt;
 	struct rtable *rt;
+	int res;
 
-	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+	/* Recompile ip options since IPCB may not be valid anymore.
+	 */
+	memset(&opt, 0, sizeof(opt));
+	opt.optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+
+	rcu_read_lock();
+	res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
+	rcu_read_unlock();
+
+	if (res)
+		return;
+
+	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
 
 	rt = skb_rtable(skb);
 	if (rt)
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 359da68d7c06..477cb4aa456c 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -49,9 +49,8 @@
 #define DCTCP_MAX_ALPHA	1024U
 
 struct dctcp {
-	u32 acked_bytes_ecn;
-	u32 acked_bytes_total;
-	u32 prior_snd_una;
+	u32 old_delivered;
+	u32 old_delivered_ce;
 	u32 prior_rcv_nxt;
 	u32 dctcp_alpha;
 	u32 next_seq;
@@ -73,8 +72,8 @@ static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
 {
 	ca->next_seq = tp->snd_nxt;
 
-	ca->acked_bytes_ecn = 0;
-	ca->acked_bytes_total = 0;
+	ca->old_delivered = tp->delivered;
+	ca->old_delivered_ce = tp->delivered_ce;
 }
 
 static void dctcp_init(struct sock *sk)
@@ -86,7 +85,6 @@ static void dctcp_init(struct sock *sk)
 		     sk->sk_state == TCP_CLOSE)) {
 		struct dctcp *ca = inet_csk_ca(sk);
 
-		ca->prior_snd_una = tp->snd_una;
 		ca->prior_rcv_nxt = tp->rcv_nxt;
 
 		ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
@@ -118,37 +116,25 @@ static void dctcp_update_alpha(struct sock *sk, u32 flags)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct dctcp *ca = inet_csk_ca(sk);
-	u32 acked_bytes = tp->snd_una - ca->prior_snd_una;
-
-	/* If ack did not advance snd_una, count dupack as MSS size.
-	 * If ack did update window, do not count it at all.
-	 */
-	if (acked_bytes == 0 && !(flags & CA_ACK_WIN_UPDATE))
-		acked_bytes = inet_csk(sk)->icsk_ack.rcv_mss;
-	if (acked_bytes) {
-		ca->acked_bytes_total += acked_bytes;
-		ca->prior_snd_una = tp->snd_una;
-
-		if (flags & CA_ACK_ECE)
-			ca->acked_bytes_ecn += acked_bytes;
-	}
 
 	/* Expired RTT */
 	if (!before(tp->snd_una, ca->next_seq)) {
-		u64 bytes_ecn = ca->acked_bytes_ecn;
+		u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
 		u32 alpha = ca->dctcp_alpha;
 
 		/* alpha = (1 - g) * alpha + g * F */
 
 		alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
-		if (bytes_ecn) {
+		if (delivered_ce) {
+			u32 delivered = tp->delivered - ca->old_delivered;
+
 			/* If dctcp_shift_g == 1, a 32bit value would overflow
-			 * after 8 Mbytes.
+			 * after 8 M packets.
 			 */
-			bytes_ecn <<= (10 - dctcp_shift_g);
-			do_div(bytes_ecn, max(1U, ca->acked_bytes_total));
+			delivered_ce <<= (10 - dctcp_shift_g);
+			delivered_ce /= max(1U, delivered);
 
-			alpha = min(alpha + (u32)bytes_ecn, DCTCP_MAX_ALPHA);
+			alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
 		}
 		/* dctcp_alpha can be read from dctcp_get_info() without
 		 * synchro, so we ask compiler to not use dctcp_alpha
@@ -200,6 +186,7 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
 			     union tcp_cc_info *info)
 {
 	const struct dctcp *ca = inet_csk_ca(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
 
 	/* Fill it also in case of VEGASINFO due to req struct limits.
 	 * We can still correctly retrieve it later.
@@ -211,8 +198,10 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
 		info->dctcp.dctcp_enabled = 1;
 		info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
 		info->dctcp.dctcp_alpha = ca->dctcp_alpha;
-		info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn;
-		info->dctcp.dctcp_ab_tot = ca->acked_bytes_total;
+		info->dctcp.dctcp_ab_ecn = tp->mss_cache *
+					   (tp->delivered_ce - ca->old_delivered_ce);
+		info->dctcp.dctcp_ab_tot = tp->mss_cache *
+					   (tp->delivered - ca->old_delivered);
 	}
 
 	*attr = INET_DIAG_DCTCPINFO;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5def3c48870e..731d3045b50a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -402,11 +402,12 @@ static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	int room;
+
+	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 
 	/* Check #1 */
-	if (tp->rcv_ssthresh < tp->window_clamp &&
-	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !tcp_under_memory_pressure(sk)) {
+	if (room > 0 && !tcp_under_memory_pressure(sk)) {
 		int incr;
 
 		/* Check #2. Increase window, if skb with such overhead
@@ -419,8 +420,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 
 		if (incr) {
 			incr = max_t(int, incr, 2 * skb->len);
-			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr,
-					       tp->window_clamp);
+			tp->rcv_ssthresh += min(room, incr);
 			inet_csk(sk)->icsk_ack.quick |= 1;
 		}
 	}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 0302e0eb07af..7178e32eb15d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2330,6 +2330,10 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 
 	rcu_read_lock();
 	from = rcu_dereference(rt6->from);
+	if (!from) {
+		rcu_read_unlock();
+		return;
+	}
 	nrt6 = ip6_rt_cache_alloc(from, daddr, saddr);
 	if (nrt6) {
 		rt6_do_update_pmtu(nrt6, mtu);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index b444483cdb2b..622eeaf5732b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1047,6 +1047,8 @@ static void udp_v6_flush_pending_frames(struct sock *sk)
 static int udpv6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
 			     int addr_len)
 {
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		return -EINVAL;
 	/* The following checks are replicated from __ip6_datagram_connect()
 	 * and intended to prevent BPF program called below from accessing
 	 * bytes that are out of the bound specified by user in addr_len.
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index b99e73a7e7e0..2017b7d780f5 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -320,14 +320,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
 	struct llc_sap *sap;
 	int rc = -EINVAL;
 
-	dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
-
 	lock_sock(sk);
 	if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
 		goto out;
 	rc = -EAFNOSUPPORT;
 	if (unlikely(addr->sllc_family != AF_LLC))
 		goto out;
+	dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
 	rc = -ENODEV;
 	rcu_read_lock();
 	if (sk->sk_bound_dev_if) {
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index 28d022a3eee3..ae4f0be3b393 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -1195,6 +1195,9 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
 
+	if (local->in_reconfig)
+		return;
+
 	if (!check_sdata_in_driver(sdata))
 		return;
 
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 4700718e010f..37e372896230 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -167,8 +167,10 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 		 * The driver doesn't know anything about VLAN interfaces.
 		 * Hence, don't send GTKs for VLAN interfaces to the driver.
 		 */
-		if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE))
+		if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+			ret = 1;
 			goto out_unsupported;
+		}
 	}
 
 	ret = drv_set_key(key->local, SET_KEY, sdata,
@@ -213,11 +215,8 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 		/* all of these we can do in software - if driver can */
 		if (ret == 1)
 			return 0;
-		if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL)) {
-			if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
-				return 0;
+		if (ieee80211_hw_check(&key->local->hw, SW_CRYPTO_CONTROL))
 			return -EINVAL;
-		}
 		return 0;
 	default:
 		return -EINVAL;
diff --git a/net/mac80211/mesh_pathtbl.c b/net/mac80211/mesh_pathtbl.c
index 95eb5064fa91..b76a2aefa9ec 100644
--- a/net/mac80211/mesh_pathtbl.c
+++ b/net/mac80211/mesh_pathtbl.c
@@ -23,7 +23,7 @@ static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);
 static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
 {
 	/* Use last four bytes of hw addr as hash index */
-	return jhash_1word(*(u32 *)(addr+2), seed);
+	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
 }
 
 static const struct rhashtable_params mesh_rht_params = {
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 7f8d93401ce0..bf0b187f994e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1568,7 +1568,15 @@ static void sta_ps_start(struct sta_info *sta)
 		return;
 
 	for (tid = 0; tid < IEEE80211_NUM_TIDS; tid++) {
-		if (txq_has_queue(sta->sta.txq[tid]))
+		struct ieee80211_txq *txq = sta->sta.txq[tid];
+		struct txq_info *txqi = to_txq_info(txq);
+
+		spin_lock(&local->active_txq_lock[txq->ac]);
+		if (!list_empty(&txqi->schedule_order))
+			list_del_init(&txqi->schedule_order);
+		spin_unlock(&local->active_txq_lock[txq->ac]);
+
+		if (txq_has_queue(txq))
 			set_bit(tid, &sta->txq_buffered_tids);
 		else
 			clear_bit(tid, &sta->txq_buffered_tids);
diff --git a/net/mac80211/trace_msg.h b/net/mac80211/trace_msg.h
index 366b9e6f043e..40141df09f25 100644
--- a/net/mac80211/trace_msg.h
+++ b/net/mac80211/trace_msg.h
@@ -1,4 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Portions of this file
+ * Copyright (C) 2019 Intel Corporation
+ */
+
 #ifdef CONFIG_MAC80211_MESSAGE_TRACING
 
 #if !defined(__MAC80211_MSG_DRIVER_TRACE) || defined(TRACE_HEADER_MULTI_READ)
@@ -11,7 +16,7 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM mac80211_msg
 
-#define MAX_MSG_LEN	100
+#define MAX_MSG_LEN	120
 
 DECLARE_EVENT_CLASS(mac80211_msg_event,
 	TP_PROTO(struct va_format *vaf),
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8a49a74c0a37..2e816dd67be7 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -3221,6 +3221,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	u8 max_subframes = sta->sta.max_amsdu_subframes;
 	int max_frags = local->hw.max_tx_fragments;
 	int max_amsdu_len = sta->sta.max_amsdu_len;
+	int orig_truesize;
 	__be16 len;
 	void *data;
 	bool ret = false;
@@ -3261,6 +3262,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	if (!head || skb_is_gso(head))
 		goto out;
 
+	orig_truesize = head->truesize;
 	orig_len = head->len;
 
 	if (skb->len + head->len > max_amsdu_len)
@@ -3318,6 +3320,7 @@ static bool ieee80211_amsdu_aggregate(struct ieee80211_sub_if_data *sdata,
 	*frag_tail = skb;
 
 out_recalc:
+	fq->memory_usage += head->truesize - orig_truesize;
 	if (head->len != orig_len) {
 		flow->backlog += head->len - orig_len;
 		tin->backlog_bytes += head->len - orig_len;
@@ -3646,16 +3649,17 @@ EXPORT_SYMBOL(ieee80211_tx_dequeue);
 struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
+	struct ieee80211_txq *ret = NULL;
 	struct txq_info *txqi = NULL;
 
-	lockdep_assert_held(&local->active_txq_lock[ac]);
+	spin_lock_bh(&local->active_txq_lock[ac]);
 
  begin:
 	txqi = list_first_entry_or_null(&local->active_txqs[ac],
 					struct txq_info,
 					schedule_order);
 	if (!txqi)
-		return NULL;
+		goto out;
 
 	if (txqi->txq.sta) {
 		struct sta_info *sta = container_of(txqi->txq.sta,
@@ -3672,24 +3676,30 @@ struct ieee80211_txq *ieee80211_next_txq(struct ieee80211_hw *hw, u8 ac)
 
 
 	if (txqi->schedule_round == local->schedule_round[ac])
-		return NULL;
+		goto out;
 
 	list_del_init(&txqi->schedule_order);
 	txqi->schedule_round = local->schedule_round[ac];
-	return &txqi->txq;
+	ret = &txqi->txq;
+
+out:
+	spin_unlock_bh(&local->active_txq_lock[ac]);
+	return ret;
 }
 EXPORT_SYMBOL(ieee80211_next_txq);
 
-void ieee80211_return_txq(struct ieee80211_hw *hw,
-			  struct ieee80211_txq *txq)
+void __ieee80211_schedule_txq(struct ieee80211_hw *hw,
+			      struct ieee80211_txq *txq,
+			      bool force)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct txq_info *txqi = to_txq_info(txq);
 
-	lockdep_assert_held(&local->active_txq_lock[txq->ac]);
+	spin_lock_bh(&local->active_txq_lock[txq->ac]);
 
 	if (list_empty(&txqi->schedule_order) &&
-	    (!skb_queue_empty(&txqi->frags) || txqi->tin.backlog_packets)) {
+	    (force || !skb_queue_empty(&txqi->frags) ||
+	     txqi->tin.backlog_packets)) {
 		/* If airtime accounting is active, always enqueue STAs at the
 		 * head of the list to ensure that they only get moved to the
 		 * back by the airtime DRR scheduler once they have a negative
3706 list_add_tail(&txqi->schedule_order, 3716 list_add_tail(&txqi->schedule_order,
3707 &local->active_txqs[txq->ac]); 3717 &local->active_txqs[txq->ac]);
3708 } 3718 }
3709}
3710EXPORT_SYMBOL(ieee80211_return_txq);
3711 3719
3712void ieee80211_schedule_txq(struct ieee80211_hw *hw,
3713 struct ieee80211_txq *txq)
3714 __acquires(txq_lock) __releases(txq_lock)
3715{
3716 struct ieee80211_local *local = hw_to_local(hw);
3717
3718 spin_lock_bh(&local->active_txq_lock[txq->ac]);
3719 ieee80211_return_txq(hw, txq);
3720 spin_unlock_bh(&local->active_txq_lock[txq->ac]); 3720 spin_unlock_bh(&local->active_txq_lock[txq->ac]);
3721} 3721}
3722EXPORT_SYMBOL(ieee80211_schedule_txq); 3722EXPORT_SYMBOL(__ieee80211_schedule_txq);
3723 3723
3724bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw, 3724bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
3725 struct ieee80211_txq *txq) 3725 struct ieee80211_txq *txq)
@@ -3729,7 +3729,7 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 	struct sta_info *sta;
 	u8 ac = txq->ac;
 
-	lockdep_assert_held(&local->active_txq_lock[ac]);
+	spin_lock_bh(&local->active_txq_lock[ac]);
 
 	if (!txqi->txq.sta)
 		goto out;
@@ -3759,34 +3759,27 @@ bool ieee80211_txq_may_transmit(struct ieee80211_hw *hw,
 
 	sta->airtime[ac].deficit += sta->airtime_weight;
 	list_move_tail(&txqi->schedule_order, &local->active_txqs[ac]);
+	spin_unlock_bh(&local->active_txq_lock[ac]);
 
 	return false;
 out:
 	if (!list_empty(&txqi->schedule_order))
 		list_del_init(&txqi->schedule_order);
+	spin_unlock_bh(&local->active_txq_lock[ac]);
 
 	return true;
 }
 EXPORT_SYMBOL(ieee80211_txq_may_transmit);
 
 void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
-	__acquires(txq_lock)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
 	spin_lock_bh(&local->active_txq_lock[ac]);
 	local->schedule_round[ac]++;
-}
-EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-
-void ieee80211_txq_schedule_end(struct ieee80211_hw *hw, u8 ac)
-	__releases(txq_lock)
-{
-	struct ieee80211_local *local = hw_to_local(hw);
-
 	spin_unlock_bh(&local->active_txq_lock[ac]);
 }
-EXPORT_SYMBOL(ieee80211_txq_schedule_end);
+EXPORT_SYMBOL(ieee80211_txq_schedule_start);
 
 void __ieee80211_subif_start_xmit(struct sk_buff *skb,
 				  struct net_device *dev,
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f28e937320a3..216ab915dd54 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -988,7 +988,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 	struct netlink_sock *nlk = nlk_sk(sk);
 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
 	int err = 0;
-	unsigned long groups = nladdr->nl_groups;
+	unsigned long groups;
 	bool bound;
 
 	if (addr_len < sizeof(struct sockaddr_nl))
@@ -996,6 +996,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
 	if (nladdr->nl_family != AF_NETLINK)
 		return -EINVAL;
+	groups = nladdr->nl_groups;
 
 	/* Only superuser is allowed to listen multicasts */
 	if (groups) {
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 1d3144d19903..71ffd1a6dc7c 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1392,18 +1392,22 @@ static int __init nr_proto_init(void)
 	int i;
 	int rc = proto_register(&nr_proto, 0);
 
-	if (rc != 0)
-		goto out;
+	if (rc)
+		return rc;
 
 	if (nr_ndevs > 0x7fffffff/sizeof(struct net_device *)) {
-		printk(KERN_ERR "NET/ROM: nr_proto_init - nr_ndevs parameter to large\n");
-		return -1;
+		pr_err("NET/ROM: %s - nr_ndevs parameter too large\n",
+		       __func__);
+		rc = -EINVAL;
+		goto unregister_proto;
 	}
 
 	dev_nr = kcalloc(nr_ndevs, sizeof(struct net_device *), GFP_KERNEL);
-	if (dev_nr == NULL) {
-		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n");
-		return -1;
+	if (!dev_nr) {
+		pr_err("NET/ROM: %s - unable to allocate device array\n",
+		       __func__);
+		rc = -ENOMEM;
+		goto unregister_proto;
 	}
 
 	for (i = 0; i < nr_ndevs; i++) {
@@ -1413,13 +1417,13 @@ static int __init nr_proto_init(void)
 		sprintf(name, "nr%d", i);
 		dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
 		if (!dev) {
-			printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
+			rc = -ENOMEM;
 			goto fail;
 		}
 
 		dev->base_addr = i;
-		if (register_netdev(dev)) {
-			printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register network device\n");
+		rc = register_netdev(dev);
+		if (rc) {
 			free_netdev(dev);
 			goto fail;
 		}
@@ -1427,36 +1431,64 @@ static int __init nr_proto_init(void)
 		dev_nr[i] = dev;
 	}
 
-	if (sock_register(&nr_family_ops)) {
-		printk(KERN_ERR "NET/ROM: nr_proto_init - unable to register socket family\n");
+	rc = sock_register(&nr_family_ops);
+	if (rc)
 		goto fail;
-	}
 
-	register_netdevice_notifier(&nr_dev_notifier);
+	rc = register_netdevice_notifier(&nr_dev_notifier);
+	if (rc)
+		goto out_sock;
 
 	ax25_register_pid(&nr_pid);
 	ax25_linkfail_register(&nr_linkfail_notifier);
 
 #ifdef CONFIG_SYSCTL
-	nr_register_sysctl();
+	rc = nr_register_sysctl();
+	if (rc)
+		goto out_sysctl;
 #endif
 
 	nr_loopback_init();
 
-	proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops);
-	proc_create_seq("nr_neigh", 0444, init_net.proc_net, &nr_neigh_seqops);
-	proc_create_seq("nr_nodes", 0444, init_net.proc_net, &nr_node_seqops);
-out:
-	return rc;
+	rc = -ENOMEM;
+	if (!proc_create_seq("nr", 0444, init_net.proc_net, &nr_info_seqops))
+		goto proc_remove1;
+	if (!proc_create_seq("nr_neigh", 0444, init_net.proc_net,
+			     &nr_neigh_seqops))
+		goto proc_remove2;
+	if (!proc_create_seq("nr_nodes", 0444, init_net.proc_net,
+			     &nr_node_seqops))
+		goto proc_remove3;
+
+	return 0;
+
+proc_remove3:
+	remove_proc_entry("nr_neigh", init_net.proc_net);
+proc_remove2:
+	remove_proc_entry("nr", init_net.proc_net);
+proc_remove1:
+
+	nr_loopback_clear();
+	nr_rt_free();
+
+#ifdef CONFIG_SYSCTL
+	nr_unregister_sysctl();
+out_sysctl:
+#endif
+	ax25_linkfail_release(&nr_linkfail_notifier);
+	ax25_protocol_release(AX25_P_NETROM);
+	unregister_netdevice_notifier(&nr_dev_notifier);
+out_sock:
+	sock_unregister(PF_NETROM);
 fail:
 	while (--i >= 0) {
 		unregister_netdev(dev_nr[i]);
 		free_netdev(dev_nr[i]);
 	}
 	kfree(dev_nr);
+unregister_proto:
 	proto_unregister(&nr_proto);
-	rc = -1;
-	goto out;
+	return rc;
 }
 
 module_init(nr_proto_init);
diff --git a/net/netrom/nr_loopback.c b/net/netrom/nr_loopback.c
index 215ad22a9647..93d13f019981 100644
--- a/net/netrom/nr_loopback.c
+++ b/net/netrom/nr_loopback.c
@@ -70,7 +70,7 @@ static void nr_loopback_timer(struct timer_list *unused)
 	}
 }
 
-void __exit nr_loopback_clear(void)
+void nr_loopback_clear(void)
 {
 	del_timer_sync(&loopback_timer);
 	skb_queue_purge(&loopback_queue);
diff --git a/net/netrom/nr_route.c b/net/netrom/nr_route.c
index 6485f593e2f0..b76aa668a94b 100644
--- a/net/netrom/nr_route.c
+++ b/net/netrom/nr_route.c
@@ -953,7 +953,7 @@ const struct seq_operations nr_neigh_seqops = {
 /*
  *	Free all memory associated with the nodes and routes lists.
  */
-void __exit nr_rt_free(void)
+void nr_rt_free(void)
 {
 	struct nr_neigh *s = NULL;
 	struct nr_node  *t = NULL;
diff --git a/net/netrom/sysctl_net_netrom.c b/net/netrom/sysctl_net_netrom.c
index ba1c368b3f18..771011b84270 100644
--- a/net/netrom/sysctl_net_netrom.c
+++ b/net/netrom/sysctl_net_netrom.c
@@ -146,9 +146,12 @@ static struct ctl_table nr_table[] = {
 	{ }
 };
 
-void __init nr_register_sysctl(void)
+int __init nr_register_sysctl(void)
 {
 	nr_table_header = register_net_sysctl(&init_net, "net/netrom", nr_table);
+	if (!nr_table_header)
+		return -ENOMEM;
+	return 0;
 }
 
 void nr_unregister_sysctl(void)
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index d6cc97fbbbb0..2b969f99ef13 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -543,6 +543,9 @@ static int rds_connect(struct socket *sock, struct sockaddr *uaddr,
 	struct rds_sock *rs = rds_sk_to_rs(sk);
 	int ret = 0;
 
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		return -EINVAL;
+
 	lock_sock(sk);
 
 	switch (uaddr->sa_family) {
diff --git a/net/rds/bind.c b/net/rds/bind.c
index 17c9d9f0c848..0f4398e7f2a7 100644
--- a/net/rds/bind.c
+++ b/net/rds/bind.c
@@ -173,6 +173,8 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
 	/* We allow an RDS socket to be bound to either IPv4 or IPv6
 	 * address.
 	 */
+	if (addr_len < offsetofend(struct sockaddr, sa_family))
+		return -EINVAL;
 	if (uaddr->sa_family == AF_INET) {
 		struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
 
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 96f2952bbdfd..ae8c5d7f3bf1 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -135,7 +135,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 	struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *)saddr;
 	struct rxrpc_local *local;
 	struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
-	u16 service_id = srx->srx_service;
+	u16 service_id;
 	int ret;
 
 	_enter("%p,%p,%d", rx, saddr, len);
@@ -143,6 +143,7 @@ static int rxrpc_bind(struct socket *sock, struct sockaddr *saddr, int len)
 	ret = rxrpc_validate_address(rx, srx, len);
 	if (ret < 0)
 		goto error;
+	service_id = srx->srx_service;
 
 	lock_sock(&rx->sk);
 
@@ -370,18 +371,22 @@ EXPORT_SYMBOL(rxrpc_kernel_end_call);
  * rxrpc_kernel_check_life - Check to see whether a call is still alive
  * @sock: The socket the call is on
  * @call: The call to check
+ * @_life: Where to store the life value
  *
  * Allow a kernel service to find out whether a call is still alive - ie. we're
- * getting ACKs from the server.  Returns a number representing the life state
- * which can be compared to that returned by a previous call.
+ * getting ACKs from the server.  Passes back in *_life a number representing
+ * the life state which can be compared to that returned by a previous call and
+ * return true if the call is still alive.
  *
  * If the life state stalls, rxrpc_kernel_probe_life() should be called and
  * then 2RTT waited.
  */
-u32 rxrpc_kernel_check_life(const struct socket *sock,
-			    const struct rxrpc_call *call)
+bool rxrpc_kernel_check_life(const struct socket *sock,
+			     const struct rxrpc_call *call,
+			     u32 *_life)
 {
-	return call->acks_latest;
+	*_life = call->acks_latest;
+	return call->state != RXRPC_CALL_COMPLETE;
 }
 EXPORT_SYMBOL(rxrpc_kernel_check_life);
 
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 4b1a534d290a..062ca9dc29b8 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -654,6 +654,7 @@ struct rxrpc_call {
 	u8			ackr_reason;	/* reason to ACK */
 	u16			ackr_skew;	/* skew on packet being ACK'd */
 	rxrpc_serial_t		ackr_serial;	/* serial of packet being ACK'd */
+	rxrpc_serial_t		ackr_first_seq;	/* first sequence number received */
 	rxrpc_seq_t		ackr_prev_seq;	/* previous sequence number received */
 	rxrpc_seq_t		ackr_consumed;	/* Highest packet shown consumed */
 	rxrpc_seq_t		ackr_seen;	/* Highest packet shown seen */
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index b6fca8ebb117..8d31fb4c51e1 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -153,7 +153,8 @@ static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
  * pass a connection-level abort onto all calls on that connection
  */
 static void rxrpc_abort_calls(struct rxrpc_connection *conn,
-			      enum rxrpc_call_completion compl)
+			      enum rxrpc_call_completion compl,
+			      rxrpc_serial_t serial)
 {
 	struct rxrpc_call *call;
 	int i;
@@ -173,6 +174,9 @@ static void rxrpc_abort_calls(struct rxrpc_connection *conn,
 					  call->call_id, 0,
 					  conn->abort_code,
 					  conn->error);
+		else
+			trace_rxrpc_rx_abort(call, serial,
+					     conn->abort_code);
 		if (rxrpc_set_call_completion(call, compl,
 					      conn->abort_code,
 					      conn->error))
@@ -213,8 +217,6 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 	conn->state = RXRPC_CONN_LOCALLY_ABORTED;
 	spin_unlock_bh(&conn->state_lock);
 
-	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED);
-
 	msg.msg_name	= &conn->params.peer->srx.transport;
 	msg.msg_namelen	= conn->params.peer->srx.transport_len;
 	msg.msg_control	= NULL;
@@ -242,6 +244,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
 	len = iov[0].iov_len + iov[1].iov_len;
 
 	serial = atomic_inc_return(&conn->serial);
+	rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial);
 	whdr.serial = htonl(serial);
 	_proto("Tx CONN ABORT %%%u { %d }", serial, conn->abort_code);
 
@@ -321,7 +324,7 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
 		conn->error = -ECONNABORTED;
 		conn->abort_code = abort_code;
 		conn->state = RXRPC_CONN_REMOTELY_ABORTED;
-		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED);
+		rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial);
 		return -ECONNABORTED;
 
 	case RXRPC_PACKET_TYPE_CHALLENGE:
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 9128aa0e40aa..4c6f9d0a00e7 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -837,7 +837,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 		u8 acks[RXRPC_MAXACKS];
 	} buf;
 	rxrpc_serial_t acked_serial;
-	rxrpc_seq_t first_soft_ack, hard_ack;
+	rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
 	int nr_acks, offset, ioffset;
 
 	_enter("");
@@ -851,13 +851,14 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 
 	acked_serial = ntohl(buf.ack.serial);
 	first_soft_ack = ntohl(buf.ack.firstPacket);
+	prev_pkt = ntohl(buf.ack.previousPacket);
 	hard_ack = first_soft_ack - 1;
 	nr_acks = buf.ack.nAcks;
 	summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
 			      buf.ack.reason : RXRPC_ACK__INVALID);
 
 	trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
-			   first_soft_ack, ntohl(buf.ack.previousPacket),
+			   first_soft_ack, prev_pkt,
 			   summary.ack_reason, nr_acks);
 
 	if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
@@ -878,8 +879,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 				  rxrpc_propose_ack_respond_to_ack);
 	}
 
-	/* Discard any out-of-order or duplicate ACKs. */
-	if (before_eq(sp->hdr.serial, call->acks_latest))
+	/* Discard any out-of-order or duplicate ACKs (outside lock). */
+	if (before(first_soft_ack, call->ackr_first_seq) ||
+	    before(prev_pkt, call->ackr_prev_seq))
 		return;
 
 	buf.info.rxMTU = 0;
@@ -890,12 +892,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 
 	spin_lock(&call->input_lock);
 
-	/* Discard any out-of-order or duplicate ACKs. */
-	if (before_eq(sp->hdr.serial, call->acks_latest))
+	/* Discard any out-of-order or duplicate ACKs (inside lock). */
+	if (before(first_soft_ack, call->ackr_first_seq) ||
+	    before(prev_pkt, call->ackr_prev_seq))
 		goto out;
 	call->acks_latest_ts = skb->tstamp;
 	call->acks_latest = sp->hdr.serial;
 
+	call->ackr_first_seq = first_soft_ack;
+	call->ackr_prev_seq = prev_pkt;
+
 	/* Parse rwind and mtu sizes if provided. */
 	if (buf.info.rxMTU)
 		rxrpc_input_ackinfo(call, skb, &buf.info);
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c
index bc05af89fc38..6e84d878053c 100644
--- a/net/rxrpc/peer_event.c
+++ b/net/rxrpc/peer_event.c
@@ -157,6 +157,11 @@ void rxrpc_error_report(struct sock *sk)
157 157
158 _enter("%p{%d}", sk, local->debug_id); 158 _enter("%p{%d}", sk, local->debug_id);
159 159
160 /* Clear the outstanding error value on the socket so that it doesn't
161 * cause kernel_sendmsg() to return it later.
162 */
163 sock_error(sk);
164
160 skb = sock_dequeue_err_skb(sk); 165 skb = sock_dequeue_err_skb(sk);
161 if (!skb) { 166 if (!skb) {
162 _leave("UDP socket errqueue empty"); 167 _leave("UDP socket errqueue empty");
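The added sock_error() call matters because sock_error() reads and clears sk->sk_err in one step, so the ICMP-reported error consumed here is not handed back again by a later kernel_sendmsg() on the same transport socket. A rough userspace analogue of that read-and-clear behaviour:

#include <stdatomic.h>

/* Fetch the pending error and reset it in one step, so a later sender
 * does not see the same error a second time. (The kernel helper also
 * negates the value it returns.)
 */
static int consume_pending_error(atomic_int *pending_err)
{
	return atomic_exchange(pending_err, 0);
}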
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index 46c9312085b1..bec64deb7b0a 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -152,12 +152,13 @@ static void rxrpc_notify_end_tx(struct rxrpc_sock *rx, struct rxrpc_call *call,
152} 152}
153 153
154/* 154/*
155 * Queue a DATA packet for transmission, set the resend timeout and send the 155 * Queue a DATA packet for transmission, set the resend timeout and send
156 * packet immediately 156 * the packet immediately. Returns the error from rxrpc_send_data_packet()
157 * in case the caller wants to do something with it.
157 */ 158 */
158static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call, 159static int rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
159 struct sk_buff *skb, bool last, 160 struct sk_buff *skb, bool last,
160 rxrpc_notify_end_tx_t notify_end_tx) 161 rxrpc_notify_end_tx_t notify_end_tx)
161{ 162{
162 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 163 struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
163 unsigned long now; 164 unsigned long now;
@@ -250,7 +251,8 @@ static void rxrpc_queue_packet(struct rxrpc_sock *rx, struct rxrpc_call *call,
250 251
251out: 252out:
252 rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 253 rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
253 _leave(""); 254 _leave(" = %d", ret);
255 return ret;
254} 256}
255 257
256/* 258/*
@@ -423,9 +425,10 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
423 if (ret < 0) 425 if (ret < 0)
424 goto out; 426 goto out;
425 427
426 rxrpc_queue_packet(rx, call, skb, 428 ret = rxrpc_queue_packet(rx, call, skb,
427 !msg_data_left(msg) && !more, 429 !msg_data_left(msg) && !more,
428 notify_end_tx); 430 notify_end_tx);
431 /* Should check for failure here */
429 skb = NULL; 432 skb = NULL;
430 } 433 }
431 } while (msg_data_left(msg) > 0); 434 } while (msg_data_left(msg) > 0);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9874e60c9b0d..4583fa914e62 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4847,7 +4847,8 @@ static int sctp_connect(struct sock *sk, struct sockaddr *addr,
4847 } 4847 }
4848 4848
4849 /* Validate addr_len before calling common connect/connectx routine. */ 4849 /* Validate addr_len before calling common connect/connectx routine. */
4850 af = sctp_get_af_specific(addr->sa_family); 4850 af = addr_len < offsetofend(struct sockaddr, sa_family) ? NULL :
4851 sctp_get_af_specific(addr->sa_family);
4851 if (!af || addr_len < af->sockaddr_len) { 4852 if (!af || addr_len < af->sockaddr_len) {
4852 err = -EINVAL; 4853 err = -EINVAL;
4853 } else { 4854 } else {
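The extra addr_len test guards the read of addr->sa_family itself: sctp_get_af_specific() must not be asked to interpret a family field the caller never actually supplied. A userspace sketch of the same guard, assuming the usual definition of offsetofend() (first byte past the named member); the helper name is made up for illustration:

#include <stddef.h>
#include <sys/socket.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Only look at sa_family once addr_len is known to cover it. */
static int family_or_error(const struct sockaddr *addr, int addr_len)
{
	if (addr_len < (int)offsetofend(struct sockaddr, sa_family))
		return -1;	/* too short to trust */
	return addr->sa_family;
}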
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 77ef53596d18..6f869ef49b32 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -167,10 +167,9 @@ static int smc_release(struct socket *sock)
167 167
168 if (sk->sk_state == SMC_CLOSED) { 168 if (sk->sk_state == SMC_CLOSED) {
169 if (smc->clcsock) { 169 if (smc->clcsock) {
170 mutex_lock(&smc->clcsock_release_lock); 170 release_sock(sk);
171 sock_release(smc->clcsock); 171 smc_clcsock_release(smc);
172 smc->clcsock = NULL; 172 lock_sock(sk);
173 mutex_unlock(&smc->clcsock_release_lock);
174 } 173 }
175 if (!smc->use_fallback) 174 if (!smc->use_fallback)
176 smc_conn_free(&smc->conn); 175 smc_conn_free(&smc->conn);
@@ -446,10 +445,19 @@ static void smc_link_save_peer_info(struct smc_link *link,
446 link->peer_mtu = clc->qp_mtu; 445 link->peer_mtu = clc->qp_mtu;
447} 446}
448 447
448static void smc_switch_to_fallback(struct smc_sock *smc)
449{
450 smc->use_fallback = true;
451 if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
452 smc->clcsock->file = smc->sk.sk_socket->file;
453 smc->clcsock->file->private_data = smc->clcsock;
454 }
455}
456
449/* fall back during connect */ 457/* fall back during connect */
450static int smc_connect_fallback(struct smc_sock *smc, int reason_code) 458static int smc_connect_fallback(struct smc_sock *smc, int reason_code)
451{ 459{
452 smc->use_fallback = true; 460 smc_switch_to_fallback(smc);
453 smc->fallback_rsn = reason_code; 461 smc->fallback_rsn = reason_code;
454 smc_copy_sock_settings_to_clc(smc); 462 smc_copy_sock_settings_to_clc(smc);
455 if (smc->sk.sk_state == SMC_INIT) 463 if (smc->sk.sk_state == SMC_INIT)
@@ -775,10 +783,14 @@ static void smc_connect_work(struct work_struct *work)
775 smc->sk.sk_err = -rc; 783 smc->sk.sk_err = -rc;
776 784
777out: 785out:
778 if (smc->sk.sk_err) 786 if (!sock_flag(&smc->sk, SOCK_DEAD)) {
779 smc->sk.sk_state_change(&smc->sk); 787 if (smc->sk.sk_err) {
780 else 788 smc->sk.sk_state_change(&smc->sk);
781 smc->sk.sk_write_space(&smc->sk); 789 } else { /* allow polling before and after fallback decision */
790 smc->clcsock->sk->sk_write_space(smc->clcsock->sk);
791 smc->sk.sk_write_space(&smc->sk);
792 }
793 }
782 kfree(smc->connect_info); 794 kfree(smc->connect_info);
783 smc->connect_info = NULL; 795 smc->connect_info = NULL;
784 release_sock(&smc->sk); 796 release_sock(&smc->sk);
@@ -872,11 +884,11 @@ static int smc_clcsock_accept(struct smc_sock *lsmc, struct smc_sock **new_smc)
872 if (rc < 0) 884 if (rc < 0)
873 lsk->sk_err = -rc; 885 lsk->sk_err = -rc;
874 if (rc < 0 || lsk->sk_state == SMC_CLOSED) { 886 if (rc < 0 || lsk->sk_state == SMC_CLOSED) {
887 new_sk->sk_prot->unhash(new_sk);
875 if (new_clcsock) 888 if (new_clcsock)
876 sock_release(new_clcsock); 889 sock_release(new_clcsock);
877 new_sk->sk_state = SMC_CLOSED; 890 new_sk->sk_state = SMC_CLOSED;
878 sock_set_flag(new_sk, SOCK_DEAD); 891 sock_set_flag(new_sk, SOCK_DEAD);
879 new_sk->sk_prot->unhash(new_sk);
880 sock_put(new_sk); /* final */ 892 sock_put(new_sk); /* final */
881 *new_smc = NULL; 893 *new_smc = NULL;
882 goto out; 894 goto out;
@@ -927,16 +939,21 @@ struct sock *smc_accept_dequeue(struct sock *parent,
927 939
928 smc_accept_unlink(new_sk); 940 smc_accept_unlink(new_sk);
929 if (new_sk->sk_state == SMC_CLOSED) { 941 if (new_sk->sk_state == SMC_CLOSED) {
942 new_sk->sk_prot->unhash(new_sk);
930 if (isk->clcsock) { 943 if (isk->clcsock) {
931 sock_release(isk->clcsock); 944 sock_release(isk->clcsock);
932 isk->clcsock = NULL; 945 isk->clcsock = NULL;
933 } 946 }
934 new_sk->sk_prot->unhash(new_sk);
935 sock_put(new_sk); /* final */ 947 sock_put(new_sk); /* final */
936 continue; 948 continue;
937 } 949 }
938 if (new_sock) 950 if (new_sock) {
939 sock_graft(new_sk, new_sock); 951 sock_graft(new_sk, new_sock);
952 if (isk->use_fallback) {
953 smc_sk(new_sk)->clcsock->file = new_sock->file;
954 isk->clcsock->file->private_data = isk->clcsock;
955 }
956 }
940 return new_sk; 957 return new_sk;
941 } 958 }
942 return NULL; 959 return NULL;
@@ -956,6 +973,7 @@ void smc_close_non_accepted(struct sock *sk)
956 sock_set_flag(sk, SOCK_DEAD); 973 sock_set_flag(sk, SOCK_DEAD);
957 sk->sk_shutdown |= SHUTDOWN_MASK; 974 sk->sk_shutdown |= SHUTDOWN_MASK;
958 } 975 }
976 sk->sk_prot->unhash(sk);
959 if (smc->clcsock) { 977 if (smc->clcsock) {
960 struct socket *tcp; 978 struct socket *tcp;
961 979
@@ -971,7 +989,6 @@ void smc_close_non_accepted(struct sock *sk)
971 smc_conn_free(&smc->conn); 989 smc_conn_free(&smc->conn);
972 } 990 }
973 release_sock(sk); 991 release_sock(sk);
974 sk->sk_prot->unhash(sk);
975 sock_put(sk); /* final sock_put */ 992 sock_put(sk); /* final sock_put */
976} 993}
977 994
@@ -1037,13 +1054,13 @@ static void smc_listen_out(struct smc_sock *new_smc)
1037 struct smc_sock *lsmc = new_smc->listen_smc; 1054 struct smc_sock *lsmc = new_smc->listen_smc;
1038 struct sock *newsmcsk = &new_smc->sk; 1055 struct sock *newsmcsk = &new_smc->sk;
1039 1056
1040 lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
1041 if (lsmc->sk.sk_state == SMC_LISTEN) { 1057 if (lsmc->sk.sk_state == SMC_LISTEN) {
1058 lock_sock_nested(&lsmc->sk, SINGLE_DEPTH_NESTING);
1042 smc_accept_enqueue(&lsmc->sk, newsmcsk); 1059 smc_accept_enqueue(&lsmc->sk, newsmcsk);
1060 release_sock(&lsmc->sk);
1043 } else { /* no longer listening */ 1061 } else { /* no longer listening */
1044 smc_close_non_accepted(newsmcsk); 1062 smc_close_non_accepted(newsmcsk);
1045 } 1063 }
1046 release_sock(&lsmc->sk);
1047 1064
1048 /* Wake up accept */ 1065 /* Wake up accept */
1049 lsmc->sk.sk_data_ready(&lsmc->sk); 1066 lsmc->sk.sk_data_ready(&lsmc->sk);
@@ -1087,7 +1104,7 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
1087 return; 1104 return;
1088 } 1105 }
1089 smc_conn_free(&new_smc->conn); 1106 smc_conn_free(&new_smc->conn);
1090 new_smc->use_fallback = true; 1107 smc_switch_to_fallback(new_smc);
1091 new_smc->fallback_rsn = reason_code; 1108 new_smc->fallback_rsn = reason_code;
1092 if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) { 1109 if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
1093 if (smc_clc_send_decline(new_smc, reason_code) < 0) { 1110 if (smc_clc_send_decline(new_smc, reason_code) < 0) {
@@ -1237,6 +1254,9 @@ static void smc_listen_work(struct work_struct *work)
1237 int rc = 0; 1254 int rc = 0;
1238 u8 ibport; 1255 u8 ibport;
1239 1256
1257 if (new_smc->listen_smc->sk.sk_state != SMC_LISTEN)
1258 return smc_listen_out_err(new_smc);
1259
1240 if (new_smc->use_fallback) { 1260 if (new_smc->use_fallback) {
1241 smc_listen_out_connected(new_smc); 1261 smc_listen_out_connected(new_smc);
1242 return; 1262 return;
@@ -1244,7 +1264,7 @@ static void smc_listen_work(struct work_struct *work)
1244 1264
1245 /* check if peer is smc capable */ 1265 /* check if peer is smc capable */
1246 if (!tcp_sk(newclcsock->sk)->syn_smc) { 1266 if (!tcp_sk(newclcsock->sk)->syn_smc) {
1247 new_smc->use_fallback = true; 1267 smc_switch_to_fallback(new_smc);
1248 new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC; 1268 new_smc->fallback_rsn = SMC_CLC_DECL_PEERNOSMC;
1249 smc_listen_out_connected(new_smc); 1269 smc_listen_out_connected(new_smc);
1250 return; 1270 return;
@@ -1501,7 +1521,7 @@ static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1501 1521
1502 if (msg->msg_flags & MSG_FASTOPEN) { 1522 if (msg->msg_flags & MSG_FASTOPEN) {
1503 if (sk->sk_state == SMC_INIT) { 1523 if (sk->sk_state == SMC_INIT) {
1504 smc->use_fallback = true; 1524 smc_switch_to_fallback(smc);
1505 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; 1525 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
1506 } else { 1526 } else {
1507 rc = -EINVAL; 1527 rc = -EINVAL;
@@ -1703,7 +1723,7 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
1703 case TCP_FASTOPEN_NO_COOKIE: 1723 case TCP_FASTOPEN_NO_COOKIE:
1704 /* option not supported by SMC */ 1724 /* option not supported by SMC */
1705 if (sk->sk_state == SMC_INIT) { 1725 if (sk->sk_state == SMC_INIT) {
1706 smc->use_fallback = true; 1726 smc_switch_to_fallback(smc);
1707 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP; 1727 smc->fallback_rsn = SMC_CLC_DECL_OPTUNSUPP;
1708 } else { 1728 } else {
1709 if (!smc->use_fallback) 1729 if (!smc->use_fallback)
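smc_switch_to_fallback(), introduced above, does more than set use_fallback: it also points the socket file's private_data at the internal TCP clcsock, so later file-based operations reach the socket that actually carries the traffic. A rough mirror of that redirect using hypothetical stand-in types (not the kernel's struct socket or struct file):

#include <stdbool.h>
#include <stddef.h>

struct file_like { void *private_data; };
struct sock_like { struct file_like *file; };

struct smc_like {
	bool use_fallback;
	struct sock_like *smc_sock;	/* the SMC-facing socket */
	struct sock_like *clcsock;	/* the internal TCP socket */
};

/* Redirect file-based I/O to the internal TCP socket once falling back. */
static void switch_to_fallback(struct smc_like *smc)
{
	smc->use_fallback = true;
	if (smc->smc_sock->file) {
		smc->clcsock->file = smc->smc_sock->file;
		smc->clcsock->file->private_data = smc->clcsock;
	}
}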
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index 2ad37e998509..fc06720b53c1 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -21,6 +21,22 @@
21 21
22#define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ) 22#define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME (5 * HZ)
23 23
24/* release the clcsock that is assigned to the smc_sock */
25void smc_clcsock_release(struct smc_sock *smc)
26{
27 struct socket *tcp;
28
29 if (smc->listen_smc && current_work() != &smc->smc_listen_work)
30 cancel_work_sync(&smc->smc_listen_work);
31 mutex_lock(&smc->clcsock_release_lock);
32 if (smc->clcsock) {
33 tcp = smc->clcsock;
34 smc->clcsock = NULL;
35 sock_release(tcp);
36 }
37 mutex_unlock(&smc->clcsock_release_lock);
38}
39
24static void smc_close_cleanup_listen(struct sock *parent) 40static void smc_close_cleanup_listen(struct sock *parent)
25{ 41{
26 struct sock *sk; 42 struct sock *sk;
@@ -321,6 +337,7 @@ static void smc_close_passive_work(struct work_struct *work)
321 close_work); 337 close_work);
322 struct smc_sock *smc = container_of(conn, struct smc_sock, conn); 338 struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
323 struct smc_cdc_conn_state_flags *rxflags; 339 struct smc_cdc_conn_state_flags *rxflags;
340 bool release_clcsock = false;
324 struct sock *sk = &smc->sk; 341 struct sock *sk = &smc->sk;
325 int old_state; 342 int old_state;
326 343
@@ -400,13 +417,13 @@ wakeup:
400 if ((sk->sk_state == SMC_CLOSED) && 417 if ((sk->sk_state == SMC_CLOSED) &&
401 (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) { 418 (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket)) {
402 smc_conn_free(conn); 419 smc_conn_free(conn);
403 if (smc->clcsock) { 420 if (smc->clcsock)
404 sock_release(smc->clcsock); 421 release_clcsock = true;
405 smc->clcsock = NULL;
406 }
407 } 422 }
408 } 423 }
409 release_sock(sk); 424 release_sock(sk);
425 if (release_clcsock)
426 smc_clcsock_release(smc);
410 sock_put(sk); /* sock_hold done by schedulers of close_work */ 427 sock_put(sk); /* sock_hold done by schedulers of close_work */
411} 428}
412 429
diff --git a/net/smc/smc_close.h b/net/smc/smc_close.h
index 19eb6a211c23..e0e3b5df25d2 100644
--- a/net/smc/smc_close.h
+++ b/net/smc/smc_close.h
@@ -23,5 +23,6 @@ void smc_close_wake_tx_prepared(struct smc_sock *smc);
23int smc_close_active(struct smc_sock *smc); 23int smc_close_active(struct smc_sock *smc);
24int smc_close_shutdown_write(struct smc_sock *smc); 24int smc_close_shutdown_write(struct smc_sock *smc);
25void smc_close_init(struct smc_sock *smc); 25void smc_close_init(struct smc_sock *smc);
26void smc_clcsock_release(struct smc_sock *smc);
26 27
27#endif /* SMC_CLOSE_H */ 28#endif /* SMC_CLOSE_H */
diff --git a/net/smc/smc_ism.c b/net/smc/smc_ism.c
index 2fff79db1a59..e89e918b88e0 100644
--- a/net/smc/smc_ism.c
+++ b/net/smc/smc_ism.c
@@ -289,6 +289,11 @@ struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
289 INIT_LIST_HEAD(&smcd->vlan); 289 INIT_LIST_HEAD(&smcd->vlan);
290 smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)", 290 smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s)",
291 WQ_MEM_RECLAIM, name); 291 WQ_MEM_RECLAIM, name);
292 if (!smcd->event_wq) {
293 kfree(smcd->conn);
294 kfree(smcd);
295 return NULL;
296 }
292 return smcd; 297 return smcd;
293} 298}
294EXPORT_SYMBOL_GPL(smcd_alloc_dev); 299EXPORT_SYMBOL_GPL(smcd_alloc_dev);
diff --git a/net/smc/smc_pnet.c b/net/smc/smc_pnet.c
index 8d2f6296279c..0285c7f9e79b 100644
--- a/net/smc/smc_pnet.c
+++ b/net/smc/smc_pnet.c
@@ -603,7 +603,8 @@ static int smc_pnet_flush(struct sk_buff *skb, struct genl_info *info)
603{ 603{
604 struct net *net = genl_info_net(info); 604 struct net *net = genl_info_net(info);
605 605
606 return smc_pnet_remove_by_pnetid(net, NULL); 606 smc_pnet_remove_by_pnetid(net, NULL);
607 return 0;
607} 608}
608 609
609/* SMC_PNETID generic netlink operation definition */ 610/* SMC_PNETID generic netlink operation definition */
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 860dcfb95ee4..fa6c977b4c41 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -140,13 +140,11 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
140 /* We are going to append to the frags_list of head. 140 /* We are going to append to the frags_list of head.
141 * Need to unshare the frag_list. 141 * Need to unshare the frag_list.
142 */ 142 */
143 if (skb_has_frag_list(head)) { 143 err = skb_unclone(head, GFP_ATOMIC);
144 err = skb_unclone(head, GFP_ATOMIC); 144 if (err) {
145 if (err) { 145 STRP_STATS_INCR(strp->stats.mem_fail);
146 STRP_STATS_INCR(strp->stats.mem_fail); 146 desc->error = err;
147 desc->error = err; 147 return 0;
148 return 0;
149 }
150 } 148 }
151 149
152 if (unlikely(skb_shinfo(head)->frag_list)) { 150 if (unlikely(skb_shinfo(head)->frag_list)) {
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 341ecd796aa4..131aa2f0fd27 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -869,6 +869,8 @@ void tipc_link_reset(struct tipc_link *l)
869 __skb_queue_head_init(&list); 869 __skb_queue_head_init(&list);
870 870
871 l->in_session = false; 871 l->in_session = false;
872 /* Force re-synch of peer session number before establishing */
873 l->peer_session--;
872 l->session++; 874 l->session++;
873 l->mtu = l->advertised_mtu; 875 l->mtu = l->advertised_mtu;
874 876
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index bff241f03525..89993afe0fbd 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -909,7 +909,8 @@ static int tipc_nl_service_list(struct net *net, struct tipc_nl_msg *msg,
909 for (; i < TIPC_NAMETBL_SIZE; i++) { 909 for (; i < TIPC_NAMETBL_SIZE; i++) {
910 head = &tn->nametbl->services[i]; 910 head = &tn->nametbl->services[i];
911 911
912 if (*last_type) { 912 if (*last_type ||
913 (!i && *last_key && (*last_lower == *last_key))) {
913 service = tipc_service_find(net, *last_type); 914 service = tipc_service_find(net, *last_type);
914 if (!service) 915 if (!service)
915 return -EPIPE; 916 return -EPIPE;
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index 3481e4906bd6..9df82a573aa7 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -38,6 +38,8 @@
38 38
39#include <linux/sysctl.h> 39#include <linux/sysctl.h>
40 40
41static int zero;
42static int one = 1;
41static struct ctl_table_header *tipc_ctl_hdr; 43static struct ctl_table_header *tipc_ctl_hdr;
42 44
43static struct ctl_table tipc_table[] = { 45static struct ctl_table tipc_table[] = {
@@ -46,14 +48,16 @@ static struct ctl_table tipc_table[] = {
46 .data = &sysctl_tipc_rmem, 48 .data = &sysctl_tipc_rmem,
47 .maxlen = sizeof(sysctl_tipc_rmem), 49 .maxlen = sizeof(sysctl_tipc_rmem),
48 .mode = 0644, 50 .mode = 0644,
49 .proc_handler = proc_dointvec, 51 .proc_handler = proc_dointvec_minmax,
52 .extra1 = &one,
50 }, 53 },
51 { 54 {
52 .procname = "named_timeout", 55 .procname = "named_timeout",
53 .data = &sysctl_tipc_named_timeout, 56 .data = &sysctl_tipc_named_timeout,
54 .maxlen = sizeof(sysctl_tipc_named_timeout), 57 .maxlen = sizeof(sysctl_tipc_named_timeout),
55 .mode = 0644, 58 .mode = 0644,
56 .proc_handler = proc_dointvec, 59 .proc_handler = proc_dointvec_minmax,
60 .extra1 = &zero,
57 }, 61 },
58 { 62 {
59 .procname = "sk_filter", 63 .procname = "sk_filter",
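proc_dointvec_minmax only enforces a bound on the side whose extra pointer is non-NULL; here extra1 supplies the floor (1 for the rmem sizes, 0 for named_timeout) and no ceiling is set. An illustrative ctl_table entry with both a floor and a ceiling, using made-up names, might look like:

#include <linux/sysctl.h>

/* Illustrative only: an integer sysctl clamped to [1, 10]. */
static int example_min = 1;
static int example_max = 10;
static int example_value = 5;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_value,
		.maxlen		= sizeof(example_value),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &example_min,
		.extra2		= &example_max,
	},
	{ }
};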
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 135a7ee9db03..9f3bdbc1e593 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -52,8 +52,11 @@ static DEFINE_SPINLOCK(tls_device_lock);
52 52
53static void tls_device_free_ctx(struct tls_context *ctx) 53static void tls_device_free_ctx(struct tls_context *ctx)
54{ 54{
55 if (ctx->tx_conf == TLS_HW) 55 if (ctx->tx_conf == TLS_HW) {
56 kfree(tls_offload_ctx_tx(ctx)); 56 kfree(tls_offload_ctx_tx(ctx));
57 kfree(ctx->tx.rec_seq);
58 kfree(ctx->tx.iv);
59 }
57 60
58 if (ctx->rx_conf == TLS_HW) 61 if (ctx->rx_conf == TLS_HW)
59 kfree(tls_offload_ctx_rx(ctx)); 62 kfree(tls_offload_ctx_rx(ctx));
@@ -216,6 +219,13 @@ void tls_device_sk_destruct(struct sock *sk)
216} 219}
217EXPORT_SYMBOL(tls_device_sk_destruct); 220EXPORT_SYMBOL(tls_device_sk_destruct);
218 221
222void tls_device_free_resources_tx(struct sock *sk)
223{
224 struct tls_context *tls_ctx = tls_get_ctx(sk);
225
226 tls_free_partial_record(sk, tls_ctx);
227}
228
219static void tls_append_frag(struct tls_record_info *record, 229static void tls_append_frag(struct tls_record_info *record,
220 struct page_frag *pfrag, 230 struct page_frag *pfrag,
221 int size) 231 int size)
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index df921a2904b9..9547cea0ce3b 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -208,6 +208,26 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
208 return tls_push_sg(sk, ctx, sg, offset, flags); 208 return tls_push_sg(sk, ctx, sg, offset, flags);
209} 209}
210 210
211bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx)
212{
213 struct scatterlist *sg;
214
215 sg = ctx->partially_sent_record;
216 if (!sg)
217 return false;
218
219 while (1) {
220 put_page(sg_page(sg));
221 sk_mem_uncharge(sk, sg->length);
222
223 if (sg_is_last(sg))
224 break;
225 sg++;
226 }
227 ctx->partially_sent_record = NULL;
228 return true;
229}
230
211static void tls_write_space(struct sock *sk) 231static void tls_write_space(struct sock *sk)
212{ 232{
213 struct tls_context *ctx = tls_get_ctx(sk); 233 struct tls_context *ctx = tls_get_ctx(sk);
@@ -267,6 +287,10 @@ static void tls_sk_proto_close(struct sock *sk, long timeout)
267 kfree(ctx->tx.rec_seq); 287 kfree(ctx->tx.rec_seq);
268 kfree(ctx->tx.iv); 288 kfree(ctx->tx.iv);
269 tls_sw_free_resources_tx(sk); 289 tls_sw_free_resources_tx(sk);
290#ifdef CONFIG_TLS_DEVICE
291 } else if (ctx->tx_conf == TLS_HW) {
292 tls_device_free_resources_tx(sk);
293#endif
270 } 294 }
271 295
272 if (ctx->rx_conf == TLS_SW) { 296 if (ctx->rx_conf == TLS_SW) {
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index 20b191227969..b50ced862f6f 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2052,20 +2052,7 @@ void tls_sw_free_resources_tx(struct sock *sk)
2052 /* Free up un-sent records in tx_list. First, free 2052 /* Free up un-sent records in tx_list. First, free
2053 * the partially sent record if any at head of tx_list. 2053 * the partially sent record if any at head of tx_list.
2054 */ 2054 */
2055 if (tls_ctx->partially_sent_record) { 2055 if (tls_free_partial_record(sk, tls_ctx)) {
2056 struct scatterlist *sg = tls_ctx->partially_sent_record;
2057
2058 while (1) {
2059 put_page(sg_page(sg));
2060 sk_mem_uncharge(sk, sg->length);
2061
2062 if (sg_is_last(sg))
2063 break;
2064 sg++;
2065 }
2066
2067 tls_ctx->partially_sent_record = NULL;
2068
2069 rec = list_first_entry(&ctx->tx_list, 2056 rec = list_first_entry(&ctx->tx_list,
2070 struct tls_rec, list); 2057 struct tls_rec, list);
2071 list_del(&rec->list); 2058 list_del(&rec->list);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 25a9e3b5c154..47e30a58566c 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -13650,7 +13650,8 @@ static const struct genl_ops nl80211_ops[] = {
13650 .policy = nl80211_policy, 13650 .policy = nl80211_policy,
13651 .flags = GENL_UNS_ADMIN_PERM, 13651 .flags = GENL_UNS_ADMIN_PERM,
13652 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 13652 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
13653 NL80211_FLAG_NEED_RTNL, 13653 NL80211_FLAG_NEED_RTNL |
13654 NL80211_FLAG_CLEAR_SKB,
13654 }, 13655 },
13655 { 13656 {
13656 .cmd = NL80211_CMD_DEAUTHENTICATE, 13657 .cmd = NL80211_CMD_DEAUTHENTICATE,
@@ -13701,7 +13702,8 @@ static const struct genl_ops nl80211_ops[] = {
13701 .policy = nl80211_policy, 13702 .policy = nl80211_policy,
13702 .flags = GENL_UNS_ADMIN_PERM, 13703 .flags = GENL_UNS_ADMIN_PERM,
13703 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 13704 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
13704 NL80211_FLAG_NEED_RTNL, 13705 NL80211_FLAG_NEED_RTNL |
13706 NL80211_FLAG_CLEAR_SKB,
13705 }, 13707 },
13706 { 13708 {
13707 .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS, 13709 .cmd = NL80211_CMD_UPDATE_CONNECT_PARAMS,
@@ -13709,7 +13711,8 @@ static const struct genl_ops nl80211_ops[] = {
13709 .policy = nl80211_policy, 13711 .policy = nl80211_policy,
13710 .flags = GENL_ADMIN_PERM, 13712 .flags = GENL_ADMIN_PERM,
13711 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 13713 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
13712 NL80211_FLAG_NEED_RTNL, 13714 NL80211_FLAG_NEED_RTNL |
13715 NL80211_FLAG_CLEAR_SKB,
13713 }, 13716 },
13714 { 13717 {
13715 .cmd = NL80211_CMD_DISCONNECT, 13718 .cmd = NL80211_CMD_DISCONNECT,
@@ -13738,7 +13741,8 @@ static const struct genl_ops nl80211_ops[] = {
13738 .policy = nl80211_policy, 13741 .policy = nl80211_policy,
13739 .flags = GENL_UNS_ADMIN_PERM, 13742 .flags = GENL_UNS_ADMIN_PERM,
13740 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 13743 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
13741 NL80211_FLAG_NEED_RTNL, 13744 NL80211_FLAG_NEED_RTNL |
13745 NL80211_FLAG_CLEAR_SKB,
13742 }, 13746 },
13743 { 13747 {
13744 .cmd = NL80211_CMD_DEL_PMKSA, 13748 .cmd = NL80211_CMD_DEL_PMKSA,
@@ -14090,7 +14094,8 @@ static const struct genl_ops nl80211_ops[] = {
14090 .policy = nl80211_policy, 14094 .policy = nl80211_policy,
14091 .flags = GENL_UNS_ADMIN_PERM, 14095 .flags = GENL_UNS_ADMIN_PERM,
14092 .internal_flags = NL80211_FLAG_NEED_WIPHY | 14096 .internal_flags = NL80211_FLAG_NEED_WIPHY |
14093 NL80211_FLAG_NEED_RTNL, 14097 NL80211_FLAG_NEED_RTNL |
14098 NL80211_FLAG_CLEAR_SKB,
14094 }, 14099 },
14095 { 14100 {
14096 .cmd = NL80211_CMD_SET_QOS_MAP, 14101 .cmd = NL80211_CMD_SET_QOS_MAP,
@@ -14145,7 +14150,8 @@ static const struct genl_ops nl80211_ops[] = {
14145 .doit = nl80211_set_pmk, 14150 .doit = nl80211_set_pmk,
14146 .policy = nl80211_policy, 14151 .policy = nl80211_policy,
14147 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | 14152 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
14148 NL80211_FLAG_NEED_RTNL, 14153 NL80211_FLAG_NEED_RTNL |
14154 NL80211_FLAG_CLEAR_SKB,
14149 }, 14155 },
14150 { 14156 {
14151 .cmd = NL80211_CMD_DEL_PMK, 14157 .cmd = NL80211_CMD_DEL_PMK,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 2f1bf91eb226..0ba778f371cb 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1309,6 +1309,16 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
1309 return dfs_region1; 1309 return dfs_region1;
1310} 1310}
1311 1311
1312static void reg_wmm_rules_intersect(const struct ieee80211_wmm_ac *wmm_ac1,
1313 const struct ieee80211_wmm_ac *wmm_ac2,
1314 struct ieee80211_wmm_ac *intersect)
1315{
1316 intersect->cw_min = max_t(u16, wmm_ac1->cw_min, wmm_ac2->cw_min);
1317 intersect->cw_max = max_t(u16, wmm_ac1->cw_max, wmm_ac2->cw_max);
1318 intersect->cot = min_t(u16, wmm_ac1->cot, wmm_ac2->cot);
1319 intersect->aifsn = max_t(u8, wmm_ac1->aifsn, wmm_ac2->aifsn);
1320}
1321
1312/* 1322/*
1313 * Helper for regdom_intersect(), this does the real 1323 * Helper for regdom_intersect(), this does the real
1314 * mathematical intersection fun 1324 * mathematical intersection fun
@@ -1323,6 +1333,8 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
1323 struct ieee80211_freq_range *freq_range; 1333 struct ieee80211_freq_range *freq_range;
1324 const struct ieee80211_power_rule *power_rule1, *power_rule2; 1334 const struct ieee80211_power_rule *power_rule1, *power_rule2;
1325 struct ieee80211_power_rule *power_rule; 1335 struct ieee80211_power_rule *power_rule;
1336 const struct ieee80211_wmm_rule *wmm_rule1, *wmm_rule2;
1337 struct ieee80211_wmm_rule *wmm_rule;
1326 u32 freq_diff, max_bandwidth1, max_bandwidth2; 1338 u32 freq_diff, max_bandwidth1, max_bandwidth2;
1327 1339
1328 freq_range1 = &rule1->freq_range; 1340 freq_range1 = &rule1->freq_range;
@@ -1333,6 +1345,10 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
1333 power_rule2 = &rule2->power_rule; 1345 power_rule2 = &rule2->power_rule;
1334 power_rule = &intersected_rule->power_rule; 1346 power_rule = &intersected_rule->power_rule;
1335 1347
1348 wmm_rule1 = &rule1->wmm_rule;
1349 wmm_rule2 = &rule2->wmm_rule;
1350 wmm_rule = &intersected_rule->wmm_rule;
1351
1336 freq_range->start_freq_khz = max(freq_range1->start_freq_khz, 1352 freq_range->start_freq_khz = max(freq_range1->start_freq_khz,
1337 freq_range2->start_freq_khz); 1353 freq_range2->start_freq_khz);
1338 freq_range->end_freq_khz = min(freq_range1->end_freq_khz, 1354 freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
@@ -1376,6 +1392,29 @@ static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
1376 intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms, 1392 intersected_rule->dfs_cac_ms = max(rule1->dfs_cac_ms,
1377 rule2->dfs_cac_ms); 1393 rule2->dfs_cac_ms);
1378 1394
1395 if (rule1->has_wmm && rule2->has_wmm) {
1396 u8 ac;
1397
1398 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
1399 reg_wmm_rules_intersect(&wmm_rule1->client[ac],
1400 &wmm_rule2->client[ac],
1401 &wmm_rule->client[ac]);
1402 reg_wmm_rules_intersect(&wmm_rule1->ap[ac],
1403 &wmm_rule2->ap[ac],
1404 &wmm_rule->ap[ac]);
1405 }
1406
1407 intersected_rule->has_wmm = true;
1408 } else if (rule1->has_wmm) {
1409 *wmm_rule = *wmm_rule1;
1410 intersected_rule->has_wmm = true;
1411 } else if (rule2->has_wmm) {
1412 *wmm_rule = *wmm_rule2;
1413 intersected_rule->has_wmm = true;
1414 } else {
1415 intersected_rule->has_wmm = false;
1416 }
1417
1379 if (!is_valid_reg_rule(intersected_rule)) 1418 if (!is_valid_reg_rule(intersected_rule))
1380 return -EINVAL; 1419 return -EINVAL;
1381 1420
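The WMM intersection above always keeps the more restrictive value of each access-category parameter: the larger contention-window bounds and AIFSN, and the smaller channel-occupancy time. A self-contained sketch with sample numbers; the field names are stand-ins for the ieee80211_wmm_ac members:

#include <stdint.h>
#include <stdio.h>

struct wmm_ac {
	uint16_t cw_min;
	uint16_t cw_max;
	uint16_t cot;	/* max channel occupancy time */
	uint8_t aifsn;
};

/* Keep the stricter value of each parameter, as reg_wmm_rules_intersect()
 * does: max of the windows and AIFSN, min of the occupancy time.
 */
static void wmm_ac_intersect(const struct wmm_ac *a, const struct wmm_ac *b,
			     struct wmm_ac *out)
{
	out->cw_min = a->cw_min > b->cw_min ? a->cw_min : b->cw_min;
	out->cw_max = a->cw_max > b->cw_max ? a->cw_max : b->cw_max;
	out->cot = a->cot < b->cot ? a->cot : b->cot;
	out->aifsn = a->aifsn > b->aifsn ? a->aifsn : b->aifsn;
}

int main(void)
{
	struct wmm_ac a = { 3, 7, 2000, 2 }, b = { 4, 6, 1500, 3 }, out;

	wmm_ac_intersect(&a, &b, &out);
	/* prints cw_min=4 cw_max=7 cot=1500 aifsn=3 */
	printf("cw_min=%u cw_max=%u cot=%u aifsn=%u\n",
	       (unsigned)out.cw_min, (unsigned)out.cw_max,
	       (unsigned)out.cot, (unsigned)out.aifsn);
	return 0;
}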
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 287518c6caa4..04d888628f29 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -190,10 +190,9 @@ static size_t cfg80211_gen_new_ie(const u8 *ie, size_t ielen,
190 /* copy subelement as we need to change its content to 190 /* copy subelement as we need to change its content to
191 * mark an ie after it is processed. 191 * mark an ie after it is processed.
192 */ 192 */
193 sub_copy = kmalloc(subie_len, gfp); 193 sub_copy = kmemdup(subelement, subie_len, gfp);
194 if (!sub_copy) 194 if (!sub_copy)
195 return 0; 195 return 0;
196 memcpy(sub_copy, subelement, subie_len);
197 196
198 pos = &new_ie[0]; 197 pos = &new_ie[0];
199 198
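The scan.c change is purely a simplification: kmemdup(p, len, gfp) allocates and copies in one call, equivalent to the kmalloc() plus memcpy() pair it replaces. A userspace equivalent, for illustration:

#include <stdlib.h>
#include <string.h>

/* Userspace analogue of kmemdup(): allocate a buffer and copy into it,
 * returning NULL on allocation failure.
 */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}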
diff --git a/net/wireless/util.c b/net/wireless/util.c
index e4b8db5e81ec..75899b62bdc9 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1220,9 +1220,11 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate)
1220 else if (rate->bw == RATE_INFO_BW_HE_RU && 1220 else if (rate->bw == RATE_INFO_BW_HE_RU &&
1221 rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26) 1221 rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_26)
1222 result = rates_26[rate->he_gi]; 1222 result = rates_26[rate->he_gi];
1223 else if (WARN(1, "invalid HE MCS: bw:%d, ru:%d\n", 1223 else {
1224 rate->bw, rate->he_ru_alloc)) 1224 WARN(1, "invalid HE MCS: bw:%d, ru:%d\n",
1225 rate->bw, rate->he_ru_alloc);
1225 return 0; 1226 return 0;
1227 }
1226 1228
1227 /* now scale to the appropriate MCS */ 1229 /* now scale to the appropriate MCS */
1228 tmp = result; 1230 tmp = result;