Diffstat (limited to 'net')
-rw-r--r--  net/9p/trans_xen.c | 8
-rw-r--r--  net/bridge/br_netlink.c | 9
-rw-r--r--  net/bridge/br_stp_if.c | 2
-rw-r--r--  net/bridge/br_stp_timer.c | 2
-rw-r--r--  net/bridge/netfilter/ebt_arpreply.c | 3
-rw-r--r--  net/bridge/netfilter/ebtables.c | 9
-rw-r--r--  net/ceph/auth_x.c | 13
-rw-r--r--  net/ceph/ceph_common.c | 13
-rw-r--r--  net/ceph/messenger.c | 26
-rw-r--r--  net/ceph/mon_client.c | 4
-rw-r--r--  net/ceph/osdmap.c | 1
-rw-r--r--  net/core/dev.c | 57
-rw-r--r--  net/core/devlink.c | 8
-rw-r--r--  net/core/dst.c | 23
-rw-r--r--  net/core/filter.c | 1
-rw-r--r--  net/core/neighbour.c | 14
-rw-r--r--  net/core/net_namespace.c | 19
-rw-r--r--  net/core/rtnetlink.c | 88
-rw-r--r--  net/core/skbuff.c | 5
-rw-r--r--  net/core/sock.c | 23
-rw-r--r--  net/core/sysctl_net_core.c | 2
-rw-r--r--  net/dccp/ipv6.c | 6
-rw-r--r--  net/dsa/dsa.c | 47
-rw-r--r--  net/dsa/dsa2.c | 4
-rw-r--r--  net/dsa/legacy.c | 47
-rw-r--r--  net/ipv4/af_inet.c | 2
-rw-r--r--  net/ipv4/arp.c | 48
-rw-r--r--  net/ipv4/esp4.c | 5
-rw-r--r--  net/ipv4/fib_frontend.c | 15
-rw-r--r--  net/ipv4/fib_semantics.c | 17
-rw-r--r--  net/ipv4/fib_trie.c | 26
-rw-r--r--  net/ipv4/ipmr.c | 18
-rw-r--r--  net/ipv4/route.c | 10
-rw-r--r--  net/ipv4/tcp.c | 17
-rw-r--r--  net/ipv4/tcp_cong.c | 1
-rw-r--r--  net/ipv4/tcp_input.c | 11
-rw-r--r--  net/ipv4/udp.c | 4
-rw-r--r--  net/ipv4/udp_impl.h | 1
-rw-r--r--  net/ipv6/addrconf.c | 5
-rw-r--r--  net/ipv6/calipso.c | 6
-rw-r--r--  net/ipv6/ip6_gre.c | 13
-rw-r--r--  net/ipv6/ip6_offload.c | 9
-rw-r--r--  net/ipv6/ip6_output.c | 20
-rw-r--r--  net/ipv6/ip6_tunnel.c | 24
-rw-r--r--  net/ipv6/output_core.c | 14
-rw-r--r--  net/ipv6/ping.c | 2
-rw-r--r--  net/ipv6/raw.c | 2
-rw-r--r--  net/ipv6/tcp_ipv6.c | 2
-rw-r--r--  net/ipv6/udp.c | 4
-rw-r--r--  net/ipv6/udp_impl.h | 1
-rw-r--r--  net/ipv6/udp_offload.c | 6
-rw-r--r--  net/ipv6/xfrm6_mode_ro.c | 2
-rw-r--r--  net/ipv6/xfrm6_mode_transport.c | 2
-rw-r--r--  net/key/af_key.c | 2
-rw-r--r--  net/llc/af_llc.c | 3
-rw-r--r--  net/mac80211/agg-tx.c | 128
-rw-r--r--  net/mac80211/ht.c | 16
-rw-r--r--  net/mac80211/ieee80211_i.h | 14
-rw-r--r--  net/mac80211/iface.c | 11
-rw-r--r--  net/mac80211/rx.c | 3
-rw-r--r--  net/mac80211/sta_info.c | 2
-rw-r--r--  net/mac80211/sta_info.h | 2
-rw-r--r--  net/mpls/af_mpls.c | 2
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c | 19
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 12
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 18
-rw-r--r--  net/netfilter/nf_conntrack_proto_sctp.c | 9
-rw-r--r--  net/netfilter/nf_nat_core.c | 6
-rw-r--r--  net/netfilter/nf_tables_api.c | 160
-rw-r--r--  net/netfilter/nfnetlink_cthelper.c | 17
-rw-r--r--  net/netfilter/nft_bitwise.c | 19
-rw-r--r--  net/netfilter/nft_cmp.c | 12
-rw-r--r--  net/netfilter/nft_ct.c | 4
-rw-r--r--  net/netfilter/nft_immediate.c | 5
-rw-r--r--  net/netfilter/nft_range.c | 4
-rw-r--r--  net/netfilter/nft_set_hash.c | 2
-rw-r--r--  net/netfilter/nft_set_rbtree.c | 22
-rw-r--r--  net/netfilter/x_tables.c | 24
-rw-r--r--  net/netfilter/xt_CT.c | 6
-rw-r--r--  net/netlink/af_netlink.c | 4
-rw-r--r--  net/openvswitch/conntrack.c | 4
-rw-r--r--  net/packet/af_packet.c | 14
-rw-r--r--  net/sched/cls_matchall.c | 1
-rw-r--r--  net/sched/sch_api.c | 6
-rw-r--r--  net/sctp/associola.c | 4
-rw-r--r--  net/sctp/input.c | 16
-rw-r--r--  net/sctp/ipv6.c | 49
-rw-r--r--  net/sctp/sm_make_chunk.c | 13
-rw-r--r--  net/sctp/sm_statefuns.c | 3
-rw-r--r--  net/smc/Kconfig | 4
-rw-r--r--  net/smc/smc_clc.c | 4
-rw-r--r--  net/smc/smc_core.c | 16
-rw-r--r--  net/smc/smc_core.h | 2
-rw-r--r--  net/smc/smc_ib.c | 21
-rw-r--r--  net/smc/smc_ib.h | 2
-rw-r--r--  net/sunrpc/xprtrdma/backchannel.c | 6
-rw-r--r--  net/sunrpc/xprtsock.c | 7
-rw-r--r--  net/tipc/socket.c | 38
-rw-r--r--  net/vmw_vsock/af_vsock.c | 21
-rw-r--r--  net/wireless/scan.c | 8
-rw-r--r--  net/wireless/util.c | 10
-rw-r--r--  net/x25/af_x25.c | 24
-rw-r--r--  net/x25/sysctl_net_x25.c | 5
-rw-r--r--  net/xfrm/xfrm_device.c | 2
-rw-r--r--  net/xfrm/xfrm_policy.c | 47
-rw-r--r--  net/xfrm/xfrm_state.c | 2
106 files changed, 938 insertions(+), 638 deletions(-)
diff --git a/net/9p/trans_xen.c b/net/9p/trans_xen.c
index 71e85643b3f9..6ad3e043c617 100644
--- a/net/9p/trans_xen.c
+++ b/net/9p/trans_xen.c
@@ -454,8 +454,8 @@ static int xen_9pfs_front_probe(struct xenbus_device *dev,
454 goto error_xenbus; 454 goto error_xenbus;
455 } 455 }
456 priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL); 456 priv->tag = xenbus_read(xbt, dev->nodename, "tag", NULL);
457 if (!priv->tag) { 457 if (IS_ERR(priv->tag)) {
458 ret = -EINVAL; 458 ret = PTR_ERR(priv->tag);
459 goto error_xenbus; 459 goto error_xenbus;
460 } 460 }
461 ret = xenbus_transaction_end(xbt, 0); 461 ret = xenbus_transaction_end(xbt, 0);
@@ -525,7 +525,7 @@ static struct xenbus_driver xen_9pfs_front_driver = {
525 .otherend_changed = xen_9pfs_front_changed, 525 .otherend_changed = xen_9pfs_front_changed,
526}; 526};
527 527
528int p9_trans_xen_init(void) 528static int p9_trans_xen_init(void)
529{ 529{
530 if (!xen_domain()) 530 if (!xen_domain())
531 return -ENODEV; 531 return -ENODEV;
@@ -537,7 +537,7 @@ int p9_trans_xen_init(void)
537} 537}
538module_init(p9_trans_xen_init); 538module_init(p9_trans_xen_init);
539 539
540void p9_trans_xen_exit(void) 540static void p9_trans_xen_exit(void)
541{ 541{
542 v9fs_unregister_trans(&p9_xen_trans); 542 v9fs_unregister_trans(&p9_xen_trans);
543 return xenbus_unregister_driver(&xen_9pfs_front_driver); 543 return xenbus_unregister_driver(&xen_9pfs_front_driver);
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index c5ce7745b230..32bd3ead9ba1 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -595,7 +595,7 @@ static int br_afspec(struct net_bridge *br,
595 err = 0; 595 err = 0;
596 switch (nla_type(attr)) { 596 switch (nla_type(attr)) {
597 case IFLA_BRIDGE_VLAN_TUNNEL_INFO: 597 case IFLA_BRIDGE_VLAN_TUNNEL_INFO:
598 if (!(p->flags & BR_VLAN_TUNNEL)) 598 if (!p || !(p->flags & BR_VLAN_TUNNEL))
599 return -EINVAL; 599 return -EINVAL;
600 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr); 600 err = br_parse_vlan_tunnel_info(attr, &tinfo_curr);
601 if (err) 601 if (err)
@@ -835,6 +835,13 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
835 return -EPROTONOSUPPORT; 835 return -EPROTONOSUPPORT;
836 } 836 }
837 } 837 }
838
839 if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
840 __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
841
842 if (defpvid >= VLAN_VID_MASK)
843 return -EINVAL;
844 }
838#endif 845#endif
839 846
840 return 0; 847 return 0;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 08341d2aa9c9..6f12a5271219 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -179,6 +179,8 @@ static void br_stp_start(struct net_bridge *br)
179 br_debug(br, "using kernel STP\n"); 179 br_debug(br, "using kernel STP\n");
180 180
181 /* To start timers on any ports left in blocking */ 181 /* To start timers on any ports left in blocking */
182 if (br->dev->flags & IFF_UP)
183 mod_timer(&br->hello_timer, jiffies + br->hello_time);
182 br_port_state_selection(br); 184 br_port_state_selection(br);
183 } 185 }
184 186
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index c98b3e5c140a..60b6fe277a8b 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,7 @@ static void br_hello_timer_expired(unsigned long arg)
40 if (br->dev->flags & IFF_UP) { 40 if (br->dev->flags & IFF_UP) {
41 br_config_bpdu_generation(br); 41 br_config_bpdu_generation(br);
42 42
43 if (br->stp_enabled != BR_USER_STP) 43 if (br->stp_enabled == BR_KERNEL_STP)
44 mod_timer(&br->hello_timer, 44 mod_timer(&br->hello_timer,
45 round_jiffies(jiffies + br->hello_time)); 45 round_jiffies(jiffies + br->hello_time));
46 } 46 }
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 5929309beaa1..db85230e49c3 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -68,6 +68,9 @@ static int ebt_arpreply_tg_check(const struct xt_tgchk_param *par)
68 if (e->ethproto != htons(ETH_P_ARP) || 68 if (e->ethproto != htons(ETH_P_ARP) ||
69 e->invflags & EBT_IPROTO) 69 e->invflags & EBT_IPROTO)
70 return -EINVAL; 70 return -EINVAL;
71 if (ebt_invalid_target(info->target))
72 return -EINVAL;
73
71 return 0; 74 return 0;
72} 75}
73 76
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 9ec0c9f908fa..9c6e619f452b 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1373,7 +1373,8 @@ static inline int ebt_obj_to_user(char __user *um, const char *_name,
1373 strlcpy(name, _name, sizeof(name)); 1373 strlcpy(name, _name, sizeof(name));
1374 if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) || 1374 if (copy_to_user(um, name, EBT_FUNCTION_MAXNAMELEN) ||
1375 put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) || 1375 put_user(datasize, (int __user *)(um + EBT_FUNCTION_MAXNAMELEN)) ||
1376 xt_data_to_user(um + entrysize, data, usersize, datasize)) 1376 xt_data_to_user(um + entrysize, data, usersize, datasize,
1377 XT_ALIGN(datasize)))
1377 return -EFAULT; 1378 return -EFAULT;
1378 1379
1379 return 0; 1380 return 0;
@@ -1658,7 +1659,8 @@ static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1658 if (match->compat_to_user(cm->data, m->data)) 1659 if (match->compat_to_user(cm->data, m->data))
1659 return -EFAULT; 1660 return -EFAULT;
1660 } else { 1661 } else {
1661 if (xt_data_to_user(cm->data, m->data, match->usersize, msize)) 1662 if (xt_data_to_user(cm->data, m->data, match->usersize, msize,
1663 COMPAT_XT_ALIGN(msize)))
1662 return -EFAULT; 1664 return -EFAULT;
1663 } 1665 }
1664 1666
@@ -1687,7 +1689,8 @@ static int compat_target_to_user(struct ebt_entry_target *t,
1687 if (target->compat_to_user(cm->data, t->data)) 1689 if (target->compat_to_user(cm->data, t->data))
1688 return -EFAULT; 1690 return -EFAULT;
1689 } else { 1691 } else {
1690 if (xt_data_to_user(cm->data, t->data, target->usersize, tsize)) 1692 if (xt_data_to_user(cm->data, t->data, target->usersize, tsize,
1693 COMPAT_XT_ALIGN(tsize)))
1691 return -EFAULT; 1694 return -EFAULT;
1692 } 1695 }
1693 1696
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 2034fb926670..8757fb87dab8 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -151,7 +151,7 @@ static int process_one_ticket(struct ceph_auth_client *ac,
151 struct timespec validity; 151 struct timespec validity;
152 void *tp, *tpend; 152 void *tp, *tpend;
153 void **ptp; 153 void **ptp;
154 struct ceph_crypto_key new_session_key; 154 struct ceph_crypto_key new_session_key = { 0 };
155 struct ceph_buffer *new_ticket_blob; 155 struct ceph_buffer *new_ticket_blob;
156 unsigned long new_expires, new_renew_after; 156 unsigned long new_expires, new_renew_after;
157 u64 new_secret_id; 157 u64 new_secret_id;
@@ -215,6 +215,9 @@ static int process_one_ticket(struct ceph_auth_client *ac,
215 dout(" ticket blob is %d bytes\n", dlen); 215 dout(" ticket blob is %d bytes\n", dlen);
216 ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad); 216 ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
217 blob_struct_v = ceph_decode_8(ptp); 217 blob_struct_v = ceph_decode_8(ptp);
218 if (blob_struct_v != 1)
219 goto bad;
220
218 new_secret_id = ceph_decode_64(ptp); 221 new_secret_id = ceph_decode_64(ptp);
219 ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend); 222 ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
220 if (ret) 223 if (ret)
@@ -234,13 +237,13 @@ static int process_one_ticket(struct ceph_auth_client *ac,
234 type, ceph_entity_type_name(type), th->secret_id, 237 type, ceph_entity_type_name(type), th->secret_id,
235 (int)th->ticket_blob->vec.iov_len); 238 (int)th->ticket_blob->vec.iov_len);
236 xi->have_keys |= th->service; 239 xi->have_keys |= th->service;
237 240 return 0;
238out:
239 return ret;
240 241
241bad: 242bad:
242 ret = -EINVAL; 243 ret = -EINVAL;
243 goto out; 244out:
245 ceph_crypto_key_destroy(&new_session_key);
246 return ret;
244} 247}
245 248
246static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac, 249static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index 4fd02831beed..47e94b560ba0 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -56,19 +56,6 @@ static const struct kernel_param_ops param_ops_supported_features = {
56module_param_cb(supported_features, &param_ops_supported_features, NULL, 56module_param_cb(supported_features, &param_ops_supported_features, NULL,
57 S_IRUGO); 57 S_IRUGO);
58 58
59/*
60 * find filename portion of a path (/foo/bar/baz -> baz)
61 */
62const char *ceph_file_part(const char *s, int len)
63{
64 const char *e = s + len;
65
66 while (e != s && *(e-1) != '/')
67 e--;
68 return e;
69}
70EXPORT_SYMBOL(ceph_file_part);
71
72const char *ceph_msg_type_name(int type) 59const char *ceph_msg_type_name(int type)
73{ 60{
74 switch (type) { 61 switch (type) {
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 5766a6c896c4..588a91930051 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1174,8 +1174,8 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1174 * Returns true if the result moves the cursor on to the next piece 1174 * Returns true if the result moves the cursor on to the next piece
1175 * of the data item. 1175 * of the data item.
1176 */ 1176 */
1177static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, 1177static void ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1178 size_t bytes) 1178 size_t bytes)
1179{ 1179{
1180 bool new_piece; 1180 bool new_piece;
1181 1181
@@ -1207,8 +1207,6 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1207 new_piece = true; 1207 new_piece = true;
1208 } 1208 }
1209 cursor->need_crc = new_piece; 1209 cursor->need_crc = new_piece;
1210
1211 return new_piece;
1212} 1210}
1213 1211
1214static size_t sizeof_footer(struct ceph_connection *con) 1212static size_t sizeof_footer(struct ceph_connection *con)
@@ -1577,7 +1575,6 @@ static int write_partial_message_data(struct ceph_connection *con)
1577 size_t page_offset; 1575 size_t page_offset;
1578 size_t length; 1576 size_t length;
1579 bool last_piece; 1577 bool last_piece;
1580 bool need_crc;
1581 int ret; 1578 int ret;
1582 1579
1583 page = ceph_msg_data_next(cursor, &page_offset, &length, 1580 page = ceph_msg_data_next(cursor, &page_offset, &length,
@@ -1592,7 +1589,7 @@ static int write_partial_message_data(struct ceph_connection *con)
1592 } 1589 }
1593 if (do_datacrc && cursor->need_crc) 1590 if (do_datacrc && cursor->need_crc)
1594 crc = ceph_crc32c_page(crc, page, page_offset, length); 1591 crc = ceph_crc32c_page(crc, page, page_offset, length);
1595 need_crc = ceph_msg_data_advance(cursor, (size_t)ret); 1592 ceph_msg_data_advance(cursor, (size_t)ret);
1596 } 1593 }
1597 1594
1598 dout("%s %p msg %p done\n", __func__, con, msg); 1595 dout("%s %p msg %p done\n", __func__, con, msg);
@@ -2231,10 +2228,18 @@ static void process_ack(struct ceph_connection *con)
2231 struct ceph_msg *m; 2228 struct ceph_msg *m;
2232 u64 ack = le64_to_cpu(con->in_temp_ack); 2229 u64 ack = le64_to_cpu(con->in_temp_ack);
2233 u64 seq; 2230 u64 seq;
2231 bool reconnect = (con->in_tag == CEPH_MSGR_TAG_SEQ);
2232 struct list_head *list = reconnect ? &con->out_queue : &con->out_sent;
2234 2233
2235 while (!list_empty(&con->out_sent)) { 2234 /*
2236 m = list_first_entry(&con->out_sent, struct ceph_msg, 2235 * In the reconnect case, con_fault() has requeued messages
2237 list_head); 2236 * in out_sent. We should cleanup old messages according to
2237 * the reconnect seq.
2238 */
2239 while (!list_empty(list)) {
2240 m = list_first_entry(list, struct ceph_msg, list_head);
2241 if (reconnect && m->needs_out_seq)
2242 break;
2238 seq = le64_to_cpu(m->hdr.seq); 2243 seq = le64_to_cpu(m->hdr.seq);
2239 if (seq > ack) 2244 if (seq > ack)
2240 break; 2245 break;
@@ -2243,6 +2248,7 @@ static void process_ack(struct ceph_connection *con)
2243 m->ack_stamp = jiffies; 2248 m->ack_stamp = jiffies;
2244 ceph_msg_remove(m); 2249 ceph_msg_remove(m);
2245 } 2250 }
2251
2246 prepare_read_tag(con); 2252 prepare_read_tag(con);
2247} 2253}
2248 2254
@@ -2299,7 +2305,7 @@ static int read_partial_msg_data(struct ceph_connection *con)
2299 2305
2300 if (do_datacrc) 2306 if (do_datacrc)
2301 crc = ceph_crc32c_page(crc, page, page_offset, ret); 2307 crc = ceph_crc32c_page(crc, page, page_offset, ret);
2302 (void) ceph_msg_data_advance(cursor, (size_t)ret); 2308 ceph_msg_data_advance(cursor, (size_t)ret);
2303 } 2309 }
2304 if (do_datacrc) 2310 if (do_datacrc)
2305 con->in_data_crc = crc; 2311 con->in_data_crc = crc;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 29a0ef351c5e..250f11f78609 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -43,15 +43,13 @@ struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
43 int i, err = -EINVAL; 43 int i, err = -EINVAL;
44 struct ceph_fsid fsid; 44 struct ceph_fsid fsid;
45 u32 epoch, num_mon; 45 u32 epoch, num_mon;
46 u16 version;
47 u32 len; 46 u32 len;
48 47
49 ceph_decode_32_safe(&p, end, len, bad); 48 ceph_decode_32_safe(&p, end, len, bad);
50 ceph_decode_need(&p, end, len, bad); 49 ceph_decode_need(&p, end, len, bad);
51 50
52 dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p)); 51 dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));
53 52 p += sizeof(u16); /* skip version */
54 ceph_decode_16_safe(&p, end, version, bad);
55 53
56 ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad); 54 ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
57 ceph_decode_copy(&p, &fsid, sizeof(fsid)); 55 ceph_decode_copy(&p, &fsid, sizeof(fsid));
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index ffe9e904d4d1..55e3a477f92d 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -317,6 +317,7 @@ static struct crush_map *crush_decode(void *pbyval, void *end)
317 u32 yes; 317 u32 yes;
318 struct crush_rule *r; 318 struct crush_rule *r;
319 319
320 err = -EINVAL;
320 ceph_decode_32_safe(p, end, yes, bad); 321 ceph_decode_32_safe(p, end, yes, bad);
321 if (!yes) { 322 if (!yes) {
322 dout("crush_decode NO rule %d off %x %p to %p\n", 323 dout("crush_decode NO rule %d off %x %p to %p\n",
diff --git a/net/core/dev.c b/net/core/dev.c
index 96cf83da0d66..fca407b4a6ea 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -6852,6 +6852,32 @@ int dev_change_proto_down(struct net_device *dev, bool proto_down)
6852} 6852}
6853EXPORT_SYMBOL(dev_change_proto_down); 6853EXPORT_SYMBOL(dev_change_proto_down);
6854 6854
6855bool __dev_xdp_attached(struct net_device *dev, xdp_op_t xdp_op)
6856{
6857 struct netdev_xdp xdp;
6858
6859 memset(&xdp, 0, sizeof(xdp));
6860 xdp.command = XDP_QUERY_PROG;
6861
6862 /* Query must always succeed. */
6863 WARN_ON(xdp_op(dev, &xdp) < 0);
6864 return xdp.prog_attached;
6865}
6866
6867static int dev_xdp_install(struct net_device *dev, xdp_op_t xdp_op,
6868 struct netlink_ext_ack *extack,
6869 struct bpf_prog *prog)
6870{
6871 struct netdev_xdp xdp;
6872
6873 memset(&xdp, 0, sizeof(xdp));
6874 xdp.command = XDP_SETUP_PROG;
6875 xdp.extack = extack;
6876 xdp.prog = prog;
6877
6878 return xdp_op(dev, &xdp);
6879}
6880
6855/** 6881/**
6856 * dev_change_xdp_fd - set or clear a bpf program for a device rx path 6882 * dev_change_xdp_fd - set or clear a bpf program for a device rx path
6857 * @dev: device 6883 * @dev: device
@@ -6864,41 +6890,34 @@ EXPORT_SYMBOL(dev_change_proto_down);
6864int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, 6890int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
6865 int fd, u32 flags) 6891 int fd, u32 flags)
6866{ 6892{
6867 int (*xdp_op)(struct net_device *dev, struct netdev_xdp *xdp);
6868 const struct net_device_ops *ops = dev->netdev_ops; 6893 const struct net_device_ops *ops = dev->netdev_ops;
6869 struct bpf_prog *prog = NULL; 6894 struct bpf_prog *prog = NULL;
6870 struct netdev_xdp xdp; 6895 xdp_op_t xdp_op, xdp_chk;
6871 int err; 6896 int err;
6872 6897
6873 ASSERT_RTNL(); 6898 ASSERT_RTNL();
6874 6899
6875 xdp_op = ops->ndo_xdp; 6900 xdp_op = xdp_chk = ops->ndo_xdp;
6901 if (!xdp_op && (flags & XDP_FLAGS_DRV_MODE))
6902 return -EOPNOTSUPP;
6876 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE)) 6903 if (!xdp_op || (flags & XDP_FLAGS_SKB_MODE))
6877 xdp_op = generic_xdp_install; 6904 xdp_op = generic_xdp_install;
6905 if (xdp_op == xdp_chk)
6906 xdp_chk = generic_xdp_install;
6878 6907
6879 if (fd >= 0) { 6908 if (fd >= 0) {
6880 if (flags & XDP_FLAGS_UPDATE_IF_NOEXIST) { 6909 if (xdp_chk && __dev_xdp_attached(dev, xdp_chk))
6881 memset(&xdp, 0, sizeof(xdp)); 6910 return -EEXIST;
6882 xdp.command = XDP_QUERY_PROG; 6911 if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) &&
6883 6912 __dev_xdp_attached(dev, xdp_op))
6884 err = xdp_op(dev, &xdp); 6913 return -EBUSY;
6885 if (err < 0)
6886 return err;
6887 if (xdp.prog_attached)
6888 return -EBUSY;
6889 }
6890 6914
6891 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); 6915 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
6892 if (IS_ERR(prog)) 6916 if (IS_ERR(prog))
6893 return PTR_ERR(prog); 6917 return PTR_ERR(prog);
6894 } 6918 }
6895 6919
6896 memset(&xdp, 0, sizeof(xdp)); 6920 err = dev_xdp_install(dev, xdp_op, extack, prog);
6897 xdp.command = XDP_SETUP_PROG;
6898 xdp.extack = extack;
6899 xdp.prog = prog;
6900
6901 err = xdp_op(dev, &xdp);
6902 if (err < 0 && prog) 6921 if (err < 0 && prog)
6903 bpf_prog_put(prog); 6922 bpf_prog_put(prog);
6904 6923
diff --git a/net/core/devlink.c b/net/core/devlink.c
index b0b87a292e7c..a0adfc31a3fe 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -1680,8 +1680,10 @@ start_again:
1680 1680
1681 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, 1681 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
1682 &devlink_nl_family, NLM_F_MULTI, cmd); 1682 &devlink_nl_family, NLM_F_MULTI, cmd);
1683 if (!hdr) 1683 if (!hdr) {
1684 nlmsg_free(skb);
1684 return -EMSGSIZE; 1685 return -EMSGSIZE;
1686 }
1685 1687
1686 if (devlink_nl_put_handle(skb, devlink)) 1688 if (devlink_nl_put_handle(skb, devlink))
1687 goto nla_put_failure; 1689 goto nla_put_failure;
@@ -2098,8 +2100,10 @@ start_again:
2098 2100
2099 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq, 2101 hdr = genlmsg_put(skb, info->snd_portid, info->snd_seq,
2100 &devlink_nl_family, NLM_F_MULTI, cmd); 2102 &devlink_nl_family, NLM_F_MULTI, cmd);
2101 if (!hdr) 2103 if (!hdr) {
2104 nlmsg_free(skb);
2102 return -EMSGSIZE; 2105 return -EMSGSIZE;
2106 }
2103 2107
2104 if (devlink_nl_put_handle(skb, devlink)) 2108 if (devlink_nl_put_handle(skb, devlink))
2105 goto nla_put_failure; 2109 goto nla_put_failure;
diff --git a/net/core/dst.c b/net/core/dst.c
index 960e503b5a52..6192f11beec9 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -151,13 +151,13 @@ int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
151} 151}
152EXPORT_SYMBOL(dst_discard_out); 152EXPORT_SYMBOL(dst_discard_out);
153 153
154const u32 dst_default_metrics[RTAX_MAX + 1] = { 154const struct dst_metrics dst_default_metrics = {
155 /* This initializer is needed to force linker to place this variable 155 /* This initializer is needed to force linker to place this variable
156 * into const section. Otherwise it might end into bss section. 156 * into const section. Otherwise it might end into bss section.
157 * We really want to avoid false sharing on this variable, and catch 157 * We really want to avoid false sharing on this variable, and catch
158 * any writes on it. 158 * any writes on it.
159 */ 159 */
160 [RTAX_MAX] = 0xdeadbeef, 160 .refcnt = ATOMIC_INIT(1),
161}; 161};
162 162
163void dst_init(struct dst_entry *dst, struct dst_ops *ops, 163void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@ void dst_init(struct dst_entry *dst, struct dst_ops *ops,
169 if (dev) 169 if (dev)
170 dev_hold(dev); 170 dev_hold(dev);
171 dst->ops = ops; 171 dst->ops = ops;
172 dst_init_metrics(dst, dst_default_metrics, true); 172 dst_init_metrics(dst, dst_default_metrics.metrics, true);
173 dst->expires = 0UL; 173 dst->expires = 0UL;
174 dst->path = dst; 174 dst->path = dst;
175 dst->from = NULL; 175 dst->from = NULL;
@@ -314,25 +314,30 @@ EXPORT_SYMBOL(dst_release);
314 314
315u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old) 315u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
316{ 316{
317 u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC); 317 struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
318 318
319 if (p) { 319 if (p) {
320 u32 *old_p = __DST_METRICS_PTR(old); 320 struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
321 unsigned long prev, new; 321 unsigned long prev, new;
322 322
323 memcpy(p, old_p, sizeof(u32) * RTAX_MAX); 323 atomic_set(&p->refcnt, 1);
324 memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
324 325
325 new = (unsigned long) p; 326 new = (unsigned long) p;
326 prev = cmpxchg(&dst->_metrics, old, new); 327 prev = cmpxchg(&dst->_metrics, old, new);
327 328
328 if (prev != old) { 329 if (prev != old) {
329 kfree(p); 330 kfree(p);
330 p = __DST_METRICS_PTR(prev); 331 p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
331 if (prev & DST_METRICS_READ_ONLY) 332 if (prev & DST_METRICS_READ_ONLY)
332 p = NULL; 333 p = NULL;
334 } else if (prev & DST_METRICS_REFCOUNTED) {
335 if (atomic_dec_and_test(&old_p->refcnt))
336 kfree(old_p);
333 } 337 }
334 } 338 }
335 return p; 339 BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
340 return (u32 *)p;
336} 341}
337EXPORT_SYMBOL(dst_cow_metrics_generic); 342EXPORT_SYMBOL(dst_cow_metrics_generic);
338 343
@@ -341,7 +346,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
341{ 346{
342 unsigned long prev, new; 347 unsigned long prev, new;
343 348
344 new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY; 349 new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
345 prev = cmpxchg(&dst->_metrics, old, new); 350 prev = cmpxchg(&dst->_metrics, old, new);
346 if (prev == old) 351 if (prev == old)
347 kfree(__DST_METRICS_PTR(old)); 352 kfree(__DST_METRICS_PTR(old));
diff --git a/net/core/filter.c b/net/core/filter.c
index a253a6197e6b..a6bb95fa87b2 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2281,6 +2281,7 @@ bool bpf_helper_changes_pkt_data(void *func)
2281 func == bpf_skb_change_head || 2281 func == bpf_skb_change_head ||
2282 func == bpf_skb_change_tail || 2282 func == bpf_skb_change_tail ||
2283 func == bpf_skb_pull_data || 2283 func == bpf_skb_pull_data ||
2284 func == bpf_clone_redirect ||
2284 func == bpf_l3_csum_replace || 2285 func == bpf_l3_csum_replace ||
2285 func == bpf_l4_csum_replace || 2286 func == bpf_l4_csum_replace ||
2286 func == bpf_xdp_adjust_head) 2287 func == bpf_xdp_adjust_head)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 58b0bcc125b5..d274f81fcc2c 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1132,10 +1132,6 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1132 lladdr = neigh->ha; 1132 lladdr = neigh->ha;
1133 } 1133 }
1134 1134
1135 if (new & NUD_CONNECTED)
1136 neigh->confirmed = jiffies;
1137 neigh->updated = jiffies;
1138
1139 /* If entry was valid and address is not changed, 1135 /* If entry was valid and address is not changed,
1140 do not change entry state, if new one is STALE. 1136 do not change entry state, if new one is STALE.
1141 */ 1137 */
@@ -1157,6 +1153,16 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1157 } 1153 }
1158 } 1154 }
1159 1155
1156 /* Update timestamps only once we know we will make a change to the
1157 * neighbour entry. Otherwise we risk to move the locktime window with
1158 * noop updates and ignore relevant ARP updates.
1159 */
1160 if (new != old || lladdr != neigh->ha) {
1161 if (new & NUD_CONNECTED)
1162 neigh->confirmed = jiffies;
1163 neigh->updated = jiffies;
1164 }
1165
1160 if (new != old) { 1166 if (new != old) {
1161 neigh_del_timer(neigh); 1167 neigh_del_timer(neigh);
1162 if (new & NUD_PROBE) 1168 if (new & NUD_PROBE)
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 1934efd4a9d4..26bbfababff2 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -315,6 +315,25 @@ out_undo:
315 goto out; 315 goto out;
316} 316}
317 317
318static int __net_init net_defaults_init_net(struct net *net)
319{
320 net->core.sysctl_somaxconn = SOMAXCONN;
321 return 0;
322}
323
324static struct pernet_operations net_defaults_ops = {
325 .init = net_defaults_init_net,
326};
327
328static __init int net_defaults_init(void)
329{
330 if (register_pernet_subsys(&net_defaults_ops))
331 panic("Cannot initialize net default settings");
332
333 return 0;
334}
335
336core_initcall(net_defaults_init);
318 337
319#ifdef CONFIG_NET_NS 338#ifdef CONFIG_NET_NS
320static struct ucounts *inc_net_namespaces(struct user_namespace *ns) 339static struct ucounts *inc_net_namespaces(struct user_namespace *ns)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bcb0f610ee42..9e2c0a7cb325 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -899,8 +899,7 @@ static size_t rtnl_port_size(const struct net_device *dev,
899static size_t rtnl_xdp_size(void) 899static size_t rtnl_xdp_size(void)
900{ 900{
901 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ 901 size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
902 nla_total_size(1) + /* XDP_ATTACHED */ 902 nla_total_size(1); /* XDP_ATTACHED */
903 nla_total_size(4); /* XDP_FLAGS */
904 903
905 return xdp_size; 904 return xdp_size;
906} 905}
@@ -1247,37 +1246,34 @@ static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
1247 return 0; 1246 return 0;
1248} 1247}
1249 1248
1249static u8 rtnl_xdp_attached_mode(struct net_device *dev)
1250{
1251 const struct net_device_ops *ops = dev->netdev_ops;
1252
1253 ASSERT_RTNL();
1254
1255 if (rcu_access_pointer(dev->xdp_prog))
1256 return XDP_ATTACHED_SKB;
1257 if (ops->ndo_xdp && __dev_xdp_attached(dev, ops->ndo_xdp))
1258 return XDP_ATTACHED_DRV;
1259
1260 return XDP_ATTACHED_NONE;
1261}
1262
1250static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev) 1263static int rtnl_xdp_fill(struct sk_buff *skb, struct net_device *dev)
1251{ 1264{
1252 struct nlattr *xdp; 1265 struct nlattr *xdp;
1253 u32 xdp_flags = 0;
1254 u8 val = 0;
1255 int err; 1266 int err;
1256 1267
1257 xdp = nla_nest_start(skb, IFLA_XDP); 1268 xdp = nla_nest_start(skb, IFLA_XDP);
1258 if (!xdp) 1269 if (!xdp)
1259 return -EMSGSIZE; 1270 return -EMSGSIZE;
1260 if (rcu_access_pointer(dev->xdp_prog)) { 1271
1261 xdp_flags = XDP_FLAGS_SKB_MODE; 1272 err = nla_put_u8(skb, IFLA_XDP_ATTACHED,
1262 val = 1; 1273 rtnl_xdp_attached_mode(dev));
1263 } else if (dev->netdev_ops->ndo_xdp) {
1264 struct netdev_xdp xdp_op = {};
1265
1266 xdp_op.command = XDP_QUERY_PROG;
1267 err = dev->netdev_ops->ndo_xdp(dev, &xdp_op);
1268 if (err)
1269 goto err_cancel;
1270 val = xdp_op.prog_attached;
1271 }
1272 err = nla_put_u8(skb, IFLA_XDP_ATTACHED, val);
1273 if (err) 1274 if (err)
1274 goto err_cancel; 1275 goto err_cancel;
1275 1276
1276 if (xdp_flags) {
1277 err = nla_put_u32(skb, IFLA_XDP_FLAGS, xdp_flags);
1278 if (err)
1279 goto err_cancel;
1280 }
1281 nla_nest_end(skb, xdp); 1277 nla_nest_end(skb, xdp);
1282 return 0; 1278 return 0;
1283 1279
@@ -1631,13 +1627,13 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
1631 cb->nlh->nlmsg_seq, 0, 1627 cb->nlh->nlmsg_seq, 0,
1632 flags, 1628 flags,
1633 ext_filter_mask); 1629 ext_filter_mask);
1634 /* If we ran out of room on the first message,
1635 * we're in trouble
1636 */
1637 WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
1638 1630
1639 if (err < 0) 1631 if (err < 0) {
1640 goto out; 1632 if (likely(skb->len))
1633 goto out;
1634
1635 goto out_err;
1636 }
1641 1637
1642 nl_dump_check_consistent(cb, nlmsg_hdr(skb)); 1638 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
1643cont: 1639cont:
@@ -1645,10 +1641,12 @@ cont:
1645 } 1641 }
1646 } 1642 }
1647out: 1643out:
1644 err = skb->len;
1645out_err:
1648 cb->args[1] = idx; 1646 cb->args[1] = idx;
1649 cb->args[0] = h; 1647 cb->args[0] = h;
1650 1648
1651 return skb->len; 1649 return err;
1652} 1650}
1653 1651
1654int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len, 1652int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
@@ -2199,6 +2197,11 @@ static int do_setlink(const struct sk_buff *skb,
2199 err = -EINVAL; 2197 err = -EINVAL;
2200 goto errout; 2198 goto errout;
2201 } 2199 }
2200 if ((xdp_flags & XDP_FLAGS_SKB_MODE) &&
2201 (xdp_flags & XDP_FLAGS_DRV_MODE)) {
2202 err = -EINVAL;
2203 goto errout;
2204 }
2202 } 2205 }
2203 2206
2204 if (xdp[IFLA_XDP_FD]) { 2207 if (xdp[IFLA_XDP_FD]) {
@@ -3228,8 +3231,11 @@ static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
3228 int err = 0; 3231 int err = 0;
3229 int fidx = 0; 3232 int fidx = 0;
3230 3233
3231 if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, 3234 err = nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb,
3232 IFLA_MAX, ifla_policy, NULL) == 0) { 3235 IFLA_MAX, ifla_policy, NULL);
3236 if (err < 0) {
3237 return -EINVAL;
3238 } else if (err == 0) {
3233 if (tb[IFLA_MASTER]) 3239 if (tb[IFLA_MASTER])
3234 br_idx = nla_get_u32(tb[IFLA_MASTER]); 3240 br_idx = nla_get_u32(tb[IFLA_MASTER]);
3235 } 3241 }
@@ -3452,8 +3458,12 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3452 err = br_dev->netdev_ops->ndo_bridge_getlink( 3458 err = br_dev->netdev_ops->ndo_bridge_getlink(
3453 skb, portid, seq, dev, 3459 skb, portid, seq, dev,
3454 filter_mask, NLM_F_MULTI); 3460 filter_mask, NLM_F_MULTI);
3455 if (err < 0 && err != -EOPNOTSUPP) 3461 if (err < 0 && err != -EOPNOTSUPP) {
3456 break; 3462 if (likely(skb->len))
3463 break;
3464
3465 goto out_err;
3466 }
3457 } 3467 }
3458 idx++; 3468 idx++;
3459 } 3469 }
@@ -3464,16 +3474,22 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
3464 seq, dev, 3474 seq, dev,
3465 filter_mask, 3475 filter_mask,
3466 NLM_F_MULTI); 3476 NLM_F_MULTI);
3467 if (err < 0 && err != -EOPNOTSUPP) 3477 if (err < 0 && err != -EOPNOTSUPP) {
3468 break; 3478 if (likely(skb->len))
3479 break;
3480
3481 goto out_err;
3482 }
3469 } 3483 }
3470 idx++; 3484 idx++;
3471 } 3485 }
3472 } 3486 }
3487 err = skb->len;
3488out_err:
3473 rcu_read_unlock(); 3489 rcu_read_unlock();
3474 cb->args[0] = idx; 3490 cb->args[0] = idx;
3475 3491
3476 return skb->len; 3492 return err;
3477} 3493}
3478 3494
3479static inline size_t bridge_nlmsg_size(void) 3495static inline size_t bridge_nlmsg_size(void)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 346d3e85dfbc..b1be7c01efe2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3754,8 +3754,11 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
3754 3754
3755 spin_lock_irqsave(&q->lock, flags); 3755 spin_lock_irqsave(&q->lock, flags);
3756 skb = __skb_dequeue(q); 3756 skb = __skb_dequeue(q);
3757 if (skb && (skb_next = skb_peek(q))) 3757 if (skb && (skb_next = skb_peek(q))) {
3758 icmp_next = is_icmp_err_skb(skb_next); 3758 icmp_next = is_icmp_err_skb(skb_next);
3759 if (icmp_next)
3760 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
3761 }
3759 spin_unlock_irqrestore(&q->lock, flags); 3762 spin_unlock_irqrestore(&q->lock, flags);
3760 3763
3761 if (is_icmp_err_skb(skb) && !icmp_next) 3764 if (is_icmp_err_skb(skb) && !icmp_next)
diff --git a/net/core/sock.c b/net/core/sock.c
index 79c6aee6af9b..727f924b7f91 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -139,10 +139,7 @@
139 139
140#include <trace/events/sock.h> 140#include <trace/events/sock.h>
141 141
142#ifdef CONFIG_INET
143#include <net/tcp.h> 142#include <net/tcp.h>
144#endif
145
146#include <net/busy_poll.h> 143#include <net/busy_poll.h>
147 144
148static DEFINE_MUTEX(proto_list_mutex); 145static DEFINE_MUTEX(proto_list_mutex);
@@ -1803,28 +1800,24 @@ EXPORT_SYMBOL(skb_set_owner_w);
1803 * delay queue. We want to allow the owner socket to send more 1800 * delay queue. We want to allow the owner socket to send more
1804 * packets, as if they were already TX completed by a typical driver. 1801 * packets, as if they were already TX completed by a typical driver.
1805 * But we also want to keep skb->sk set because some packet schedulers 1802 * But we also want to keep skb->sk set because some packet schedulers
1806 * rely on it (sch_fq for example). So we set skb->truesize to a small 1803 * rely on it (sch_fq for example).
1807 * amount (1) and decrease sk_wmem_alloc accordingly.
1808 */ 1804 */
1809void skb_orphan_partial(struct sk_buff *skb) 1805void skb_orphan_partial(struct sk_buff *skb)
1810{ 1806{
1811 /* If this skb is a TCP pure ACK or already went here, 1807 if (skb_is_tcp_pure_ack(skb))
1812 * we have nothing to do. 2 is already a very small truesize.
1813 */
1814 if (skb->truesize <= 2)
1815 return; 1808 return;
1816 1809
1817 /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
1818 * so we do not completely orphan skb, but transfert all
1819 * accounted bytes but one, to avoid unexpected reorders.
1820 */
1821 if (skb->destructor == sock_wfree 1810 if (skb->destructor == sock_wfree
1822#ifdef CONFIG_INET 1811#ifdef CONFIG_INET
1823 || skb->destructor == tcp_wfree 1812 || skb->destructor == tcp_wfree
1824#endif 1813#endif
1825 ) { 1814 ) {
1826 atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc); 1815 struct sock *sk = skb->sk;
1827 skb->truesize = 1; 1816
1817 if (atomic_inc_not_zero(&sk->sk_refcnt)) {
1818 atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
1819 skb->destructor = sock_efree;
1820 }
1828 } else { 1821 } else {
1829 skb_orphan(skb); 1822 skb_orphan(skb);
1830 } 1823 }
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index ea23254b2457..b7cd9aafe99e 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -479,8 +479,6 @@ static __net_init int sysctl_core_net_init(struct net *net)
479{ 479{
480 struct ctl_table *tbl; 480 struct ctl_table *tbl;
481 481
482 net->core.sysctl_somaxconn = SOMAXCONN;
483
484 tbl = netns_core_table; 482 tbl = netns_core_table;
485 if (!net_eq(net, &init_net)) { 483 if (!net_eq(net, &init_net)) {
486 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL); 484 tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 840f14aaa016..992621172220 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -426,6 +426,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
426 newsk->sk_backlog_rcv = dccp_v4_do_rcv; 426 newsk->sk_backlog_rcv = dccp_v4_do_rcv;
427 newnp->pktoptions = NULL; 427 newnp->pktoptions = NULL;
428 newnp->opt = NULL; 428 newnp->opt = NULL;
429 newnp->ipv6_mc_list = NULL;
430 newnp->ipv6_ac_list = NULL;
431 newnp->ipv6_fl_list = NULL;
429 newnp->mcast_oif = inet6_iif(skb); 432 newnp->mcast_oif = inet6_iif(skb);
430 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit; 433 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
431 434
@@ -490,6 +493,9 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
490 /* Clone RX bits */ 493 /* Clone RX bits */
491 newnp->rxopt.all = np->rxopt.all; 494 newnp->rxopt.all = np->rxopt.all;
492 495
496 newnp->ipv6_mc_list = NULL;
497 newnp->ipv6_ac_list = NULL;
498 newnp->ipv6_fl_list = NULL;
493 newnp->pktoptions = NULL; 499 newnp->pktoptions = NULL;
494 newnp->opt = NULL; 500 newnp->opt = NULL;
495 newnp->mcast_oif = inet6_iif(skb); 501 newnp->mcast_oif = inet6_iif(skb);
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 26130ae438da..90038d45a547 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -223,6 +223,53 @@ static int dsa_switch_rcv(struct sk_buff *skb, struct net_device *dev,
223 return 0; 223 return 0;
224} 224}
225 225
226#ifdef CONFIG_PM_SLEEP
227int dsa_switch_suspend(struct dsa_switch *ds)
228{
229 int i, ret = 0;
230
231 /* Suspend slave network devices */
232 for (i = 0; i < ds->num_ports; i++) {
233 if (!dsa_is_port_initialized(ds, i))
234 continue;
235
236 ret = dsa_slave_suspend(ds->ports[i].netdev);
237 if (ret)
238 return ret;
239 }
240
241 if (ds->ops->suspend)
242 ret = ds->ops->suspend(ds);
243
244 return ret;
245}
246EXPORT_SYMBOL_GPL(dsa_switch_suspend);
247
248int dsa_switch_resume(struct dsa_switch *ds)
249{
250 int i, ret = 0;
251
252 if (ds->ops->resume)
253 ret = ds->ops->resume(ds);
254
255 if (ret)
256 return ret;
257
258 /* Resume slave network devices */
259 for (i = 0; i < ds->num_ports; i++) {
260 if (!dsa_is_port_initialized(ds, i))
261 continue;
262
263 ret = dsa_slave_resume(ds->ports[i].netdev);
264 if (ret)
265 return ret;
266 }
267
268 return 0;
269}
270EXPORT_SYMBOL_GPL(dsa_switch_resume);
271#endif
272
226static struct packet_type dsa_pack_type __read_mostly = { 273static struct packet_type dsa_pack_type __read_mostly = {
227 .type = cpu_to_be16(ETH_P_XDSA), 274 .type = cpu_to_be16(ETH_P_XDSA),
228 .func = dsa_switch_rcv, 275 .func = dsa_switch_rcv,
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index 033b3bfb63dc..7796580e99ee 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -484,8 +484,10 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
484 dsa_ds_unapply(dst, ds); 484 dsa_ds_unapply(dst, ds);
485 } 485 }
486 486
487 if (dst->cpu_switch) 487 if (dst->cpu_switch) {
488 dsa_cpu_port_ethtool_restore(dst->cpu_switch); 488 dsa_cpu_port_ethtool_restore(dst->cpu_switch);
489 dst->cpu_switch = NULL;
490 }
489 491
490 pr_info("DSA: tree %d unapplied\n", dst->tree); 492 pr_info("DSA: tree %d unapplied\n", dst->tree);
491 dst->applied = false; 493 dst->applied = false;
diff --git a/net/dsa/legacy.c b/net/dsa/legacy.c
index ad345c8b0b06..7281098df04e 100644
--- a/net/dsa/legacy.c
+++ b/net/dsa/legacy.c
@@ -289,53 +289,6 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
289 dsa_switch_unregister_notifier(ds); 289 dsa_switch_unregister_notifier(ds);
290} 290}
291 291
292#ifdef CONFIG_PM_SLEEP
293int dsa_switch_suspend(struct dsa_switch *ds)
294{
295 int i, ret = 0;
296
297 /* Suspend slave network devices */
298 for (i = 0; i < ds->num_ports; i++) {
299 if (!dsa_is_port_initialized(ds, i))
300 continue;
301
302 ret = dsa_slave_suspend(ds->ports[i].netdev);
303 if (ret)
304 return ret;
305 }
306
307 if (ds->ops->suspend)
308 ret = ds->ops->suspend(ds);
309
310 return ret;
311}
312EXPORT_SYMBOL_GPL(dsa_switch_suspend);
313
314int dsa_switch_resume(struct dsa_switch *ds)
315{
316 int i, ret = 0;
317
318 if (ds->ops->resume)
319 ret = ds->ops->resume(ds);
320
321 if (ret)
322 return ret;
323
324 /* Resume slave network devices */
325 for (i = 0; i < ds->num_ports; i++) {
326 if (!dsa_is_port_initialized(ds, i))
327 continue;
328
329 ret = dsa_slave_resume(ds->ports[i].netdev);
330 if (ret)
331 return ret;
332 }
333
334 return 0;
335}
336EXPORT_SYMBOL_GPL(dsa_switch_resume);
337#endif
338
339/* platform driver init and cleanup *****************************************/ 292/* platform driver init and cleanup *****************************************/
340static int dev_is_class(struct device *dev, void *class) 293static int dev_is_class(struct device *dev, void *class)
341{ 294{
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f3dad1661343..58925b6597de 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1043,7 +1043,7 @@ static struct inet_protosw inetsw_array[] =
1043 .type = SOCK_DGRAM, 1043 .type = SOCK_DGRAM,
1044 .protocol = IPPROTO_ICMP, 1044 .protocol = IPPROTO_ICMP,
1045 .prot = &ping_prot, 1045 .prot = &ping_prot,
1046 .ops = &inet_dgram_ops, 1046 .ops = &inet_sockraw_ops,
1047 .flags = INET_PROTOSW_REUSE, 1047 .flags = INET_PROTOSW_REUSE,
1048 }, 1048 },
1049 1049
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 0937b34c27ca..e9f3386a528b 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -641,6 +641,32 @@ void arp_xmit(struct sk_buff *skb)
641} 641}
642EXPORT_SYMBOL(arp_xmit); 642EXPORT_SYMBOL(arp_xmit);
643 643
644static bool arp_is_garp(struct net *net, struct net_device *dev,
645 int *addr_type, __be16 ar_op,
646 __be32 sip, __be32 tip,
647 unsigned char *sha, unsigned char *tha)
648{
649 bool is_garp = tip == sip;
650
651 /* Gratuitous ARP _replies_ also require target hwaddr to be
652 * the same as source.
653 */
654 if (is_garp && ar_op == htons(ARPOP_REPLY))
655 is_garp =
656 /* IPv4 over IEEE 1394 doesn't provide target
657 * hardware address field in its ARP payload.
658 */
659 tha &&
660 !memcmp(tha, sha, dev->addr_len);
661
662 if (is_garp) {
663 *addr_type = inet_addr_type_dev_table(net, dev, sip);
664 if (*addr_type != RTN_UNICAST)
665 is_garp = false;
666 }
667 return is_garp;
668}
669
644/* 670/*
645 * Process an arp request. 671 * Process an arp request.
646 */ 672 */
@@ -653,6 +679,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
653 unsigned char *arp_ptr; 679 unsigned char *arp_ptr;
654 struct rtable *rt; 680 struct rtable *rt;
655 unsigned char *sha; 681 unsigned char *sha;
682 unsigned char *tha = NULL;
656 __be32 sip, tip; 683 __be32 sip, tip;
657 u16 dev_type = dev->type; 684 u16 dev_type = dev->type;
658 int addr_type; 685 int addr_type;
@@ -724,6 +751,7 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
724 break; 751 break;
725#endif 752#endif
726 default: 753 default:
754 tha = arp_ptr;
727 arp_ptr += dev->addr_len; 755 arp_ptr += dev->addr_len;
728 } 756 }
729 memcpy(&tip, arp_ptr, 4); 757 memcpy(&tip, arp_ptr, 4);
@@ -835,19 +863,25 @@ static int arp_process(struct net *net, struct sock *sk, struct sk_buff *skb)
835 863
836 n = __neigh_lookup(&arp_tbl, &sip, dev, 0); 864 n = __neigh_lookup(&arp_tbl, &sip, dev, 0);
837 865
838 if (IN_DEV_ARP_ACCEPT(in_dev)) { 866 addr_type = -1;
839 unsigned int addr_type = inet_addr_type_dev_table(net, dev, sip); 867 if (n || IN_DEV_ARP_ACCEPT(in_dev)) {
868 is_garp = arp_is_garp(net, dev, &addr_type, arp->ar_op,
869 sip, tip, sha, tha);
870 }
840 871
872 if (IN_DEV_ARP_ACCEPT(in_dev)) {
841 /* Unsolicited ARP is not accepted by default. 873 /* Unsolicited ARP is not accepted by default.
842 It is possible, that this option should be enabled for some 874 It is possible, that this option should be enabled for some
843 devices (strip is candidate) 875 devices (strip is candidate)
844 */ 876 */
845 is_garp = arp->ar_op == htons(ARPOP_REQUEST) && tip == sip &&
846 addr_type == RTN_UNICAST;
847
848 if (!n && 877 if (!n &&
849 ((arp->ar_op == htons(ARPOP_REPLY) && 878 (is_garp ||
850 addr_type == RTN_UNICAST) || is_garp)) 879 (arp->ar_op == htons(ARPOP_REPLY) &&
880 (addr_type == RTN_UNICAST ||
881 (addr_type < 0 &&
882 /* postpone calculation to as late as possible */
883 inet_addr_type_dev_table(net, dev, sip) ==
884 RTN_UNICAST)))))
851 n = __neigh_lookup(&arp_tbl, &sip, dev, 1); 885 n = __neigh_lookup(&arp_tbl, &sip, dev, 1);
852 } 886 }
853 887
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 65cc02bd82bc..93322f895eab 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -248,6 +248,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
248 u8 *tail; 248 u8 *tail;
249 u8 *vaddr; 249 u8 *vaddr;
250 int nfrags; 250 int nfrags;
251 int esph_offset;
251 struct page *page; 252 struct page *page;
252 struct sk_buff *trailer; 253 struct sk_buff *trailer;
253 int tailen = esp->tailen; 254 int tailen = esp->tailen;
@@ -313,11 +314,13 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
313 } 314 }
314 315
315cow: 316cow:
317 esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);
318
316 nfrags = skb_cow_data(skb, tailen, &trailer); 319 nfrags = skb_cow_data(skb, tailen, &trailer);
317 if (nfrags < 0) 320 if (nfrags < 0)
318 goto out; 321 goto out;
319 tail = skb_tail_pointer(trailer); 322 tail = skb_tail_pointer(trailer);
320 esp->esph = ip_esp_hdr(skb); 323 esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);
321 324
322skip_cow: 325skip_cow:
323 esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto); 326 esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 39bd1edee676..83e3ed258467 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -763,7 +763,7 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
763 unsigned int e = 0, s_e; 763 unsigned int e = 0, s_e;
764 struct fib_table *tb; 764 struct fib_table *tb;
765 struct hlist_head *head; 765 struct hlist_head *head;
766 int dumped = 0; 766 int dumped = 0, err;
767 767
768 if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) && 768 if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
769 ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED) 769 ((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
@@ -783,20 +783,27 @@ static int inet_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
783 if (dumped) 783 if (dumped)
784 memset(&cb->args[2], 0, sizeof(cb->args) - 784 memset(&cb->args[2], 0, sizeof(cb->args) -
785 2 * sizeof(cb->args[0])); 785 2 * sizeof(cb->args[0]));
786 if (fib_table_dump(tb, skb, cb) < 0) 786 err = fib_table_dump(tb, skb, cb);
787 goto out; 787 if (err < 0) {
788 if (likely(skb->len))
789 goto out;
790
791 goto out_err;
792 }
788 dumped = 1; 793 dumped = 1;
789next: 794next:
790 e++; 795 e++;
791 } 796 }
792 } 797 }
793out: 798out:
799 err = skb->len;
800out_err:
794 rcu_read_unlock(); 801 rcu_read_unlock();
795 802
796 cb->args[1] = e; 803 cb->args[1] = e;
797 cb->args[0] = h; 804 cb->args[0] = h;
798 805
799 return skb->len; 806 return err;
800} 807}
801 808
802/* Prepare and feed intra-kernel routing request. 809/* Prepare and feed intra-kernel routing request.
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index da449ddb8cc1..ad9ad4aab5da 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -203,6 +203,7 @@ static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
203static void free_fib_info_rcu(struct rcu_head *head) 203static void free_fib_info_rcu(struct rcu_head *head)
204{ 204{
205 struct fib_info *fi = container_of(head, struct fib_info, rcu); 205 struct fib_info *fi = container_of(head, struct fib_info, rcu);
206 struct dst_metrics *m;
206 207
207 change_nexthops(fi) { 208 change_nexthops(fi) {
208 if (nexthop_nh->nh_dev) 209 if (nexthop_nh->nh_dev)
@@ -213,8 +214,9 @@ static void free_fib_info_rcu(struct rcu_head *head)
213 rt_fibinfo_free(&nexthop_nh->nh_rth_input); 214 rt_fibinfo_free(&nexthop_nh->nh_rth_input);
214 } endfor_nexthops(fi); 215 } endfor_nexthops(fi);
215 216
216 if (fi->fib_metrics != (u32 *) dst_default_metrics) 217 m = fi->fib_metrics;
217 kfree(fi->fib_metrics); 218 if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
219 kfree(m);
218 kfree(fi); 220 kfree(fi);
219} 221}
220 222
@@ -971,11 +973,11 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
971 val = 255; 973 val = 255;
972 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) 974 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
973 return -EINVAL; 975 return -EINVAL;
974 fi->fib_metrics[type - 1] = val; 976 fi->fib_metrics->metrics[type - 1] = val;
975 } 977 }
976 978
977 if (ecn_ca) 979 if (ecn_ca)
978 fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; 980 fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
979 981
980 return 0; 982 return 0;
981} 983}
@@ -1033,11 +1035,12 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
1033 goto failure; 1035 goto failure;
1034 fib_info_cnt++; 1036 fib_info_cnt++;
1035 if (cfg->fc_mx) { 1037 if (cfg->fc_mx) {
1036 fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); 1038 fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
1037 if (!fi->fib_metrics) 1039 if (!fi->fib_metrics)
1038 goto failure; 1040 goto failure;
1041 atomic_set(&fi->fib_metrics->refcnt, 1);
1039 } else 1042 } else
1040 fi->fib_metrics = (u32 *) dst_default_metrics; 1043 fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
1041 1044
1042 fi->fib_net = net; 1045 fi->fib_net = net;
1043 fi->fib_protocol = cfg->fc_protocol; 1046 fi->fib_protocol = cfg->fc_protocol;
@@ -1238,7 +1241,7 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1238 if (fi->fib_priority && 1241 if (fi->fib_priority &&
1239 nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority)) 1242 nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
1240 goto nla_put_failure; 1243 goto nla_put_failure;
1241 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) 1244 if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
1242 goto nla_put_failure; 1245 goto nla_put_failure;
1243 1246
1244 if (fi->fib_prefsrc && 1247 if (fi->fib_prefsrc &&
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 1201409ba1dc..51182ff2b441 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1983,6 +1983,8 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
1983 1983
1984 /* rcu_read_lock is hold by caller */ 1984 /* rcu_read_lock is hold by caller */
1985 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) { 1985 hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
1986 int err;
1987
1986 if (i < s_i) { 1988 if (i < s_i) {
1987 i++; 1989 i++;
1988 continue; 1990 continue;
@@ -1993,17 +1995,14 @@ static int fn_trie_dump_leaf(struct key_vector *l, struct fib_table *tb,
1993 continue; 1995 continue;
1994 } 1996 }
1995 1997
1996 if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid, 1998 err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
1997 cb->nlh->nlmsg_seq, 1999 cb->nlh->nlmsg_seq, RTM_NEWROUTE,
1998 RTM_NEWROUTE, 2000 tb->tb_id, fa->fa_type,
1999 tb->tb_id, 2001 xkey, KEYLENGTH - fa->fa_slen,
2000 fa->fa_type, 2002 fa->fa_tos, fa->fa_info, NLM_F_MULTI);
2001 xkey, 2003 if (err < 0) {
2002 KEYLENGTH - fa->fa_slen,
2003 fa->fa_tos,
2004 fa->fa_info, NLM_F_MULTI) < 0) {
2005 cb->args[4] = i; 2004 cb->args[4] = i;
2006 return -1; 2005 return err;
2007 } 2006 }
2008 i++; 2007 i++;
2009 } 2008 }
@@ -2025,10 +2024,13 @@ int fib_table_dump(struct fib_table *tb, struct sk_buff *skb,
2025 t_key key = cb->args[3]; 2024 t_key key = cb->args[3];
2026 2025
2027 while ((l = leaf_walk_rcu(&tp, key)) != NULL) { 2026 while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
2028 if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) { 2027 int err;
2028
2029 err = fn_trie_dump_leaf(l, tb, skb, cb);
2030 if (err < 0) {
2029 cb->args[3] = key; 2031 cb->args[3] = key;
2030 cb->args[2] = count; 2032 cb->args[2] = count;
2031 return -1; 2033 return err;
2032 } 2034 }
2033 2035
2034 ++count; 2036 ++count;
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 3a02d52ed50e..551de4d023a8 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1980,6 +1980,20 @@ int ip_mr_input(struct sk_buff *skb)
1980 struct net *net = dev_net(skb->dev); 1980 struct net *net = dev_net(skb->dev);
1981 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; 1981 int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
1982 struct mr_table *mrt; 1982 struct mr_table *mrt;
1983 struct net_device *dev;
1984
1985 /* skb->dev passed in is the loX master dev for vrfs.
1986 * As there are no vifs associated with loopback devices,
1987 * get the proper interface that does have a vif associated with it.
1988 */
1989 dev = skb->dev;
1990 if (netif_is_l3_master(skb->dev)) {
1991 dev = dev_get_by_index_rcu(net, IPCB(skb)->iif);
1992 if (!dev) {
1993 kfree_skb(skb);
1994 return -ENODEV;
1995 }
1996 }
1983 1997
1984 /* Packet is looped back after forward, it should not be 1998 /* Packet is looped back after forward, it should not be
1985 * forwarded second time, but still can be delivered locally. 1999 * forwarded second time, but still can be delivered locally.
@@ -2017,7 +2031,7 @@ int ip_mr_input(struct sk_buff *skb)
2017 /* already under rcu_read_lock() */ 2031 /* already under rcu_read_lock() */
2018 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr); 2032 cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
2019 if (!cache) { 2033 if (!cache) {
2020 int vif = ipmr_find_vif(mrt, skb->dev); 2034 int vif = ipmr_find_vif(mrt, dev);
2021 2035
2022 if (vif >= 0) 2036 if (vif >= 0)
2023 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr, 2037 cache = ipmr_cache_find_any(mrt, ip_hdr(skb)->daddr,
@@ -2037,7 +2051,7 @@ int ip_mr_input(struct sk_buff *skb)
2037 } 2051 }
2038 2052
2039 read_lock(&mrt_lock); 2053 read_lock(&mrt_lock);
2040 vif = ipmr_find_vif(mrt, skb->dev); 2054 vif = ipmr_find_vif(mrt, dev);
2041 if (vif >= 0) { 2055 if (vif >= 0) {
2042 int err2 = ipmr_cache_unresolved(mrt, vif, skb); 2056 int err2 = ipmr_cache_unresolved(mrt, vif, skb);
2043 read_unlock(&mrt_lock); 2057 read_unlock(&mrt_lock);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 655d9eebe43e..6883b3d4ba8f 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1385,8 +1385,12 @@ static void rt_add_uncached_list(struct rtable *rt)
1385 1385
1386static void ipv4_dst_destroy(struct dst_entry *dst) 1386static void ipv4_dst_destroy(struct dst_entry *dst)
1387{ 1387{
1388 struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
1388 struct rtable *rt = (struct rtable *) dst; 1389 struct rtable *rt = (struct rtable *) dst;
1389 1390
1391 if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
1392 kfree(p);
1393
1390 if (!list_empty(&rt->rt_uncached)) { 1394 if (!list_empty(&rt->rt_uncached)) {
1391 struct uncached_list *ul = rt->rt_uncached_list; 1395 struct uncached_list *ul = rt->rt_uncached_list;
1392 1396
@@ -1438,7 +1442,11 @@ static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1438 rt->rt_gateway = nh->nh_gw; 1442 rt->rt_gateway = nh->nh_gw;
1439 rt->rt_uses_gateway = 1; 1443 rt->rt_uses_gateway = 1;
1440 } 1444 }
1441 dst_init_metrics(&rt->dst, fi->fib_metrics, true); 1445 dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
1446 if (fi->fib_metrics != &dst_default_metrics) {
1447 rt->dst._metrics |= DST_METRICS_REFCOUNTED;
1448 atomic_inc(&fi->fib_metrics->refcnt);
1449 }
1442#ifdef CONFIG_IP_ROUTE_CLASSID 1450#ifdef CONFIG_IP_ROUTE_CLASSID
1443 rt->dst.tclassid = nh->nh_tclassid; 1451 rt->dst.tclassid = nh->nh_tclassid;
1444#endif 1452#endif
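
[Editor's note] The route.c hunks attach a reference count to shared fib metrics: rt_set_nexthop() takes a reference when the metrics are not the static defaults, and ipv4_dst_destroy() drops it and frees the block on the last put. Below is a simplified userspace sketch of that ownership rule; struct metrics, metrics_get() and metrics_put() are illustrative names, and C11 atomics stand in for atomic_t.

/* Sketch: shared metrics are freed only when the last user drops its
 * reference, and the static defaults are never freed. Not kernel code.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct metrics {
	atomic_int refcnt;
	unsigned int mtu;
};

static struct metrics default_metrics = { .refcnt = 1, .mtu = 1500 };

static struct metrics *metrics_get(struct metrics *m)
{
	if (m != &default_metrics)
		atomic_fetch_add(&m->refcnt, 1);
	return m;
}

static void metrics_put(struct metrics *m)
{
	if (m != &default_metrics &&
	    atomic_fetch_sub(&m->refcnt, 1) == 1)
		free(m);			/* last reference gone */
}

int main(void)
{
	struct metrics *m = malloc(sizeof(*m));

	atomic_init(&m->refcnt, 1);		/* owner: the fib_info */
	m->mtu = 1400;

	struct metrics *route_ref = metrics_get(m);	/* route takes a ref */

	metrics_put(m);			/* fib_info goes away first ... */
	printf("mtu still readable: %u\n", route_ref->mtu);
	metrics_put(route_ref);		/* ... route drops the last ref */
	return 0;
}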
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1e4c76d2b827..b5ea036ca781 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1084,9 +1084,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1084{ 1084{
1085 struct tcp_sock *tp = tcp_sk(sk); 1085 struct tcp_sock *tp = tcp_sk(sk);
1086 struct inet_sock *inet = inet_sk(sk); 1086 struct inet_sock *inet = inet_sk(sk);
1087 struct sockaddr *uaddr = msg->msg_name;
1087 int err, flags; 1088 int err, flags;
1088 1089
1089 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE)) 1090 if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
1091 (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
1092 uaddr->sa_family == AF_UNSPEC))
1090 return -EOPNOTSUPP; 1093 return -EOPNOTSUPP;
1091 if (tp->fastopen_req) 1094 if (tp->fastopen_req)
1092 return -EALREADY; /* Another Fast Open is in progress */ 1095 return -EALREADY; /* Another Fast Open is in progress */
@@ -1108,7 +1111,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1108 } 1111 }
1109 } 1112 }
1110 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0; 1113 flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1111 err = __inet_stream_connect(sk->sk_socket, msg->msg_name, 1114 err = __inet_stream_connect(sk->sk_socket, uaddr,
1112 msg->msg_namelen, flags, 1); 1115 msg->msg_namelen, flags, 1);
1113 /* fastopen_req could already be freed in __inet_stream_connect 1116 /* fastopen_req could already be freed in __inet_stream_connect
1114 * if the connection times out or gets rst 1117 * if the connection times out or gets rst
@@ -2320,6 +2323,10 @@ int tcp_disconnect(struct sock *sk, int flags)
2320 tcp_set_ca_state(sk, TCP_CA_Open); 2323 tcp_set_ca_state(sk, TCP_CA_Open);
2321 tcp_clear_retrans(tp); 2324 tcp_clear_retrans(tp);
2322 inet_csk_delack_init(sk); 2325 inet_csk_delack_init(sk);
2326 /* Initialize rcv_mss to TCP_MIN_MSS to avoid division by 0
2327 * issue in __tcp_select_window()
2328 */
2329 icsk->icsk_ack.rcv_mss = TCP_MIN_MSS;
2323 tcp_init_send_head(sk); 2330 tcp_init_send_head(sk);
2324 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); 2331 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2325 __sk_dst_reset(sk); 2332 __sk_dst_reset(sk);
@@ -2374,9 +2381,10 @@ static int tcp_repair_set_window(struct tcp_sock *tp, char __user *optbuf, int l
2374 return 0; 2381 return 0;
2375} 2382}
2376 2383
2377static int tcp_repair_options_est(struct tcp_sock *tp, 2384static int tcp_repair_options_est(struct sock *sk,
2378 struct tcp_repair_opt __user *optbuf, unsigned int len) 2385 struct tcp_repair_opt __user *optbuf, unsigned int len)
2379{ 2386{
2387 struct tcp_sock *tp = tcp_sk(sk);
2380 struct tcp_repair_opt opt; 2388 struct tcp_repair_opt opt;
2381 2389
2382 while (len >= sizeof(opt)) { 2390 while (len >= sizeof(opt)) {
@@ -2389,6 +2397,7 @@ static int tcp_repair_options_est(struct tcp_sock *tp,
2389 switch (opt.opt_code) { 2397 switch (opt.opt_code) {
2390 case TCPOPT_MSS: 2398 case TCPOPT_MSS:
2391 tp->rx_opt.mss_clamp = opt.opt_val; 2399 tp->rx_opt.mss_clamp = opt.opt_val;
2400 tcp_mtup_init(sk);
2392 break; 2401 break;
2393 case TCPOPT_WINDOW: 2402 case TCPOPT_WINDOW:
2394 { 2403 {
@@ -2548,7 +2557,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
2548 if (!tp->repair) 2557 if (!tp->repair)
2549 err = -EINVAL; 2558 err = -EINVAL;
2550 else if (sk->sk_state == TCP_ESTABLISHED) 2559 else if (sk->sk_state == TCP_ESTABLISHED)
2551 err = tcp_repair_options_est(tp, 2560 err = tcp_repair_options_est(sk,
2552 (struct tcp_repair_opt __user *)optval, 2561 (struct tcp_repair_opt __user *)optval,
2553 optlen); 2562 optlen);
2554 else 2563 else
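
[Editor's note] The fastopen hunk above rejects sendmsg(MSG_FASTOPEN) when the caller supplies an AF_UNSPEC address, and only reads sa_family after checking msg_namelen is large enough. A small userspace illustration of that validation order follows; fastopen_addr_ok() is a made-up helper, not a kernel or libc function.

/* Sketch: only reject when sa_family can be read safely and is AF_UNSPEC;
 * everything else is left for the normal connect path to validate.
 */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/socket.h>

static int fastopen_addr_ok(const struct sockaddr *uaddr, size_t namelen)
{
	if (uaddr && namelen >= sizeof(uaddr->sa_family) &&
	    uaddr->sa_family == AF_UNSPEC)
		return -EOPNOTSUPP;	/* disconnect request, not a fastopen target */
	return 0;
}

int main(void)
{
	struct sockaddr sa = { .sa_family = AF_UNSPEC };

	printf("AF_UNSPEC -> %d\n", fastopen_addr_ok(&sa, sizeof(sa)));
	sa.sa_family = AF_INET;
	printf("AF_INET   -> %d\n", fastopen_addr_ok(&sa, sizeof(sa)));
	return 0;
}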
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 6e3c512054a6..324c9bcc5456 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -180,6 +180,7 @@ void tcp_init_congestion_control(struct sock *sk)
180{ 180{
181 const struct inet_connection_sock *icsk = inet_csk(sk); 181 const struct inet_connection_sock *icsk = inet_csk(sk);
182 182
183 tcp_sk(sk)->prior_ssthresh = 0;
183 if (icsk->icsk_ca_ops->init) 184 if (icsk->icsk_ca_ops->init)
184 icsk->icsk_ca_ops->init(sk); 185 icsk->icsk_ca_ops->init(sk);
185 if (tcp_ca_needs_ecn(sk)) 186 if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5a3ad09e2786..174d4376baa5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1179,13 +1179,14 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1179 */ 1179 */
1180 if (pkt_len > mss) { 1180 if (pkt_len > mss) {
1181 unsigned int new_len = (pkt_len / mss) * mss; 1181 unsigned int new_len = (pkt_len / mss) * mss;
1182 if (!in_sack && new_len < pkt_len) { 1182 if (!in_sack && new_len < pkt_len)
1183 new_len += mss; 1183 new_len += mss;
1184 if (new_len >= skb->len)
1185 return 0;
1186 }
1187 pkt_len = new_len; 1184 pkt_len = new_len;
1188 } 1185 }
1186
1187 if (pkt_len >= skb->len && !in_sack)
1188 return 0;
1189
1189 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); 1190 err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
1190 if (err < 0) 1191 if (err < 0)
1191 return err; 1192 return err;
@@ -3189,7 +3190,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
3189 int delta; 3190 int delta;
3190 3191
3191 /* Non-retransmitted hole got filled? That's reordering */ 3192 /* Non-retransmitted hole got filled? That's reordering */
3192 if (reord < prior_fackets) 3193 if (reord < prior_fackets && reord <= tp->fackets_out)
3193 tcp_update_reordering(sk, tp->fackets_out - reord, 0); 3194 tcp_update_reordering(sk, tp->fackets_out - reord, 0);
3194 3195
3195 delta = tcp_is_fack(tp) ? pkts_acked : 3196 delta = tcp_is_fack(tp) ? pkts_acked :
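
[Editor's note] The SACK hunk rounds pkt_len to an MSS boundary (rounding up for a partially SACKed tail) and then applies the "would consume the whole skb, nothing to split" bail-out after the rounding, so it now covers both branches. A small arithmetic sketch of that rounding, as plain C with scalar parameters rather than skb fields:

/* Sketch of the split-point rounding before fragmenting on an MSS boundary.
 * Returns 0 when no fragmentation is needed, otherwise the split offset.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int sack_split_point(unsigned int pkt_len, unsigned int mss,
				     unsigned int skb_len, bool in_sack)
{
	if (pkt_len > mss) {
		unsigned int new_len = (pkt_len / mss) * mss;	/* round down */

		if (!in_sack && new_len < pkt_len)
			new_len += mss;	/* keep the partially SACKed tail whole */
		pkt_len = new_len;
	}

	/* Applied after the rounding: nothing left to split off. */
	if (pkt_len >= skb_len && !in_sack)
		return 0;

	return pkt_len;
}

int main(void)
{
	printf("%u\n", sack_split_point(2500, 1000, 3000, false)); /* 0: no split */
	printf("%u\n", sack_split_point(2500, 1000, 3000, true));  /* 2000 */
	return 0;
}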
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index ea6e4cff9faf..1d6219bf2d6b 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1612,7 +1612,7 @@ static void udp_v4_rehash(struct sock *sk)
1612 udp_lib_rehash(sk, new_hash); 1612 udp_lib_rehash(sk, new_hash);
1613} 1613}
1614 1614
1615int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1615static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1616{ 1616{
1617 int rc; 1617 int rc;
1618 1618
@@ -1657,7 +1657,7 @@ EXPORT_SYMBOL(udp_encap_enable);
1657 * Note that in the success and error cases, the skb is assumed to 1657 * Note that in the success and error cases, the skb is assumed to
1658 * have either been requeued or freed. 1658 * have either been requeued or freed.
1659 */ 1659 */
1660int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1660static int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1661{ 1661{
1662 struct udp_sock *up = udp_sk(sk); 1662 struct udp_sock *up = udp_sk(sk);
1663 int is_udplite = IS_UDPLITE(sk); 1663 int is_udplite = IS_UDPLITE(sk);
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h
index feb50a16398d..a8cf8c6fb60c 100644
--- a/net/ipv4/udp_impl.h
+++ b/net/ipv4/udp_impl.h
@@ -25,7 +25,6 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
25 int flags, int *addr_len); 25 int flags, int *addr_len);
26int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, 26int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
27 int flags); 27 int flags);
28int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
29void udp_destroy_sock(struct sock *sk); 28void udp_destroy_sock(struct sock *sk);
30 29
31#ifdef CONFIG_PROC_FS 30#ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8d297a79b568..6a4fb1e629fb 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1022,7 +1022,10 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
1022 INIT_HLIST_NODE(&ifa->addr_lst); 1022 INIT_HLIST_NODE(&ifa->addr_lst);
1023 ifa->scope = scope; 1023 ifa->scope = scope;
1024 ifa->prefix_len = pfxlen; 1024 ifa->prefix_len = pfxlen;
1025 ifa->flags = flags | IFA_F_TENTATIVE; 1025 ifa->flags = flags;
1026 /* No need to add the TENTATIVE flag for addresses with NODAD */
1027 if (!(flags & IFA_F_NODAD))
1028 ifa->flags |= IFA_F_TENTATIVE;
1026 ifa->valid_lft = valid_lft; 1029 ifa->valid_lft = valid_lft;
1027 ifa->prefered_lft = prefered_lft; 1030 ifa->prefered_lft = prefered_lft;
1028 ifa->cstamp = ifa->tstamp = jiffies; 1031 ifa->cstamp = ifa->tstamp = jiffies;
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 37ac9de713c6..8d772fea1dde 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -1319,7 +1319,7 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
1319 struct ipv6hdr *ip6_hdr; 1319 struct ipv6hdr *ip6_hdr;
1320 struct ipv6_opt_hdr *hop; 1320 struct ipv6_opt_hdr *hop;
1321 unsigned char buf[CALIPSO_MAX_BUFFER]; 1321 unsigned char buf[CALIPSO_MAX_BUFFER];
1322 int len_delta, new_end, pad; 1322 int len_delta, new_end, pad, payload;
1323 unsigned int start, end; 1323 unsigned int start, end;
1324 1324
1325 ip6_hdr = ipv6_hdr(skb); 1325 ip6_hdr = ipv6_hdr(skb);
@@ -1346,6 +1346,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
1346 if (ret_val < 0) 1346 if (ret_val < 0)
1347 return ret_val; 1347 return ret_val;
1348 1348
1349 ip6_hdr = ipv6_hdr(skb); /* Reset as skb_cow() may have moved it */
1350
1349 if (len_delta) { 1351 if (len_delta) {
1350 if (len_delta > 0) 1352 if (len_delta > 0)
1351 skb_push(skb, len_delta); 1353 skb_push(skb, len_delta);
@@ -1355,6 +1357,8 @@ static int calipso_skbuff_setattr(struct sk_buff *skb,
1355 sizeof(*ip6_hdr) + start); 1357 sizeof(*ip6_hdr) + start);
1356 skb_reset_network_header(skb); 1358 skb_reset_network_header(skb);
1357 ip6_hdr = ipv6_hdr(skb); 1359 ip6_hdr = ipv6_hdr(skb);
1360 payload = ntohs(ip6_hdr->payload_len);
1361 ip6_hdr->payload_len = htons(payload + len_delta);
1358 } 1362 }
1359 1363
1360 hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1); 1364 hop = (struct ipv6_opt_hdr *)(ip6_hdr + 1);
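
[Editor's note] The calipso hunks re-load the IPv6 header pointer after skb_cow()/skb_push() may have reallocated or shifted the buffer, and adjust payload_len by the size delta. A userspace analogue of the stale-pointer hazard, with realloc() standing in for the skb operations:

/* Sketch: any pointer computed before a buffer may be reallocated must be
 * recomputed afterwards. Plain C, not skb code.
 */
#include <stdio.h>
#include <stdlib.h>

struct hdr {
	unsigned short payload_len;	/* stand-in for ipv6hdr->payload_len */
};

int main(void)
{
	size_t len = sizeof(struct hdr) + 16;
	unsigned char *buf = calloc(1, len);
	struct hdr *h = (struct hdr *)buf;

	h->payload_len = 16;

	/* Grow the buffer to make room for a larger option; the old 'h'
	 * may now be dangling, just as ipv6_hdr(skb) is after skb_cow().
	 */
	size_t delta = 8;
	unsigned char *nbuf = realloc(buf, len + delta);
	if (!nbuf) {
		free(buf);
		return 1;
	}
	buf = nbuf;
	h = (struct hdr *)buf;		/* reset the header pointer */

	h->payload_len += delta;	/* account for the inserted bytes */
	printf("payload_len now %u\n", h->payload_len);
	free(buf);
	return 0;
}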
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 8d128ba79b66..0c5b4caa1949 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -537,11 +537,10 @@ static inline int ip6gre_xmit_ipv4(struct sk_buff *skb, struct net_device *dev)
537 537
538 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 538 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
539 539
540 dsfield = ipv4_get_dsfield(iph);
541
542 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 540 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
543 fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) 541 dsfield = ipv4_get_dsfield(iph);
544 & IPV6_TCLASS_MASK; 542 else
543 dsfield = ip6_tclass(t->parms.flowinfo);
545 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 544 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
546 fl6.flowi6_mark = skb->mark; 545 fl6.flowi6_mark = skb->mark;
547 else 546 else
@@ -598,9 +597,11 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
598 597
599 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 598 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
600 599
601 dsfield = ipv6_get_dsfield(ipv6h);
602 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 600 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
603 fl6.flowlabel |= (*(__be32 *) ipv6h & IPV6_TCLASS_MASK); 601 dsfield = ipv6_get_dsfield(ipv6h);
602 else
603 dsfield = ip6_tclass(t->parms.flowinfo);
604
604 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) 605 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
605 fl6.flowlabel |= ip6_flowlabel(ipv6h); 606 fl6.flowlabel |= ip6_flowlabel(ipv6h);
606 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 607 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 93e58a5e1837..cdb3728faca7 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -63,7 +63,6 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
63 const struct net_offload *ops; 63 const struct net_offload *ops;
64 int proto; 64 int proto;
65 struct frag_hdr *fptr; 65 struct frag_hdr *fptr;
66 unsigned int unfrag_ip6hlen;
67 unsigned int payload_len; 66 unsigned int payload_len;
68 u8 *prevhdr; 67 u8 *prevhdr;
69 int offset = 0; 68 int offset = 0;
@@ -116,8 +115,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
116 skb->network_header = (u8 *)ipv6h - skb->head; 115 skb->network_header = (u8 *)ipv6h - skb->head;
117 116
118 if (udpfrag) { 117 if (udpfrag) {
119 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); 118 int err = ip6_find_1stfragopt(skb, &prevhdr);
120 fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen); 119 if (err < 0) {
120 kfree_skb_list(segs);
121 return ERR_PTR(err);
122 }
123 fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
121 fptr->frag_off = htons(offset); 124 fptr->frag_off = htons(offset);
122 if (skb->next) 125 if (skb->next)
123 fptr->frag_off |= htons(IP6_MF); 126 fptr->frag_off |= htons(IP6_MF);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 58f6288e9ba5..bf8a58a1c32d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -597,7 +597,10 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
597 int ptr, offset = 0, err = 0; 597 int ptr, offset = 0, err = 0;
598 u8 *prevhdr, nexthdr = 0; 598 u8 *prevhdr, nexthdr = 0;
599 599
600 hlen = ip6_find_1stfragopt(skb, &prevhdr); 600 err = ip6_find_1stfragopt(skb, &prevhdr);
601 if (err < 0)
602 goto fail;
603 hlen = err;
601 nexthdr = *prevhdr; 604 nexthdr = *prevhdr;
602 605
603 mtu = ip6_skb_dst_mtu(skb); 606 mtu = ip6_skb_dst_mtu(skb);
@@ -1463,6 +1466,11 @@ alloc_new_skb:
1463 */ 1466 */
1464 alloclen += sizeof(struct frag_hdr); 1467 alloclen += sizeof(struct frag_hdr);
1465 1468
1469 copy = datalen - transhdrlen - fraggap;
1470 if (copy < 0) {
1471 err = -EINVAL;
1472 goto error;
1473 }
1466 if (transhdrlen) { 1474 if (transhdrlen) {
1467 skb = sock_alloc_send_skb(sk, 1475 skb = sock_alloc_send_skb(sk,
1468 alloclen + hh_len, 1476 alloclen + hh_len,
@@ -1512,13 +1520,9 @@ alloc_new_skb:
1512 data += fraggap; 1520 data += fraggap;
1513 pskb_trim_unique(skb_prev, maxfraglen); 1521 pskb_trim_unique(skb_prev, maxfraglen);
1514 } 1522 }
1515 copy = datalen - transhdrlen - fraggap; 1523 if (copy > 0 &&
1516 1524 getfrag(from, data + transhdrlen, offset,
1517 if (copy < 0) { 1525 copy, fraggap, skb) < 0) {
1518 err = -EINVAL;
1519 kfree_skb(skb);
1520 goto error;
1521 } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
1522 err = -EFAULT; 1526 err = -EFAULT;
1523 kfree_skb(skb); 1527 kfree_skb(skb);
1524 goto error; 1528 goto error;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 6eb2ae507500..9b37f9747fc6 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1095,6 +1095,9 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1095 1095
1096 if (!dst) { 1096 if (!dst) {
1097route_lookup: 1097route_lookup:
1098 /* add dsfield to flowlabel for route lookup */
1099 fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1100
1098 dst = ip6_route_output(net, NULL, fl6); 1101 dst = ip6_route_output(net, NULL, fl6);
1099 1102
1100 if (dst->error) 1103 if (dst->error)
@@ -1196,7 +1199,7 @@ route_lookup:
1196 skb_push(skb, sizeof(struct ipv6hdr)); 1199 skb_push(skb, sizeof(struct ipv6hdr));
1197 skb_reset_network_header(skb); 1200 skb_reset_network_header(skb);
1198 ipv6h = ipv6_hdr(skb); 1201 ipv6h = ipv6_hdr(skb);
1199 ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), 1202 ip6_flow_hdr(ipv6h, dsfield,
1200 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6)); 1203 ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1201 ipv6h->hop_limit = hop_limit; 1204 ipv6h->hop_limit = hop_limit;
1202 ipv6h->nexthdr = proto; 1205 ipv6h->nexthdr = proto;
@@ -1231,8 +1234,6 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1231 if (tproto != IPPROTO_IPIP && tproto != 0) 1234 if (tproto != IPPROTO_IPIP && tproto != 0)
1232 return -1; 1235 return -1;
1233 1236
1234 dsfield = ipv4_get_dsfield(iph);
1235
1236 if (t->parms.collect_md) { 1237 if (t->parms.collect_md) {
1237 struct ip_tunnel_info *tun_info; 1238 struct ip_tunnel_info *tun_info;
1238 const struct ip_tunnel_key *key; 1239 const struct ip_tunnel_key *key;
@@ -1246,6 +1247,7 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1246 fl6.flowi6_proto = IPPROTO_IPIP; 1247 fl6.flowi6_proto = IPPROTO_IPIP;
1247 fl6.daddr = key->u.ipv6.dst; 1248 fl6.daddr = key->u.ipv6.dst;
1248 fl6.flowlabel = key->label; 1249 fl6.flowlabel = key->label;
1250 dsfield = ip6_tclass(key->label);
1249 } else { 1251 } else {
1250 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) 1252 if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1251 encap_limit = t->parms.encap_limit; 1253 encap_limit = t->parms.encap_limit;
@@ -1254,8 +1256,9 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1254 fl6.flowi6_proto = IPPROTO_IPIP; 1256 fl6.flowi6_proto = IPPROTO_IPIP;
1255 1257
1256 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 1258 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1257 fl6.flowlabel |= htonl((__u32)iph->tos << IPV6_TCLASS_SHIFT) 1259 dsfield = ipv4_get_dsfield(iph);
1258 & IPV6_TCLASS_MASK; 1260 else
1261 dsfield = ip6_tclass(t->parms.flowinfo);
1259 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 1262 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1260 fl6.flowi6_mark = skb->mark; 1263 fl6.flowi6_mark = skb->mark;
1261 else 1264 else
@@ -1267,6 +1270,8 @@ ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1267 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) 1270 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1268 return -1; 1271 return -1;
1269 1272
1273 dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1274
1270 skb_set_inner_ipproto(skb, IPPROTO_IPIP); 1275 skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1271 1276
1272 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1277 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
@@ -1300,8 +1305,6 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1300 ip6_tnl_addr_conflict(t, ipv6h)) 1305 ip6_tnl_addr_conflict(t, ipv6h))
1301 return -1; 1306 return -1;
1302 1307
1303 dsfield = ipv6_get_dsfield(ipv6h);
1304
1305 if (t->parms.collect_md) { 1308 if (t->parms.collect_md) {
1306 struct ip_tunnel_info *tun_info; 1309 struct ip_tunnel_info *tun_info;
1307 const struct ip_tunnel_key *key; 1310 const struct ip_tunnel_key *key;
@@ -1315,6 +1318,7 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1315 fl6.flowi6_proto = IPPROTO_IPV6; 1318 fl6.flowi6_proto = IPPROTO_IPV6;
1316 fl6.daddr = key->u.ipv6.dst; 1319 fl6.daddr = key->u.ipv6.dst;
1317 fl6.flowlabel = key->label; 1320 fl6.flowlabel = key->label;
1321 dsfield = ip6_tclass(key->label);
1318 } else { 1322 } else {
1319 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb)); 1323 offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1320 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */ 1324 /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
@@ -1337,7 +1341,9 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1337 fl6.flowi6_proto = IPPROTO_IPV6; 1341 fl6.flowi6_proto = IPPROTO_IPV6;
1338 1342
1339 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS) 1343 if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1340 fl6.flowlabel |= (*(__be32 *)ipv6h & IPV6_TCLASS_MASK); 1344 dsfield = ipv6_get_dsfield(ipv6h);
1345 else
1346 dsfield = ip6_tclass(t->parms.flowinfo);
1341 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL) 1347 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1342 fl6.flowlabel |= ip6_flowlabel(ipv6h); 1348 fl6.flowlabel |= ip6_flowlabel(ipv6h);
1343 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK) 1349 if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
@@ -1351,6 +1357,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1351 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6)) 1357 if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1352 return -1; 1358 return -1;
1353 1359
1360 dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1361
1354 skb_set_inner_ipproto(skb, IPPROTO_IPV6); 1362 skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1355 1363
1356 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1364 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
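
[Editor's note] The tunnel hunks choose the outer traffic class either from the inner packet (IP6_TNL_F_USE_ORIG_TCLASS) or from the configured flowinfo, then fold the inner ECN bits in before transmit and feed dsfield into the route lookup. The sketch below models that DSCP/ECN split with explicit masks; pick_dsfield() and the constants are illustrative, where the kernel uses ip6_tclass() and INET_ECN_encapsulate().

/* Sketch: outer tclass = chosen DSCP with ECN derived from the inner
 * header (CE on the inner header becomes ECT(0) on the outer).
 */
#include <stdbool.h>
#include <stdio.h>

#define ECN_MASK   0x03u
#define DSCP_MASK  0xFCu
#define ECN_CE     0x03u
#define ECN_ECT_0  0x02u

static unsigned int pick_dsfield(bool use_orig_tclass,
				 unsigned int inner_tclass,
				 unsigned int configured_tclass)
{
	unsigned int dsfield = use_orig_tclass ? inner_tclass
					       : configured_tclass;
	unsigned int ecn = inner_tclass & ECN_MASK;

	if (ecn == ECN_CE)
		ecn = ECN_ECT_0;

	return (dsfield & DSCP_MASK) | ecn;
}

int main(void)
{
	/* inner tclass 0xba = DSCP 46 with ECT(0) */
	printf("use orig tclass: 0x%02x\n", pick_dsfield(true, 0xba, 0x00));
	printf("use configured:  0x%02x\n", pick_dsfield(false, 0xba, 0x28));
	return 0;
}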
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index cd4252346a32..e9065b8d3af8 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -79,14 +79,13 @@ EXPORT_SYMBOL(ipv6_select_ident);
79int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) 79int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
80{ 80{
81 u16 offset = sizeof(struct ipv6hdr); 81 u16 offset = sizeof(struct ipv6hdr);
82 struct ipv6_opt_hdr *exthdr =
83 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
84 unsigned int packet_len = skb_tail_pointer(skb) - 82 unsigned int packet_len = skb_tail_pointer(skb) -
85 skb_network_header(skb); 83 skb_network_header(skb);
86 int found_rhdr = 0; 84 int found_rhdr = 0;
87 *nexthdr = &ipv6_hdr(skb)->nexthdr; 85 *nexthdr = &ipv6_hdr(skb)->nexthdr;
88 86
89 while (offset + 1 <= packet_len) { 87 while (offset <= packet_len) {
88 struct ipv6_opt_hdr *exthdr;
90 89
91 switch (**nexthdr) { 90 switch (**nexthdr) {
92 91
@@ -107,13 +106,16 @@ int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
107 return offset; 106 return offset;
108 } 107 }
109 108
110 offset += ipv6_optlen(exthdr); 109 if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
111 *nexthdr = &exthdr->nexthdr; 110 return -EINVAL;
111
112 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + 112 exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
113 offset); 113 offset);
114 offset += ipv6_optlen(exthdr);
115 *nexthdr = &exthdr->nexthdr;
114 } 116 }
115 117
116 return offset; 118 return -EINVAL;
117} 119}
118EXPORT_SYMBOL(ip6_find_1stfragopt); 120EXPORT_SYMBOL(ip6_find_1stfragopt);
119 121
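
[Editor's note] The reworked ip6_find_1stfragopt() checks that each extension header fits inside the packet before dereferencing it and returns -EINVAL instead of a bogus offset when the chain runs past the tail; the callers in ip6_output.c, ip6_offload.c and udp_offload.c now test for a negative return. Below is a self-contained sketch of such a bounds-checked walk, with a simplified two-byte header layout rather than the real ipv6_opt_hdr parsing.

/* Sketch: walk a chain of (nexthdr, hdrlen) option headers, verifying each
 * header fits in the buffer before reading it; -EINVAL on truncation.
 */
#include <errno.h>
#include <stdio.h>

struct opt_hdr {
	unsigned char nexthdr;
	unsigned char hdrlen;	/* in 8-octet units, not counting the first 8 */
};

static int find_payload_offset(const unsigned char *pkt, unsigned int pkt_len,
			       unsigned int first_off, unsigned char stop_proto)
{
	unsigned int offset = first_off;
	unsigned char nexthdr = pkt[0];	/* byte 0 carries the first nexthdr here */

	while (offset <= pkt_len) {
		const struct opt_hdr *hdr;

		if (nexthdr == stop_proto)
			return (int)offset;	/* found the upper-layer header */

		if (offset + sizeof(*hdr) > pkt_len)
			return -EINVAL;		/* header would be truncated */

		hdr = (const struct opt_hdr *)(pkt + offset);
		offset += (hdr->hdrlen + 1u) * 8u;
		nexthdr = hdr->nexthdr;
	}

	return -EINVAL;				/* ran past the end of the packet */
}

int main(void)
{
	unsigned char pkt[64] = { 0 };

	pkt[0] = 0;	/* first nexthdr: one option header at offset 8 */
	pkt[8] = 17;	/* that header says "UDP next" ... */
	pkt[9] = 0;	/* ... and is 8 bytes long */
	printf("offset    = %d\n", find_payload_offset(pkt, sizeof(pkt), 8, 17));
	printf("truncated = %d\n", find_payload_offset(pkt, 10, 8, 17));
	return 0;
}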
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 9b522fa90e6d..ac826dd338ff 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -192,7 +192,7 @@ static struct inet_protosw pingv6_protosw = {
192 .type = SOCK_DGRAM, 192 .type = SOCK_DGRAM,
193 .protocol = IPPROTO_ICMPV6, 193 .protocol = IPPROTO_ICMPV6,
194 .prot = &pingv6_prot, 194 .prot = &pingv6_prot,
195 .ops = &inet6_dgram_ops, 195 .ops = &inet6_sockraw_ops,
196 .flags = INET_PROTOSW_REUSE, 196 .flags = INET_PROTOSW_REUSE,
197}; 197};
198 198
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1f992d9e261d..60be012fe708 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1338,7 +1338,7 @@ void raw6_proc_exit(void)
1338#endif /* CONFIG_PROC_FS */ 1338#endif /* CONFIG_PROC_FS */
1339 1339
1340/* Same as inet6_dgram_ops, sans udp_poll. */ 1340/* Same as inet6_dgram_ops, sans udp_poll. */
1341static const struct proto_ops inet6_sockraw_ops = { 1341const struct proto_ops inet6_sockraw_ops = {
1342 .family = PF_INET6, 1342 .family = PF_INET6,
1343 .owner = THIS_MODULE, 1343 .owner = THIS_MODULE,
1344 .release = inet6_release, 1344 .release = inet6_release,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7a8237acd210..4f4310a36a04 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1062,6 +1062,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1062 newtp->af_specific = &tcp_sock_ipv6_mapped_specific; 1062 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1063#endif 1063#endif
1064 1064
1065 newnp->ipv6_mc_list = NULL;
1065 newnp->ipv6_ac_list = NULL; 1066 newnp->ipv6_ac_list = NULL;
1066 newnp->ipv6_fl_list = NULL; 1067 newnp->ipv6_fl_list = NULL;
1067 newnp->pktoptions = NULL; 1068 newnp->pktoptions = NULL;
@@ -1131,6 +1132,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
1131 First: no IPv4 options. 1132 First: no IPv4 options.
1132 */ 1133 */
1133 newinet->inet_opt = NULL; 1134 newinet->inet_opt = NULL;
1135 newnp->ipv6_mc_list = NULL;
1134 newnp->ipv6_ac_list = NULL; 1136 newnp->ipv6_ac_list = NULL;
1135 newnp->ipv6_fl_list = NULL; 1137 newnp->ipv6_fl_list = NULL;
1136 1138
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 04862abfe4ec..06ec39b79609 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -526,7 +526,7 @@ out:
526 return; 526 return;
527} 527}
528 528
529int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 529static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
530{ 530{
531 int rc; 531 int rc;
532 532
@@ -569,7 +569,7 @@ void udpv6_encap_enable(void)
569} 569}
570EXPORT_SYMBOL(udpv6_encap_enable); 570EXPORT_SYMBOL(udpv6_encap_enable);
571 571
572int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 572static int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
573{ 573{
574 struct udp_sock *up = udp_sk(sk); 574 struct udp_sock *up = udp_sk(sk);
575 int is_udplite = IS_UDPLITE(sk); 575 int is_udplite = IS_UDPLITE(sk);
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index e78bdc76dcc3..f180b3d85e31 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -26,7 +26,6 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname,
26int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); 26int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len);
27int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, 27int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock,
28 int flags, int *addr_len); 28 int flags, int *addr_len);
29int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);
30void udpv6_destroy_sock(struct sock *sk); 29void udpv6_destroy_sock(struct sock *sk);
31 30
32#ifdef CONFIG_PROC_FS 31#ifdef CONFIG_PROC_FS
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index ac858c480f2f..a2267f80febb 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -29,6 +29,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
29 u8 frag_hdr_sz = sizeof(struct frag_hdr); 29 u8 frag_hdr_sz = sizeof(struct frag_hdr);
30 __wsum csum; 30 __wsum csum;
31 int tnl_hlen; 31 int tnl_hlen;
32 int err;
32 33
33 mss = skb_shinfo(skb)->gso_size; 34 mss = skb_shinfo(skb)->gso_size;
34 if (unlikely(skb->len <= mss)) 35 if (unlikely(skb->len <= mss))
@@ -90,7 +91,10 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
90 /* Find the unfragmentable header and shift it left by frag_hdr_sz 91 /* Find the unfragmentable header and shift it left by frag_hdr_sz
91 * bytes to insert fragment header. 92 * bytes to insert fragment header.
92 */ 93 */
93 unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr); 94 err = ip6_find_1stfragopt(skb, &prevhdr);
95 if (err < 0)
96 return ERR_PTR(err);
97 unfrag_ip6hlen = err;
94 nexthdr = *prevhdr; 98 nexthdr = *prevhdr;
95 *prevhdr = NEXTHDR_FRAGMENT; 99 *prevhdr = NEXTHDR_FRAGMENT;
96 unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) + 100 unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e015906f9ca..07d36573f50b 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@ static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
47 iph = ipv6_hdr(skb); 47 iph = ipv6_hdr(skb);
48 48
49 hdr_len = x->type->hdr_offset(x, skb, &prevhdr); 49 hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
50 if (hdr_len < 0)
51 return hdr_len;
50 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); 52 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
51 skb_set_network_header(skb, -x->props.header_len); 53 skb_set_network_header(skb, -x->props.header_len);
52 skb->transport_header = skb->network_header + hdr_len; 54 skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 7a92c0f31912..9ad07a91708e 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -30,6 +30,8 @@ static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
30 skb_set_inner_transport_header(skb, skb_transport_offset(skb)); 30 skb_set_inner_transport_header(skb, skb_transport_offset(skb));
31 31
32 hdr_len = x->type->hdr_offset(x, skb, &prevhdr); 32 hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
33 if (hdr_len < 0)
34 return hdr_len;
33 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data); 35 skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
34 skb_set_network_header(skb, -x->props.header_len); 36 skb_set_network_header(skb, -x->props.header_len);
35 skb->transport_header = skb->network_header + hdr_len; 37 skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/key/af_key.c b/net/key/af_key.c
index c1950bb14735..512dc43d0ce6 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3285,7 +3285,7 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
3285 p += pol->sadb_x_policy_len*8; 3285 p += pol->sadb_x_policy_len*8;
3286 sec_ctx = (struct sadb_x_sec_ctx *)p; 3286 sec_ctx = (struct sadb_x_sec_ctx *)p;
3287 if (len < pol->sadb_x_policy_len*8 + 3287 if (len < pol->sadb_x_policy_len*8 +
3288 sec_ctx->sadb_x_sec_len) { 3288 sec_ctx->sadb_x_sec_len*8) {
3289 *dir = -EINVAL; 3289 *dir = -EINVAL;
3290 goto out; 3290 goto out;
3291 } 3291 }
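
[Editor's note] The af_key fix multiplies sadb_x_sec_len by 8 before comparing it with the remaining message length, because PF_KEY extension lengths are counted in 64-bit words. A tiny sketch of validating a length expressed in 8-byte units against the bytes actually available; ext_fits() is a made-up helper.

/* Sketch: convert a unit-counted length to bytes before bounds-checking it.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool ext_fits(size_t bytes_left, uint16_t len_in_8byte_units)
{
	size_t ext_bytes = (size_t)len_in_8byte_units * 8;

	return ext_bytes <= bytes_left;	/* compare bytes with bytes */
}

int main(void)
{
	/* 3 units = 24 bytes; only 20 bytes remain, so this must be rejected.
	 * Comparing the raw unit count (3 <= 20) would wrongly accept it.
	 */
	printf("fits: %d\n", ext_fits(20, 3));
	printf("fits: %d\n", ext_fits(32, 3));
	return 0;
}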
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8364fe5b59e4..c38d16f22d2a 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -311,6 +311,8 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
311 int rc = -EINVAL; 311 int rc = -EINVAL;
312 312
313 dprintk("%s: binding %02X\n", __func__, addr->sllc_sap); 313 dprintk("%s: binding %02X\n", __func__, addr->sllc_sap);
314
315 lock_sock(sk);
314 if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr))) 316 if (unlikely(!sock_flag(sk, SOCK_ZAPPED) || addrlen != sizeof(*addr)))
315 goto out; 317 goto out;
316 rc = -EAFNOSUPPORT; 318 rc = -EAFNOSUPPORT;
@@ -382,6 +384,7 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen)
382out_put: 384out_put:
383 llc_sap_put(sap); 385 llc_sap_put(sap);
384out: 386out:
387 release_sock(sk);
385 return rc; 388 return rc;
386} 389}
387 390
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 60e2a62f7bef..cf2392b2ac71 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -7,7 +7,7 @@
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2010, Intel Corporation 9 * Copyright 2007-2010, Intel Corporation
10 * Copyright(c) 2015 Intel Deutschland GmbH 10 * Copyright(c) 2015-2017 Intel Deutschland GmbH
11 * 11 *
12 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
@@ -741,46 +741,43 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
741 ieee80211_agg_start_txq(sta, tid, true); 741 ieee80211_agg_start_txq(sta, tid, true);
742} 742}
743 743
744void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid) 744void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
745 struct tid_ampdu_tx *tid_tx)
745{ 746{
746 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 747 struct ieee80211_sub_if_data *sdata = sta->sdata;
747 struct ieee80211_local *local = sdata->local; 748 struct ieee80211_local *local = sdata->local;
748 struct sta_info *sta;
749 struct tid_ampdu_tx *tid_tx;
750 749
751 trace_api_start_tx_ba_cb(sdata, ra, tid); 750 if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
751 return;
752
753 if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
754 ieee80211_agg_tx_operational(local, sta, tid);
755}
756
757static struct tid_ampdu_tx *
758ieee80211_lookup_tid_tx(struct ieee80211_sub_if_data *sdata,
759 const u8 *ra, u16 tid, struct sta_info **sta)
760{
761 struct tid_ampdu_tx *tid_tx;
752 762
753 if (tid >= IEEE80211_NUM_TIDS) { 763 if (tid >= IEEE80211_NUM_TIDS) {
754 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n", 764 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
755 tid, IEEE80211_NUM_TIDS); 765 tid, IEEE80211_NUM_TIDS);
756 return; 766 return NULL;
757 } 767 }
758 768
759 mutex_lock(&local->sta_mtx); 769 *sta = sta_info_get_bss(sdata, ra);
760 sta = sta_info_get_bss(sdata, ra); 770 if (!*sta) {
761 if (!sta) {
762 mutex_unlock(&local->sta_mtx);
763 ht_dbg(sdata, "Could not find station: %pM\n", ra); 771 ht_dbg(sdata, "Could not find station: %pM\n", ra);
764 return; 772 return NULL;
765 } 773 }
766 774
767 mutex_lock(&sta->ampdu_mlme.mtx); 775 tid_tx = rcu_dereference((*sta)->ampdu_mlme.tid_tx[tid]);
768 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
769 776
770 if (WARN_ON(!tid_tx)) { 777 if (WARN_ON(!tid_tx))
771 ht_dbg(sdata, "addBA was not requested!\n"); 778 ht_dbg(sdata, "addBA was not requested!\n");
772 goto unlock;
773 }
774 779
775 if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state))) 780 return tid_tx;
776 goto unlock;
777
778 if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
779 ieee80211_agg_tx_operational(local, sta, tid);
780
781 unlock:
782 mutex_unlock(&sta->ampdu_mlme.mtx);
783 mutex_unlock(&local->sta_mtx);
784} 781}
785 782
786void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, 783void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -788,19 +785,20 @@ void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
788{ 785{
789 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 786 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
790 struct ieee80211_local *local = sdata->local; 787 struct ieee80211_local *local = sdata->local;
791 struct ieee80211_ra_tid *ra_tid; 788 struct sta_info *sta;
792 struct sk_buff *skb = dev_alloc_skb(0); 789 struct tid_ampdu_tx *tid_tx;
793 790
794 if (unlikely(!skb)) 791 trace_api_start_tx_ba_cb(sdata, ra, tid);
795 return;
796 792
797 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 793 rcu_read_lock();
798 memcpy(&ra_tid->ra, ra, ETH_ALEN); 794 tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
799 ra_tid->tid = tid; 795 if (!tid_tx)
796 goto out;
800 797
801 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START; 798 set_bit(HT_AGG_STATE_START_CB, &tid_tx->state);
802 skb_queue_tail(&sdata->skb_queue, skb); 799 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
803 ieee80211_queue_work(&local->hw, &sdata->work); 800 out:
801 rcu_read_unlock();
804} 802}
805EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe); 803EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);
806 804
@@ -860,37 +858,18 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
860} 858}
861EXPORT_SYMBOL(ieee80211_stop_tx_ba_session); 859EXPORT_SYMBOL(ieee80211_stop_tx_ba_session);
862 860
863void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid) 861void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
862 struct tid_ampdu_tx *tid_tx)
864{ 863{
865 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 864 struct ieee80211_sub_if_data *sdata = sta->sdata;
866 struct ieee80211_local *local = sdata->local;
867 struct sta_info *sta;
868 struct tid_ampdu_tx *tid_tx;
869 bool send_delba = false; 865 bool send_delba = false;
870 866
871 trace_api_stop_tx_ba_cb(sdata, ra, tid); 867 ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
872 868 sta->sta.addr, tid);
873 if (tid >= IEEE80211_NUM_TIDS) {
874 ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
875 tid, IEEE80211_NUM_TIDS);
876 return;
877 }
878
879 ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n", ra, tid);
880
881 mutex_lock(&local->sta_mtx);
882
883 sta = sta_info_get_bss(sdata, ra);
884 if (!sta) {
885 ht_dbg(sdata, "Could not find station: %pM\n", ra);
886 goto unlock;
887 }
888 869
889 mutex_lock(&sta->ampdu_mlme.mtx);
890 spin_lock_bh(&sta->lock); 870 spin_lock_bh(&sta->lock);
891 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
892 871
893 if (!tid_tx || !test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) { 872 if (!test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
894 ht_dbg(sdata, 873 ht_dbg(sdata,
895 "unexpected callback to A-MPDU stop for %pM tid %d\n", 874 "unexpected callback to A-MPDU stop for %pM tid %d\n",
896 sta->sta.addr, tid); 875 sta->sta.addr, tid);
@@ -906,12 +885,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
906 spin_unlock_bh(&sta->lock); 885 spin_unlock_bh(&sta->lock);
907 886
908 if (send_delba) 887 if (send_delba)
909 ieee80211_send_delba(sdata, ra, tid, 888 ieee80211_send_delba(sdata, sta->sta.addr, tid,
910 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE); 889 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
911
912 mutex_unlock(&sta->ampdu_mlme.mtx);
913 unlock:
914 mutex_unlock(&local->sta_mtx);
915} 890}
916 891
917void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif, 892void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
@@ -919,19 +894,20 @@ void ieee80211_stop_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
919{ 894{
920 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif); 895 struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
921 struct ieee80211_local *local = sdata->local; 896 struct ieee80211_local *local = sdata->local;
922 struct ieee80211_ra_tid *ra_tid; 897 struct sta_info *sta;
923 struct sk_buff *skb = dev_alloc_skb(0); 898 struct tid_ampdu_tx *tid_tx;
924 899
925 if (unlikely(!skb)) 900 trace_api_stop_tx_ba_cb(sdata, ra, tid);
926 return;
927 901
928 ra_tid = (struct ieee80211_ra_tid *) &skb->cb; 902 rcu_read_lock();
929 memcpy(&ra_tid->ra, ra, ETH_ALEN); 903 tid_tx = ieee80211_lookup_tid_tx(sdata, ra, tid, &sta);
930 ra_tid->tid = tid; 904 if (!tid_tx)
905 goto out;
931 906
932 skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_STOP; 907 set_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state);
933 skb_queue_tail(&sdata->skb_queue, skb); 908 ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);
934 ieee80211_queue_work(&local->hw, &sdata->work); 909 out:
910 rcu_read_unlock();
935} 911}
936EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe); 912EXPORT_SYMBOL(ieee80211_stop_tx_ba_cb_irqsafe);
937 913
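
[Editor's note] The agg-tx rework stops bouncing start/stop callbacks through zero-size skbs on sdata->skb_queue; the irqsafe callbacks now set HT_AGG_STATE_START_CB / HT_AGG_STATE_STOP_CB on the session state and kick the ampdu_mlme work, which processes the bits in ieee80211_ba_session_work(). A minimal userspace sketch of that "set a bit, run the worker" pattern follows; only the bit names echo the patch, everything else is made up, and the real code runs from a workqueue under proper locking.

/* Sketch: defer per-session callbacks via state bits drained by one worker,
 * instead of queueing one message per event.
 */
#include <stdio.h>

#define STATE_START_CB (1u << 6)
#define STATE_STOP_CB  (1u << 7)

struct session {
	int tid;
	unsigned int state;
};

static void start_cb(struct session *s) { printf("start cb, tid %d\n", s->tid); }
static void stop_cb(struct session *s)  { printf("stop cb, tid %d\n", s->tid);  }

/* The worker drains whatever bits accumulated since it last ran. */
static void session_work(struct session *sessions, int n)
{
	for (int i = 0; i < n; i++) {
		struct session *s = &sessions[i];

		if (s->state & STATE_START_CB) {
			s->state &= ~STATE_START_CB;
			start_cb(s);
		}
		if (s->state & STATE_STOP_CB) {
			s->state &= ~STATE_STOP_CB;
			stop_cb(s);
		}
	}
}

int main(void)
{
	struct session sessions[2] = { { .tid = 0 }, { .tid = 5 } };

	/* "irqsafe" callers just mark the session and kick the worker. */
	sessions[1].state |= STATE_START_CB;
	sessions[0].state |= STATE_STOP_CB;
	session_work(sessions, 2);
	return 0;
}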
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index f4a528773563..6ca5442b1e03 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -7,6 +7,7 @@
7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> 7 * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
9 * Copyright 2007-2010, Intel Corporation 9 * Copyright 2007-2010, Intel Corporation
10 * Copyright 2017 Intel Deutschland GmbH
10 * 11 *
11 * This program is free software; you can redistribute it and/or modify 12 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as 13 * it under the terms of the GNU General Public License version 2 as
@@ -289,8 +290,6 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
289{ 290{
290 int i; 291 int i;
291 292
292 cancel_work_sync(&sta->ampdu_mlme.work);
293
294 for (i = 0; i < IEEE80211_NUM_TIDS; i++) { 293 for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
295 __ieee80211_stop_tx_ba_session(sta, i, reason); 294 __ieee80211_stop_tx_ba_session(sta, i, reason);
296 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, 295 __ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
@@ -298,6 +297,9 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta,
298 reason != AGG_STOP_DESTROY_STA && 297 reason != AGG_STOP_DESTROY_STA &&
299 reason != AGG_STOP_PEER_REQUEST); 298 reason != AGG_STOP_PEER_REQUEST);
300 } 299 }
300
301 /* stopping might queue the work again - so cancel only afterwards */
302 cancel_work_sync(&sta->ampdu_mlme.work);
301} 303}
302 304
303void ieee80211_ba_session_work(struct work_struct *work) 305void ieee80211_ba_session_work(struct work_struct *work)
@@ -352,10 +354,16 @@ void ieee80211_ba_session_work(struct work_struct *work)
352 spin_unlock_bh(&sta->lock); 354 spin_unlock_bh(&sta->lock);
353 355
354 tid_tx = rcu_dereference_protected_tid_tx(sta, tid); 356 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
355 if (tid_tx && test_and_clear_bit(HT_AGG_STATE_WANT_STOP, 357 if (!tid_tx)
356 &tid_tx->state)) 358 continue;
359
360 if (test_and_clear_bit(HT_AGG_STATE_START_CB, &tid_tx->state))
361 ieee80211_start_tx_ba_cb(sta, tid, tid_tx);
362 if (test_and_clear_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state))
357 ___ieee80211_stop_tx_ba_session(sta, tid, 363 ___ieee80211_stop_tx_ba_session(sta, tid,
358 AGG_STOP_LOCAL_REQUEST); 364 AGG_STOP_LOCAL_REQUEST);
365 if (test_and_clear_bit(HT_AGG_STATE_STOP_CB, &tid_tx->state))
366 ieee80211_stop_tx_ba_cb(sta, tid, tid_tx);
359 } 367 }
360 mutex_unlock(&sta->ampdu_mlme.mtx); 368 mutex_unlock(&sta->ampdu_mlme.mtx);
361} 369}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index f8f6c148f554..665501ac358f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1036,8 +1036,6 @@ struct ieee80211_rx_agg {
1036 1036
1037enum sdata_queue_type { 1037enum sdata_queue_type {
1038 IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0, 1038 IEEE80211_SDATA_QUEUE_TYPE_FRAME = 0,
1039 IEEE80211_SDATA_QUEUE_AGG_START = 1,
1040 IEEE80211_SDATA_QUEUE_AGG_STOP = 2,
1041 IEEE80211_SDATA_QUEUE_RX_AGG_START = 3, 1039 IEEE80211_SDATA_QUEUE_RX_AGG_START = 3,
1042 IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4, 1040 IEEE80211_SDATA_QUEUE_RX_AGG_STOP = 4,
1043}; 1041};
@@ -1427,12 +1425,6 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
1427 return local->hw.wiphy->bands[band]; 1425 return local->hw.wiphy->bands[band];
1428} 1426}
1429 1427
1430/* this struct represents 802.11n's RA/TID combination */
1431struct ieee80211_ra_tid {
1432 u8 ra[ETH_ALEN];
1433 u16 tid;
1434};
1435
1436/* this struct holds the value parsing from channel switch IE */ 1428/* this struct holds the value parsing from channel switch IE */
1437struct ieee80211_csa_ie { 1429struct ieee80211_csa_ie {
1438 struct cfg80211_chan_def chandef; 1430 struct cfg80211_chan_def chandef;
@@ -1794,8 +1786,10 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1794 enum ieee80211_agg_stop_reason reason); 1786 enum ieee80211_agg_stop_reason reason);
1795int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid, 1787int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
1796 enum ieee80211_agg_stop_reason reason); 1788 enum ieee80211_agg_stop_reason reason);
1797void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid); 1789void ieee80211_start_tx_ba_cb(struct sta_info *sta, int tid,
1798void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid); 1790 struct tid_ampdu_tx *tid_tx);
1791void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
1792 struct tid_ampdu_tx *tid_tx);
1799void ieee80211_ba_session_work(struct work_struct *work); 1793void ieee80211_ba_session_work(struct work_struct *work);
1800void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid); 1794void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid);
1801void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid); 1795void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3bd5b81f5d81..8fae1a72e6a7 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1237,7 +1237,6 @@ static void ieee80211_iface_work(struct work_struct *work)
1237 struct ieee80211_local *local = sdata->local; 1237 struct ieee80211_local *local = sdata->local;
1238 struct sk_buff *skb; 1238 struct sk_buff *skb;
1239 struct sta_info *sta; 1239 struct sta_info *sta;
1240 struct ieee80211_ra_tid *ra_tid;
1241 struct ieee80211_rx_agg *rx_agg; 1240 struct ieee80211_rx_agg *rx_agg;
1242 1241
1243 if (!ieee80211_sdata_running(sdata)) 1242 if (!ieee80211_sdata_running(sdata))
@@ -1253,15 +1252,7 @@ static void ieee80211_iface_work(struct work_struct *work)
1253 while ((skb = skb_dequeue(&sdata->skb_queue))) { 1252 while ((skb = skb_dequeue(&sdata->skb_queue))) {
1254 struct ieee80211_mgmt *mgmt = (void *)skb->data; 1253 struct ieee80211_mgmt *mgmt = (void *)skb->data;
1255 1254
1256 if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { 1255 if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
1257 ra_tid = (void *)&skb->cb;
1258 ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra,
1259 ra_tid->tid);
1260 } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) {
1261 ra_tid = (void *)&skb->cb;
1262 ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra,
1263 ra_tid->tid);
1264 } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_RX_AGG_START) {
1265 rx_agg = (void *)&skb->cb; 1256 rx_agg = (void *)&skb->cb;
1266 mutex_lock(&local->sta_mtx); 1257 mutex_lock(&local->sta_mtx);
1267 sta = sta_info_get_bss(sdata, rx_agg->addr); 1258 sta = sta_info_get_bss(sdata, rx_agg->addr);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 35f4c7d7a500..1f75280ba26c 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2492,7 +2492,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
2492 if (is_multicast_ether_addr(hdr->addr1)) { 2492 if (is_multicast_ether_addr(hdr->addr1)) {
2493 mpp_addr = hdr->addr3; 2493 mpp_addr = hdr->addr3;
2494 proxied_addr = mesh_hdr->eaddr1; 2494 proxied_addr = mesh_hdr->eaddr1;
2495 } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) { 2495 } else if ((mesh_hdr->flags & MESH_FLAGS_AE) ==
2496 MESH_FLAGS_AE_A5_A6) {
2496 /* has_a4 already checked in ieee80211_rx_mesh_check */ 2497 /* has_a4 already checked in ieee80211_rx_mesh_check */
2497 mpp_addr = hdr->addr4; 2498 mpp_addr = hdr->addr4;
2498 proxied_addr = mesh_hdr->eaddr2; 2499 proxied_addr = mesh_hdr->eaddr2;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index 7cdf7a835bb0..403e3cc58b57 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -2155,7 +2155,7 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
2155 struct ieee80211_sta_rx_stats *cpurxs; 2155 struct ieee80211_sta_rx_stats *cpurxs;
2156 2156
2157 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu); 2157 cpurxs = per_cpu_ptr(sta->pcpu_rx_stats, cpu);
2158 sinfo->rx_packets += cpurxs->dropped; 2158 sinfo->rx_dropped_misc += cpurxs->dropped;
2159 } 2159 }
2160 } 2160 }
2161 2161
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 5609cacb20d5..ea0747d6a6da 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -116,6 +116,8 @@ enum ieee80211_sta_info_flags {
116#define HT_AGG_STATE_STOPPING 3 116#define HT_AGG_STATE_STOPPING 3
117#define HT_AGG_STATE_WANT_START 4 117#define HT_AGG_STATE_WANT_START 4
118#define HT_AGG_STATE_WANT_STOP 5 118#define HT_AGG_STATE_WANT_STOP 5
119#define HT_AGG_STATE_START_CB 6
120#define HT_AGG_STATE_STOP_CB 7
119 121
120enum ieee80211_agg_stop_reason { 122enum ieee80211_agg_stop_reason {
121 AGG_STOP_DECLINED, 123 AGG_STOP_DECLINED,
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 257ec66009da..7b05fd1497ce 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1418,7 +1418,7 @@ static void mpls_ifup(struct net_device *dev, unsigned int flags)
1418 continue; 1418 continue;
1419 alive++; 1419 alive++;
1420 nh_flags &= ~flags; 1420 nh_flags &= ~flags;
1421 WRITE_ONCE(nh->nh_flags, flags); 1421 WRITE_ONCE(nh->nh_flags, nh_flags);
1422 } endfor_nexthops(rt); 1422 } endfor_nexthops(rt);
1423 1423
1424 WRITE_ONCE(rt->rt_nhn_alive, alive); 1424 WRITE_ONCE(rt->rt_nhn_alive, alive);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index d2d7bdf1d510..ad99c1ceea6f 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -849,10 +849,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
849{ 849{
850 unsigned int verdict = NF_DROP; 850 unsigned int verdict = NF_DROP;
851 851
852 if (IP_VS_FWD_METHOD(cp) != 0) { 852 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
853 pr_err("shouldn't reach here, because the box is on the " 853 goto ignore_cp;
854 "half connection in the tun/dr module.\n");
855 }
856 854
857 /* Ensure the checksum is correct */ 855 /* Ensure the checksum is correct */
858 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) { 856 if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
@@ -886,6 +884,8 @@ static int handle_response_icmp(int af, struct sk_buff *skb,
886 ip_vs_notrack(skb); 884 ip_vs_notrack(skb);
887 else 885 else
888 ip_vs_update_conntrack(skb, cp, 0); 886 ip_vs_update_conntrack(skb, cp, 0);
887
888ignore_cp:
889 verdict = NF_ACCEPT; 889 verdict = NF_ACCEPT;
890 890
891out: 891out:
@@ -1385,8 +1385,11 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
1385 */ 1385 */
1386 cp = pp->conn_out_get(ipvs, af, skb, &iph); 1386 cp = pp->conn_out_get(ipvs, af, skb, &iph);
1387 1387
1388 if (likely(cp)) 1388 if (likely(cp)) {
1389 if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
1390 goto ignore_cp;
1389 return handle_response(af, skb, pd, cp, &iph, hooknum); 1391 return handle_response(af, skb, pd, cp, &iph, hooknum);
1392 }
1390 1393
1391 /* Check for real-server-started requests */ 1394 /* Check for real-server-started requests */
1392 if (atomic_read(&ipvs->conn_out_counter)) { 1395 if (atomic_read(&ipvs->conn_out_counter)) {
@@ -1444,9 +1447,15 @@ ip_vs_out(struct netns_ipvs *ipvs, unsigned int hooknum, struct sk_buff *skb, in
1444 } 1447 }
1445 } 1448 }
1446 } 1449 }
1450
1451out:
1447 IP_VS_DBG_PKT(12, af, pp, skb, iph.off, 1452 IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
1448 "ip_vs_out: packet continues traversal as normal"); 1453 "ip_vs_out: packet continues traversal as normal");
1449 return NF_ACCEPT; 1454 return NF_ACCEPT;
1455
1456ignore_cp:
1457 __ip_vs_conn_put(cp);
1458 goto out;
1450} 1459}
1451 1460
1452/* 1461/*
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 3a60efa7799b..7f6100ca63be 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -174,6 +174,10 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
174#endif 174#endif
175 if (h != NULL && !try_module_get(h->me)) 175 if (h != NULL && !try_module_get(h->me))
176 h = NULL; 176 h = NULL;
177 if (h != NULL && !refcount_inc_not_zero(&h->refcnt)) {
178 module_put(h->me);
179 h = NULL;
180 }
177 181
178 rcu_read_unlock(); 182 rcu_read_unlock();
179 183
@@ -181,6 +185,13 @@ nf_conntrack_helper_try_module_get(const char *name, u16 l3num, u8 protonum)
181} 185}
182EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get); 186EXPORT_SYMBOL_GPL(nf_conntrack_helper_try_module_get);
183 187
188void nf_conntrack_helper_put(struct nf_conntrack_helper *helper)
189{
190 refcount_dec(&helper->refcnt);
191 module_put(helper->me);
192}
193EXPORT_SYMBOL_GPL(nf_conntrack_helper_put);
194
184struct nf_conn_help * 195struct nf_conn_help *
185nf_ct_helper_ext_add(struct nf_conn *ct, 196nf_ct_helper_ext_add(struct nf_conn *ct,
186 struct nf_conntrack_helper *helper, gfp_t gfp) 197 struct nf_conntrack_helper *helper, gfp_t gfp)
@@ -417,6 +428,7 @@ int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
417 } 428 }
418 } 429 }
419 } 430 }
431 refcount_set(&me->refcnt, 1);
420 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]); 432 hlist_add_head_rcu(&me->hnode, &nf_ct_helper_hash[h]);
421 nf_ct_helper_count++; 433 nf_ct_helper_count++;
422out: 434out:
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index dcf561b5c97a..a8be9b72e6cd 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -45,6 +45,8 @@
45#include <net/netfilter/nf_conntrack_zones.h> 45#include <net/netfilter/nf_conntrack_zones.h>
46#include <net/netfilter/nf_conntrack_timestamp.h> 46#include <net/netfilter/nf_conntrack_timestamp.h>
47#include <net/netfilter/nf_conntrack_labels.h> 47#include <net/netfilter/nf_conntrack_labels.h>
48#include <net/netfilter/nf_conntrack_seqadj.h>
49#include <net/netfilter/nf_conntrack_synproxy.h>
48#ifdef CONFIG_NF_NAT_NEEDED 50#ifdef CONFIG_NF_NAT_NEEDED
49#include <net/netfilter/nf_nat_core.h> 51#include <net/netfilter/nf_nat_core.h>
50#include <net/netfilter/nf_nat_l4proto.h> 52#include <net/netfilter/nf_nat_l4proto.h>
@@ -888,8 +890,13 @@ restart:
888 } 890 }
889out: 891out:
890 local_bh_enable(); 892 local_bh_enable();
891 if (last) 893 if (last) {
894 /* nf ct hash resize happened, now clear the leftover. */
895 if ((struct nf_conn *)cb->args[1] == last)
896 cb->args[1] = 0;
897
892 nf_ct_put(last); 898 nf_ct_put(last);
899 }
893 900
894 while (i) { 901 while (i) {
895 i--; 902 i--;
@@ -1007,9 +1014,8 @@ static const struct nla_policy tuple_nla_policy[CTA_TUPLE_MAX+1] = {
1007 1014
1008static int 1015static int
1009ctnetlink_parse_tuple(const struct nlattr * const cda[], 1016ctnetlink_parse_tuple(const struct nlattr * const cda[],
1010 struct nf_conntrack_tuple *tuple, 1017 struct nf_conntrack_tuple *tuple, u32 type,
1011 enum ctattr_type type, u_int8_t l3num, 1018 u_int8_t l3num, struct nf_conntrack_zone *zone)
1012 struct nf_conntrack_zone *zone)
1013{ 1019{
1014 struct nlattr *tb[CTA_TUPLE_MAX+1]; 1020 struct nlattr *tb[CTA_TUPLE_MAX+1];
1015 int err; 1021 int err;
@@ -1828,6 +1834,8 @@ ctnetlink_create_conntrack(struct net *net,
1828 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC); 1834 nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
1829 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC); 1835 nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
1830 nf_ct_labels_ext_add(ct); 1836 nf_ct_labels_ext_add(ct);
1837 nfct_seqadj_ext_add(ct);
1838 nfct_synproxy_ext_add(ct);
1831 1839
1832 /* we must add conntrack extensions before confirmation. */ 1840 /* we must add conntrack extensions before confirmation. */
1833 ct->status |= IPS_CONFIRMED; 1841 ct->status |= IPS_CONFIRMED;
@@ -2447,7 +2455,7 @@ static struct nfnl_ct_hook ctnetlink_glue_hook = {
2447 2455
2448static int ctnetlink_exp_dump_tuple(struct sk_buff *skb, 2456static int ctnetlink_exp_dump_tuple(struct sk_buff *skb,
2449 const struct nf_conntrack_tuple *tuple, 2457 const struct nf_conntrack_tuple *tuple,
2450 enum ctattr_expect type) 2458 u32 type)
2451{ 2459{
2452 struct nlattr *nest_parms; 2460 struct nlattr *nest_parms;
2453 2461
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index 13875d599a85..1c5b14a6cab3 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -512,16 +512,19 @@ static int sctp_error(struct net *net, struct nf_conn *tpl, struct sk_buff *skb,
512 u8 pf, unsigned int hooknum) 512 u8 pf, unsigned int hooknum)
513{ 513{
514 const struct sctphdr *sh; 514 const struct sctphdr *sh;
515 struct sctphdr _sctph;
516 const char *logmsg; 515 const char *logmsg;
517 516
518 sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph); 517 if (skb->len < dataoff + sizeof(struct sctphdr)) {
519 if (!sh) {
520 logmsg = "nf_ct_sctp: short packet "; 518 logmsg = "nf_ct_sctp: short packet ";
521 goto out_invalid; 519 goto out_invalid;
522 } 520 }
523 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && 521 if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING &&
524 skb->ip_summed == CHECKSUM_NONE) { 522 skb->ip_summed == CHECKSUM_NONE) {
523 if (!skb_make_writable(skb, dataoff + sizeof(struct sctphdr))) {
524 logmsg = "nf_ct_sctp: failed to read header ";
525 goto out_invalid;
526 }
527 sh = (const struct sctphdr *)(skb->data + dataoff);
525 if (sh->checksum != sctp_compute_cksum(skb, dataoff)) { 528 if (sh->checksum != sctp_compute_cksum(skb, dataoff)) {
526 logmsg = "nf_ct_sctp: bad CRC "; 529 logmsg = "nf_ct_sctp: bad CRC ";
527 goto out_invalid; 530 goto out_invalid;
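
The sctp_error() change replaces the unconditional header read with an explicit length check, and only makes the region writable when the checksum actually has to be verified. The guard itself is plain bounds checking before touching a header at an offset; a hedged userspace sketch, with a made-up layout standing in for struct sctphdr:

#include <stdint.h>
#include <string.h>
#include <stddef.h>

struct sctp_hdr {               /* illustrative layout only */
        uint16_t source;
        uint16_t dest;
        uint32_t vtag;
        uint32_t checksum;
};

/* Copy the header at "dataoff" out of the packet, refusing truncated
 * input - the userspace counterpart of the skb->len check above. */
static int pull_sctp_header(const uint8_t *pkt, size_t len, size_t dataoff,
                            struct sctp_hdr *out)
{
        if (len < dataoff + sizeof(*out))
                return -1;                      /* "short packet" */
        memcpy(out, pkt + dataoff, sizeof(*out));       /* alignment-safe read */
        return 0;
}

int main(void)
{
        uint8_t pkt[64] = { 0 };
        struct sctp_hdr h;

        return pull_sctp_header(pkt, sizeof(pkt), 20, &h);
}

In the kernel version, skb_make_writable() additionally guarantees the header bytes are linear before sh is dereferenced and the CRC is computed.
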
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index b48d6b5aae8a..6c72922d20ca 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -409,6 +409,10 @@ nf_nat_setup_info(struct nf_conn *ct,
409{ 409{
410 struct nf_conntrack_tuple curr_tuple, new_tuple; 410 struct nf_conntrack_tuple curr_tuple, new_tuple;
411 411
412 /* Can't setup nat info for confirmed ct. */
413 if (nf_ct_is_confirmed(ct))
414 return NF_ACCEPT;
415
412 NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC || 416 NF_CT_ASSERT(maniptype == NF_NAT_MANIP_SRC ||
413 maniptype == NF_NAT_MANIP_DST); 417 maniptype == NF_NAT_MANIP_DST);
414 BUG_ON(nf_nat_initialized(ct, maniptype)); 418 BUG_ON(nf_nat_initialized(ct, maniptype));
@@ -562,7 +566,7 @@ static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
562 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack() 566 * Else, when the conntrack is destoyed, nf_nat_cleanup_conntrack()
563 * will delete entry from already-freed table. 567 * will delete entry from already-freed table.
564 */ 568 */
565 ct->status &= ~IPS_NAT_DONE_MASK; 569 clear_bit(IPS_SRC_NAT_DONE_BIT, &ct->status);
566 rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource, 570 rhltable_remove(&nf_nat_bysource_table, &ct->nat_bysource,
567 nf_nat_bysource_params); 571 nf_nat_bysource_params);
568 572
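
Two defensive changes here: nf_nat_setup_info() now bails out early for an already-confirmed conntrack, and the non-atomic "&= ~IPS_NAT_DONE_MASK" is replaced by an atomic clear_bit() of just the IPS_SRC_NAT_DONE bit. A plain read-modify-write on a shared status word can lose a bit set concurrently by another CPU; a sketch of the difference with C11 atomics (userspace, not the kernel bitops API):

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong status;

/* Racy version: load, modify, store - a concurrent setter of another
 * bit can be lost between the load and the store. */
static void clear_bits_racy(unsigned long mask)
{
        unsigned long v = atomic_load(&status);

        atomic_store(&status, v & ~mask);
}

/* What clear_bit() guarantees: the clear is one atomic RMW, so bits
 * set concurrently by other CPUs survive. */
static void clear_bit_atomic(int bit)
{
        atomic_fetch_and(&status, ~(1UL << bit));
}

int main(void)
{
        atomic_store(&status, 0x5);
        clear_bit_atomic(0);
        printf("status = %#lx\n", atomic_load(&status));        /* 0x4 */
        clear_bits_racy(0x4);
        printf("status = %#lx\n", atomic_load(&status));        /* 0x0 */
        return 0;
}
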
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 559225029740..da314be0c048 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3367,35 +3367,50 @@ static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
3367 return nf_tables_fill_setelem(args->skb, set, elem); 3367 return nf_tables_fill_setelem(args->skb, set, elem);
3368} 3368}
3369 3369
3370struct nft_set_dump_ctx {
3371 const struct nft_set *set;
3372 struct nft_ctx ctx;
3373};
3374
3370static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb) 3375static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3371{ 3376{
3377 struct nft_set_dump_ctx *dump_ctx = cb->data;
3372 struct net *net = sock_net(skb->sk); 3378 struct net *net = sock_net(skb->sk);
3373 u8 genmask = nft_genmask_cur(net); 3379 struct nft_af_info *afi;
3380 struct nft_table *table;
3374 struct nft_set *set; 3381 struct nft_set *set;
3375 struct nft_set_dump_args args; 3382 struct nft_set_dump_args args;
3376 struct nft_ctx ctx; 3383 bool set_found = false;
3377 struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
3378 struct nfgenmsg *nfmsg; 3384 struct nfgenmsg *nfmsg;
3379 struct nlmsghdr *nlh; 3385 struct nlmsghdr *nlh;
3380 struct nlattr *nest; 3386 struct nlattr *nest;
3381 u32 portid, seq; 3387 u32 portid, seq;
3382 int event, err; 3388 int event;
3383 3389
3384 err = nlmsg_parse(cb->nlh, sizeof(struct nfgenmsg), nla, 3390 rcu_read_lock();
3385 NFTA_SET_ELEM_LIST_MAX, nft_set_elem_list_policy, 3391 list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
3386 NULL); 3392 if (afi != dump_ctx->ctx.afi)
3387 if (err < 0) 3393 continue;
3388 return err;
3389 3394
3390 err = nft_ctx_init_from_elemattr(&ctx, net, cb->skb, cb->nlh, 3395 list_for_each_entry_rcu(table, &afi->tables, list) {
3391 (void *)nla, genmask); 3396 if (table != dump_ctx->ctx.table)
3392 if (err < 0) 3397 continue;
3393 return err;
3394 3398
3395 set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_ELEM_LIST_SET], 3399 list_for_each_entry_rcu(set, &table->sets, list) {
3396 genmask); 3400 if (set == dump_ctx->set) {
3397 if (IS_ERR(set)) 3401 set_found = true;
3398 return PTR_ERR(set); 3402 break;
3403 }
3404 }
3405 break;
3406 }
3407 break;
3408 }
3409
3410 if (!set_found) {
3411 rcu_read_unlock();
3412 return -ENOENT;
3413 }
3399 3414
3400 event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM); 3415 event = nfnl_msg_type(NFNL_SUBSYS_NFTABLES, NFT_MSG_NEWSETELEM);
3401 portid = NETLINK_CB(cb->skb).portid; 3416 portid = NETLINK_CB(cb->skb).portid;
@@ -3407,11 +3422,11 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3407 goto nla_put_failure; 3422 goto nla_put_failure;
3408 3423
3409 nfmsg = nlmsg_data(nlh); 3424 nfmsg = nlmsg_data(nlh);
3410 nfmsg->nfgen_family = ctx.afi->family; 3425 nfmsg->nfgen_family = afi->family;
3411 nfmsg->version = NFNETLINK_V0; 3426 nfmsg->version = NFNETLINK_V0;
3412 nfmsg->res_id = htons(ctx.net->nft.base_seq & 0xffff); 3427 nfmsg->res_id = htons(net->nft.base_seq & 0xffff);
3413 3428
3414 if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, ctx.table->name)) 3429 if (nla_put_string(skb, NFTA_SET_ELEM_LIST_TABLE, table->name))
3415 goto nla_put_failure; 3430 goto nla_put_failure;
3416 if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name)) 3431 if (nla_put_string(skb, NFTA_SET_ELEM_LIST_SET, set->name))
3417 goto nla_put_failure; 3432 goto nla_put_failure;
@@ -3422,12 +3437,13 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3422 3437
3423 args.cb = cb; 3438 args.cb = cb;
3424 args.skb = skb; 3439 args.skb = skb;
3425 args.iter.genmask = nft_genmask_cur(ctx.net); 3440 args.iter.genmask = nft_genmask_cur(net);
3426 args.iter.skip = cb->args[0]; 3441 args.iter.skip = cb->args[0];
3427 args.iter.count = 0; 3442 args.iter.count = 0;
3428 args.iter.err = 0; 3443 args.iter.err = 0;
3429 args.iter.fn = nf_tables_dump_setelem; 3444 args.iter.fn = nf_tables_dump_setelem;
3430 set->ops->walk(&ctx, set, &args.iter); 3445 set->ops->walk(&dump_ctx->ctx, set, &args.iter);
3446 rcu_read_unlock();
3431 3447
3432 nla_nest_end(skb, nest); 3448 nla_nest_end(skb, nest);
3433 nlmsg_end(skb, nlh); 3449 nlmsg_end(skb, nlh);
@@ -3441,9 +3457,16 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
3441 return skb->len; 3457 return skb->len;
3442 3458
3443nla_put_failure: 3459nla_put_failure:
3460 rcu_read_unlock();
3444 return -ENOSPC; 3461 return -ENOSPC;
3445} 3462}
3446 3463
3464static int nf_tables_dump_set_done(struct netlink_callback *cb)
3465{
3466 kfree(cb->data);
3467 return 0;
3468}
3469
3447static int nf_tables_getsetelem(struct net *net, struct sock *nlsk, 3470static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
3448 struct sk_buff *skb, const struct nlmsghdr *nlh, 3471 struct sk_buff *skb, const struct nlmsghdr *nlh,
3449 const struct nlattr * const nla[]) 3472 const struct nlattr * const nla[])
@@ -3465,7 +3488,18 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
3465 if (nlh->nlmsg_flags & NLM_F_DUMP) { 3488 if (nlh->nlmsg_flags & NLM_F_DUMP) {
3466 struct netlink_dump_control c = { 3489 struct netlink_dump_control c = {
3467 .dump = nf_tables_dump_set, 3490 .dump = nf_tables_dump_set,
3491 .done = nf_tables_dump_set_done,
3468 }; 3492 };
3493 struct nft_set_dump_ctx *dump_ctx;
3494
3495 dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_KERNEL);
3496 if (!dump_ctx)
3497 return -ENOMEM;
3498
3499 dump_ctx->set = set;
3500 dump_ctx->ctx = ctx;
3501
3502 c.data = dump_ctx;
3469 return netlink_dump_start(nlsk, skb, nlh, &c); 3503 return netlink_dump_start(nlsk, skb, nlh, &c);
3470 } 3504 }
3471 return -EOPNOTSUPP; 3505 return -EOPNOTSUPP;
@@ -3593,9 +3627,9 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
3593{ 3627{
3594 struct nft_set_ext *ext = nft_set_elem_ext(set, elem); 3628 struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
3595 3629
3596 nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); 3630 nft_data_release(nft_set_ext_key(ext), NFT_DATA_VALUE);
3597 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) 3631 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
3598 nft_data_uninit(nft_set_ext_data(ext), set->dtype); 3632 nft_data_release(nft_set_ext_data(ext), set->dtype);
3599 if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) 3633 if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
3600 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); 3634 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
3601 if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) 3635 if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
@@ -3604,6 +3638,18 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem,
3604} 3638}
3605EXPORT_SYMBOL_GPL(nft_set_elem_destroy); 3639EXPORT_SYMBOL_GPL(nft_set_elem_destroy);
3606 3640
3641/* Only called from commit path, nft_set_elem_deactivate() already deals with
3642 * the refcounting from the preparation phase.
3643 */
3644static void nf_tables_set_elem_destroy(const struct nft_set *set, void *elem)
3645{
3646 struct nft_set_ext *ext = nft_set_elem_ext(set, elem);
3647
3648 if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR))
3649 nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext));
3650 kfree(elem);
3651}
3652
3607static int nft_setelem_parse_flags(const struct nft_set *set, 3653static int nft_setelem_parse_flags(const struct nft_set *set,
3608 const struct nlattr *attr, u32 *flags) 3654 const struct nlattr *attr, u32 *flags)
3609{ 3655{
@@ -3815,9 +3861,9 @@ err4:
3815 kfree(elem.priv); 3861 kfree(elem.priv);
3816err3: 3862err3:
3817 if (nla[NFTA_SET_ELEM_DATA] != NULL) 3863 if (nla[NFTA_SET_ELEM_DATA] != NULL)
3818 nft_data_uninit(&data, d2.type); 3864 nft_data_release(&data, d2.type);
3819err2: 3865err2:
3820 nft_data_uninit(&elem.key.val, d1.type); 3866 nft_data_release(&elem.key.val, d1.type);
3821err1: 3867err1:
3822 return err; 3868 return err;
3823} 3869}
@@ -3862,6 +3908,53 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
3862 return err; 3908 return err;
3863} 3909}
3864 3910
3911/**
3912 * nft_data_hold - hold a nft_data item
3913 *
3914 * @data: struct nft_data to release
3915 * @type: type of data
3916 *
3917 * Hold a nft_data item. NFT_DATA_VALUE types can be silently discarded,
3918 * NFT_DATA_VERDICT bumps the reference to chains in case of NFT_JUMP and
3919 * NFT_GOTO verdicts. This function must be called on active data objects
3920 * from the second phase of the commit protocol.
3921 */
3922static void nft_data_hold(const struct nft_data *data, enum nft_data_types type)
3923{
3924 if (type == NFT_DATA_VERDICT) {
3925 switch (data->verdict.code) {
3926 case NFT_JUMP:
3927 case NFT_GOTO:
3928 data->verdict.chain->use++;
3929 break;
3930 }
3931 }
3932}
3933
3934static void nft_set_elem_activate(const struct net *net,
3935 const struct nft_set *set,
3936 struct nft_set_elem *elem)
3937{
3938 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
3939
3940 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
3941 nft_data_hold(nft_set_ext_data(ext), set->dtype);
3942 if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
3943 (*nft_set_ext_obj(ext))->use++;
3944}
3945
3946static void nft_set_elem_deactivate(const struct net *net,
3947 const struct nft_set *set,
3948 struct nft_set_elem *elem)
3949{
3950 const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
3951
3952 if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA))
3953 nft_data_release(nft_set_ext_data(ext), set->dtype);
3954 if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF))
3955 (*nft_set_ext_obj(ext))->use--;
3956}
3957
3865static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, 3958static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
3866 const struct nlattr *attr) 3959 const struct nlattr *attr)
3867{ 3960{
@@ -3927,6 +4020,8 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
3927 kfree(elem.priv); 4020 kfree(elem.priv);
3928 elem.priv = priv; 4021 elem.priv = priv;
3929 4022
4023 nft_set_elem_deactivate(ctx->net, set, &elem);
4024
3930 nft_trans_elem(trans) = elem; 4025 nft_trans_elem(trans) = elem;
3931 list_add_tail(&trans->list, &ctx->net->nft.commit_list); 4026 list_add_tail(&trans->list, &ctx->net->nft.commit_list);
3932 return 0; 4027 return 0;
@@ -3936,7 +4031,7 @@ err4:
3936err3: 4031err3:
3937 kfree(elem.priv); 4032 kfree(elem.priv);
3938err2: 4033err2:
3939 nft_data_uninit(&elem.key.val, desc.type); 4034 nft_data_release(&elem.key.val, desc.type);
3940err1: 4035err1:
3941 return err; 4036 return err;
3942} 4037}
@@ -4743,8 +4838,8 @@ static void nf_tables_commit_release(struct nft_trans *trans)
4743 nft_set_destroy(nft_trans_set(trans)); 4838 nft_set_destroy(nft_trans_set(trans));
4744 break; 4839 break;
4745 case NFT_MSG_DELSETELEM: 4840 case NFT_MSG_DELSETELEM:
4746 nft_set_elem_destroy(nft_trans_elem_set(trans), 4841 nf_tables_set_elem_destroy(nft_trans_elem_set(trans),
4747 nft_trans_elem(trans).priv, true); 4842 nft_trans_elem(trans).priv);
4748 break; 4843 break;
4749 case NFT_MSG_DELOBJ: 4844 case NFT_MSG_DELOBJ:
4750 nft_obj_destroy(nft_trans_obj(trans)); 4845 nft_obj_destroy(nft_trans_obj(trans));
@@ -4979,6 +5074,7 @@ static int nf_tables_abort(struct net *net, struct sk_buff *skb)
4979 case NFT_MSG_DELSETELEM: 5074 case NFT_MSG_DELSETELEM:
4980 te = (struct nft_trans_elem *)trans->data; 5075 te = (struct nft_trans_elem *)trans->data;
4981 5076
5077 nft_set_elem_activate(net, te->set, &te->elem);
4982 te->set->ops->activate(net, te->set, &te->elem); 5078 te->set->ops->activate(net, te->set, &te->elem);
4983 te->set->ndeact--; 5079 te->set->ndeact--;
4984 5080
@@ -5464,7 +5560,7 @@ int nft_data_init(const struct nft_ctx *ctx,
5464EXPORT_SYMBOL_GPL(nft_data_init); 5560EXPORT_SYMBOL_GPL(nft_data_init);
5465 5561
5466/** 5562/**
5467 * nft_data_uninit - release a nft_data item 5563 * nft_data_release - release a nft_data item
5468 * 5564 *
5469 * @data: struct nft_data to release 5565 * @data: struct nft_data to release
5470 * @type: type of data 5566 * @type: type of data
@@ -5472,7 +5568,7 @@ EXPORT_SYMBOL_GPL(nft_data_init);
5472 * Release a nft_data item. NFT_DATA_VALUE types can be silently discarded, 5568 * Release a nft_data item. NFT_DATA_VALUE types can be silently discarded,
5473 * all others need to be released by calling this function. 5569 * all others need to be released by calling this function.
5474 */ 5570 */
5475void nft_data_uninit(const struct nft_data *data, enum nft_data_types type) 5571void nft_data_release(const struct nft_data *data, enum nft_data_types type)
5476{ 5572{
5477 if (type < NFT_DATA_VERDICT) 5573 if (type < NFT_DATA_VERDICT)
5478 return; 5574 return;
@@ -5483,7 +5579,7 @@ void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
5483 WARN_ON(1); 5579 WARN_ON(1);
5484 } 5580 }
5485} 5581}
5486EXPORT_SYMBOL_GPL(nft_data_uninit); 5582EXPORT_SYMBOL_GPL(nft_data_release);
5487 5583
5488int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data, 5584int nft_data_dump(struct sk_buff *skb, int attr, const struct nft_data *data,
5489 enum nft_data_types type, unsigned int len) 5585 enum nft_data_types type, unsigned int len)
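
Among the nf_tables changes, the dump rework is worth a note: nf_tables_dump_set() no longer re-parses netlink attributes on every continuation. The GETSETELEM handler allocates a small context up front, hands it over through cb->data, and a .done callback frees it when the dump finishes. The shape of that pattern, as a hedged plain-C sketch with invented names:

#include <stdlib.h>
#include <stdio.h>

struct dump_ctx { int set_id; };        /* stands in for nft_set_dump_ctx */

struct callback {
        void *data;
        int (*dump)(struct callback *cb);
        int (*done)(struct callback *cb);
};

static int my_dump(struct callback *cb)
{
        struct dump_ctx *ctx = cb->data;

        printf("dumping set %d\n", ctx->set_id);        /* walk the set here */
        return 0;                                       /* 0: nothing left */
}

static int my_done(struct callback *cb)
{
        free(cb->data);                 /* mirrors nf_tables_dump_set_done() */
        return 0;
}

static int start_dump(struct callback *cb, int set_id)
{
        struct dump_ctx *ctx = malloc(sizeof(*ctx));

        if (!ctx)
                return -1;              /* -ENOMEM in the kernel path */
        ctx->set_id = set_id;
        cb->data = ctx;
        cb->dump = my_dump;
        cb->done = my_done;
        return 0;
}

int main(void)
{
        struct callback cb;

        if (start_dump(&cb, 7))
                return 1;
        while (cb.dump(&cb) > 0)
                ;                       /* the core would call this repeatedly */
        cb.done(&cb);
        return 0;
}
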
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c
index 950bf6eadc65..be678a323598 100644
--- a/net/netfilter/nfnetlink_cthelper.c
+++ b/net/netfilter/nfnetlink_cthelper.c
@@ -686,6 +686,7 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
686 tuple_set = true; 686 tuple_set = true;
687 } 687 }
688 688
689 ret = -ENOENT;
689 list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) { 690 list_for_each_entry_safe(nlcth, n, &nfnl_cthelper_list, list) {
690 cur = &nlcth->helper; 691 cur = &nlcth->helper;
691 j++; 692 j++;
@@ -699,16 +700,20 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl,
699 tuple.dst.protonum != cur->tuple.dst.protonum)) 700 tuple.dst.protonum != cur->tuple.dst.protonum))
700 continue; 701 continue;
701 702
702 found = true; 703 if (refcount_dec_if_one(&cur->refcnt)) {
703 nf_conntrack_helper_unregister(cur); 704 found = true;
704 kfree(cur->expect_policy); 705 nf_conntrack_helper_unregister(cur);
706 kfree(cur->expect_policy);
705 707
706 list_del(&nlcth->list); 708 list_del(&nlcth->list);
707 kfree(nlcth); 709 kfree(nlcth);
710 } else {
711 ret = -EBUSY;
712 }
708 } 713 }
709 714
710 /* Make sure we return success if we flush and there is no helpers */ 715 /* Make sure we return success if we flush and there is no helpers */
711 return (found || j == 0) ? 0 : -ENOENT; 716 return (found || j == 0) ? 0 : ret;
712} 717}
713 718
714static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = { 719static const struct nla_policy nfnl_cthelper_policy[NFCTH_MAX+1] = {
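
The delete path now only tears a helper down when the caller holds the last reference: refcount_dec_if_one() drops 1 -> 0 atomically and fails otherwise, in which case the request returns -EBUSY instead of freeing a helper that is still in use (and the new ret = -ENOENT default means a non-matching filter reports "not found"). The primitive itself is a single compare-and-swap; a hedged C11 sketch:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Succeeds only if the count is exactly 1, i.e. the caller owns the
 * last reference; otherwise the count is left untouched. */
static bool refcount_dec_if_one(atomic_uint *ref)
{
        unsigned int one = 1;

        return atomic_compare_exchange_strong(ref, &one, 0);
}

int main(void)
{
        atomic_uint ref;

        atomic_store(&ref, 2);
        printf("%d\n", refcount_dec_if_one(&ref));      /* 0: still in use */
        atomic_store(&ref, 1);
        printf("%d\n", refcount_dec_if_one(&ref));      /* 1: safe to free */
        return 0;
}
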
diff --git a/net/netfilter/nft_bitwise.c b/net/netfilter/nft_bitwise.c
index 877d9acd91ef..fff8073e2a56 100644
--- a/net/netfilter/nft_bitwise.c
+++ b/net/netfilter/nft_bitwise.c
@@ -83,17 +83,26 @@ static int nft_bitwise_init(const struct nft_ctx *ctx,
83 tb[NFTA_BITWISE_MASK]); 83 tb[NFTA_BITWISE_MASK]);
84 if (err < 0) 84 if (err < 0)
85 return err; 85 return err;
86 if (d1.len != priv->len) 86 if (d1.len != priv->len) {
87 return -EINVAL; 87 err = -EINVAL;
88 goto err1;
89 }
88 90
89 err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &d2, 91 err = nft_data_init(NULL, &priv->xor, sizeof(priv->xor), &d2,
90 tb[NFTA_BITWISE_XOR]); 92 tb[NFTA_BITWISE_XOR]);
91 if (err < 0) 93 if (err < 0)
92 return err; 94 goto err1;
93 if (d2.len != priv->len) 95 if (d2.len != priv->len) {
94 return -EINVAL; 96 err = -EINVAL;
97 goto err2;
98 }
95 99
96 return 0; 100 return 0;
101err2:
102 nft_data_release(&priv->xor, d2.type);
103err1:
104 nft_data_release(&priv->mask, d1.type);
105 return err;
97} 106}
98 107
99static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr) 108static int nft_bitwise_dump(struct sk_buff *skb, const struct nft_expr *expr)
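
nft_bitwise_init() previously leaked priv->mask whenever a later check on the xor data failed; the new labels unwind in reverse order of initialisation. A generic sketch of that goto-unwind shape (plain C, invented helpers, not the nft_data API):

#include <stdlib.h>

struct buf { void *p; size_t len; };

static int buf_init(struct buf *b, size_t len)
{
        b->p = malloc(len);
        b->len = len;
        return b->p ? 0 : -1;
}

static void buf_release(struct buf *b)
{
        free(b->p);
        b->p = NULL;
}

static int bitwise_init(struct buf *mask, struct buf *xor_data,
                        size_t mask_len, size_t xor_len, size_t expected_len)
{
        int err;

        err = buf_init(mask, mask_len);
        if (err)
                return err;             /* nothing to undo yet */
        if (mask->len != expected_len) {        /* the d1.len check */
                err = -1;
                goto err1;
        }

        err = buf_init(xor_data, xor_len);
        if (err)
                goto err1;              /* undo the mask only */
        if (xor_data->len != expected_len) {    /* the d2.len check */
                err = -1;
                goto err2;              /* undo xor, then fall through to mask */
        }
        return 0;

err2:
        buf_release(xor_data);
err1:
        buf_release(mask);
        return err;
}

int main(void)
{
        struct buf mask, xor_data;

        if (bitwise_init(&mask, &xor_data, 16, 16, 16))
                return 1;
        buf_release(&xor_data);
        buf_release(&mask);
        return 0;
}

The nft_cmp and nft_range hunks below follow the same rule: whatever nft_data_init() set up must be released on every failure path.
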
diff --git a/net/netfilter/nft_cmp.c b/net/netfilter/nft_cmp.c
index 2b96effeadc1..c2945eb3397c 100644
--- a/net/netfilter/nft_cmp.c
+++ b/net/netfilter/nft_cmp.c
@@ -201,10 +201,18 @@ nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
201 if (err < 0) 201 if (err < 0)
202 return ERR_PTR(err); 202 return ERR_PTR(err);
203 203
204 if (desc.type != NFT_DATA_VALUE) {
205 err = -EINVAL;
206 goto err1;
207 }
208
204 if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ) 209 if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ)
205 return &nft_cmp_fast_ops; 210 return &nft_cmp_fast_ops;
206 else 211
207 return &nft_cmp_ops; 212 return &nft_cmp_ops;
213err1:
214 nft_data_release(&data, desc.type);
215 return ERR_PTR(-EINVAL);
208} 216}
209 217
210struct nft_expr_type nft_cmp_type __read_mostly = { 218struct nft_expr_type nft_cmp_type __read_mostly = {
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index a34ceb38fc55..1678e9e75e8e 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -826,9 +826,9 @@ static void nft_ct_helper_obj_destroy(struct nft_object *obj)
826 struct nft_ct_helper_obj *priv = nft_obj_data(obj); 826 struct nft_ct_helper_obj *priv = nft_obj_data(obj);
827 827
828 if (priv->helper4) 828 if (priv->helper4)
829 module_put(priv->helper4->me); 829 nf_conntrack_helper_put(priv->helper4);
830 if (priv->helper6) 830 if (priv->helper6)
831 module_put(priv->helper6->me); 831 nf_conntrack_helper_put(priv->helper6);
832} 832}
833 833
834static void nft_ct_helper_obj_eval(struct nft_object *obj, 834static void nft_ct_helper_obj_eval(struct nft_object *obj,
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 728baf88295a..4717d7796927 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -65,7 +65,7 @@ static int nft_immediate_init(const struct nft_ctx *ctx,
65 return 0; 65 return 0;
66 66
67err1: 67err1:
68 nft_data_uninit(&priv->data, desc.type); 68 nft_data_release(&priv->data, desc.type);
69 return err; 69 return err;
70} 70}
71 71
@@ -73,7 +73,8 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx,
73 const struct nft_expr *expr) 73 const struct nft_expr *expr)
74{ 74{
75 const struct nft_immediate_expr *priv = nft_expr_priv(expr); 75 const struct nft_immediate_expr *priv = nft_expr_priv(expr);
76 return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg)); 76
77 return nft_data_release(&priv->data, nft_dreg_to_type(priv->dreg));
77} 78}
78 79
79static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr) 80static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr)
diff --git a/net/netfilter/nft_range.c b/net/netfilter/nft_range.c
index 9edc74eedc10..cedb96c3619f 100644
--- a/net/netfilter/nft_range.c
+++ b/net/netfilter/nft_range.c
@@ -102,9 +102,9 @@ static int nft_range_init(const struct nft_ctx *ctx, const struct nft_expr *expr
102 priv->len = desc_from.len; 102 priv->len = desc_from.len;
103 return 0; 103 return 0;
104err2: 104err2:
105 nft_data_uninit(&priv->data_to, desc_to.type); 105 nft_data_release(&priv->data_to, desc_to.type);
106err1: 106err1:
107 nft_data_uninit(&priv->data_from, desc_from.type); 107 nft_data_release(&priv->data_from, desc_from.type);
108 return err; 108 return err;
109} 109}
110 110
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 8ec086b6b56b..3d3a6df4ce70 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -222,7 +222,7 @@ static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
222 struct nft_set_elem elem; 222 struct nft_set_elem elem;
223 int err; 223 int err;
224 224
225 err = rhashtable_walk_init(&priv->ht, &hti, GFP_KERNEL); 225 err = rhashtable_walk_init(&priv->ht, &hti, GFP_ATOMIC);
226 iter->err = err; 226 iter->err = err;
227 if (err) 227 if (err)
228 return; 228 return;
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index e97e2fb53f0a..fbdbaa00dd5f 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -116,17 +116,17 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
116 else if (d > 0) 116 else if (d > 0)
117 p = &parent->rb_right; 117 p = &parent->rb_right;
118 else { 118 else {
119 if (nft_set_elem_active(&rbe->ext, genmask)) { 119 if (nft_rbtree_interval_end(rbe) &&
120 if (nft_rbtree_interval_end(rbe) && 120 !nft_rbtree_interval_end(new)) {
121 !nft_rbtree_interval_end(new)) 121 p = &parent->rb_left;
122 p = &parent->rb_left; 122 } else if (!nft_rbtree_interval_end(rbe) &&
123 else if (!nft_rbtree_interval_end(rbe) && 123 nft_rbtree_interval_end(new)) {
124 nft_rbtree_interval_end(new)) 124 p = &parent->rb_right;
125 p = &parent->rb_right; 125 } else if (nft_set_elem_active(&rbe->ext, genmask)) {
126 else { 126 *ext = &rbe->ext;
127 *ext = &rbe->ext; 127 return -EEXIST;
128 return -EEXIST; 128 } else {
129 } 129 p = &parent->rb_left;
130 } 130 }
131 } 131 }
132 } 132 }
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8876b7da6884..1770c1d9b37f 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -283,28 +283,30 @@ static int xt_obj_to_user(u16 __user *psize, u16 size,
283 &U->u.user.revision, K->u.kernel.TYPE->revision) 283 &U->u.user.revision, K->u.kernel.TYPE->revision)
284 284
285int xt_data_to_user(void __user *dst, const void *src, 285int xt_data_to_user(void __user *dst, const void *src,
286 int usersize, int size) 286 int usersize, int size, int aligned_size)
287{ 287{
288 usersize = usersize ? : size; 288 usersize = usersize ? : size;
289 if (copy_to_user(dst, src, usersize)) 289 if (copy_to_user(dst, src, usersize))
290 return -EFAULT; 290 return -EFAULT;
291 if (usersize != size && clear_user(dst + usersize, size - usersize)) 291 if (usersize != aligned_size &&
292 clear_user(dst + usersize, aligned_size - usersize))
292 return -EFAULT; 293 return -EFAULT;
293 294
294 return 0; 295 return 0;
295} 296}
296EXPORT_SYMBOL_GPL(xt_data_to_user); 297EXPORT_SYMBOL_GPL(xt_data_to_user);
297 298
298#define XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \ 299#define XT_DATA_TO_USER(U, K, TYPE) \
299 xt_data_to_user(U->data, K->data, \ 300 xt_data_to_user(U->data, K->data, \
300 K->u.kernel.TYPE->usersize, \ 301 K->u.kernel.TYPE->usersize, \
301 C_SIZE ? : K->u.kernel.TYPE->TYPE##size) 302 K->u.kernel.TYPE->TYPE##size, \
303 XT_ALIGN(K->u.kernel.TYPE->TYPE##size))
302 304
303int xt_match_to_user(const struct xt_entry_match *m, 305int xt_match_to_user(const struct xt_entry_match *m,
304 struct xt_entry_match __user *u) 306 struct xt_entry_match __user *u)
305{ 307{
306 return XT_OBJ_TO_USER(u, m, match, 0) || 308 return XT_OBJ_TO_USER(u, m, match, 0) ||
307 XT_DATA_TO_USER(u, m, match, 0); 309 XT_DATA_TO_USER(u, m, match);
308} 310}
309EXPORT_SYMBOL_GPL(xt_match_to_user); 311EXPORT_SYMBOL_GPL(xt_match_to_user);
310 312
@@ -312,7 +314,7 @@ int xt_target_to_user(const struct xt_entry_target *t,
312 struct xt_entry_target __user *u) 314 struct xt_entry_target __user *u)
313{ 315{
314 return XT_OBJ_TO_USER(u, t, target, 0) || 316 return XT_OBJ_TO_USER(u, t, target, 0) ||
315 XT_DATA_TO_USER(u, t, target, 0); 317 XT_DATA_TO_USER(u, t, target);
316} 318}
317EXPORT_SYMBOL_GPL(xt_target_to_user); 319EXPORT_SYMBOL_GPL(xt_target_to_user);
318 320
@@ -611,6 +613,12 @@ void xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
611} 613}
612EXPORT_SYMBOL_GPL(xt_compat_match_from_user); 614EXPORT_SYMBOL_GPL(xt_compat_match_from_user);
613 615
616#define COMPAT_XT_DATA_TO_USER(U, K, TYPE, C_SIZE) \
617 xt_data_to_user(U->data, K->data, \
618 K->u.kernel.TYPE->usersize, \
619 C_SIZE, \
620 COMPAT_XT_ALIGN(C_SIZE))
621
614int xt_compat_match_to_user(const struct xt_entry_match *m, 622int xt_compat_match_to_user(const struct xt_entry_match *m,
615 void __user **dstptr, unsigned int *size) 623 void __user **dstptr, unsigned int *size)
616{ 624{
@@ -626,7 +634,7 @@ int xt_compat_match_to_user(const struct xt_entry_match *m,
626 if (match->compat_to_user((void __user *)cm->data, m->data)) 634 if (match->compat_to_user((void __user *)cm->data, m->data))
627 return -EFAULT; 635 return -EFAULT;
628 } else { 636 } else {
629 if (XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm))) 637 if (COMPAT_XT_DATA_TO_USER(cm, m, match, msize - sizeof(*cm)))
630 return -EFAULT; 638 return -EFAULT;
631 } 639 }
632 640
@@ -972,7 +980,7 @@ int xt_compat_target_to_user(const struct xt_entry_target *t,
972 if (target->compat_to_user((void __user *)ct->data, t->data)) 980 if (target->compat_to_user((void __user *)ct->data, t->data))
973 return -EFAULT; 981 return -EFAULT;
974 } else { 982 } else {
975 if (XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct))) 983 if (COMPAT_XT_DATA_TO_USER(ct, t, target, tsize - sizeof(*ct)))
976 return -EFAULT; 984 return -EFAULT;
977 } 985 }
978 986
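
xt_data_to_user() now distinguishes the object's real size from its XT_ALIGN()/COMPAT_XT_ALIGN() slot size: after copying usersize bytes it clears the rest of the slot, so the padding handed to userspace never carries stale kernel bytes. The core of that, as a userspace sketch (memset standing in for clear_user, an 8-byte stand-in for XT_ALIGN):

#include <string.h>
#include <stdio.h>
#include <stddef.h>

#define SLOT_ALIGN(sz)  (((sz) + 7u) & ~(size_t)7)      /* stand-in for XT_ALIGN */

static int data_to_slot(void *dst, const void *src,
                        size_t usersize, size_t aligned_size)
{
        memcpy(dst, src, usersize);
        if (usersize != aligned_size)           /* zero the padding tail */
                memset((char *)dst + usersize, 0, aligned_size - usersize);
        return 0;
}

int main(void)
{
        char slot[SLOT_ALIGN(5)];
        const char payload[5] = "abcd";

        memset(slot, 0xff, sizeof(slot));       /* pretend it held old data */
        data_to_slot(slot, payload, sizeof(payload), sizeof(slot));
        printf("pad byte after payload: %d\n", slot[5]);        /* 0 */
        return 0;
}
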
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index bb7ad82dcd56..623ef37de886 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -96,7 +96,7 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
96 96
97 help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL); 97 help = nf_ct_helper_ext_add(ct, helper, GFP_KERNEL);
98 if (help == NULL) { 98 if (help == NULL) {
99 module_put(helper->me); 99 nf_conntrack_helper_put(helper);
100 return -ENOMEM; 100 return -ENOMEM;
101 } 101 }
102 102
@@ -263,7 +263,7 @@ out:
263err4: 263err4:
264 help = nfct_help(ct); 264 help = nfct_help(ct);
265 if (help) 265 if (help)
266 module_put(help->helper->me); 266 nf_conntrack_helper_put(help->helper);
267err3: 267err3:
268 nf_ct_tmpl_free(ct); 268 nf_ct_tmpl_free(ct);
269err2: 269err2:
@@ -346,7 +346,7 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par,
346 if (ct) { 346 if (ct) {
347 help = nfct_help(ct); 347 help = nfct_help(ct);
348 if (help) 348 if (help)
349 module_put(help->helper->me); 349 nf_conntrack_helper_put(help->helper);
350 350
351 nf_ct_netns_put(par->net, par->family); 351 nf_ct_netns_put(par->net, par->family);
352 352
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index ee841f00a6ec..7586d446d7dc 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -62,6 +62,7 @@
62#include <asm/cacheflush.h> 62#include <asm/cacheflush.h>
63#include <linux/hash.h> 63#include <linux/hash.h>
64#include <linux/genetlink.h> 64#include <linux/genetlink.h>
65#include <linux/net_namespace.h>
65 66
66#include <net/net_namespace.h> 67#include <net/net_namespace.h>
67#include <net/sock.h> 68#include <net/sock.h>
@@ -1415,7 +1416,8 @@ static void do_one_broadcast(struct sock *sk,
1415 goto out; 1416 goto out;
1416 } 1417 }
1417 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net); 1418 NETLINK_CB(p->skb2).nsid = peernet2id(sock_net(sk), p->net);
1418 NETLINK_CB(p->skb2).nsid_is_set = true; 1419 if (NETLINK_CB(p->skb2).nsid != NETNSA_NSID_NOT_ASSIGNED)
1420 NETLINK_CB(p->skb2).nsid_is_set = true;
1419 val = netlink_broadcast_deliver(sk, p->skb2); 1421 val = netlink_broadcast_deliver(sk, p->skb2);
1420 if (val < 0) { 1422 if (val < 0) {
1421 netlink_overrun(sk); 1423 netlink_overrun(sk);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index bf602e33c40a..08679ebb3068 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -1123,7 +1123,7 @@ static int ovs_ct_add_helper(struct ovs_conntrack_info *info, const char *name,
1123 1123
1124 help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL); 1124 help = nf_ct_helper_ext_add(info->ct, helper, GFP_KERNEL);
1125 if (!help) { 1125 if (!help) {
1126 module_put(helper->me); 1126 nf_conntrack_helper_put(helper);
1127 return -ENOMEM; 1127 return -ENOMEM;
1128 } 1128 }
1129 1129
@@ -1584,7 +1584,7 @@ void ovs_ct_free_action(const struct nlattr *a)
1584static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info) 1584static void __ovs_ct_free_action(struct ovs_conntrack_info *ct_info)
1585{ 1585{
1586 if (ct_info->helper) 1586 if (ct_info->helper)
1587 module_put(ct_info->helper->me); 1587 nf_conntrack_helper_put(ct_info->helper);
1588 if (ct_info->ct) 1588 if (ct_info->ct)
1589 nf_ct_tmpl_free(ct_info->ct); 1589 nf_ct_tmpl_free(ct_info->ct);
1590} 1590}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index f4001763134d..e3eeed19cc7a 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2658,13 +2658,6 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2658 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex); 2658 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2659 } 2659 }
2660 2660
2661 sockc.tsflags = po->sk.sk_tsflags;
2662 if (msg->msg_controllen) {
2663 err = sock_cmsg_send(&po->sk, msg, &sockc);
2664 if (unlikely(err))
2665 goto out;
2666 }
2667
2668 err = -ENXIO; 2661 err = -ENXIO;
2669 if (unlikely(dev == NULL)) 2662 if (unlikely(dev == NULL))
2670 goto out; 2663 goto out;
@@ -2672,6 +2665,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2672 if (unlikely(!(dev->flags & IFF_UP))) 2665 if (unlikely(!(dev->flags & IFF_UP)))
2673 goto out_put; 2666 goto out_put;
2674 2667
2668 sockc.tsflags = po->sk.sk_tsflags;
2669 if (msg->msg_controllen) {
2670 err = sock_cmsg_send(&po->sk, msg, &sockc);
2671 if (unlikely(err))
2672 goto out_put;
2673 }
2674
2675 if (po->sk.sk_socket->type == SOCK_RAW) 2675 if (po->sk.sk_socket->type == SOCK_RAW)
2676 reserve = dev->hard_header_len; 2676 reserve = dev->hard_header_len;
2677 size_max = po->tx_ring.frame_size 2677 size_max = po->tx_ring.frame_size
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index dee469fed967..51859b8edd7e 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -203,7 +203,6 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
203 203
204 *arg = (unsigned long) head; 204 *arg = (unsigned long) head;
205 rcu_assign_pointer(tp->root, new); 205 rcu_assign_pointer(tp->root, new);
206 call_rcu(&head->rcu, mall_destroy_rcu);
207 return 0; 206 return 0;
208 207
209err_replace_hw_filter: 208err_replace_hw_filter:
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bbe57d57b67f..e88342fde1bc 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1831,6 +1831,12 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
1831 if (!qdisc_dev(root)) 1831 if (!qdisc_dev(root))
1832 return 0; 1832 return 0;
1833 1833
1834 if (tcm->tcm_parent) {
1835 q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent));
1836 if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1837 return -1;
1838 return 0;
1839 }
1834 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) { 1840 hash_for_each(qdisc_dev(root)->qdisc_hash, b, q, hash) {
1835 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) 1841 if (tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0)
1836 return -1; 1842 return -1;
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index a9708da28eb5..95238284c422 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1176,7 +1176,9 @@ void sctp_assoc_update(struct sctp_association *asoc,
1176 1176
1177 asoc->ctsn_ack_point = asoc->next_tsn - 1; 1177 asoc->ctsn_ack_point = asoc->next_tsn - 1;
1178 asoc->adv_peer_ack_point = asoc->ctsn_ack_point; 1178 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1179 if (!asoc->stream) { 1179
1180 if (sctp_state(asoc, COOKIE_WAIT)) {
1181 sctp_stream_free(asoc->stream);
1180 asoc->stream = new->stream; 1182 asoc->stream = new->stream;
1181 new->stream = NULL; 1183 new->stream = NULL;
1182 } 1184 }
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 0e06a278d2a9..ba9ad32fc447 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -473,15 +473,14 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
473 struct sctp_association **app, 473 struct sctp_association **app,
474 struct sctp_transport **tpp) 474 struct sctp_transport **tpp)
475{ 475{
476 struct sctp_init_chunk *chunkhdr, _chunkhdr;
476 union sctp_addr saddr; 477 union sctp_addr saddr;
477 union sctp_addr daddr; 478 union sctp_addr daddr;
478 struct sctp_af *af; 479 struct sctp_af *af;
479 struct sock *sk = NULL; 480 struct sock *sk = NULL;
480 struct sctp_association *asoc; 481 struct sctp_association *asoc;
481 struct sctp_transport *transport = NULL; 482 struct sctp_transport *transport = NULL;
482 struct sctp_init_chunk *chunkhdr;
483 __u32 vtag = ntohl(sctphdr->vtag); 483 __u32 vtag = ntohl(sctphdr->vtag);
484 int len = skb->len - ((void *)sctphdr - (void *)skb->data);
485 484
486 *app = NULL; *tpp = NULL; 485 *app = NULL; *tpp = NULL;
487 486
@@ -516,13 +515,16 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
516 * discard the packet. 515 * discard the packet.
517 */ 516 */
518 if (vtag == 0) { 517 if (vtag == 0) {
519 chunkhdr = (void *)sctphdr + sizeof(struct sctphdr); 518 /* chunk header + first 4 octects of init header */
520 if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t) 519 chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
521 + sizeof(__be32) || 520 sizeof(struct sctphdr),
521 sizeof(struct sctp_chunkhdr) +
522 sizeof(__be32), &_chunkhdr);
523 if (!chunkhdr ||
522 chunkhdr->chunk_hdr.type != SCTP_CID_INIT || 524 chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
523 ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) { 525 ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
524 goto out; 526 goto out;
525 } 527
526 } else if (vtag != asoc->c.peer_vtag) { 528 } else if (vtag != asoc->c.peer_vtag) {
527 goto out; 529 goto out;
528 } 530 }
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 961ee59f696a..f5b45b8b8b16 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -240,12 +240,10 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
240 struct sctp_bind_addr *bp; 240 struct sctp_bind_addr *bp;
241 struct ipv6_pinfo *np = inet6_sk(sk); 241 struct ipv6_pinfo *np = inet6_sk(sk);
242 struct sctp_sockaddr_entry *laddr; 242 struct sctp_sockaddr_entry *laddr;
243 union sctp_addr *baddr = NULL;
244 union sctp_addr *daddr = &t->ipaddr; 243 union sctp_addr *daddr = &t->ipaddr;
245 union sctp_addr dst_saddr; 244 union sctp_addr dst_saddr;
246 struct in6_addr *final_p, final; 245 struct in6_addr *final_p, final;
247 __u8 matchlen = 0; 246 __u8 matchlen = 0;
248 __u8 bmatchlen;
249 sctp_scope_t scope; 247 sctp_scope_t scope;
250 248
251 memset(fl6, 0, sizeof(struct flowi6)); 249 memset(fl6, 0, sizeof(struct flowi6));
@@ -312,23 +310,37 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
312 */ 310 */
313 rcu_read_lock(); 311 rcu_read_lock();
314 list_for_each_entry_rcu(laddr, &bp->address_list, list) { 312 list_for_each_entry_rcu(laddr, &bp->address_list, list) {
315 if (!laddr->valid) 313 struct dst_entry *bdst;
314 __u8 bmatchlen;
315
316 if (!laddr->valid ||
317 laddr->state != SCTP_ADDR_SRC ||
318 laddr->a.sa.sa_family != AF_INET6 ||
319 scope > sctp_scope(&laddr->a))
316 continue; 320 continue;
317 if ((laddr->state == SCTP_ADDR_SRC) && 321
318 (laddr->a.sa.sa_family == AF_INET6) && 322 fl6->saddr = laddr->a.v6.sin6_addr;
319 (scope <= sctp_scope(&laddr->a))) { 323 fl6->fl6_sport = laddr->a.v6.sin6_port;
320 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
321 if (!baddr || (matchlen < bmatchlen)) {
322 baddr = &laddr->a;
323 matchlen = bmatchlen;
324 }
325 }
326 }
327 if (baddr) {
328 fl6->saddr = baddr->v6.sin6_addr;
329 fl6->fl6_sport = baddr->v6.sin6_port;
330 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final); 324 final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
331 dst = ip6_dst_lookup_flow(sk, fl6, final_p); 325 bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
326
327 if (!IS_ERR(bdst) &&
328 ipv6_chk_addr(dev_net(bdst->dev),
329 &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
330 if (!IS_ERR_OR_NULL(dst))
331 dst_release(dst);
332 dst = bdst;
333 break;
334 }
335
336 bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
337 if (matchlen > bmatchlen)
338 continue;
339
340 if (!IS_ERR_OR_NULL(dst))
341 dst_release(dst);
342 dst = bdst;
343 matchlen = bmatchlen;
332 } 344 }
333 rcu_read_unlock(); 345 rcu_read_unlock();
334 346
@@ -665,6 +677,9 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
665 newnp = inet6_sk(newsk); 677 newnp = inet6_sk(newsk);
666 678
667 memcpy(newnp, np, sizeof(struct ipv6_pinfo)); 679 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
680 newnp->ipv6_mc_list = NULL;
681 newnp->ipv6_ac_list = NULL;
682 newnp->ipv6_fl_list = NULL;
668 683
669 rcu_read_lock(); 684 rcu_read_lock();
670 opt = rcu_dereference(np->opt); 685 opt = rcu_dereference(np->opt);
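
sctp_v6_get_dst() no longer picks a source address first and looks up a route afterwards: every candidate address gets its own route lookup, an address that verifies on the resulting route's device wins immediately, otherwise the longest prefix match is kept, and the previously held route is dropped each time a better candidate replaces it. A hedged sketch of that keep-the-best-and-release loop (plain C, malloc/free standing in for route refcounting):

#include <stdlib.h>

struct route { int prefix_match; };     /* stands in for a dst_entry */

/* Hypothetical per-candidate lookup; may fail. */
static struct route *lookup_route(int candidate)
{
        struct route *rt = malloc(sizeof(*rt));

        if (rt)
                rt->prefix_match = candidate & 0xf;
        return rt;
}

static struct route *pick_best_route(const int *candidates, int n)
{
        struct route *best = NULL;
        int best_match = -1;

        for (int i = 0; i < n; i++) {
                struct route *rt = lookup_route(candidates[i]);

                if (!rt)
                        continue;
                if (rt->prefix_match <= best_match) {
                        free(rt);               /* not better: drop it */
                        continue;
                }
                free(best);                     /* release the route we held */
                best = rt;
                best_match = rt->prefix_match;
        }
        return best;
}

int main(void)
{
        int candidates[] = { 0x3, 0x9, 0x5 };
        struct route *rt = pick_best_route(candidates, 3);

        free(rt);
        return 0;
}
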
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 8a08f13469c4..92e332e17391 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2454,16 +2454,11 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
2454 * stream sequence number shall be set to 0. 2454 * stream sequence number shall be set to 0.
2455 */ 2455 */
2456 2456
2457 /* Allocate storage for the negotiated streams if it is not a temporary 2457 if (sctp_stream_init(asoc, gfp))
2458 * association. 2458 goto clean_up;
2459 */
2460 if (!asoc->temp) {
2461 if (sctp_stream_init(asoc, gfp))
2462 goto clean_up;
2463 2459
2464 if (sctp_assoc_set_id(asoc, gfp)) 2460 if (!asoc->temp && sctp_assoc_set_id(asoc, gfp))
2465 goto clean_up; 2461 goto clean_up;
2466 }
2467 2462
2468 /* ADDIP Section 4.1 ASCONF Chunk Procedures 2463 /* ADDIP Section 4.1 ASCONF Chunk Procedures
2469 * 2464 *
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 4f5e6cfc7f60..f863b5573e42 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -2088,6 +2088,9 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,
2088 } 2088 }
2089 } 2089 }
2090 2090
2091 /* Set temp so that it won't be added into hashtable */
2092 new_asoc->temp = 1;
2093
2091 /* Compare the tie_tag in cookie with the verification tag of 2094 /* Compare the tie_tag in cookie with the verification tag of
2092 * current association. 2095 * current association.
2093 */ 2096 */
diff --git a/net/smc/Kconfig b/net/smc/Kconfig
index c717ef0896aa..33954852f3f8 100644
--- a/net/smc/Kconfig
+++ b/net/smc/Kconfig
@@ -8,6 +8,10 @@ config SMC
8 The Linux implementation of the SMC-R solution is designed as 8 The Linux implementation of the SMC-R solution is designed as
9 a separate socket family SMC. 9 a separate socket family SMC.
10 10
11 Warning: SMC will expose all memory for remote reads and writes
12 once a connection is established. Don't enable this option except
13 for tightly controlled lab environment.
14
11 Select this option if you want to run SMC socket applications 15 Select this option if you want to run SMC socket applications
12 16
13config SMC_DIAG 17config SMC_DIAG
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index e41f594a1e1d..03ec058d18df 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -204,7 +204,7 @@ int smc_clc_send_confirm(struct smc_sock *smc)
204 memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN); 204 memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
205 hton24(cclc.qpn, link->roce_qp->qp_num); 205 hton24(cclc.qpn, link->roce_qp->qp_num);
206 cclc.rmb_rkey = 206 cclc.rmb_rkey =
207 htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); 207 htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]);
208 cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */ 208 cclc.conn_idx = 1; /* for now: 1 RMB = 1 RMBE */
209 cclc.rmbe_alert_token = htonl(conn->alert_token_local); 209 cclc.rmbe_alert_token = htonl(conn->alert_token_local);
210 cclc.qp_mtu = min(link->path_mtu, link->peer_mtu); 210 cclc.qp_mtu = min(link->path_mtu, link->peer_mtu);
@@ -256,7 +256,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
256 memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN); 256 memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
257 hton24(aclc.qpn, link->roce_qp->qp_num); 257 hton24(aclc.qpn, link->roce_qp->qp_num);
258 aclc.rmb_rkey = 258 aclc.rmb_rkey =
259 htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey); 259 htonl(conn->rmb_desc->rkey[SMC_SINGLE_LINK]);
260 aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */ 260 aclc.conn_idx = 1; /* as long as 1 RMB = 1 RMBE */
261 aclc.rmbe_alert_token = htonl(conn->alert_token_local); 261 aclc.rmbe_alert_token = htonl(conn->alert_token_local);
262 aclc.qp_mtu = link->path_mtu; 262 aclc.qp_mtu = link->path_mtu;
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 65020e93ff21..3ac09a629ea1 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -613,19 +613,8 @@ int smc_rmb_create(struct smc_sock *smc)
613 rmb_desc = NULL; 613 rmb_desc = NULL;
614 continue; /* if mapping failed, try smaller one */ 614 continue; /* if mapping failed, try smaller one */
615 } 615 }
616 rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd, 616 rmb_desc->rkey[SMC_SINGLE_LINK] =
617 IB_ACCESS_REMOTE_WRITE | 617 lgr->lnk[SMC_SINGLE_LINK].roce_pd->unsafe_global_rkey;
618 IB_ACCESS_LOCAL_WRITE,
619 &rmb_desc->mr_rx[SMC_SINGLE_LINK]);
620 if (rc) {
621 smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
622 tmp_bufsize, rmb_desc,
623 DMA_FROM_DEVICE);
624 kfree(rmb_desc->cpu_addr);
625 kfree(rmb_desc);
626 rmb_desc = NULL;
627 continue;
628 }
629 rmb_desc->used = 1; 618 rmb_desc->used = 1;
630 write_lock_bh(&lgr->rmbs_lock); 619 write_lock_bh(&lgr->rmbs_lock);
631 list_add(&rmb_desc->list, 620 list_add(&rmb_desc->list,
@@ -668,6 +657,7 @@ int smc_rmb_rtoken_handling(struct smc_connection *conn,
668 657
669 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) { 658 for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
670 if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) && 659 if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
660 (lgr->rtokens[i][SMC_SINGLE_LINK].dma_addr == dma_addr) &&
671 test_bit(i, lgr->rtokens_used_mask)) { 661 test_bit(i, lgr->rtokens_used_mask)) {
672 conn->rtoken_idx = i; 662 conn->rtoken_idx = i;
673 return 0; 663 return 0;
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index 27eb38056a27..b013cb43a327 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -93,7 +93,7 @@ struct smc_buf_desc {
93 u64 dma_addr[SMC_LINKS_PER_LGR_MAX]; 93 u64 dma_addr[SMC_LINKS_PER_LGR_MAX];
94 /* mapped address of buffer */ 94 /* mapped address of buffer */
95 void *cpu_addr; /* virtual address of buffer */ 95 void *cpu_addr; /* virtual address of buffer */
96 struct ib_mr *mr_rx[SMC_LINKS_PER_LGR_MAX]; 96 u32 rkey[SMC_LINKS_PER_LGR_MAX];
97 /* for rmb only: 97 /* for rmb only:
98 * rkey provided to peer 98 * rkey provided to peer
99 */ 99 */
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index cb69ab977cd7..b31715505a35 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -37,24 +37,6 @@ u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET; /* unique system
37 * identifier 37 * identifier
38 */ 38 */
39 39
40int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
41 struct ib_mr **mr)
42{
43 int rc;
44
45 if (*mr)
46 return 0; /* already done */
47
48 /* obtain unique key -
49 * next invocation of get_dma_mr returns a different key!
50 */
51 *mr = pd->device->get_dma_mr(pd, access_flags);
52 rc = PTR_ERR_OR_ZERO(*mr);
53 if (IS_ERR(*mr))
54 *mr = NULL;
55 return rc;
56}
57
58static int smc_ib_modify_qp_init(struct smc_link *lnk) 40static int smc_ib_modify_qp_init(struct smc_link *lnk)
59{ 41{
60 struct ib_qp_attr qp_attr; 42 struct ib_qp_attr qp_attr;
@@ -210,7 +192,8 @@ int smc_ib_create_protection_domain(struct smc_link *lnk)
210{ 192{
211 int rc; 193 int rc;
212 194
213 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0); 195 lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev,
196 IB_PD_UNSAFE_GLOBAL_RKEY);
214 rc = PTR_ERR_OR_ZERO(lnk->roce_pd); 197 rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
215 if (IS_ERR(lnk->roce_pd)) 198 if (IS_ERR(lnk->roce_pd))
216 lnk->roce_pd = NULL; 199 lnk->roce_pd = NULL;
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 7e1f0e24d177..b567152a526d 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -61,8 +61,6 @@ void smc_ib_dealloc_protection_domain(struct smc_link *lnk);
61int smc_ib_create_protection_domain(struct smc_link *lnk); 61int smc_ib_create_protection_domain(struct smc_link *lnk);
62void smc_ib_destroy_queue_pair(struct smc_link *lnk); 62void smc_ib_destroy_queue_pair(struct smc_link *lnk);
63int smc_ib_create_queue_pair(struct smc_link *lnk); 63int smc_ib_create_queue_pair(struct smc_link *lnk);
64int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
65 struct ib_mr **mr);
66int smc_ib_ready_link(struct smc_link *lnk); 64int smc_ib_ready_link(struct smc_link *lnk);
67int smc_ib_modify_qp_rts(struct smc_link *lnk); 65int smc_ib_modify_qp_rts(struct smc_link *lnk);
68int smc_ib_modify_qp_reset(struct smc_link *lnk); 66int smc_ib_modify_qp_reset(struct smc_link *lnk);
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 24fedd4b117e..03f6b5840764 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -119,11 +119,9 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
119 119
120 for (i = 0; i < (reqs << 1); i++) { 120 for (i = 0; i < (reqs << 1); i++) {
121 rqst = kzalloc(sizeof(*rqst), GFP_KERNEL); 121 rqst = kzalloc(sizeof(*rqst), GFP_KERNEL);
122 if (!rqst) { 122 if (!rqst)
123 pr_err("RPC: %s: Failed to create bc rpc_rqst\n",
124 __func__);
125 goto out_free; 123 goto out_free;
126 } 124
127 dprintk("RPC: %s: new rqst %p\n", __func__, rqst); 125 dprintk("RPC: %s: new rqst %p\n", __func__, rqst);
128 126
129 rqst->rq_xprt = &r_xprt->rx_xprt; 127 rqst->rq_xprt = &r_xprt->rx_xprt;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 16aff8ddc16f..d5b54c020dec 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -2432,7 +2432,12 @@ static void xs_tcp_setup_socket(struct work_struct *work)
2432 case -ENETUNREACH: 2432 case -ENETUNREACH:
2433 case -EADDRINUSE: 2433 case -EADDRINUSE:
2434 case -ENOBUFS: 2434 case -ENOBUFS:
2435 /* retry with existing socket, after a delay */ 2435 /*
2436 * xs_tcp_force_close() wakes tasks with -EIO.
2437 * We need to wake them first to ensure the
2438 * correct error code.
2439 */
2440 xprt_wake_pending_tasks(xprt, status);
2436 xs_tcp_force_close(xprt); 2441 xs_tcp_force_close(xprt);
2437 goto out; 2442 goto out;
2438 } 2443 }
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 0d4f2f455a7c..1b92b72e812f 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -362,25 +362,25 @@ static int tipc_sk_sock_err(struct socket *sock, long *timeout)
362 return 0; 362 return 0;
363} 363}
364 364
365#define tipc_wait_for_cond(sock_, timeout_, condition_) \ 365#define tipc_wait_for_cond(sock_, timeo_, condition_) \
366({ \ 366({ \
367 int rc_ = 0; \ 367 struct sock *sk_; \
368 int done_ = 0; \ 368 int rc_; \
369 \ 369 \
370 while (!(condition_) && !done_) { \ 370 while ((rc_ = !(condition_))) { \
371 struct sock *sk_ = sock->sk; \ 371 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
372 DEFINE_WAIT_FUNC(wait_, woken_wake_function); \ 372 sk_ = (sock_)->sk; \
373 \ 373 rc_ = tipc_sk_sock_err((sock_), timeo_); \
374 rc_ = tipc_sk_sock_err(sock_, timeout_); \ 374 if (rc_) \
375 if (rc_) \ 375 break; \
376 break; \ 376 prepare_to_wait(sk_sleep(sk_), &wait_, TASK_INTERRUPTIBLE); \
377 prepare_to_wait(sk_sleep(sk_), &wait_, \ 377 release_sock(sk_); \
378 TASK_INTERRUPTIBLE); \ 378 *(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
379 done_ = sk_wait_event(sk_, timeout_, \ 379 sched_annotate_sleep(); \
380 (condition_), &wait_); \ 380 lock_sock(sk_); \
381 remove_wait_queue(sk_sleep(sk_), &wait_); \ 381 remove_wait_queue(sk_sleep(sk_), &wait_); \
382 } \ 382 } \
383 rc_; \ 383 rc_; \
384}) 384})
385 385
386/** 386/**
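
tipc_wait_for_cond() is rebuilt around wait_woken(): register on the wait queue, drop the socket lock, sleep until woken or the timeout expires, re-take the lock and re-test the condition. The closest userspace analogue of that loop is a condition variable with a deadline; a hedged pthreads sketch (an analogue only, not the kernel wait API):

#include <pthread.h>
#include <stdbool.h>
#include <errno.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool ready;

/* Wait until "ready" becomes true or the absolute deadline passes.
 * The mutex is dropped while sleeping and re-taken before the condition
 * is tested again, much like release_sock()/lock_sock() around
 * wait_woken() in the macro. */
static int wait_for_ready(const struct timespec *deadline)
{
        int rc = 0;

        pthread_mutex_lock(&lock);
        while (!ready && rc == 0)
                rc = pthread_cond_timedwait(&cond, &lock, deadline);
        pthread_mutex_unlock(&lock);
        return rc == ETIMEDOUT ? -1 : 0;
}

static void set_ready(void)
{
        pthread_mutex_lock(&lock);
        ready = true;
        pthread_cond_signal(&cond);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        struct timespec deadline;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;
        set_ready();
        return wait_for_ready(&deadline);
}

The vsock hunk that follows makes the same move from prepare_to_wait()/schedule_timeout() to add_wait_queue()/wait_woken().
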
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 6f7f6757ceef..dfc8c51e4d74 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1540,8 +1540,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1540 long timeout; 1540 long timeout;
1541 int err; 1541 int err;
1542 struct vsock_transport_send_notify_data send_data; 1542 struct vsock_transport_send_notify_data send_data;
1543 1543 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1544 DEFINE_WAIT(wait);
1545 1544
1546 sk = sock->sk; 1545 sk = sock->sk;
1547 vsk = vsock_sk(sk); 1546 vsk = vsock_sk(sk);
@@ -1584,11 +1583,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1584 if (err < 0) 1583 if (err < 0)
1585 goto out; 1584 goto out;
1586 1585
1587
1588 while (total_written < len) { 1586 while (total_written < len) {
1589 ssize_t written; 1587 ssize_t written;
1590 1588
1591 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1589 add_wait_queue(sk_sleep(sk), &wait);
1592 while (vsock_stream_has_space(vsk) == 0 && 1590 while (vsock_stream_has_space(vsk) == 0 &&
1593 sk->sk_err == 0 && 1591 sk->sk_err == 0 &&
1594 !(sk->sk_shutdown & SEND_SHUTDOWN) && 1592 !(sk->sk_shutdown & SEND_SHUTDOWN) &&
@@ -1597,33 +1595,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1597 /* Don't wait for non-blocking sockets. */ 1595 /* Don't wait for non-blocking sockets. */
1598 if (timeout == 0) { 1596 if (timeout == 0) {
1599 err = -EAGAIN; 1597 err = -EAGAIN;
1600 finish_wait(sk_sleep(sk), &wait); 1598 remove_wait_queue(sk_sleep(sk), &wait);
1601 goto out_err; 1599 goto out_err;
1602 } 1600 }
1603 1601
1604 err = transport->notify_send_pre_block(vsk, &send_data); 1602 err = transport->notify_send_pre_block(vsk, &send_data);
1605 if (err < 0) { 1603 if (err < 0) {
1606 finish_wait(sk_sleep(sk), &wait); 1604 remove_wait_queue(sk_sleep(sk), &wait);
1607 goto out_err; 1605 goto out_err;
1608 } 1606 }
1609 1607
1610 release_sock(sk); 1608 release_sock(sk);
1611 timeout = schedule_timeout(timeout); 1609 timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
1612 lock_sock(sk); 1610 lock_sock(sk);
1613 if (signal_pending(current)) { 1611 if (signal_pending(current)) {
1614 err = sock_intr_errno(timeout); 1612 err = sock_intr_errno(timeout);
1615 finish_wait(sk_sleep(sk), &wait); 1613 remove_wait_queue(sk_sleep(sk), &wait);
1616 goto out_err; 1614 goto out_err;
1617 } else if (timeout == 0) { 1615 } else if (timeout == 0) {
1618 err = -EAGAIN; 1616 err = -EAGAIN;
1619 finish_wait(sk_sleep(sk), &wait); 1617 remove_wait_queue(sk_sleep(sk), &wait);
1620 goto out_err; 1618 goto out_err;
1621 } 1619 }
1622
1623 prepare_to_wait(sk_sleep(sk), &wait,
1624 TASK_INTERRUPTIBLE);
1625 } 1620 }
1626 finish_wait(sk_sleep(sk), &wait); 1621 remove_wait_queue(sk_sleep(sk), &wait);
1627 1622
1628 /* These checks occur both as part of and after the loop 1623 /* These checks occur both as part of and after the loop
1629 * conditional since we need to check before and after 1624 * conditional since we need to check before and after
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 14d5f0c8c45f..9f0901f3e42b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -322,9 +322,9 @@ cfg80211_find_sched_scan_req(struct cfg80211_registered_device *rdev, u64 reqid)
322{ 322{
323 struct cfg80211_sched_scan_request *pos; 323 struct cfg80211_sched_scan_request *pos;
324 324
325 ASSERT_RTNL(); 325 WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
326 326
327 list_for_each_entry(pos, &rdev->sched_scan_req_list, list) { 327 list_for_each_entry_rcu(pos, &rdev->sched_scan_req_list, list) {
328 if (pos->reqid == reqid) 328 if (pos->reqid == reqid)
329 return pos; 329 return pos;
330 } 330 }
@@ -398,13 +398,13 @@ void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid)
398 trace_cfg80211_sched_scan_results(wiphy, reqid); 398 trace_cfg80211_sched_scan_results(wiphy, reqid);
399 /* ignore if we're not scanning */ 399 /* ignore if we're not scanning */
400 400
401 rtnl_lock(); 401 rcu_read_lock();
402 request = cfg80211_find_sched_scan_req(rdev, reqid); 402 request = cfg80211_find_sched_scan_req(rdev, reqid);
403 if (request) { 403 if (request) {
404 request->report_results = true; 404 request->report_results = true;
405 queue_work(cfg80211_wq, &rdev->sched_scan_res_wk); 405 queue_work(cfg80211_wq, &rdev->sched_scan_res_wk);
406 } 406 }
407 rtnl_unlock(); 407 rcu_read_unlock();
408} 408}
409EXPORT_SYMBOL(cfg80211_sched_scan_results); 409EXPORT_SYMBOL(cfg80211_sched_scan_results);
410 410
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 7198373e2920..4992f1025c9d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -454,6 +454,8 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
454 if (iftype == NL80211_IFTYPE_MESH_POINT) 454 if (iftype == NL80211_IFTYPE_MESH_POINT)
455 skb_copy_bits(skb, hdrlen, &mesh_flags, 1); 455 skb_copy_bits(skb, hdrlen, &mesh_flags, 1);
456 456
457 mesh_flags &= MESH_FLAGS_AE;
458
457 switch (hdr->frame_control & 459 switch (hdr->frame_control &
458 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) { 460 cpu_to_le16(IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) {
459 case cpu_to_le16(IEEE80211_FCTL_TODS): 461 case cpu_to_le16(IEEE80211_FCTL_TODS):
@@ -469,9 +471,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
469 iftype != NL80211_IFTYPE_STATION)) 471 iftype != NL80211_IFTYPE_STATION))
470 return -1; 472 return -1;
471 if (iftype == NL80211_IFTYPE_MESH_POINT) { 473 if (iftype == NL80211_IFTYPE_MESH_POINT) {
472 if (mesh_flags & MESH_FLAGS_AE_A4) 474 if (mesh_flags == MESH_FLAGS_AE_A4)
473 return -1; 475 return -1;
474 if (mesh_flags & MESH_FLAGS_AE_A5_A6) { 476 if (mesh_flags == MESH_FLAGS_AE_A5_A6) {
475 skb_copy_bits(skb, hdrlen + 477 skb_copy_bits(skb, hdrlen +
476 offsetof(struct ieee80211s_hdr, eaddr1), 478 offsetof(struct ieee80211s_hdr, eaddr1),
477 tmp.h_dest, 2 * ETH_ALEN); 479 tmp.h_dest, 2 * ETH_ALEN);
@@ -487,9 +489,9 @@ int ieee80211_data_to_8023_exthdr(struct sk_buff *skb, struct ethhdr *ehdr,
 		     ether_addr_equal(tmp.h_source, addr)))
 			return -1;
 		if (iftype == NL80211_IFTYPE_MESH_POINT) {
-			if (mesh_flags & MESH_FLAGS_AE_A5_A6)
+			if (mesh_flags == MESH_FLAGS_AE_A5_A6)
 				return -1;
-			if (mesh_flags & MESH_FLAGS_AE_A4)
+			if (mesh_flags == MESH_FLAGS_AE_A4)
 				skb_copy_bits(skb, hdrlen +
 					offsetof(struct ieee80211s_hdr, eaddr1),
 					tmp.h_source, ETH_ALEN);
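The three hunks above first mask the mesh control flags down to the MESH_FLAGS_AE address-extension field, then compare that field for equality instead of testing individual bits, so a frame carrying stray bits in the flags octet can no longer satisfy conflicting address-extension checks. A small userspace sketch of the mask-then-compare idea, using illustrative flag values rather than the ieee80211 definitions:

#include <stdio.h>

#define AE_A4    0x01	/* illustrative values, not the real mesh flag bits */
#define AE_A5_A6 0x02
#define AE_MASK  (AE_A4 | AE_A5_A6)

/* Returns -1 for the 4-address case, 1 for the 5/6-address case,
 * 0 when no address extension is requested. */
static int classify_ae(unsigned char flags)
{
	flags &= AE_MASK;	/* drop unrelated bits before comparing */

	if (flags == AE_A4)
		return -1;
	if (flags == AE_A5_A6)
		return 1;
	return 0;
}

int main(void)
{
	/* 0x81 has the A4 bit plus an unrelated bit set: masking first
	 * means the exact comparison still classifies it as A4. */
	printf("%d %d\n", classify_ae(0x81), classify_ae(0x02));
	return 0;
}
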
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index 8b911c29860e..5a1a98df3499 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1791,32 +1791,40 @@ void x25_kill_by_neigh(struct x25_neigh *nb)
 
 static int __init x25_init(void)
 {
-	int rc = proto_register(&x25_proto, 0);
+	int rc;
 
-	if (rc != 0)
+	rc = proto_register(&x25_proto, 0);
+	if (rc)
 		goto out;
 
 	rc = sock_register(&x25_family_ops);
-	if (rc != 0)
+	if (rc)
 		goto out_proto;
 
 	dev_add_pack(&x25_packet_type);
 
 	rc = register_netdevice_notifier(&x25_dev_notifier);
-	if (rc != 0)
+	if (rc)
 		goto out_sock;
 
-	pr_info("Linux Version 0.2\n");
+	rc = x25_register_sysctl();
+	if (rc)
+		goto out_dev;
 
-	x25_register_sysctl();
 	rc = x25_proc_init();
-	if (rc != 0)
-		goto out_dev;
+	if (rc)
+		goto out_sysctl;
+
+	pr_info("Linux Version 0.2\n");
+
 out:
 	return rc;
+out_sysctl:
+	x25_unregister_sysctl();
 out_dev:
 	unregister_netdevice_notifier(&x25_dev_notifier);
 out_sock:
+	dev_remove_pack(&x25_packet_type);
 	sock_unregister(AF_X25);
 out_proto:
 	proto_unregister(&x25_proto);
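The rewritten x25_init() above follows the usual goto-unwind ladder: every registration that can fail jumps to a label that undoes only what already succeeded, the labels run in reverse registration order, and the new out_sysctl label plus the dev_remove_pack() call close the gaps the old version left. A compact, self-contained sketch of the pattern with made-up step and undo helpers:

#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* pretend the last step fails */
static void undo_b(void) { puts("undo_b"); }
static void undo_a(void) { puts("undo_a"); }

static int example_init(void)
{
	int rc;

	rc = step_a();
	if (rc)
		goto out;
	rc = step_b();
	if (rc)
		goto out_a;
	rc = step_c();
	if (rc)
		goto out_b;	/* unwind b, then a, in reverse order */
	return 0;

out_b:
	undo_b();
out_a:
	undo_a();
out:
	return rc;
}

int main(void)
{
	printf("example_init() = %d\n", example_init());
	return 0;
}
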
diff --git a/net/x25/sysctl_net_x25.c b/net/x25/sysctl_net_x25.c
index a06dfe143c67..ba078c85f0a1 100644
--- a/net/x25/sysctl_net_x25.c
+++ b/net/x25/sysctl_net_x25.c
@@ -73,9 +73,12 @@ static struct ctl_table x25_table[] = {
 	{ },
 };
 
-void __init x25_register_sysctl(void)
+int __init x25_register_sysctl(void)
 {
 	x25_table_header = register_net_sysctl(&init_net, "net/x25", x25_table);
+	if (!x25_table_header)
+		return -ENOMEM;
+	return 0;
 }
 
 void x25_unregister_sysctl(void)
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c
index 8ec8a3fcf8d4..574e6f32f94f 100644
--- a/net/xfrm/xfrm_device.c
+++ b/net/xfrm/xfrm_device.c
@@ -170,7 +170,7 @@ static int xfrm_dev_feat_change(struct net_device *dev)
 
 static int xfrm_dev_down(struct net_device *dev)
 {
-	if (dev->hw_features & NETIF_F_HW_ESP)
+	if (dev->features & NETIF_F_HW_ESP)
 		xfrm_dev_state_flush(dev_net(dev), dev, true);
 
 	xfrm_garbage_collect(dev_net(dev));
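The one-line change above keys the teardown on dev->features (what is enabled right now) instead of dev->hw_features (what the device could offer), so offloaded ESP states are flushed on dev-down only when the offload is actually active. A hedged kernel-style sketch of that distinction, with hypothetical helper names:

#include <linux/netdevice.h>

/* hw_features is the capability mask user space may toggle;
 * features is the set currently in effect on the device. */
static bool esp_offload_supported(const struct net_device *dev)
{
	return !!(dev->hw_features & NETIF_F_HW_ESP);
}

static bool esp_offload_active(const struct net_device *dev)
{
	return !!(dev->features & NETIF_F_HW_ESP);
}
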
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b00a1d5a7f52..ed4e52d95172 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1797,43 +1797,6 @@ free_dst:
 	goto out;
 }
 
-#ifdef CONFIG_XFRM_SUB_POLICY
-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
-{
-	if (!*target) {
-		*target = kmalloc(size, GFP_ATOMIC);
-		if (!*target)
-			return -ENOMEM;
-	}
-
-	memcpy(*target, src, size);
-	return 0;
-}
-#endif
-
-static int xfrm_dst_update_parent(struct dst_entry *dst,
-				  const struct xfrm_selector *sel)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-	return xfrm_dst_alloc_copy((void **)&(xdst->partner),
-				   sel, sizeof(*sel));
-#else
-	return 0;
-#endif
-}
-
-static int xfrm_dst_update_origin(struct dst_entry *dst,
-				  const struct flowi *fl)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
-	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-	return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
-#else
-	return 0;
-#endif
-}
-
 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
 				struct xfrm_policy **pols,
 				int *num_pols, int *num_xfrms)
@@ -1905,16 +1868,6 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
 
 	xdst = (struct xfrm_dst *)dst;
 	xdst->num_xfrms = err;
-	if (num_pols > 1)
-		err = xfrm_dst_update_parent(dst, &pols[1]->selector);
-	else
-		err = xfrm_dst_update_origin(dst, fl);
-	if (unlikely(err)) {
-		dst_free(dst);
-		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
-		return ERR_PTR(err);
-	}
-
 	xdst->num_pols = num_pols;
 	memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
 	xdst->policy_genid = atomic_read(&pols[0]->genid);
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index fc3c5aa38754..2e291bc5f1fc 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1383,6 +1383,8 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
 	x->curlft.add_time = orig->curlft.add_time;
 	x->km.state = orig->km.state;
 	x->km.seq = orig->km.seq;
+	x->replay = orig->replay;
+	x->preplay = orig->preplay;
 
 	return x;
 
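The two added assignments carry the replay and pre-replay state over to the cloned SA, so a migrated state keeps its sequence numbers and anti-replay window rather than restarting from zero. A tiny userspace sketch of what such a struct assignment copies, using a simplified stand-in for the real replay structure:

#include <stdio.h>

struct replay_state {		/* simplified stand-in, not the xfrm struct */
	unsigned int oseq;	/* last output sequence number */
	unsigned int seq;	/* highest input sequence number seen */
	unsigned int bitmap;	/* window of recently seen sequence numbers */
};

int main(void)
{
	struct replay_state orig = { .oseq = 42, .seq = 40, .bitmap = 0x7 };
	struct replay_state clone = orig;	/* plain struct assignment */

	printf("clone: oseq=%u seq=%u bitmap=%#x\n",
	       clone.oseq, clone.seq, clone.bitmap);
	return 0;
}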