author     David S. Miller <davem@davemloft.net>   2013-11-04 13:48:30 -0500
committer  David S. Miller <davem@davemloft.net>   2013-11-04 13:48:30 -0500
commit     394efd19d5fcae936261bd48e5b33b21897aacf8 (patch)
tree       c48cf3ddbb07fd87309f1abdf31a27c71330e587 /net
parent     f421436a591d34fa5279b54a96ac07d70250cc8d (diff)
parent     be408cd3e1fef73e9408b196a79b9934697fe3b1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/emulex/benet/be.h
	drivers/net/netconsole.c
	net/bridge/br_private.h

Three mostly trivial conflicts.

The net/bridge/br_private.h conflict was a function signature (argument
addition) change overlapping with the extern removals from Joe Perches.

In drivers/net/netconsole.c we had one change adjusting a printk message
whilst another changed "printk(KERN_INFO" into "pr_info(".

Lastly, the emulex change was a new inline function addition overlapping
with Joe Perches's extern removals.

Signed-off-by: David S. Miller <davem@davemloft.net>
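For illustration only: the netconsole.c conflict described above comes down to composing two mechanical edits, because in the kernel pr_info() is simply a wrapper that expands to printk(KERN_INFO pr_fmt(fmt), ...). The sketch below is a hypothetical userspace stand-in, not the actual netconsole.c code; the device name and message text are made up, and printk()/pr_info() are redefined locally so the snippet compiles on its own.

	#include <stdio.h>

	#define KERN_INFO   "<6>"                 /* kernel log-level prefix */
	#define pr_fmt(fmt) fmt                   /* kernel default: no subsystem prefix */
	#define printk(...) printf(__VA_ARGS__)   /* local stand-in for the kernel's printk() */
	#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

	int main(void)
	{
		const char *dev = "eth0";   /* hypothetical device name */

		/* one branch of the merge: old-style call with an adjusted (made-up) message */
		printk(KERN_INFO "netconsole: network logging started on %s\n", dev);

		/* the other branch: the same call converted to pr_info(); both edits compose */
		pr_info("netconsole: network logging started on %s\n", dev);

		return 0;
	}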
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_device.c            |  2
-rw-r--r--  net/bridge/br_input.c             |  2
-rw-r--r--  net/bridge/br_multicast.c         | 44
-rw-r--r--  net/bridge/br_private.h           |  5
-rw-r--r--  net/bridge/netfilter/ebt_ulog.c   |  9
-rw-r--r--  net/core/flow_dissector.c         |  2
-rw-r--r--  net/core/netpoll.c                | 31
-rw-r--r--  net/ipv4/netfilter/arp_tables.c   |  5
-rw-r--r--  net/ipv4/netfilter/ip_tables.c    |  5
-rw-r--r--  net/ipv4/netfilter/ipt_ULOG.c     |  7
-rw-r--r--  net/ipv4/tcp_input.c              | 34
-rw-r--r--  net/ipv4/tcp_offload.c            | 13
-rw-r--r--  net/ipv4/xfrm4_policy.c           |  8
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c   |  5
-rw-r--r--  net/ipv6/route.c                  |  9
-rw-r--r--  net/ipv6/xfrm6_policy.c           |  8
-rw-r--r--  net/netfilter/x_tables.c          |  7
-rw-r--r--  net/netfilter/xt_NFQUEUE.c        |  7
-rw-r--r--  net/openvswitch/dp_notify.c       |  7
-rw-r--r--  net/openvswitch/vport-netdev.c    | 16
-rw-r--r--  net/openvswitch/vport-netdev.h    |  1
-rw-r--r--  net/sched/sch_fq.c                |  1
-rw-r--r--  net/sctp/ipv6.c                   |  4
-rw-r--r--  net/sctp/sm_sideeffect.c          |  1
-rw-r--r--  net/x25/Kconfig                   |  4
-rw-r--r--  net/xfrm/xfrm_ipcomp.c            | 12

26 files changed, 153 insertions, 96 deletions
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index ca04163635da..e6b7fecb3af1 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -64,7 +64,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 			br_flood_deliver(br, skb, false);
 			goto out;
 		}
-		if (br_multicast_rcv(br, NULL, skb)) {
+		if (br_multicast_rcv(br, NULL, skb, vid)) {
 			kfree_skb(skb);
 			goto out;
 		}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index a2fd37ec35f7..7e73c32e205d 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
 
 	if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
-	    br_multicast_rcv(br, p, skb))
+	    br_multicast_rcv(br, p, skb, vid))
 		goto drop;
 
 	if (p->state == BR_STATE_LEARNING)
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 0513ef3ce667..4c214b2b88ef 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -947,7 +947,8 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 
 static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
 					 struct net_bridge_port *port,
-					 struct sk_buff *skb)
+					 struct sk_buff *skb,
+					 u16 vid)
 {
 	struct igmpv3_report *ih;
 	struct igmpv3_grec *grec;
@@ -957,12 +958,10 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
 	int type;
 	int err = 0;
 	__be32 group;
-	u16 vid = 0;
 
 	if (!pskb_may_pull(skb, sizeof(*ih)))
 		return -EINVAL;
 
-	br_vlan_get_tag(skb, &vid);
 	ih = igmpv3_report_hdr(skb);
 	num = ntohs(ih->ngrec);
 	len = sizeof(*ih);
@@ -1005,7 +1004,8 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
 #if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 					struct net_bridge_port *port,
-					struct sk_buff *skb)
+					struct sk_buff *skb,
+					u16 vid)
 {
 	struct icmp6hdr *icmp6h;
 	struct mld2_grec *grec;
@@ -1013,12 +1013,10 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 	int len;
 	int num;
 	int err = 0;
-	u16 vid = 0;
 
 	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
 		return -EINVAL;
 
-	br_vlan_get_tag(skb, &vid);
 	icmp6h = icmp6_hdr(skb);
 	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
 	len = sizeof(*icmp6h);
@@ -1141,7 +1139,8 @@ static void br_multicast_query_received(struct net_bridge *br,
 
 static int br_ip4_multicast_query(struct net_bridge *br,
 				  struct net_bridge_port *port,
-				  struct sk_buff *skb)
+				  struct sk_buff *skb,
+				  u16 vid)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	struct igmphdr *ih = igmp_hdr(skb);
@@ -1153,7 +1152,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 	unsigned long now = jiffies;
 	__be32 group;
 	int err = 0;
-	u16 vid = 0;
 
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) ||
@@ -1189,7 +1187,6 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 	if (!group)
 		goto out;
 
-	br_vlan_get_tag(skb, &vid);
 	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group, vid);
 	if (!mp)
 		goto out;
@@ -1219,7 +1216,8 @@ out:
 #if IS_ENABLED(CONFIG_IPV6)
 static int br_ip6_multicast_query(struct net_bridge *br,
 				  struct net_bridge_port *port,
-				  struct sk_buff *skb)
+				  struct sk_buff *skb,
+				  u16 vid)
 {
 	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
 	struct mld_msg *mld;
@@ -1231,7 +1229,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 	unsigned long now = jiffies;
 	const struct in6_addr *group = NULL;
 	int err = 0;
-	u16 vid = 0;
 
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) ||
@@ -1265,7 +1262,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
 	if (!group)
 		goto out;
 
-	br_vlan_get_tag(skb, &vid);
 	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group, vid);
 	if (!mp)
 		goto out;
@@ -1439,7 +1435,8 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
 
 static int br_multicast_ipv4_rcv(struct net_bridge *br,
 				 struct net_bridge_port *port,
-				 struct sk_buff *skb)
+				 struct sk_buff *skb,
+				 u16 vid)
 {
 	struct sk_buff *skb2 = skb;
 	const struct iphdr *iph;
@@ -1447,7 +1444,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 	unsigned int len;
 	unsigned int offset;
 	int err;
-	u16 vid = 0;
 
 	/* We treat OOM as packet loss for now. */
 	if (!pskb_may_pull(skb, sizeof(*iph)))
@@ -1508,7 +1504,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 
 	err = 0;
 
-	br_vlan_get_tag(skb2, &vid);
 	BR_INPUT_SKB_CB(skb)->igmp = 1;
 	ih = igmp_hdr(skb2);
 
@@ -1519,10 +1514,10 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 		err = br_ip4_multicast_add_group(br, port, ih->group, vid);
 		break;
 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
-		err = br_ip4_multicast_igmp3_report(br, port, skb2);
+		err = br_ip4_multicast_igmp3_report(br, port, skb2, vid);
 		break;
 	case IGMP_HOST_MEMBERSHIP_QUERY:
-		err = br_ip4_multicast_query(br, port, skb2);
+		err = br_ip4_multicast_query(br, port, skb2, vid);
 		break;
 	case IGMP_HOST_LEAVE_MESSAGE:
 		br_ip4_multicast_leave_group(br, port, ih->group, vid);
@@ -1540,7 +1535,8 @@ err_out:
 #if IS_ENABLED(CONFIG_IPV6)
 static int br_multicast_ipv6_rcv(struct net_bridge *br,
 				 struct net_bridge_port *port,
-				 struct sk_buff *skb)
+				 struct sk_buff *skb,
+				 u16 vid)
 {
 	struct sk_buff *skb2;
 	const struct ipv6hdr *ip6h;
@@ -1550,7 +1546,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 	unsigned int len;
 	int offset;
 	int err;
-	u16 vid = 0;
 
 	if (!pskb_may_pull(skb, sizeof(*ip6h)))
 		return -EINVAL;
@@ -1640,7 +1635,6 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 
 	err = 0;
 
-	br_vlan_get_tag(skb, &vid);
 	BR_INPUT_SKB_CB(skb)->igmp = 1;
 
 	switch (icmp6_type) {
@@ -1657,10 +1651,10 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
 		break;
 	    }
 	case ICMPV6_MLD2_REPORT:
-		err = br_ip6_multicast_mld2_report(br, port, skb2);
+		err = br_ip6_multicast_mld2_report(br, port, skb2, vid);
 		break;
 	case ICMPV6_MGM_QUERY:
-		err = br_ip6_multicast_query(br, port, skb2);
+		err = br_ip6_multicast_query(br, port, skb2, vid);
 		break;
 	case ICMPV6_MGM_REDUCTION:
 	    {
@@ -1681,7 +1675,7 @@ out:
 #endif
 
 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
-		     struct sk_buff *skb)
+		     struct sk_buff *skb, u16 vid)
 {
 	BR_INPUT_SKB_CB(skb)->igmp = 0;
 	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
@@ -1691,10 +1685,10 @@ int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		return br_multicast_ipv4_rcv(br, port, skb);
+		return br_multicast_ipv4_rcv(br, port, skb, vid);
 #if IS_ENABLED(CONFIG_IPV6)
 	case htons(ETH_P_IPV6):
-		return br_multicast_ipv6_rcv(br, port, skb);
+		return br_multicast_ipv6_rcv(br, port, skb, vid);
 #endif
 	}
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index d1ca6d956633..229d820bdf0b 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -435,7 +435,7 @@ int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
 extern unsigned int br_mdb_rehash_seq;
 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
-		     struct sk_buff *skb);
+		     struct sk_buff *skb, u16 vid);
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 					struct sk_buff *skb, u16 vid);
 void br_multicast_add_port(struct net_bridge_port *port);
@@ -504,7 +504,8 @@ static inline bool br_multicast_querier_exists(struct net_bridge *br,
 #else
 static inline int br_multicast_rcv(struct net_bridge *br,
 				   struct net_bridge_port *port,
-				   struct sk_buff *skb)
+				   struct sk_buff *skb,
+				   u16 vid)
 {
 	return 0;
 }
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index 518093802d1d..7c470c371e14 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -181,6 +181,7 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
 	ub->qlen++;
 
 	pm = nlmsg_data(nlh);
+	memset(pm, 0, sizeof(*pm));
 
 	/* Fill in the ulog data */
 	pm->version = EBT_ULOG_VERSION;
@@ -193,8 +194,6 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
 	pm->hook = hooknr;
 	if (uloginfo->prefix != NULL)
 		strcpy(pm->prefix, uloginfo->prefix);
-	else
-		*(pm->prefix) = '\0';
 
 	if (in) {
 		strcpy(pm->physindev, in->name);
@@ -204,16 +203,14 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
 			strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
 		else
 			strcpy(pm->indev, in->name);
-	} else
-		pm->indev[0] = pm->physindev[0] = '\0';
+	}
 
 	if (out) {
 		/* If out exists, then out is a bridge port */
 		strcpy(pm->physoutdev, out->name);
 		/* rcu_read_lock()ed by nf_hook_slow */
 		strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
-	} else
-		pm->outdev[0] = pm->physoutdev[0] = '\0';
+	}
 
 	if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
 		BUG();
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 5cac36e6ccd1..0242035192f1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -66,7 +66,7 @@ again:
 		struct iphdr _iph;
 ip:
 		iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
-		if (!iph)
+		if (!iph || iph->ihl < 5)
 			return false;
 
 		if (ip_is_fragment(iph))
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index fc75c9e461b8..8f971990677c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -636,8 +636,9 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
 
 		netpoll_send_skb(np, send_skb);
 
-		/* If there are several rx_hooks for the same address,
-		   we're fine by sending a single reply */
+		/* If there are several rx_skb_hooks for the same
+		 * address we're fine by sending a single reply
+		 */
 		break;
 	}
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
@@ -719,8 +720,9 @@ static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo
 
 		netpoll_send_skb(np, send_skb);
 
-		/* If there are several rx_hooks for the same address,
-		   we're fine by sending a single reply */
+		/* If there are several rx_skb_hooks for the same
+		 * address, we're fine by sending a single reply
+		 */
 		break;
 	}
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
@@ -756,11 +758,12 @@ static bool pkt_is_ns(struct sk_buff *skb)
 
 int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
-	int proto, len, ulen;
-	int hits = 0;
+	int proto, len, ulen, data_len;
+	int hits = 0, offset;
 	const struct iphdr *iph;
 	struct udphdr *uh;
 	struct netpoll *np, *tmp;
+	uint16_t source;
 
 	if (list_empty(&npinfo->rx_np))
 		goto out;
@@ -820,7 +823,10 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 
 		len -= iph->ihl*4;
 		uh = (struct udphdr *)(((char *)iph) + iph->ihl*4);
+		offset = (unsigned char *)(uh + 1) - skb->data;
 		ulen = ntohs(uh->len);
+		data_len = skb->len - offset;
+		source = ntohs(uh->source);
 
 		if (ulen != len)
 			goto out;
@@ -834,9 +840,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 			if (np->local_port && np->local_port != ntohs(uh->dest))
 				continue;
 
-			np->rx_hook(np, ntohs(uh->source),
-				    (char *)(uh+1),
-				    ulen - sizeof(struct udphdr));
+			np->rx_skb_hook(np, source, skb, offset, data_len);
 			hits++;
 		}
 	} else {
@@ -859,7 +863,10 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
 			goto out;
 		uh = udp_hdr(skb);
+		offset = (unsigned char *)(uh + 1) - skb->data;
 		ulen = ntohs(uh->len);
+		data_len = skb->len - offset;
+		source = ntohs(uh->source);
 		if (ulen != skb->len)
 			goto out;
 		if (udp6_csum_init(skb, uh, IPPROTO_UDP))
@@ -872,9 +879,7 @@ int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo)
 			if (np->local_port && np->local_port != ntohs(uh->dest))
 				continue;
 
-			np->rx_hook(np, ntohs(uh->source),
-				    (char *)(uh+1),
-				    ulen - sizeof(struct udphdr));
+			np->rx_skb_hook(np, source, skb, offset, data_len);
 			hits++;
 		}
 #endif
@@ -1062,7 +1067,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
 	npinfo->netpoll = np;
 
-	if (np->rx_hook) {
+	if (np->rx_skb_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
 		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
 		list_add_tail(&np->rx, &npinfo->rx_np);
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 85a4f21aac1a..59da7cde0724 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -271,6 +271,11 @@ unsigned int arpt_do_table(struct sk_buff *skb,
 	local_bh_disable();
 	addend = xt_write_recseq_begin();
 	private = table->private;
+	/*
+	 * Ensure we load private-> members after we've fetched the base
+	 * pointer.
+	 */
+	smp_read_barrier_depends();
 	table_base = private->entries[smp_processor_id()];
 
 	e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d23118d95ff9..718dfbd30cbe 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -327,6 +327,11 @@ ipt_do_table(struct sk_buff *skb,
 	addend = xt_write_recseq_begin();
 	private = table->private;
 	cpu = smp_processor_id();
+	/*
+	 * Ensure we load private-> members after we've fetched the base
+	 * pointer.
+	 */
+	smp_read_barrier_depends();
 	table_base = private->entries[cpu];
 	jumpstack = (struct ipt_entry **)private->jumpstack[cpu];
 	stackptr = per_cpu_ptr(private->stackptr, cpu);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index cbc22158af49..9cb993cd224b 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -220,6 +220,7 @@ static void ipt_ulog_packet(struct net *net,
 	ub->qlen++;
 
 	pm = nlmsg_data(nlh);
+	memset(pm, 0, sizeof(*pm));
 
 	/* We might not have a timestamp, get one */
 	if (skb->tstamp.tv64 == 0)
@@ -238,8 +239,6 @@ static void ipt_ulog_packet(struct net *net,
 	}
 	else if (loginfo->prefix[0] != '\0')
 		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
-	else
-		*(pm->prefix) = '\0';
 
 	if (in && in->hard_header_len > 0 &&
 	    skb->mac_header != skb->network_header &&
@@ -251,13 +250,9 @@ static void ipt_ulog_packet(struct net *net,
 
 	if (in)
 		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
-	else
-		pm->indev_name[0] = '\0';
 
 	if (out)
 		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
-	else
-		pm->outdev_name[0] = '\0';
 
 	/* copy_len <= skb->len, so can't fail. */
 	if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b935397c703c..63095b218b4a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2903,7 +2903,8 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 	 * left edge of the send window.
 	 * See draft-ietf-tcplw-high-performance-00, section 3.3.
 	 */
-	if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
+	if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+	    flag & FLAG_ACKED)
 		seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
 
 	if (seq_rtt < 0)
@@ -2918,14 +2919,19 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
 }
 
 /* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
-static void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req)
+static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	s32 seq_rtt = -1;
 
-	if (tp->lsndtime && !tp->total_retrans)
-		seq_rtt = tcp_time_stamp - tp->lsndtime;
-	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
+	if (synack_stamp && !tp->total_retrans)
+		seq_rtt = tcp_time_stamp - synack_stamp;
+
+	/* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
+	 * sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
+	 */
+	if (!tp->srtt)
+		tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
 }
 
 static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
@@ -3028,6 +3034,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	s32 seq_rtt = -1;
 	s32 ca_seq_rtt = -1;
 	ktime_t last_ackt = net_invalid_timestamp();
+	bool rtt_update;
 
 	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
 		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
@@ -3104,14 +3111,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 		flag |= FLAG_SACK_RENEGING;
 
-	if (tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt) ||
-	    (flag & FLAG_ACKED))
-		tcp_rearm_rto(sk);
+	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt);
 
 	if (flag & FLAG_ACKED) {
 		const struct tcp_congestion_ops *ca_ops
 			= inet_csk(sk)->icsk_ca_ops;
 
+		tcp_rearm_rto(sk);
 		if (unlikely(icsk->icsk_mtup.probe_size &&
 			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
 			tcp_mtup_probe_success(sk);
@@ -3150,6 +3156,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
 
 			ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
 		}
+	} else if (skb && rtt_update && sack_rtt >= 0 &&
+		   sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) {
+		/* Do not re-arm RTO if the sack RTT is measured from data sent
+		 * after when the head was last (re)transmitted. Otherwise the
+		 * timeout may continue to extend in loss recovery.
+		 */
+		tcp_rearm_rto(sk);
 	}
 
 #if FASTRETRANS_DEBUG > 0
@@ -5626,6 +5639,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	struct request_sock *req;
 	int queued = 0;
 	bool acceptable;
+	u32 synack_stamp;
 
 	tp->rx_opt.saw_tstamp = 0;
 
@@ -5708,9 +5722,11 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		 * so release it.
 		 */
 		if (req) {
+			synack_stamp = tcp_rsk(req)->snt_synack;
 			tp->total_retrans = req->num_retrans;
 			reqsk_fastopen_remove(sk, req, false);
 		} else {
+			synack_stamp = tp->lsndtime;
 			/* Make sure socket is routed, for correct metrics. */
 			icsk->icsk_af_ops->rebuild_header(sk);
 			tcp_init_congestion_control(sk);
@@ -5733,7 +5749,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
 		tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale;
 		tcp_init_wl(tp, TCP_SKB_CB(skb)->seq);
-		tcp_synack_rtt_meas(sk, req);
+		tcp_synack_rtt_meas(sk, synack_stamp);
 
 		if (tp->rx_opt.tstamp_ok)
 			tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c
index a7a5583eab04..a2b68a108eae 100644
--- a/net/ipv4/tcp_offload.c
+++ b/net/ipv4/tcp_offload.c
@@ -18,6 +18,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 				netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
+	unsigned int sum_truesize = 0;
 	struct tcphdr *th;
 	unsigned int thlen;
 	unsigned int seq;
@@ -104,13 +105,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 		if (copy_destructor) {
 			skb->destructor = gso_skb->destructor;
 			skb->sk = gso_skb->sk;
-			/* {tcp|sock}_wfree() use exact truesize accounting :
-			 * sum(skb->truesize) MUST be exactly be gso_skb->truesize
-			 * So we account mss bytes of 'true size' for each segment.
-			 * The last segment will contain the remaining.
-			 */
-			skb->truesize = mss;
-			gso_skb->truesize -= mss;
+			sum_truesize += skb->truesize;
 		}
 		skb = skb->next;
 		th = tcp_hdr(skb);
@@ -127,7 +122,9 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
 	if (copy_destructor) {
 		swap(gso_skb->sk, skb->sk);
 		swap(gso_skb->destructor, skb->destructor);
-		swap(gso_skb->truesize, skb->truesize);
+		sum_truesize += skb->truesize;
+		atomic_add(sum_truesize - gso_skb->truesize,
+			   &skb->sk->sk_wmem_alloc);
 	}
 
 	delta = htonl(oldlen + (skb_tail_pointer(skb) -
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index ccde54248c8c..e1a63930a967 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -104,10 +104,14 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
 	const struct iphdr *iph = ip_hdr(skb);
 	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
 	struct flowi4 *fl4 = &fl->u.ip4;
+	int oif = 0;
+
+	if (skb_dst(skb))
+		oif = skb_dst(skb)->dev->ifindex;
 
 	memset(fl4, 0, sizeof(struct flowi4));
 	fl4->flowi4_mark = skb->mark;
-	fl4->flowi4_oif = skb_dst(skb)->dev->ifindex;
+	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;
 
 	if (!ip_is_fragment(iph)) {
 		switch (iph->protocol) {
@@ -236,7 +240,7 @@ static struct dst_ops xfrm4_dst_ops = {
 	.destroy = xfrm4_dst_destroy,
 	.ifdown = xfrm4_dst_ifdown,
 	.local_out = __ip_local_out,
-	.gc_thresh = 1024,
+	.gc_thresh = 32768,
 };
 
 static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 44400c216dc6..710238f58aa9 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -349,6 +349,11 @@ ip6t_do_table(struct sk_buff *skb,
 	local_bh_disable();
 	addend = xt_write_recseq_begin();
 	private = table->private;
+	/*
+	 * Ensure we load private-> members after we've fetched the base
+	 * pointer.
+	 */
+	smp_read_barrier_depends();
 	cpu = smp_processor_id();
 	table_base = private->entries[cpu];
 	jumpstack = (struct ip6t_entry **)private->jumpstack[cpu];
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1ac0b6e17d95..fd399ac6c1f7 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1087,10 +1087,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
 	if (rt->rt6i_genid != rt_genid_ipv6(dev_net(rt->dst.dev)))
 		return NULL;
 
-	if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie))
-		return dst;
+	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
+		return NULL;
 
-	return NULL;
+	if (rt6_check_expired(rt))
+		return NULL;
+
+	return dst;
 }
 
 static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 08ed2772b7aa..5f8e128c512d 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -135,10 +135,14 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
 	struct ipv6_opt_hdr *exthdr;
 	const unsigned char *nh = skb_network_header(skb);
 	u8 nexthdr = nh[IP6CB(skb)->nhoff];
+	int oif = 0;
+
+	if (skb_dst(skb))
+		oif = skb_dst(skb)->dev->ifindex;
 
 	memset(fl6, 0, sizeof(struct flowi6));
 	fl6->flowi6_mark = skb->mark;
-	fl6->flowi6_oif = skb_dst(skb)->dev->ifindex;
+	fl6->flowi6_oif = reverse ? skb->skb_iif : oif;
 
 	fl6->daddr = reverse ? hdr->saddr : hdr->daddr;
 	fl6->saddr = reverse ? hdr->daddr : hdr->saddr;
@@ -285,7 +289,7 @@ static struct dst_ops xfrm6_dst_ops = {
 	.destroy = xfrm6_dst_destroy,
 	.ifdown = xfrm6_dst_ifdown,
 	.local_out = __ip6_local_out,
-	.gc_thresh = 1024,
+	.gc_thresh = 32768,
 };
 
 static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 8b03028cca69..227aa11e8409 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -845,8 +845,13 @@ xt_replace_table(struct xt_table *table,
 		return NULL;
 	}
 
-	table->private = newinfo;
 	newinfo->initial_entries = private->initial_entries;
+	/*
+	 * Ensure contents of newinfo are visible before assigning to
+	 * private.
+	 */
+	smp_wmb();
+	table->private = newinfo;
 
 	/*
 	 * Even though table entries have now been swapped, other CPU's
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
index 1e2fae32f81b..ed00fef58996 100644
--- a/net/netfilter/xt_NFQUEUE.c
+++ b/net/netfilter/xt_NFQUEUE.c
@@ -147,6 +147,7 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_NFQ_info_v3 *info = par->targinfo;
 	u32 queue = info->queuenum;
+	int ret;
 
 	if (info->queues_total > 1) {
 		if (info->flags & NFQ_FLAG_CPU_FANOUT) {
@@ -157,7 +158,11 @@ nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par)
 			queue = nfqueue_hash(skb, par);
 	}
 
-	return NF_QUEUE_NR(queue);
+	ret = NF_QUEUE_NR(queue);
+	if (info->flags & NFQ_FLAG_BYPASS)
+		ret |= NF_VERDICT_FLAG_QUEUE_BYPASS;
+
+	return ret;
 }
 
 static struct xt_target nfqueue_tg_reg[] __read_mostly = {
diff --git a/net/openvswitch/dp_notify.c b/net/openvswitch/dp_notify.c
index c3235675f359..5c2dab276109 100644
--- a/net/openvswitch/dp_notify.c
+++ b/net/openvswitch/dp_notify.c
@@ -65,8 +65,7 @@ void ovs_dp_notify_wq(struct work_struct *work)
 				continue;
 
 			netdev_vport = netdev_vport_priv(vport);
-			if (netdev_vport->dev->reg_state == NETREG_UNREGISTERED ||
-			    netdev_vport->dev->reg_state == NETREG_UNREGISTERING)
+			if (!(netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH))
 				dp_detach_port_notify(vport);
 		}
 	}
@@ -88,6 +87,10 @@ static int dp_device_event(struct notifier_block *unused, unsigned long event,
 		return NOTIFY_DONE;
 
 	if (event == NETDEV_UNREGISTER) {
+		/* upper_dev_unlink and decrement promisc immediately */
+		ovs_netdev_detach_dev(vport);
+
+		/* schedule vport destroy, dev_put and genl notification */
 		ovs_net = net_generic(dev_net(dev), ovs_net_id);
 		queue_work(system_wq, &ovs_net->dp_notify_work);
 	}
diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c
index 09d93c13cfd6..d21f77d875ba 100644
--- a/net/openvswitch/vport-netdev.c
+++ b/net/openvswitch/vport-netdev.c
@@ -150,15 +150,25 @@ static void free_port_rcu(struct rcu_head *rcu)
 	ovs_vport_free(vport_from_priv(netdev_vport));
 }
 
-static void netdev_destroy(struct vport *vport)
+void ovs_netdev_detach_dev(struct vport *vport)
 {
 	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
 
-	rtnl_lock();
+	ASSERT_RTNL();
 	netdev_vport->dev->priv_flags &= ~IFF_OVS_DATAPATH;
 	netdev_rx_handler_unregister(netdev_vport->dev);
-	netdev_upper_dev_unlink(netdev_vport->dev, get_dpdev(vport->dp));
+	netdev_upper_dev_unlink(netdev_vport->dev,
+				netdev_master_upper_dev_get(netdev_vport->dev));
 	dev_set_promiscuity(netdev_vport->dev, -1);
+}
+
+static void netdev_destroy(struct vport *vport)
+{
+	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
+
+	rtnl_lock();
+	if (netdev_vport->dev->priv_flags & IFF_OVS_DATAPATH)
+		ovs_netdev_detach_dev(vport);
 	rtnl_unlock();
 
 	call_rcu(&netdev_vport->rcu, free_port_rcu);
diff --git a/net/openvswitch/vport-netdev.h b/net/openvswitch/vport-netdev.h
index dd298b5c5cdb..8df01c1127e5 100644
--- a/net/openvswitch/vport-netdev.h
+++ b/net/openvswitch/vport-netdev.h
@@ -39,5 +39,6 @@ netdev_vport_priv(const struct vport *vport)
 }
 
 const char *ovs_netdev_get_name(const struct vport *);
+void ovs_netdev_detach_dev(struct vport *);
 
 #endif /* vport_netdev.h */
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index a9dfdda9ed1d..fdc041c57853 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -255,6 +255,7 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
 			     f->socket_hash != sk->sk_hash)) {
 			f->credit = q->initial_quantum;
 			f->socket_hash = sk->sk_hash;
+			f->time_next_packet = 0ULL;
 		}
 		return f;
 	}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index f6334aa19151..7567e6f1a920 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -279,7 +279,9 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
 		sctp_v6_to_addr(&dst_saddr, &fl6->saddr, htons(bp->port));
 		rcu_read_lock();
 		list_for_each_entry_rcu(laddr, &bp->address_list, list) {
-			if (!laddr->valid || (laddr->state != SCTP_ADDR_SRC))
+			if (!laddr->valid || laddr->state == SCTP_ADDR_DEL ||
+			    (laddr->state != SCTP_ADDR_SRC &&
+			     !asoc->src_out_of_asoc_ok))
 				continue;
 
 			/* Do not compare against v4 addrs */
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 666c66842799..1a6eef39ab2f 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -860,7 +860,6 @@ static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds,
 	    (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK))
 		return;
 
-	BUG_ON(asoc->peer.primary_path == NULL);
 	sctp_unhash_established(asoc);
 	sctp_association_free(asoc);
 }
diff --git a/net/x25/Kconfig b/net/x25/Kconfig
index c959312c45e3..e2fa133f9fba 100644
--- a/net/x25/Kconfig
+++ b/net/x25/Kconfig
@@ -16,8 +16,8 @@ config X25
 	  if you want that) and the lower level data link layer protocol LAPB
 	  (say Y to "LAPB Data Link Driver" below if you want that).
 
-	  You can read more about X.25 at <http://www.sangoma.com/x25.htm> and
-	  <http://www.cisco.com/univercd/cc/td/doc/product/software/ios11/cbook/cx25.htm>.
+	  You can read more about X.25 at <http://www.sangoma.com/tutorials/x25/> and
+	  <http://docwiki.cisco.com/wiki/X.25>.
 	  Information about X.25 for Linux is contained in the files
 	  <file:Documentation/networking/x25.txt> and
 	  <file:Documentation/networking/x25-iface.txt>.
diff --git a/net/xfrm/xfrm_ipcomp.c b/net/xfrm/xfrm_ipcomp.c
index b943c7fc5ed2..ccfdc7115a83 100644
--- a/net/xfrm/xfrm_ipcomp.c
+++ b/net/xfrm/xfrm_ipcomp.c
@@ -141,14 +141,14 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
 	const int plen = skb->len;
 	int dlen = IPCOMP_SCRATCH_SIZE;
 	u8 *start = skb->data;
-	const int cpu = get_cpu();
-	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
-	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
+	struct crypto_comp *tfm;
+	u8 *scratch;
 	int err;
 
 	local_bh_disable();
+	scratch = *this_cpu_ptr(ipcomp_scratches);
+	tfm = *this_cpu_ptr(ipcd->tfms);
 	err = crypto_comp_compress(tfm, start, plen, scratch, &dlen);
-	local_bh_enable();
 	if (err)
 		goto out;
 
@@ -158,13 +158,13 @@ static int ipcomp_compress(struct xfrm_state *x, struct sk_buff *skb)
 	}
 
 	memcpy(start + sizeof(struct ip_comp_hdr), scratch, dlen);
-	put_cpu();
+	local_bh_enable();
 
 	pskb_trim(skb, dlen + sizeof(struct ip_comp_hdr));
 	return 0;
 
 out:
-	put_cpu();
+	local_bh_enable();
 	return err;
 }
 