Diffstat (limited to 'net/core')
 net/core/dev.c            | 95
 net/core/dev_addr_lists.c |  4
 net/core/dst.c            | 15
 net/core/fib_rules.c      |  4
 net/core/filter.c         |  2
 net/core/neighbour.c      | 40
 net/core/net-sysfs.c      |  8
 net/core/netpoll.c        |  4
 net/core/rtnetlink.c      |  1
 net/core/skbuff.c         |  1
 net/core/sock.c           |  4
 11 files changed, 121 insertions(+), 57 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 17d67b579beb..c2442b46646e 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -133,6 +133,8 @@
 #include <linux/pci.h>
 #include <linux/inetdevice.h>
 #include <linux/cpu_rmap.h>
+#include <linux/if_tunnel.h>
+#include <linux/if_pppox.h>
 
 #include "net-sysfs.h"
 
@@ -2519,24 +2521,29 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 
 /*
  * __skb_get_rxhash: calculate a flow hash based on src/dst addresses
- * and src/dst port numbers. Returns a non-zero hash number on success
- * and 0 on failure.
+ * and src/dst port numbers. Sets rxhash in skb to non-zero hash value
+ * on success, zero indicates no valid hash. Also, sets l4_rxhash in skb
+ * if hash is a canonical 4-tuple hash over transport ports.
  */
-__u32 __skb_get_rxhash(struct sk_buff *skb)
+void __skb_get_rxhash(struct sk_buff *skb)
 {
 	int nhoff, hash = 0, poff;
 	const struct ipv6hdr *ip6;
 	const struct iphdr *ip;
+	const struct vlan_hdr *vlan;
 	u8 ip_proto;
-	u32 addr1, addr2, ihl;
+	u32 addr1, addr2;
+	u16 proto;
 	union {
 		u32 v32;
 		u16 v16[2];
 	} ports;
 
 	nhoff = skb_network_offset(skb);
+	proto = skb->protocol;
 
-	switch (skb->protocol) {
+again:
+	switch (proto) {
 	case __constant_htons(ETH_P_IP):
 		if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
 			goto done;
@@ -2548,7 +2555,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
 		ip_proto = ip->protocol;
 		addr1 = (__force u32) ip->saddr;
 		addr2 = (__force u32) ip->daddr;
-		ihl = ip->ihl;
+		nhoff += ip->ihl * 4;
 		break;
 	case __constant_htons(ETH_P_IPV6):
 		if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
@@ -2558,20 +2565,62 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
 		ip_proto = ip6->nexthdr;
 		addr1 = (__force u32) ip6->saddr.s6_addr32[3];
 		addr2 = (__force u32) ip6->daddr.s6_addr32[3];
-		ihl = (40 >> 2);
+		nhoff += 40;
 		break;
+	case __constant_htons(ETH_P_8021Q):
+		if (!pskb_may_pull(skb, sizeof(*vlan) + nhoff))
+			goto done;
+		vlan = (const struct vlan_hdr *) (skb->data + nhoff);
+		proto = vlan->h_vlan_encapsulated_proto;
+		nhoff += sizeof(*vlan);
+		goto again;
+	case __constant_htons(ETH_P_PPP_SES):
+		if (!pskb_may_pull(skb, PPPOE_SES_HLEN + nhoff))
+			goto done;
+		proto = *((__be16 *) (skb->data + nhoff +
+				      sizeof(struct pppoe_hdr)));
+		nhoff += PPPOE_SES_HLEN;
+		goto again;
 	default:
 		goto done;
 	}
 
+	switch (ip_proto) {
+	case IPPROTO_GRE:
+		if (pskb_may_pull(skb, nhoff + 16)) {
+			u8 *h = skb->data + nhoff;
+			__be16 flags = *(__be16 *)h;
+
+			/*
+			 * Only look inside GRE if version zero and no
+			 * routing
+			 */
+			if (!(flags & (GRE_VERSION|GRE_ROUTING))) {
+				proto = *(__be16 *)(h + 2);
+				nhoff += 4;
+				if (flags & GRE_CSUM)
+					nhoff += 4;
+				if (flags & GRE_KEY)
+					nhoff += 4;
+				if (flags & GRE_SEQ)
+					nhoff += 4;
+				goto again;
+			}
+		}
+		break;
+	default:
+		break;
+	}
+
 	ports.v32 = 0;
 	poff = proto_ports_offset(ip_proto);
 	if (poff >= 0) {
-		nhoff += ihl * 4 + poff;
+		nhoff += poff;
 		if (pskb_may_pull(skb, nhoff + 4)) {
 			ports.v32 = * (__force u32 *) (skb->data + nhoff);
 			if (ports.v16[1] < ports.v16[0])
 				swap(ports.v16[0], ports.v16[1]);
+			skb->l4_rxhash = 1;
 		}
 	}
 
@@ -2584,7 +2633,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
 		hash = 1;
 
 done:
-	return hash;
+	skb->rxhash = hash;
 }
 EXPORT_SYMBOL(__skb_get_rxhash);
 
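Note on the hunk above: the hash becomes a side effect stored in skb->rxhash rather than a return value, and l4_rxhash is set only when a real transport port pair went into it. The port canonicalization is worth illustrating in isolation; the following is a minimal userspace sketch (illustration only, not the kernel function, which additionally hashes addr1/addr2 with jhash) of why ordering the two 16-bit ports inside one 32-bit word makes the resulting hash identical for both directions of a flow:

/* Minimal userspace sketch of the symmetric port handling in
 * __skb_get_rxhash (illustration only, not the kernel code). */
#include <stdint.h>
#include <stdio.h>

static uint32_t canonical_ports(uint16_t sport, uint16_t dport)
{
	union {
		uint32_t v32;
		uint16_t v16[2];
	} ports;

	ports.v16[0] = sport;
	ports.v16[1] = dport;
	/* order the halves so A->B and B->A hash identically */
	if (ports.v16[1] < ports.v16[0]) {
		uint16_t tmp = ports.v16[0];
		ports.v16[0] = ports.v16[1];
		ports.v16[1] = tmp;
	}
	return ports.v32;
}

int main(void)
{
	/* both directions of the same flow collapse to one value */
	printf("%u %u\n", canonical_ports(80, 50000),
	       canonical_ports(50000, 80));
	return 0;
}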
@@ -2673,13 +2722,13 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	map = rcu_dereference(rxqueue->rps_map);
 	if (map) {
 		if (map->len == 1 &&
-		    !rcu_dereference_raw(rxqueue->rps_flow_table)) {
+		    !rcu_access_pointer(rxqueue->rps_flow_table)) {
 			tcpu = map->cpus[0];
 			if (cpu_online(tcpu))
 				cpu = tcpu;
 			goto done;
 		}
-	} else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
+	} else if (!rcu_access_pointer(rxqueue->rps_flow_table)) {
 		goto done;
 	}
 
@@ -3094,8 +3143,8 @@ void netdev_rx_handler_unregister(struct net_device *dev)
 {
 
 	ASSERT_RTNL();
-	rcu_assign_pointer(dev->rx_handler, NULL);
-	rcu_assign_pointer(dev->rx_handler_data, NULL);
+	RCU_INIT_POINTER(dev->rx_handler, NULL);
+	RCU_INIT_POINTER(dev->rx_handler_data, NULL);
 }
 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
 
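The rcu_assign_pointer() to RCU_INIT_POINTER() conversions throughout this series all store NULL (or publish into a structure no reader can yet see), so the write barrier that rcu_assign_pointer() implies orders nothing useful. A rough userspace analogy using C11 atomics (illustrative only, not the kernel implementation):

/* Userspace analogy (not kernel code): rcu_assign_pointer is a
 * release store, so readers see a fully initialised object behind
 * the pointer; RCU_INIT_POINTER is a plain store, safe when the
 * stored value is NULL because there is no payload to order. */
#include <stdatomic.h>
#include <stdlib.h>

struct obj { int data; };
static _Atomic(struct obj *) gp;

static void publish(struct obj *p)
{
	p->data = 42;	/* must be visible before the pointer is */
	atomic_store_explicit(&gp, p, memory_order_release);
}

static void retract(void)
{
	/* NULL carries no payload, a relaxed store suffices */
	atomic_store_explicit(&gp, NULL, memory_order_relaxed);
}

int main(void)
{
	struct obj *p = malloc(sizeof(*p));
	publish(p);
	retract();
	free(p);
	return 0;
}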
@@ -4489,9 +4538,7 @@ void __dev_set_rx_mode(struct net_device *dev)
 	if (!netif_device_present(dev))
 		return;
 
-	if (ops->ndo_set_rx_mode)
-		ops->ndo_set_rx_mode(dev);
-	else {
+	if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
 		/* Unicast addresses changes may only happen under the rtnl,
 		 * therefore calling __dev_set_promiscuity here is safe.
 		 */
@@ -4502,10 +4549,10 @@ void __dev_set_rx_mode(struct net_device *dev)
 			__dev_set_promiscuity(dev, -1);
 			dev->uc_promisc = false;
 		}
-
-		if (ops->ndo_set_multicast_list)
-			ops->ndo_set_multicast_list(dev);
 	}
+
+	if (ops->ndo_set_rx_mode)
+		ops->ndo_set_rx_mode(dev);
 }
 
 void dev_set_rx_mode(struct net_device *dev)
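After this change a driver exposes a single ndo_set_rx_mode callback that programs both its unicast and multicast filters, and the core falls back to promiscuous mode only for devices that do not advertise IFF_UNICAST_FLT. A hypothetical driver fragment (sketch only; the foo_* names are made up for illustration) might look like:

/* Hypothetical driver callback merging the old set_multicast_list
 * duties into ndo_set_rx_mode; foo_* helpers are illustrative. */
static void foo_set_rx_mode(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	foo_clear_filters(dev);			/* hypothetical helper */
	netdev_for_each_uc_addr(ha, dev)	/* needs IFF_UNICAST_FLT */
		foo_add_filter(dev, ha->addr);
	netdev_for_each_mc_addr(ha, dev)
		foo_add_filter(dev, ha->addr);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_set_rx_mode = foo_set_rx_mode,
	/* .ndo_set_multicast_list is gone */
};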
@@ -4855,7 +4902,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
 		return -EOPNOTSUPP;
 
 	case SIOCADDMULTI:
-		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+		if (!ops->ndo_set_rx_mode ||
 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
 			return -EINVAL;
 		if (!netif_device_present(dev))
@@ -4863,7 +4910,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
 		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
 
 	case SIOCDELMULTI:
-		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+		if (!ops->ndo_set_rx_mode ||
 		    ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
 			return -EINVAL;
 		if (!netif_device_present(dev))
@@ -5727,8 +5774,8 @@ void netdev_run_todo(void)
 
 	/* paranoia */
 	BUG_ON(netdev_refcnt_read(dev));
-	WARN_ON(rcu_dereference_raw(dev->ip_ptr));
-	WARN_ON(rcu_dereference_raw(dev->ip6_ptr));
+	WARN_ON(rcu_access_pointer(dev->ip_ptr));
+	WARN_ON(rcu_access_pointer(dev->ip6_ptr));
 	WARN_ON(dev->dn_ptr);
 
 	if (dev->destructor)
@@ -5932,7 +5979,7 @@ void free_netdev(struct net_device *dev)
 	kfree(dev->_rx);
 #endif
 
-	kfree(rcu_dereference_raw(dev->ingress_queue));
+	kfree(rcu_dereference_protected(dev->ingress_queue, 1));
 
 	/* Flush device addresses */
 	dev_addr_flush(dev);
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index e2e66939ed00..283d1b863876 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -591,8 +591,8 @@ EXPORT_SYMBOL(dev_mc_del_global);
  * addresses that have no users left. The source device must be
  * locked by netif_tx_lock_bh.
  *
- * This function is intended to be called from the dev->set_multicast_list
- * or dev->set_rx_mode function of layered software devices.
+ * This function is intended to be called from the ndo_set_rx_mode
+ * function of layered software devices.
  */
 int dev_mc_sync(struct net_device *to, struct net_device *from)
 {
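The updated kerneldoc matches how stacked devices actually use this helper. A hypothetical layered-device callback (foo_* names are illustrative) would sync its address lists down to the lower device from within its own ndo_set_rx_mode:

/* Sketch of a layered device's rx-mode callback (illustration):
 * propagate the upper device's address lists to the real device. */
static void foo_vlan_set_rx_mode(struct net_device *dev)
{
	struct net_device *lower = foo_get_lower_dev(dev); /* hypothetical */

	dev_mc_sync(lower, dev);	/* push multicast addresses down */
	dev_uc_sync(lower, dev);	/* unicast counterpart */
}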
diff --git a/net/core/dst.c b/net/core/dst.c
index 14b33baf0733..d5e2c4c09107 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -171,7 +171,7 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 	dst_init_metrics(dst, dst_default_metrics, true);
 	dst->expires = 0UL;
 	dst->path = dst;
-	dst->_neighbour = NULL;
+	RCU_INIT_POINTER(dst->_neighbour, NULL);
 #ifdef CONFIG_XFRM
 	dst->xfrm = NULL;
 #endif
@@ -229,11 +229,11 @@ struct dst_entry *dst_destroy(struct dst_entry * dst)
 	smp_rmb();
 
 again:
-	neigh = dst->_neighbour;
+	neigh = rcu_dereference_protected(dst->_neighbour, 1);
 	child = dst->child;
 
 	if (neigh) {
-		dst->_neighbour = NULL;
+		RCU_INIT_POINTER(dst->_neighbour, NULL);
 		neigh_release(neigh);
 	}
 
@@ -360,14 +360,19 @@ static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 	if (!unregister) {
 		dst->input = dst->output = dst_discard;
 	} else {
+		struct neighbour *neigh;
+
 		dst->dev = dev_net(dst->dev)->loopback_dev;
 		dev_hold(dst->dev);
 		dev_put(dev);
-		if (dst->_neighbour && dst->_neighbour->dev == dev) {
-			dst->_neighbour->dev = dst->dev;
+		rcu_read_lock();
+		neigh = dst_get_neighbour(dst);
+		if (neigh && neigh->dev == dev) {
+			neigh->dev = dst->dev;
 			dev_hold(dst->dev);
 			dev_put(dev);
 		}
+		rcu_read_unlock();
 	}
 }
 
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index e7ab0c0285b5..67c5c288cd80 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -487,7 +487,7 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
 	if (ops->nr_goto_rules > 0) {
 		list_for_each_entry(tmp, &ops->rules_list, list) {
 			if (rtnl_dereference(tmp->ctarget) == rule) {
-				rcu_assign_pointer(tmp->ctarget, NULL);
+				RCU_INIT_POINTER(tmp->ctarget, NULL);
 				ops->unresolved_rules++;
 			}
 		}
@@ -545,7 +545,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
 	frh->flags = rule->flags;
 
 	if (rule->action == FR_ACT_GOTO &&
-	    rcu_dereference_raw(rule->ctarget) == NULL)
+	    rcu_access_pointer(rule->ctarget) == NULL)
 		frh->flags |= FIB_RULE_UNRESOLVED;
 
 	if (rule->iifname[0]) {
diff --git a/net/core/filter.c b/net/core/filter.c
index 36f975fa87cb..8fcc2d776e09 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -645,7 +645,7 @@ int sk_detach_filter(struct sock *sk)
 	filter = rcu_dereference_protected(sk->sk_filter,
 					   sock_owned_by_user(sk));
 	if (filter) {
-		rcu_assign_pointer(sk->sk_filter, NULL);
+		RCU_INIT_POINTER(sk->sk_filter, NULL);
 		sk_filter_uncharge(sk, filter);
 		ret = 0;
 	}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8fab9b0bb203..4002261f20d1 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -844,6 +844,19 @@ static void neigh_invalidate(struct neighbour *neigh)
 	skb_queue_purge(&neigh->arp_queue);
 }
 
+static void neigh_probe(struct neighbour *neigh)
+	__releases(neigh->lock)
+{
+	struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+	/* keep skb alive even if arp_queue overflows */
+	if (skb)
+		skb = skb_copy(skb, GFP_ATOMIC);
+	write_unlock(&neigh->lock);
+	neigh->ops->solicit(neigh, skb);
+	atomic_inc(&neigh->probes);
+	kfree_skb(skb);
+}
+
 /* Called when a timer expires for a neighbour entry. */
 
 static void neigh_timer_handler(unsigned long arg)
@@ -920,14 +933,7 @@ static void neigh_timer_handler(unsigned long arg)
 		neigh_hold(neigh);
 	}
 	if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
-		struct sk_buff *skb = skb_peek(&neigh->arp_queue);
-		/* keep skb alive even if arp_queue overflows */
-		if (skb)
-			skb = skb_copy(skb, GFP_ATOMIC);
-		write_unlock(&neigh->lock);
-		neigh->ops->solicit(neigh, skb);
-		atomic_inc(&neigh->probes);
-		kfree_skb(skb);
+		neigh_probe(neigh);
 	} else {
 out:
 		write_unlock(&neigh->lock);
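Factoring the probe sequence into neigh_probe() lets the timer path above and __neigh_event_send() below share it. The __releases(neigh->lock) annotation records the asymmetric locking contract for sparse; a sketch of the calling convention (illustration only):

	write_lock(&neigh->lock);
	/* ... update neighbour state under the lock ... */
	neigh_probe(neigh);	/* drops neigh->lock before soliciting */
	/* no unlock here: the helper already released it */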
@@ -942,7 +948,7 @@ out:
 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 {
 	int rc;
-	unsigned long now;
+	bool immediate_probe = false;
 
 	write_lock_bh(&neigh->lock);
 
@@ -950,14 +956,16 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 	if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
 		goto out_unlock_bh;
 
-	now = jiffies;
-
 	if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
 		if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
+			unsigned long next, now = jiffies;
+
 			atomic_set(&neigh->probes, neigh->parms->ucast_probes);
 			neigh->nud_state = NUD_INCOMPLETE;
-			neigh->updated = jiffies;
-			neigh_add_timer(neigh, now + 1);
+			neigh->updated = now;
+			next = now + max(neigh->parms->retrans_time, HZ/2);
+			neigh_add_timer(neigh, next);
+			immediate_probe = true;
 		} else {
 			neigh->nud_state = NUD_FAILED;
 			neigh->updated = jiffies;
@@ -989,7 +997,11 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 		rc = 1;
 	}
 out_unlock_bh:
-	write_unlock_bh(&neigh->lock);
+	if (immediate_probe)
+		neigh_probe(neigh);
+	else
+		write_unlock(&neigh->lock);
+	local_bh_enable();
 	return rc;
 }
 EXPORT_SYMBOL(__neigh_event_send);
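The net effect in __neigh_event_send() is that the first solicitation now goes out synchronously instead of waiting for a one-jiffy timer, and the retransmit timer is floored at half a second. Note the unlock bookkeeping: the function took the lock with write_lock_bh(), and neigh_probe() drops only the spinlock, so local_bh_enable() must run on both paths to pair with it. A tiny userspace illustration of the clamp arithmetic (the HZ and retrans_time values here are assumptions for the example):

/* Userspace illustration of: next = now + max(retrans_time, HZ/2).
 * A pathologically small retrans_time is floored at half a second. */
#include <stdio.h>

#define HZ 1000			/* assumed tick rate */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned long now = 0, retrans_time = 10;	/* 10ms, illustrative */
	unsigned long next = now + MAX(retrans_time, HZ / 2);
	printf("first retransmit at jiffy %lu\n", next);	/* prints 500 */
	return 0;
}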
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 1683e5db2f27..56e42ab7cbc6 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -712,13 +712,13 @@ static void rx_queue_release(struct kobject *kobj)
 	struct rps_dev_flow_table *flow_table;
 
 
-	map = rcu_dereference_raw(queue->rps_map);
+	map = rcu_dereference_protected(queue->rps_map, 1);
 	if (map) {
 		RCU_INIT_POINTER(queue->rps_map, NULL);
 		kfree_rcu(map, rcu);
 	}
 
-	flow_table = rcu_dereference_raw(queue->rps_flow_table);
+	flow_table = rcu_dereference_protected(queue->rps_flow_table, 1);
 	if (flow_table) {
 		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
 		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
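In teardown paths like rx_queue_release() no RCU reader can still hold a reference, so fetching the pointer with rcu_dereference_protected(ptr, 1) documents "update side, provably exclusive" rather than hiding behind the blanket rcu_dereference_raw(). Schematically (kernel RCU API, sketch only):

	/* update side, exclusive by construction: condition is simply 1 */
	map = rcu_dereference_protected(queue->rps_map, 1);
	RCU_INIT_POINTER(queue->rps_map, NULL);	/* plain NULL store */
	kfree_rcu(map, rcu);			/* reclaim after grace period */

	/* a reader, by contrast, must hold the read lock: */
	rcu_read_lock();
	map = rcu_dereference(queue->rps_map);
	/* ... use map ... */
	rcu_read_unlock();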
@@ -987,10 +987,10 @@ static ssize_t store_xps_map(struct netdev_queue *queue,
 	}
 
 	if (nonempty)
-		rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+		RCU_INIT_POINTER(dev->xps_maps, new_dev_maps);
 	else {
 		kfree(new_dev_maps);
-		rcu_assign_pointer(dev->xps_maps, NULL);
+		RCU_INIT_POINTER(dev->xps_maps, NULL);
 	}
 
 	if (dev_maps)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index adf84dd8c7b5..d676a561d983 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -760,7 +760,7 @@ int __netpoll_setup(struct netpoll *np)
 	}
 
 	/* last thing to do is link it to the net device structure */
-	rcu_assign_pointer(ndev->npinfo, npinfo);
+	RCU_INIT_POINTER(ndev->npinfo, npinfo);
 
 	return 0;
 
@@ -901,7 +901,7 @@ void __netpoll_cleanup(struct netpoll *np)
 	if (ops->ndo_netpoll_cleanup)
 		ops->ndo_netpoll_cleanup(np->dev);
 
-	rcu_assign_pointer(np->dev->npinfo, NULL);
+	RCU_INIT_POINTER(np->dev->npinfo, NULL);
 
 	/* avoid racing with NAPI reading npinfo */
 	synchronize_rcu_bh();
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 99d9e953fe39..39f8dd6a2821 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1604,7 +1604,6 @@ struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
 	dev_net_set(dev, net);
 	dev->rtnl_link_ops = ops;
 	dev->rtnl_link_state = RTNL_LINK_INITIALIZING;
-	dev->real_num_tx_queues = real_num_queues;
 
 	if (tb[IFLA_MTU])
 		dev->mtu = nla_get_u32(tb[IFLA_MTU]);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 27002dffe7ed..edb66f3e24f1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -529,6 +529,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->mac_header = old->mac_header;
 	skb_dst_copy(new, old);
 	new->rxhash = old->rxhash;
+	new->l4_rxhash = old->l4_rxhash;
 #ifdef CONFIG_XFRM
 	new->sp = secpath_get(old->sp);
 #endif
diff --git a/net/core/sock.c b/net/core/sock.c
index bc745d00ea4d..9997026b44b2 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -387,7 +387,7 @@ struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
 
 	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
 		sk_tx_queue_clear(sk);
-		rcu_assign_pointer(sk->sk_dst_cache, NULL);
+		RCU_INIT_POINTER(sk->sk_dst_cache, NULL);
 		dst_release(dst);
 		return NULL;
 	}
@@ -1158,7 +1158,7 @@ static void __sk_free(struct sock *sk)
 		       atomic_read(&sk->sk_wmem_alloc) == 0);
 	if (filter) {
 		sk_filter_uncharge(sk, filter);
-		rcu_assign_pointer(sk->sk_filter, NULL);
+		RCU_INIT_POINTER(sk->sk_filter, NULL);
 	}
 
 	sock_disable_timestamp(sk, SOCK_TIMESTAMP);