| author    | Tom Herbert <therbert@google.com>     | 2014-01-02 14:48:33 -0500 |
|-----------|---------------------------------------|---------------------------|
| committer | David S. Miller <davem@davemloft.net> | 2014-01-03 19:40:57 -0500 |
| commit    | 9a4aa9af447f784f0a47313c8dcb79ac63442cf7 | |
| tree      | e5e6d38f79704d282679a06d6bf00cf819a85d7a /net/ipv4/ip_tunnel.c | |
| parent    | 7d442fab0a6777fd7612cfcada32ea859553d370 | |
ipv4: Use percpu Cache route in IP tunnels
A percpu route cache eliminates sharing of the dst refcnt between CPUs.
Signed-off-by: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/ip_tunnel.c')
-rw-r--r-- | net/ipv4/ip_tunnel.c | 44 |
1 file changed, 32 insertions(+), 12 deletions(-)
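To make the shape of the change easier to follow before reading the hunks: each tunnel now carries a per-CPU `struct ip_tunnel_dst` holding an RCU-managed `dst` pointer plus a spinlock for writers, so the transmit path only touches its own CPU's cached entry. The sketch below is a rough, hypothetical userspace analogy of that pattern (the slot array, the pthread mutexes, and the `cache_*`/`dst_put` names are illustration only; the kernel code in the patch uses `alloc_percpu()`, RCU readers, and `spin_lock_bh()`):

```c
/*
 * Hypothetical userspace analogy of the per-CPU dst cache added by this
 * patch.  The kernel uses alloc_percpu(), RCU readers and spin_lock_bh();
 * here an array of slots with one pthread mutex each stands in for that.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define NR_SLOTS 4                       /* stand-in for the possible-CPU count */

struct dst_entry {                       /* toy stand-in for the kernel's dst_entry */
	atomic_int refcnt;
	int id;
};

struct tunnel_dst {                      /* mirrors struct ip_tunnel_dst in the patch */
	pthread_mutex_t lock;            /* kernel: spinlock taken by writers */
	struct dst_entry *dst;           /* kernel: RCU-managed pointer */
};

static struct tunnel_dst dst_cache[NR_SLOTS];    /* kernel: alloc_percpu() */

static void dst_put(struct dst_entry *dst)       /* kernel: dst_release() */
{
	if (atomic_fetch_sub(&dst->refcnt, 1) == 1)
		free(dst);
}

static void cache_init(void)             /* mirrors the init loop in ip_tunnel_init() */
{
	for (int i = 0; i < NR_SLOTS; i++) {
		dst_cache[i].dst = NULL;
		pthread_mutex_init(&dst_cache[i].lock, NULL);
	}
}

/* Writer: swap one slot's cached entry and drop the reference it held. */
static void cache_set(int slot, struct dst_entry *dst)   /* ~__tunnel_dst_set() */
{
	struct tunnel_dst *idst = &dst_cache[slot];
	struct dst_entry *old;

	pthread_mutex_lock(&idst->lock);
	old = idst->dst;
	idst->dst = dst;
	pthread_mutex_unlock(&idst->lock);
	if (old)
		dst_put(old);
}

/* Reader: take an extra reference on this slot's cached entry, if any. */
static struct dst_entry *cache_get(int slot)             /* ~tunnel_dst_get() */
{
	struct tunnel_dst *idst = &dst_cache[slot];
	struct dst_entry *dst;

	pthread_mutex_lock(&idst->lock);  /* the kernel uses rcu_read_lock() instead */
	dst = idst->dst;
	if (dst)
		atomic_fetch_add(&dst->refcnt, 1);        /* kernel: dst_hold() */
	pthread_mutex_unlock(&idst->lock);
	return dst;
}

static void cache_reset_all(void)        /* mirrors tunnel_dst_reset_all() */
{
	for (int i = 0; i < NR_SLOTS; i++)
		cache_set(i, NULL);
}

int main(void)
{
	struct dst_entry *d = calloc(1, sizeof(*d));

	atomic_init(&d->refcnt, 1);      /* the cache owns this initial reference */
	d->id = 42;

	cache_init();
	cache_set(0, d);                 /* slot 0 now caches the entry */

	struct dst_entry *got = cache_get(0);
	printf("slot 0 -> dst %d, refcnt %d\n", got->id, atomic_load(&got->refcnt));
	dst_put(got);                    /* drop the reference cache_get() took */

	cache_reset_all();               /* drops the cache's reference, freeing d */
	return 0;
}
```

Because each CPU caches its own dst entry, that entry's refcount is only manipulated from that CPU in the common case, which is the cross-CPU refcnt sharing the commit message says the patch eliminates; the kernel reader additionally avoids any lock by using RCU, as the hunks below show.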
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 27d756f8f870..e2c9cff26eb5 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -68,23 +68,24 @@ static unsigned int ip_tunnel_hash(struct ip_tunnel_net *itn,
                             IP_TNL_HASH_BITS);
 }
 
-static inline void __tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
+static inline void __tunnel_dst_set(struct ip_tunnel_dst *idst,
+                                    struct dst_entry *dst)
 {
         struct dst_entry *old_dst;
 
         if (dst && (dst->flags & DST_NOCACHE))
                 dst = NULL;
 
-        spin_lock_bh(&t->dst_lock);
-        old_dst = rcu_dereference_raw(t->dst_cache);
-        rcu_assign_pointer(t->dst_cache, dst);
+        spin_lock_bh(&idst->lock);
+        old_dst = rcu_dereference(idst->dst);
+        rcu_assign_pointer(idst->dst, dst);
         dst_release(old_dst);
-        spin_unlock_bh(&t->dst_lock);
+        spin_unlock_bh(&idst->lock);
 }
 
 static inline void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
 {
-        __tunnel_dst_set(t, dst);
+        __tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
 }
 
 static inline void tunnel_dst_reset(struct ip_tunnel *t)
@@ -92,12 +93,20 @@ static inline void tunnel_dst_reset(struct ip_tunnel *t)
         tunnel_dst_set(t, NULL);
 }
 
+static void tunnel_dst_reset_all(struct ip_tunnel *t)
+{
+        int i;
+
+        for_each_possible_cpu(i)
+                __tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
+}
+
 static inline struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
 {
         struct dst_entry *dst;
 
         rcu_read_lock();
-        dst = rcu_dereference(t->dst_cache);
+        dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
         if (dst)
                 dst_hold(dst);
         rcu_read_unlock();
@@ -755,7 +764,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
                 if (set_mtu)
                         dev->mtu = mtu;
         }
-        tunnel_dst_reset(t);
+        tunnel_dst_reset_all(t);
         netdev_state_change(dev);
 }
 
@@ -871,6 +880,7 @@ static void ip_tunnel_dev_free(struct net_device *dev)
         struct ip_tunnel *tunnel = netdev_priv(dev);
 
         gro_cells_destroy(&tunnel->gro_cells);
+        free_percpu(tunnel->dst_cache);
         free_percpu(dev->tstats);
         free_netdev(dev);
 }
@@ -1049,8 +1059,21 @@ int ip_tunnel_init(struct net_device *dev)
                 u64_stats_init(&ipt_stats->syncp);
         }
 
+        tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+        if (!tunnel->dst_cache) {
+                free_percpu(dev->tstats);
+                return -ENOMEM;
+        }
+
+        for_each_possible_cpu(i) {
+                struct ip_tunnel_dst *idst = per_cpu_ptr(tunnel->dst_cache, i);
+                idst-> dst = NULL;
+                spin_lock_init(&idst->lock);
+        }
+
         err = gro_cells_init(&tunnel->gro_cells, dev);
         if (err) {
+                free_percpu(tunnel->dst_cache);
                 free_percpu(dev->tstats);
                 return err;
         }
@@ -1061,9 +1084,6 @@ int ip_tunnel_init(struct net_device *dev)
         iph->version = 4;
         iph->ihl = 5;
 
-        tunnel->dst_cache = NULL;
-        spin_lock_init(&tunnel->dst_lock);
-
         return 0;
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_init);
@@ -1079,7 +1099,7 @@ void ip_tunnel_uninit(struct net_device *dev)
         if (itn->fb_tunnel_dev != dev)
                 ip_tunnel_del(netdev_priv(dev));
 
-        tunnel_dst_reset(tunnel);
+        tunnel_dst_reset_all(tunnel);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
 