Diffstat (limited to 'net/ipv4/ip_tunnel.c')
-rw-r--r--	net/ipv4/ip_tunnel.c	82
1 file changed, 15 insertions(+), 67 deletions(-)
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index bd28f386bd02..78a89e61925d 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -93,83 +93,32 @@ static void tunnel_dst_reset(struct ip_tunnel *t)
 	tunnel_dst_set(t, NULL);
 }
 
-static void tunnel_dst_reset_all(struct ip_tunnel *t)
+void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
 {
 	int i;
 
 	for_each_possible_cpu(i)
 		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
 }
+EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
 
-static struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
+static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 {
 	struct dst_entry *dst;
 
 	rcu_read_lock();
 	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
-	if (dst)
+	if (dst) {
+		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+			rcu_read_unlock();
+			tunnel_dst_reset(t);
+			return NULL;
+		}
 		dst_hold(dst);
-	rcu_read_unlock();
-	return dst;
-}
-
-static struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
-{
-	struct dst_entry *dst = tunnel_dst_get(t);
-
-	if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-		tunnel_dst_reset(t);
-		return NULL;
-	}
-
-	return dst;
-}
-
-/* Often modified stats are per cpu, other are shared (netdev->stats) */
-struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
-						struct rtnl_link_stats64 *tot)
-{
-	int i;
-
-	for_each_possible_cpu(i) {
-		const struct pcpu_sw_netstats *tstats =
-					   per_cpu_ptr(dev->tstats, i);
-		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
-		unsigned int start;
-
-		do {
-			start = u64_stats_fetch_begin_bh(&tstats->syncp);
-			rx_packets = tstats->rx_packets;
-			tx_packets = tstats->tx_packets;
-			rx_bytes = tstats->rx_bytes;
-			tx_bytes = tstats->tx_bytes;
-		} while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
-
-		tot->rx_packets += rx_packets;
-		tot->tx_packets += tx_packets;
-		tot->rx_bytes += rx_bytes;
-		tot->tx_bytes += tx_bytes;
-	}
-
-	tot->multicast = dev->stats.multicast;
-
-	tot->rx_crc_errors = dev->stats.rx_crc_errors;
-	tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
-	tot->rx_length_errors = dev->stats.rx_length_errors;
-	tot->rx_frame_errors = dev->stats.rx_frame_errors;
-	tot->rx_errors = dev->stats.rx_errors;
-
-	tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
-	tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
-	tot->tx_dropped = dev->stats.tx_dropped;
-	tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
-	tot->tx_errors = dev->stats.tx_errors;
-
-	tot->collisions = dev->stats.collisions;
-
-	return tot;
-}
-EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
+	}
+	rcu_read_unlock();
+	return (struct rtable *)dst;
+}
 
 static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
 				__be16 flags, __be32 key)
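
Note: the hunk above folds the old tunnel_dst_get()/tunnel_dst_check() pair into a single tunnel_rtable_get() that validates the cached dst under rcu_read_lock() and hands back a struct rtable directly. The sketch below is illustrative only, modeled on the ip_tunnel_xmit() hunk further down; the example_tunnel_route() wrapper and the tunnel_dst_set() refill call are assumptions about the surrounding code, not part of this patch.

	/* Illustrative sketch only: how a connected tunnel might consume the
	 * per-cpu cached route after this change. tunnel_rtable_get() returns
	 * NULL when the cache is empty or the dst is obsolete, so the caller
	 * falls back to a fresh route lookup.
	 */
	static struct rtable *example_tunnel_route(struct ip_tunnel *tunnel,
						   struct flowi4 *fl4,
						   bool connected)
	{
		struct rtable *rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;

		if (!rt) {
			rt = ip_route_output_key(tunnel->net, fl4);
			if (IS_ERR(rt))
				return NULL;
			if (connected)
				tunnel_dst_set(tunnel, &rt->dst); /* assumed helper from this file */
		}
		return rt;
	}
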
@@ -584,7 +533,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	struct flowi4 fl4;
 	u8 tos, ttl;
 	__be16 df;
-	struct rtable *rt = NULL;	/* Route to the other host */
+	struct rtable *rt;		/* Route to the other host */
 	unsigned int max_headroom;	/* The extra header space needed */
 	__be32 dst;
 	int err;
@@ -657,8 +606,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
 			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
 
-	if (connected)
-		rt = (struct rtable *)tunnel_dst_check(tunnel, 0);
+	rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
 
 	if (!rt) {
 		rt = ip_route_output_key(tunnel->net, &fl4);
@@ -766,7 +714,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
 		if (set_mtu)
 			dev->mtu = mtu;
 	}
-	tunnel_dst_reset_all(t);
+	ip_tunnel_dst_reset_all(t);
 	netdev_state_change(dev);
 }
 
@@ -1095,7 +1043,7 @@ void ip_tunnel_uninit(struct net_device *dev)
 	if (itn->fb_tunnel_dev != dev)
 		ip_tunnel_del(netdev_priv(dev));
 
-	tunnel_dst_reset_all(tunnel);
+	ip_tunnel_dst_reset_all(tunnel);
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
 
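
With tunnel_dst_reset_all() renamed and exported as ip_tunnel_dst_reset_all(), tunnel drivers outside this file can flush every CPU's cached route when a tunnel's parameters change. The sketch below is a minimal illustration modeled on the ip_tunnel_update() hunk above; the example_endpoints_changed() helper and the exact fields it touches are assumptions, not code from this patch.

	/* Illustrative sketch only: invalidate the per-cpu dst cache after the
	 * tunnel endpoints are rewritten, so the next transmit re-resolves
	 * the route instead of reusing a dst for the old destination.
	 */
	static void example_endpoints_changed(struct ip_tunnel *t,
					      struct net_device *dev,
					      const struct ip_tunnel_parm *p)
	{
		t->parms.iph.saddr = p->iph.saddr;
		t->parms.iph.daddr = p->iph.daddr;

		/* Cached dsts may still point at the old endpoints; drop them all. */
		ip_tunnel_dst_reset_all(t);
		netdev_state_change(dev);
	}
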