Diffstat (limited to 'net/core/dst.c')
-rw-r--r--  net/core/dst.c  |  57
1 file changed, 29 insertions(+), 28 deletions(-)
diff --git a/net/core/dst.c b/net/core/dst.c
index 91104d35de7d..9ccca038444f 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -19,6 +19,7 @@
 #include <linux/types.h>
 #include <net/net_namespace.h>
 #include <linux/sched.h>
+#include <linux/prefetch.h>
 
 #include <net/dst.h>
 
@@ -33,9 +34,6 @@
  * 3) This list is guarded by a mutex,
  *    so that the gc_task and dst_dev_event() can be synchronized.
  */
-#if RT_CACHE_DEBUG >= 2
-static atomic_t dst_total = ATOMIC_INIT(0);
-#endif
 
 /*
  * We want to keep lock & list close together
@@ -69,10 +67,6 @@ static void dst_gc_task(struct work_struct *work)
 	unsigned long expires = ~0L;
 	struct dst_entry *dst, *next, head;
 	struct dst_entry *last = &head;
-#if RT_CACHE_DEBUG >= 2
-	ktime_t time_start = ktime_get();
-	struct timespec elapsed;
-#endif
 
 	mutex_lock(&dst_gc_mutex);
 	next = dst_busy_list;
@@ -146,15 +140,6 @@ loop:
 
 	spin_unlock_bh(&dst_garbage.lock);
 	mutex_unlock(&dst_gc_mutex);
-#if RT_CACHE_DEBUG >= 2
-	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
-	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
-		" expires: %lu elapsed: %lu us\n",
-	    atomic_read(&dst_total), delayed, work_performed,
-	    expires,
-	    elapsed.tv_sec * USEC_PER_SEC +
-	      elapsed.tv_nsec / NSEC_PER_USEC);
-#endif
 }
 
 int dst_discard(struct sk_buff *skb)
@@ -166,7 +151,8 @@ EXPORT_SYMBOL(dst_discard);
 
 const u32 dst_default_metrics[RTAX_MAX];
 
-void *dst_alloc(struct dst_ops *ops, int initial_ref)
+void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
+		int initial_ref, int initial_obsolete, int flags)
 {
 	struct dst_entry *dst;
 
@@ -174,18 +160,36 @@ void *dst_alloc(struct dst_ops *ops, int initial_ref)
 		if (ops->gc(ops))
 			return NULL;
 	}
-	dst = kmem_cache_zalloc(ops->kmem_cachep, GFP_ATOMIC);
+	dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
 	if (!dst)
 		return NULL;
-	atomic_set(&dst->__refcnt, initial_ref);
+	dst->child = NULL;
+	dst->dev = dev;
+	if (dev)
+		dev_hold(dev);
 	dst->ops = ops;
-	dst->lastuse = jiffies;
-	dst->path = dst;
-	dst->input = dst->output = dst_discard;
 	dst_init_metrics(dst, dst_default_metrics, true);
-#if RT_CACHE_DEBUG >= 2
-	atomic_inc(&dst_total);
+	dst->expires = 0UL;
+	dst->path = dst;
+	dst->neighbour = NULL;
+	dst->hh = NULL;
+#ifdef CONFIG_XFRM
+	dst->xfrm = NULL;
+#endif
+	dst->input = dst_discard;
+	dst->output = dst_discard;
+	dst->error = 0;
+	dst->obsolete = initial_obsolete;
+	dst->header_len = 0;
+	dst->trailer_len = 0;
+#ifdef CONFIG_IP_ROUTE_CLASSID
+	dst->tclassid = 0;
 #endif
+	atomic_set(&dst->__refcnt, initial_ref);
+	dst->__use = 0;
+	dst->lastuse = jiffies;
+	dst->flags = flags;
+	dst->next = NULL;
 	dst_entries_add(ops, 1);
 	return dst;
 }
@@ -245,9 +249,6 @@ again:
 		dst->ops->destroy(dst);
 	if (dst->dev)
 		dev_put(dst->dev);
-#if RT_CACHE_DEBUG >= 2
-	atomic_dec(&dst_total);
-#endif
 	kmem_cache_free(dst->ops->kmem_cachep, dst);
 
 	dst = child;
@@ -314,7 +315,7 @@ void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
 {
 	unsigned long prev, new;
 
-	new = (unsigned long) dst_default_metrics;
+	new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
 	prev = cmpxchg(&dst->_metrics, old, new);
 	if (prev == old)
 		kfree(__DST_METRICS_PTR(old));
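
Note on the dst_alloc() change above: call sites now pass the device, the
initial reference count, the initial ->obsolete value and the flags directly,
and dst_alloc() initializes every dst_entry field itself instead of relying on
a zeroed allocation (kmem_cache_alloc() replaces kmem_cache_zalloc()). A
minimal caller-side sketch under those assumptions follows; the wrapper name
and the argument values are illustrative only, not taken from this patch:

#include <net/dst.h>

/* Hypothetical helper: allocate a dst for 'ops' bound to 'dev' with one
 * initial reference, ->obsolete = 0 and no flags.  dst_alloc() takes a
 * reference on 'dev' (dev_hold) and fills in every field, so no manual
 * field setup is needed at the call site.
 */
static void *example_dst_alloc(struct dst_ops *ops, struct net_device *dev)
{
	return dst_alloc(ops, dev, 1, 0, 0);
}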