author	David S. Miller <davem@davemloft.net>	2011-05-18 18:23:21 -0400
committer	David S. Miller <davem@davemloft.net>	2011-05-18 18:23:21 -0400
commit	6882f933ccee5c3a86443ffc7621ce888b93ab6b (patch)
tree	07998f54bd459c5345491fbaeae03bd60540c6e8 /net/core
parent	12f4d0a8770ab26639091d0b2509b19681daad69 (diff)
ipv4: Kill RT_CACHE_DEBUG
It's way past its usefulness.  And this gets rid of a bunch of stray
->rt_{dst,src} references.

Even the comment documenting the macro was inaccurate (stated default
was 1 when it's 0).

If reintroduced, it should be done properly, with dynamic debug
facilities.

Signed-off-by: David S. Miller <davem@davemloft.net>
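For illustration only, a minimal sketch of what a dynamic-debug
replacement for the removed statistics could look like: the counter
stays, but the report becomes a pr_debug() call that is compiled in
yet disabled until toggled at runtime. The helper dst_gc_report() is
hypothetical and not part of dst.c or of this commit.

#include <linux/atomic.h>
#include <linux/ktime.h>
#include <linux/printk.h>

/* Hypothetical replacement for the RT_CACHE_DEBUG >= 2 blocks: the
 * report is gated by dynamic debug instead of a compile-time macro. */
static atomic_t dst_total = ATOMIC_INIT(0);

static void dst_gc_report(ktime_t time_start, int delayed,
			  int work_performed, unsigned long expires)
{
	s64 elapsed_us = ktime_us_delta(ktime_get(), time_start);

	/* Off by default; enabled per call site via debugfs when
	 * CONFIG_DYNAMIC_DEBUG is set. */
	pr_debug("dst_total: %d delayed: %d work_perf: %d expires: %lu elapsed: %lld us\n",
		 atomic_read(&dst_total), delayed, work_performed,
		 expires, elapsed_us);
}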
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/dst.c	| 22 ----------------------
1 file changed, 0 insertions(+), 22 deletions(-)
diff --git a/net/core/dst.c b/net/core/dst.c
index 30f009327b62..da47a299618a 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -33,9 +33,6 @@
  * 3) This list is guarded by a mutex,
  *    so that the gc_task and dst_dev_event() can be synchronized.
  */
-#if RT_CACHE_DEBUG >= 2
-static atomic_t dst_total = ATOMIC_INIT(0);
-#endif
 
 /*
  * We want to keep lock & list close together
@@ -69,10 +66,6 @@ static void dst_gc_task(struct work_struct *work)
 	unsigned long expires = ~0L;
 	struct dst_entry *dst, *next, head;
 	struct dst_entry *last = &head;
-#if RT_CACHE_DEBUG >= 2
-	ktime_t time_start = ktime_get();
-	struct timespec elapsed;
-#endif
 
 	mutex_lock(&dst_gc_mutex);
 	next = dst_busy_list;
@@ -146,15 +139,6 @@ loop:
 
 	spin_unlock_bh(&dst_garbage.lock);
 	mutex_unlock(&dst_gc_mutex);
-#if RT_CACHE_DEBUG >= 2
-	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
-	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
-	       " expires: %lu elapsed: %lu us\n",
-	       atomic_read(&dst_total), delayed, work_performed,
-	       expires,
-	       elapsed.tv_sec * USEC_PER_SEC +
-	       elapsed.tv_nsec / NSEC_PER_USEC);
-#endif
 }
 
 int dst_discard(struct sk_buff *skb)
@@ -205,9 +189,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 	dst->lastuse = jiffies;
 	dst->flags = flags;
 	dst->next = NULL;
-#if RT_CACHE_DEBUG >= 2
-	atomic_inc(&dst_total);
-#endif
 	dst_entries_add(ops, 1);
 	return dst;
 }
@@ -267,9 +248,6 @@ again:
 		dst->ops->destroy(dst);
 	if (dst->dev)
 		dev_put(dst->dev);
-#if RT_CACHE_DEBUG >= 2
-	atomic_dec(&dst_total);
-#endif
 	kmem_cache_free(dst->ops->kmem_cachep, dst);
 
 	dst = child;
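With a pr_debug()-based version like the sketch above, the output
could be toggled at runtime with no rebuild (assuming
CONFIG_DYNAMIC_DEBUG and a mounted debugfs):

	echo 'file dst.c +p' > /sys/kernel/debug/dynamic_debug/control

and switched off again by writing '-p' to the same control file.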