author     David S. Miller <davem@davemloft.net>  2011-05-18 18:23:21 -0400
committer  David S. Miller <davem@davemloft.net>  2011-05-18 18:23:21 -0400
commit     6882f933ccee5c3a86443ffc7621ce888b93ab6b (patch)
tree       07998f54bd459c5345491fbaeae03bd60540c6e8 /net
parent     12f4d0a8770ab26639091d0b2509b19681daad69 (diff)
ipv4: Kill RT_CACHE_DEBUG
It's way past its usefulness. And this gets rid of a bunch of stray
->rt_{dst,src} references. Even the comment documenting the macro was
inaccurate (it stated the default was 1 when it's 0).

If reintroduced, it should be done properly, with dynamic debug
facilities.

Signed-off-by: David S. Miller <davem@davemloft.net>
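For context on the "dynamic debug facilities" the message points to: a
reintroduced variant would most likely drop the compile-time
RT_CACHE_DEBUG guard entirely and use pr_debug(), which under
CONFIG_DYNAMIC_DEBUG can be switched on per file or per callsite at
runtime. A minimal sketch, not part of this patch (the helper
rt_gc_debug_example() is hypothetical; pr_debug() and pr_fmt() are the
real kernel interfaces):

/* Hypothetical sketch only -- shows the dynamic-debug replacement
 * for a compile-time "#if RT_CACHE_DEBUG >= 2" + printk(KERN_DEBUG)
 * block such as the one removed from rt_garbage_collect() below.
 */
#define pr_fmt(fmt) "ipv4: " fmt

#include <linux/printk.h>

static void rt_gc_debug_example(unsigned int expire, int entries,
				int goal, int i)
{
	/* Compiled out unless DEBUG is set; with CONFIG_DYNAMIC_DEBUG
	 * it is present but disabled until enabled at runtime.
	 */
	pr_debug("expire>> %u %d %d %d\n", expire, entries, goal, i);
}

With debugfs mounted, such a callsite could then be enabled at runtime,
e.g. echo 'file route.c +p' > /sys/kernel/debug/dynamic_debug/control,
instead of requiring a rebuild with a debug macro set.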
Diffstat (limited to 'net')
-rw-r--r--  net/core/dst.c    22
-rw-r--r--  net/ipv4/route.c  22
2 files changed, 0 insertions, 44 deletions
diff --git a/net/core/dst.c b/net/core/dst.c
index 30f009327b62..da47a299618a 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -33,9 +33,6 @@
  * 3) This list is guarded by a mutex,
  *    so that the gc_task and dst_dev_event() can be synchronized.
  */
-#if RT_CACHE_DEBUG >= 2
-static atomic_t dst_total = ATOMIC_INIT(0);
-#endif
 
 /*
  * We want to keep lock & list close together
@@ -69,10 +66,6 @@ static void dst_gc_task(struct work_struct *work)
 	unsigned long expires = ~0L;
 	struct dst_entry *dst, *next, head;
 	struct dst_entry *last = &head;
-#if RT_CACHE_DEBUG >= 2
-	ktime_t time_start = ktime_get();
-	struct timespec elapsed;
-#endif
 
 	mutex_lock(&dst_gc_mutex);
 	next = dst_busy_list;
@@ -146,15 +139,6 @@ loop:
 
 	spin_unlock_bh(&dst_garbage.lock);
 	mutex_unlock(&dst_gc_mutex);
-#if RT_CACHE_DEBUG >= 2
-	elapsed = ktime_to_timespec(ktime_sub(ktime_get(), time_start));
-	printk(KERN_DEBUG "dst_total: %d delayed: %d work_perf: %d"
-	       " expires: %lu elapsed: %lu us\n",
-	       atomic_read(&dst_total), delayed, work_performed,
-	       expires,
-	       elapsed.tv_sec * USEC_PER_SEC +
-	       elapsed.tv_nsec / NSEC_PER_USEC);
-#endif
 }
 
 int dst_discard(struct sk_buff *skb)
@@ -205,9 +189,6 @@ void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
 	dst->lastuse = jiffies;
 	dst->flags = flags;
 	dst->next = NULL;
-#if RT_CACHE_DEBUG >= 2
-	atomic_inc(&dst_total);
-#endif
 	dst_entries_add(ops, 1);
 	return dst;
 }
@@ -267,9 +248,6 @@ again:
 		dst->ops->destroy(dst);
 	if (dst->dev)
 		dev_put(dst->dev);
-#if RT_CACHE_DEBUG >= 2
-	atomic_dec(&dst_total);
-#endif
 	kmem_cache_free(dst->ops->kmem_cachep, dst);
 
 	dst = child;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cb93c32027d7..9c5ad86bc783 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -968,10 +968,6 @@ static int rt_garbage_collect(struct dst_ops *ops)
 			break;
 
 		expire >>= 1;
-#if RT_CACHE_DEBUG >= 2
-		printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
-			dst_entries_get_fast(&ipv4_dst_ops), goal, i);
-#endif
 
 		if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
 			goto out;
@@ -992,10 +988,6 @@ work_done:
 	    dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
 	    dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
 		expire = ip_rt_gc_timeout;
-#if RT_CACHE_DEBUG >= 2
-	printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
-			dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
-#endif
 out:	return 0;
 }
 
@@ -1179,16 +1171,6 @@ restart:
 
 	rt->dst.rt_next = rt_hash_table[hash].chain;
 
-#if RT_CACHE_DEBUG >= 2
-	if (rt->dst.rt_next) {
-		struct rtable *trt;
-		printk(KERN_DEBUG "rt_cache @%02x: %pI4",
-		       hash, &rt->rt_dst);
-		for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
-			printk(" . %pI4", &trt->rt_dst);
-		printk("\n");
-	}
-#endif
 	/*
 	 * Since lookup is lockfree, we must make sure
 	 * previous writes to rt are committed to memory
@@ -1347,10 +1329,6 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 		unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
 					rt->rt_oif,
 					rt_genid(dev_net(dst->dev)));
-#if RT_CACHE_DEBUG >= 1
-		printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
-			&rt->rt_dst, rt->rt_key_tos);
-#endif
 		rt_del(hash, rt);
 		ret = NULL;
 	} else if (rt->peer &&