aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2011-05-18 18:23:21 -0400
committerDavid S. Miller <davem@davemloft.net>2011-05-18 18:23:21 -0400
commit6882f933ccee5c3a86443ffc7621ce888b93ab6b (patch)
tree07998f54bd459c5345491fbaeae03bd60540c6e8 /net/ipv4
parent12f4d0a8770ab26639091d0b2509b19681daad69 (diff)
ipv4: Kill RT_CACHE_DEBUG
It's way past its usefulness. And this gets rid of a bunch of stray ->rt_{dst,src} references. Even the comment documenting the macro was inaccurate (stated default was 1 when it's 0). If reintroduced, it should be done properly, with dynamic debug facilities. Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/route.c22
1 file changed, 0 insertions, 22 deletions
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cb93c32027d7..9c5ad86bc783 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -968,10 +968,6 @@ static int rt_garbage_collect(struct dst_ops *ops)
968 break; 968 break;
969 969
970 expire >>= 1; 970 expire >>= 1;
971#if RT_CACHE_DEBUG >= 2
972 printk(KERN_DEBUG "expire>> %u %d %d %d\n", expire,
973 dst_entries_get_fast(&ipv4_dst_ops), goal, i);
974#endif
975 971
976 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size) 972 if (dst_entries_get_fast(&ipv4_dst_ops) < ip_rt_max_size)
977 goto out; 973 goto out;
@@ -992,10 +988,6 @@ work_done:
992 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh || 988 dst_entries_get_fast(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh ||
993 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh) 989 dst_entries_get_slow(&ipv4_dst_ops) < ipv4_dst_ops.gc_thresh)
994 expire = ip_rt_gc_timeout; 990 expire = ip_rt_gc_timeout;
995#if RT_CACHE_DEBUG >= 2
996 printk(KERN_DEBUG "expire++ %u %d %d %d\n", expire,
997 dst_entries_get_fast(&ipv4_dst_ops), goal, rover);
998#endif
999out: return 0; 991out: return 0;
1000} 992}
1001 993
@@ -1179,16 +1171,6 @@ restart:
1179 1171
1180 rt->dst.rt_next = rt_hash_table[hash].chain; 1172 rt->dst.rt_next = rt_hash_table[hash].chain;
1181 1173
1182#if RT_CACHE_DEBUG >= 2
1183 if (rt->dst.rt_next) {
1184 struct rtable *trt;
1185 printk(KERN_DEBUG "rt_cache @%02x: %pI4",
1186 hash, &rt->rt_dst);
1187 for (trt = rt->dst.rt_next; trt; trt = trt->dst.rt_next)
1188 printk(" . %pI4", &trt->rt_dst);
1189 printk("\n");
1190 }
1191#endif
1192 /* 1174 /*
1193 * Since lookup is lockfree, we must make sure 1175 * Since lookup is lockfree, we must make sure
1194 * previous writes to rt are committed to memory 1176 * previous writes to rt are committed to memory
@@ -1347,10 +1329,6 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1347 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src, 1329 unsigned hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
1348 rt->rt_oif, 1330 rt->rt_oif,
1349 rt_genid(dev_net(dst->dev))); 1331 rt_genid(dev_net(dst->dev)));
1350#if RT_CACHE_DEBUG >= 1
1351 printk(KERN_DEBUG "ipv4_negative_advice: redirect to %pI4/%02x dropped\n",
1352 &rt->rt_dst, rt->rt_key_tos);
1353#endif
1354 rt_del(hash, rt); 1332 rt_del(hash, rt);
1355 ret = NULL; 1333 ret = NULL;
1356 } else if (rt->peer && 1334 } else if (rt->peer &&