about summary refs log tree commit diff stats
path: root/net/ipv6
diff options
context:
space:
mode:
authorYasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp>2005-11-16 15:55:37 -0500
committerDavid S. Miller <davem@davemloft.net>2005-11-16 15:55:37 -0500
commite7c8a41e817f381ac5c2a59ecc81b483bd68a7df (patch)
tree650b33c804a1a66c72acaef9595eb9fe6cc2c0e9 /net/ipv6
parent0db169f9703115bab09eda5d89a8f6937a7bf98e (diff)
[IPV4,IPV6]: replace handmade list with hlist in IPv{4,6} reassembly
Both ipq and frag_queue have *next and **prev pointers, which can be replaced with an hlist. Thanks to Arnaldo Carvalho de Melo for the suggestion. Signed-off-by: Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp> Acked-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--net/ipv6/reassembly.c41
1 file changed, 16 insertions, 25 deletions
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index e4fe9ee484dd..5d316cb72ec9 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -74,7 +74,7 @@ struct ip6frag_skb_cb
74 74
75struct frag_queue 75struct frag_queue
76{ 76{
77 struct frag_queue *next; 77 struct hlist_node list;
78 struct list_head lru_list; /* lru list member */ 78 struct list_head lru_list; /* lru list member */
79 79
80 __u32 id; /* fragment id */ 80 __u32 id; /* fragment id */
@@ -95,14 +95,13 @@ struct frag_queue
95#define FIRST_IN 2 95#define FIRST_IN 2
96#define LAST_IN 1 96#define LAST_IN 1
97 __u16 nhoffset; 97 __u16 nhoffset;
98 struct frag_queue **pprev;
99}; 98};
100 99
101/* Hash table. */ 100/* Hash table. */
102 101
103#define IP6Q_HASHSZ 64 102#define IP6Q_HASHSZ 64
104 103
105static struct frag_queue *ip6_frag_hash[IP6Q_HASHSZ]; 104static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
106static DEFINE_RWLOCK(ip6_frag_lock); 105static DEFINE_RWLOCK(ip6_frag_lock);
107static u32 ip6_frag_hash_rnd; 106static u32 ip6_frag_hash_rnd;
108static LIST_HEAD(ip6_frag_lru_list); 107static LIST_HEAD(ip6_frag_lru_list);
@@ -110,9 +109,7 @@ int ip6_frag_nqueues = 0;
110 109
111static __inline__ void __fq_unlink(struct frag_queue *fq) 110static __inline__ void __fq_unlink(struct frag_queue *fq)
112{ 111{
113 if(fq->next) 112 hlist_del(&fq->list);
114 fq->next->pprev = fq->pprev;
115 *fq->pprev = fq->next;
116 list_del(&fq->lru_list); 113 list_del(&fq->lru_list);
117 ip6_frag_nqueues--; 114 ip6_frag_nqueues--;
118} 115}
@@ -163,28 +160,21 @@ static void ip6_frag_secret_rebuild(unsigned long dummy)
163 get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32)); 160 get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
164 for (i = 0; i < IP6Q_HASHSZ; i++) { 161 for (i = 0; i < IP6Q_HASHSZ; i++) {
165 struct frag_queue *q; 162 struct frag_queue *q;
163 struct hlist_node *p, *n;
166 164
167 q = ip6_frag_hash[i]; 165 hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
168 while (q) {
169 struct frag_queue *next = q->next;
170 unsigned int hval = ip6qhashfn(q->id, 166 unsigned int hval = ip6qhashfn(q->id,
171 &q->saddr, 167 &q->saddr,
172 &q->daddr); 168 &q->daddr);
173 169
174 if (hval != i) { 170 if (hval != i) {
175 /* Unlink. */ 171 hlist_del(&q->list);
176 if (q->next)
177 q->next->pprev = q->pprev;
178 *q->pprev = q->next;
179 172
180 /* Relink to new hash chain. */ 173 /* Relink to new hash chain. */
181 if ((q->next = ip6_frag_hash[hval]) != NULL) 174 hlist_add_head(&q->list,
182 q->next->pprev = &q->next; 175 &ip6_frag_hash[hval]);
183 ip6_frag_hash[hval] = q;
184 q->pprev = &ip6_frag_hash[hval];
185 }
186 176
187 q = next; 177 }
188 } 178 }
189 } 179 }
190 write_unlock(&ip6_frag_lock); 180 write_unlock(&ip6_frag_lock);
@@ -337,10 +327,13 @@ static struct frag_queue *ip6_frag_intern(unsigned int hash,
337 struct frag_queue *fq_in) 327 struct frag_queue *fq_in)
338{ 328{
339 struct frag_queue *fq; 329 struct frag_queue *fq;
330#ifdef CONFIG_SMP
331 struct hlist_node *n;
332#endif
340 333
341 write_lock(&ip6_frag_lock); 334 write_lock(&ip6_frag_lock);
342#ifdef CONFIG_SMP 335#ifdef CONFIG_SMP
343 for (fq = ip6_frag_hash[hash]; fq; fq = fq->next) { 336 hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
344 if (fq->id == fq_in->id && 337 if (fq->id == fq_in->id &&
345 ipv6_addr_equal(&fq_in->saddr, &fq->saddr) && 338 ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
346 ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) { 339 ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
@@ -358,10 +351,7 @@ static struct frag_queue *ip6_frag_intern(unsigned int hash,
358 atomic_inc(&fq->refcnt); 351 atomic_inc(&fq->refcnt);
359 352
360 atomic_inc(&fq->refcnt); 353 atomic_inc(&fq->refcnt);
361 if((fq->next = ip6_frag_hash[hash]) != NULL) 354 hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
362 fq->next->pprev = &fq->next;
363 ip6_frag_hash[hash] = fq;
364 fq->pprev = &ip6_frag_hash[hash];
365 INIT_LIST_HEAD(&fq->lru_list); 355 INIT_LIST_HEAD(&fq->lru_list);
366 list_add_tail(&fq->lru_list, &ip6_frag_lru_list); 356 list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
367 ip6_frag_nqueues++; 357 ip6_frag_nqueues++;
@@ -401,10 +391,11 @@ static __inline__ struct frag_queue *
401fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst) 391fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
402{ 392{
403 struct frag_queue *fq; 393 struct frag_queue *fq;
394 struct hlist_node *n;
404 unsigned int hash = ip6qhashfn(id, src, dst); 395 unsigned int hash = ip6qhashfn(id, src, dst);
405 396
406 read_lock(&ip6_frag_lock); 397 read_lock(&ip6_frag_lock);
407 for(fq = ip6_frag_hash[hash]; fq; fq = fq->next) { 398 hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
408 if (fq->id == id && 399 if (fq->id == id &&
409 ipv6_addr_equal(src, &fq->saddr) && 400 ipv6_addr_equal(src, &fq->saddr) &&
410 ipv6_addr_equal(dst, &fq->daddr)) { 401 ipv6_addr_equal(dst, &fq->daddr)) {