author     Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp>	2005-11-16 15:55:37 -0500
committer  David S. Miller <davem@davemloft.net>	2005-11-16 15:55:37 -0500
commit     e7c8a41e817f381ac5c2a59ecc81b483bd68a7df (patch)
tree       650b33c804a1a66c72acaef9595eb9fe6cc2c0e9 /net/ipv4/ip_fragment.c
parent     0db169f9703115bab09eda5d89a8f6937a7bf98e (diff)
[IPV4,IPV6]: replace handmade list with hlist in IPv{4,6} reassembly
Both ipq and frag_queue have *next and **prev pointers, which can be replaced
with hlist. Thanks to Arnaldo Carvalho de Melo for the suggestion.
Signed-off-by: Yasuyuki Kozakai <yasuyuki.kozakai@toshiba.co.jp>
Acked-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
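
[Editor's note] For context, the hlist pattern adopted here keeps a one-pointer
head (hlist_head) and gives each node a next pointer plus a pprev back-pointer
to the previous node's next field, so a node can be unlinked without a
reference to its bucket head. Below is a minimal userspace sketch of that
pattern; the hlist_head/hlist_node definitions and the add/del helpers mirror
<linux/list.h> of this era, while struct ipq_demo and main() are illustrative
stand-ins, not kernel source.

#include <stddef.h>
#include <stdio.h>

struct hlist_head { struct hlist_node *first; };
struct hlist_node { struct hlist_node *next, **pprev; };

/* Insert n at the front of the chain headed by h. */
static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
	struct hlist_node *first = h->first;

	n->next = first;
	if (first)
		first->pprev = &n->next;
	h->first = n;
	n->pprev = &h->first;
}

/* Unlink n; pprev makes this work without knowing the head. */
static void hlist_del(struct hlist_node *n)
{
	*n->pprev = n->next;
	if (n->next)
		n->next->pprev = n->pprev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* A stand-in for struct ipq, reduced to an id plus the hlist link. */
struct ipq_demo {
	unsigned int id;
	struct hlist_node list;
};

int main(void)
{
	struct hlist_head bucket = { NULL };
	struct ipq_demo a = { .id = 1 }, b = { .id = 2 };
	struct hlist_node *pos;

	hlist_add_head(&a.list, &bucket);	/* chain: a */
	hlist_add_head(&b.list, &bucket);	/* chain: b, a */
	hlist_del(&a.list);			/* chain: b */

	/* Open-coded equivalent of hlist_for_each_entry(). */
	for (pos = bucket.first; pos; pos = pos->next) {
		struct ipq_demo *q = container_of(pos, struct ipq_demo, list);

		printf("id=%u\n", q->id);	/* prints only id=2 */
	}
	return 0;
}

The single-pointer head is what makes hlist attractive for hash tables like
ipq_hash: the bucket array costs one pointer per bucket instead of two.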
Diffstat (limited to 'net/ipv4/ip_fragment.c')
-rw-r--r--	net/ipv4/ip_fragment.c	40
1 file changed, 14 insertions(+), 26 deletions(-)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index e7d26d9943c2..8ce0ce2ee48e 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -71,7 +71,7 @@ struct ipfrag_skb_cb
 
 /* Describe an entry in the "incomplete datagrams" queue. */
 struct ipq {
-	struct ipq *next;	/* linked list pointers */
+	struct hlist_node list;
 	struct list_head lru_list;	/* lru list member */
 	u32 user;
 	u32 saddr;
@@ -89,7 +89,6 @@ struct ipq {
 	spinlock_t lock;
 	atomic_t refcnt;
 	struct timer_list timer;	/* when will this queue expire? */
-	struct ipq **pprev;
 	int iif;
 	struct timeval stamp;
 };
@@ -99,7 +98,7 @@ struct ipq {
 #define IPQ_HASHSZ 64
 
 /* Per-bucket lock is easy to add now. */
-static struct ipq *ipq_hash[IPQ_HASHSZ];
+static struct hlist_head ipq_hash[IPQ_HASHSZ];
 static DEFINE_RWLOCK(ipfrag_lock);
 static u32 ipfrag_hash_rnd;
 static LIST_HEAD(ipq_lru_list);
@@ -107,9 +106,7 @@ int ip_frag_nqueues = 0;
 
 static __inline__ void __ipq_unlink(struct ipq *qp)
 {
-	if(qp->next)
-		qp->next->pprev = qp->pprev;
-	*qp->pprev = qp->next;
+	hlist_del(&qp->list);
 	list_del(&qp->lru_list);
 	ip_frag_nqueues--;
 }
@@ -139,27 +136,18 @@ static void ipfrag_secret_rebuild(unsigned long dummy)
 	get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
 	for (i = 0; i < IPQ_HASHSZ; i++) {
 		struct ipq *q;
+		struct hlist_node *p, *n;
 
-		q = ipq_hash[i];
-		while (q) {
-			struct ipq *next = q->next;
+		hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], list) {
 			unsigned int hval = ipqhashfn(q->id, q->saddr,
 						      q->daddr, q->protocol);
 
 			if (hval != i) {
-				/* Unlink. */
-				if (q->next)
-					q->next->pprev = q->pprev;
-				*q->pprev = q->next;
+				hlist_del(&q->list);
 
 				/* Relink to new hash chain. */
-				if ((q->next = ipq_hash[hval]) != NULL)
-					q->next->pprev = &q->next;
-				ipq_hash[hval] = q;
-				q->pprev = &ipq_hash[hval];
+				hlist_add_head(&q->list, &ipq_hash[hval]);
 			}
-
-			q = next;
 		}
 	}
 	write_unlock(&ipfrag_lock);
@@ -310,14 +298,16 @@ out:
 static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
 {
 	struct ipq *qp;
-
+#ifdef CONFIG_SMP
+	struct hlist_node *n;
+#endif
 	write_lock(&ipfrag_lock);
 #ifdef CONFIG_SMP
 	/* With SMP race we have to recheck hash table, because
 	 * such entry could be created on other cpu, while we
 	 * promoted read lock to write lock.
 	 */
-	for(qp = ipq_hash[hash]; qp; qp = qp->next) {
+	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
 		if(qp->id == qp_in->id &&
 		   qp->saddr == qp_in->saddr &&
 		   qp->daddr == qp_in->daddr &&
@@ -337,10 +327,7 @@ static struct ipq *ip_frag_intern(unsigned int hash, struct ipq *qp_in)
 	atomic_inc(&qp->refcnt);
 
 	atomic_inc(&qp->refcnt);
-	if((qp->next = ipq_hash[hash]) != NULL)
-		qp->next->pprev = &qp->next;
-	ipq_hash[hash] = qp;
-	qp->pprev = &ipq_hash[hash];
+	hlist_add_head(&qp->list, &ipq_hash[hash]);
 	INIT_LIST_HEAD(&qp->lru_list);
 	list_add_tail(&qp->lru_list, &ipq_lru_list);
 	ip_frag_nqueues++;
@@ -392,9 +379,10 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
 	__u8 protocol = iph->protocol;
 	unsigned int hash = ipqhashfn(id, saddr, daddr, protocol);
 	struct ipq *qp;
+	struct hlist_node *n;
 
 	read_lock(&ipfrag_lock);
-	for(qp = ipq_hash[hash]; qp; qp = qp->next) {
+	hlist_for_each_entry(qp, n, &ipq_hash[hash], list) {
 		if(qp->id == id &&
 		   qp->saddr == saddr &&
 		   qp->daddr == daddr &&