aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv6/reassembly.c
diff options
context:
space:
mode:
authorPavel Emelyanov <xemul@openvz.org>2007-10-15 05:31:52 -0400
committerDavid S. Miller <davem@sunset.davemloft.net>2007-10-15 15:26:39 -0400
commit7eb95156d9dce2f59794264db336ce007d71638b (patch)
treec283a095f1a9d530edb1a7058454ba30b4f7d028 /net/ipv6/reassembly.c
parent5ab11c98d3a950faf6922b6166e5f8fc874590e7 (diff)
[INET]: Collect frag queues management objects together
There are some objects that are common in all the places which are used to keep track of frag queues, they are:

 * hash table
 * LRU list
 * rw lock
 * rnd number for hash function
 * the number of queues
 * the amount of memory occupied by queues
 * secret timer

Move all this stuff into one structure (struct inet_frags) to make it possible use them uniformly in the future. Like with the previous patch this mostly consists of hunks like

    -    write_lock(&ipfrag_lock);
    +    write_lock(&ip4_frags.lock);

To address the issue with exporting the number of queues and the amount of memory occupied by queues outside the .c file they are declared in, I introduce a couple of helpers.

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/reassembly.c')
-rw-r--r--net/ipv6/reassembly.c110
1 files changed, 54 insertions, 56 deletions
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index aef5dd1ebc8a..ecf340047cde 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -87,15 +87,17 @@ struct frag_queue
87 __u16 nhoffset; 87 __u16 nhoffset;
88}; 88};
89 89
90/* Hash table. */ 90static struct inet_frags ip6_frags;
91 91
92#define IP6Q_HASHSZ 64 92int ip6_frag_nqueues(void)
93{
94 return ip6_frags.nqueues;
95}
93 96
94static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ]; 97int ip6_frag_mem(void)
95static DEFINE_RWLOCK(ip6_frag_lock); 98{
96static u32 ip6_frag_hash_rnd; 99 return atomic_read(&ip6_frags.mem);
97static LIST_HEAD(ip6_frag_lru_list); 100}
98int ip6_frag_nqueues = 0;
99 101
100static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, 102static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
101 struct net_device *dev); 103 struct net_device *dev);
@@ -104,14 +106,14 @@ static __inline__ void __fq_unlink(struct frag_queue *fq)
104{ 106{
105 hlist_del(&fq->q.list); 107 hlist_del(&fq->q.list);
106 list_del(&fq->q.lru_list); 108 list_del(&fq->q.lru_list);
107 ip6_frag_nqueues--; 109 ip6_frags.nqueues--;
108} 110}
109 111
110static __inline__ void fq_unlink(struct frag_queue *fq) 112static __inline__ void fq_unlink(struct frag_queue *fq)
111{ 113{
112 write_lock(&ip6_frag_lock); 114 write_lock(&ip6_frags.lock);
113 __fq_unlink(fq); 115 __fq_unlink(fq);
114 write_unlock(&ip6_frag_lock); 116 write_unlock(&ip6_frags.lock);
115} 117}
116 118
117/* 119/*
@@ -129,7 +131,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
129 131
130 a += JHASH_GOLDEN_RATIO; 132 a += JHASH_GOLDEN_RATIO;
131 b += JHASH_GOLDEN_RATIO; 133 b += JHASH_GOLDEN_RATIO;
132 c += ip6_frag_hash_rnd; 134 c += ip6_frags.rnd;
133 __jhash_mix(a, b, c); 135 __jhash_mix(a, b, c);
134 136
135 a += (__force u32)saddr->s6_addr32[3]; 137 a += (__force u32)saddr->s6_addr32[3];
@@ -142,10 +144,9 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
142 c += (__force u32)id; 144 c += (__force u32)id;
143 __jhash_mix(a, b, c); 145 __jhash_mix(a, b, c);
144 146
145 return c & (IP6Q_HASHSZ - 1); 147 return c & (INETFRAGS_HASHSZ - 1);
146} 148}
147 149
148static struct timer_list ip6_frag_secret_timer;
149int sysctl_ip6frag_secret_interval __read_mostly = 10 * 60 * HZ; 150int sysctl_ip6frag_secret_interval __read_mostly = 10 * 60 * HZ;
150 151
151static void ip6_frag_secret_rebuild(unsigned long dummy) 152static void ip6_frag_secret_rebuild(unsigned long dummy)
@@ -153,13 +154,13 @@ static void ip6_frag_secret_rebuild(unsigned long dummy)
153 unsigned long now = jiffies; 154 unsigned long now = jiffies;
154 int i; 155 int i;
155 156
156 write_lock(&ip6_frag_lock); 157 write_lock(&ip6_frags.lock);
157 get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32)); 158 get_random_bytes(&ip6_frags.rnd, sizeof(u32));
158 for (i = 0; i < IP6Q_HASHSZ; i++) { 159 for (i = 0; i < INETFRAGS_HASHSZ; i++) {
159 struct frag_queue *q; 160 struct frag_queue *q;
160 struct hlist_node *p, *n; 161 struct hlist_node *p, *n;
161 162
162 hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], q.list) { 163 hlist_for_each_entry_safe(q, p, n, &ip6_frags.hash[i], q.list) {
163 unsigned int hval = ip6qhashfn(q->id, 164 unsigned int hval = ip6qhashfn(q->id,
164 &q->saddr, 165 &q->saddr,
165 &q->daddr); 166 &q->daddr);
@@ -169,24 +170,22 @@ static void ip6_frag_secret_rebuild(unsigned long dummy)
169 170
170 /* Relink to new hash chain. */ 171 /* Relink to new hash chain. */
171 hlist_add_head(&q->q.list, 172 hlist_add_head(&q->q.list,
172 &ip6_frag_hash[hval]); 173 &ip6_frags.hash[hval]);
173 174
174 } 175 }
175 } 176 }
176 } 177 }
177 write_unlock(&ip6_frag_lock); 178 write_unlock(&ip6_frags.lock);
178 179
179 mod_timer(&ip6_frag_secret_timer, now + sysctl_ip6frag_secret_interval); 180 mod_timer(&ip6_frags.secret_timer, now + sysctl_ip6frag_secret_interval);
180} 181}
181 182
182atomic_t ip6_frag_mem = ATOMIC_INIT(0);
183
184/* Memory Tracking Functions. */ 183/* Memory Tracking Functions. */
185static inline void frag_kfree_skb(struct sk_buff *skb, int *work) 184static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
186{ 185{
187 if (work) 186 if (work)
188 *work -= skb->truesize; 187 *work -= skb->truesize;
189 atomic_sub(skb->truesize, &ip6_frag_mem); 188 atomic_sub(skb->truesize, &ip6_frags.mem);
190 kfree_skb(skb); 189 kfree_skb(skb);
191} 190}
192 191
@@ -194,7 +193,7 @@ static inline void frag_free_queue(struct frag_queue *fq, int *work)
194{ 193{
195 if (work) 194 if (work)
196 *work -= sizeof(struct frag_queue); 195 *work -= sizeof(struct frag_queue);
197 atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem); 196 atomic_sub(sizeof(struct frag_queue), &ip6_frags.mem);
198 kfree(fq); 197 kfree(fq);
199} 198}
200 199
@@ -204,7 +203,7 @@ static inline struct frag_queue *frag_alloc_queue(void)
204 203
205 if(!fq) 204 if(!fq)
206 return NULL; 205 return NULL;
207 atomic_add(sizeof(struct frag_queue), &ip6_frag_mem); 206 atomic_add(sizeof(struct frag_queue), &ip6_frags.mem);
208 return fq; 207 return fq;
209} 208}
210 209
@@ -257,20 +256,20 @@ static void ip6_evictor(struct inet6_dev *idev)
257 struct list_head *tmp; 256 struct list_head *tmp;
258 int work; 257 int work;
259 258
260 work = atomic_read(&ip6_frag_mem) - sysctl_ip6frag_low_thresh; 259 work = atomic_read(&ip6_frags.mem) - sysctl_ip6frag_low_thresh;
261 if (work <= 0) 260 if (work <= 0)
262 return; 261 return;
263 262
264 while(work > 0) { 263 while(work > 0) {
265 read_lock(&ip6_frag_lock); 264 read_lock(&ip6_frags.lock);
266 if (list_empty(&ip6_frag_lru_list)) { 265 if (list_empty(&ip6_frags.lru_list)) {
267 read_unlock(&ip6_frag_lock); 266 read_unlock(&ip6_frags.lock);
268 return; 267 return;
269 } 268 }
270 tmp = ip6_frag_lru_list.next; 269 tmp = ip6_frags.lru_list.next;
271 fq = list_entry(tmp, struct frag_queue, q.lru_list); 270 fq = list_entry(tmp, struct frag_queue, q.lru_list);
272 atomic_inc(&fq->q.refcnt); 271 atomic_inc(&fq->q.refcnt);
273 read_unlock(&ip6_frag_lock); 272 read_unlock(&ip6_frags.lock);
274 273
275 spin_lock(&fq->q.lock); 274 spin_lock(&fq->q.lock);
276 if (!(fq->q.last_in&COMPLETE)) 275 if (!(fq->q.last_in&COMPLETE))
@@ -332,15 +331,15 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
332 struct hlist_node *n; 331 struct hlist_node *n;
333#endif 332#endif
334 333
335 write_lock(&ip6_frag_lock); 334 write_lock(&ip6_frags.lock);
336 hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr); 335 hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
337#ifdef CONFIG_SMP 336#ifdef CONFIG_SMP
338 hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], q.list) { 337 hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
339 if (fq->id == fq_in->id && 338 if (fq->id == fq_in->id &&
340 ipv6_addr_equal(&fq_in->saddr, &fq->saddr) && 339 ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
341 ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) { 340 ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
342 atomic_inc(&fq->q.refcnt); 341 atomic_inc(&fq->q.refcnt);
343 write_unlock(&ip6_frag_lock); 342 write_unlock(&ip6_frags.lock);
344 fq_in->q.last_in |= COMPLETE; 343 fq_in->q.last_in |= COMPLETE;
345 fq_put(fq_in, NULL); 344 fq_put(fq_in, NULL);
346 return fq; 345 return fq;
@@ -353,11 +352,11 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
353 atomic_inc(&fq->q.refcnt); 352 atomic_inc(&fq->q.refcnt);
354 353
355 atomic_inc(&fq->q.refcnt); 354 atomic_inc(&fq->q.refcnt);
356 hlist_add_head(&fq->q.list, &ip6_frag_hash[hash]); 355 hlist_add_head(&fq->q.list, &ip6_frags.hash[hash]);
357 INIT_LIST_HEAD(&fq->q.lru_list); 356 INIT_LIST_HEAD(&fq->q.lru_list);
358 list_add_tail(&fq->q.lru_list, &ip6_frag_lru_list); 357 list_add_tail(&fq->q.lru_list, &ip6_frags.lru_list);
359 ip6_frag_nqueues++; 358 ip6_frags.nqueues++;
360 write_unlock(&ip6_frag_lock); 359 write_unlock(&ip6_frags.lock);
361 return fq; 360 return fq;
362} 361}
363 362
@@ -396,18 +395,18 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
396 struct hlist_node *n; 395 struct hlist_node *n;
397 unsigned int hash; 396 unsigned int hash;
398 397
399 read_lock(&ip6_frag_lock); 398 read_lock(&ip6_frags.lock);
400 hash = ip6qhashfn(id, src, dst); 399 hash = ip6qhashfn(id, src, dst);
401 hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], q.list) { 400 hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
402 if (fq->id == id && 401 if (fq->id == id &&
403 ipv6_addr_equal(src, &fq->saddr) && 402 ipv6_addr_equal(src, &fq->saddr) &&
404 ipv6_addr_equal(dst, &fq->daddr)) { 403 ipv6_addr_equal(dst, &fq->daddr)) {
405 atomic_inc(&fq->q.refcnt); 404 atomic_inc(&fq->q.refcnt);
406 read_unlock(&ip6_frag_lock); 405 read_unlock(&ip6_frags.lock);
407 return fq; 406 return fq;
408 } 407 }
409 } 408 }
410 read_unlock(&ip6_frag_lock); 409 read_unlock(&ip6_frags.lock);
411 410
412 return ip6_frag_create(id, src, dst, idev); 411 return ip6_frag_create(id, src, dst, idev);
413} 412}
@@ -565,7 +564,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
565 } 564 }
566 fq->q.stamp = skb->tstamp; 565 fq->q.stamp = skb->tstamp;
567 fq->q.meat += skb->len; 566 fq->q.meat += skb->len;
568 atomic_add(skb->truesize, &ip6_frag_mem); 567 atomic_add(skb->truesize, &ip6_frags.mem);
569 568
570 /* The first fragment. 569 /* The first fragment.
571 * nhoffset is obtained from the first fragment, of course. 570 * nhoffset is obtained from the first fragment, of course.
@@ -578,9 +577,9 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
578 if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len) 577 if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len)
579 return ip6_frag_reasm(fq, prev, dev); 578 return ip6_frag_reasm(fq, prev, dev);
580 579
581 write_lock(&ip6_frag_lock); 580 write_lock(&ip6_frags.lock);
582 list_move_tail(&fq->q.lru_list, &ip6_frag_lru_list); 581 list_move_tail(&fq->q.lru_list, &ip6_frags.lru_list);
583 write_unlock(&ip6_frag_lock); 582 write_unlock(&ip6_frags.lock);
584 return -1; 583 return -1;
585 584
586err: 585err:
@@ -659,7 +658,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
659 head->len -= clone->len; 658 head->len -= clone->len;
660 clone->csum = 0; 659 clone->csum = 0;
661 clone->ip_summed = head->ip_summed; 660 clone->ip_summed = head->ip_summed;
662 atomic_add(clone->truesize, &ip6_frag_mem); 661 atomic_add(clone->truesize, &ip6_frags.mem);
663 } 662 }
664 663
665 /* We have to remove fragment header from datagram and to relocate 664 /* We have to remove fragment header from datagram and to relocate
@@ -674,7 +673,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
674 skb_shinfo(head)->frag_list = head->next; 673 skb_shinfo(head)->frag_list = head->next;
675 skb_reset_transport_header(head); 674 skb_reset_transport_header(head);
676 skb_push(head, head->data - skb_network_header(head)); 675 skb_push(head, head->data - skb_network_header(head));
677 atomic_sub(head->truesize, &ip6_frag_mem); 676 atomic_sub(head->truesize, &ip6_frags.mem);
678 677
679 for (fp=head->next; fp; fp = fp->next) { 678 for (fp=head->next; fp; fp = fp->next) {
680 head->data_len += fp->len; 679 head->data_len += fp->len;
@@ -684,7 +683,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
684 else if (head->ip_summed == CHECKSUM_COMPLETE) 683 else if (head->ip_summed == CHECKSUM_COMPLETE)
685 head->csum = csum_add(head->csum, fp->csum); 684 head->csum = csum_add(head->csum, fp->csum);
686 head->truesize += fp->truesize; 685 head->truesize += fp->truesize;
687 atomic_sub(fp->truesize, &ip6_frag_mem); 686 atomic_sub(fp->truesize, &ip6_frags.mem);
688 } 687 }
689 688
690 head->next = NULL; 689 head->next = NULL;
@@ -755,7 +754,7 @@ static int ipv6_frag_rcv(struct sk_buff **skbp)
755 return 1; 754 return 1;
756 } 755 }
757 756
758 if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh) 757 if (atomic_read(&ip6_frags.mem) > sysctl_ip6frag_high_thresh)
759 ip6_evictor(ip6_dst_idev(skb->dst)); 758 ip6_evictor(ip6_dst_idev(skb->dst));
760 759
761 if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr, 760 if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
@@ -787,11 +786,10 @@ void __init ipv6_frag_init(void)
787 if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0) 786 if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
788 printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n"); 787 printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");
789 788
790 ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^ 789 init_timer(&ip6_frags.secret_timer);
791 (jiffies ^ (jiffies >> 6))); 790 ip6_frags.secret_timer.function = ip6_frag_secret_rebuild;
791 ip6_frags.secret_timer.expires = jiffies + sysctl_ip6frag_secret_interval;
792 add_timer(&ip6_frags.secret_timer);
792 793
793 init_timer(&ip6_frag_secret_timer); 794 inet_frags_init(&ip6_frags);
794 ip6_frag_secret_timer.function = ip6_frag_secret_rebuild;
795 ip6_frag_secret_timer.expires = jiffies + sysctl_ip6frag_secret_interval;
796 add_timer(&ip6_frag_secret_timer);
797} 795}