author		Pavel Emelyanov <xemul@openvz.org>	2007-10-15 05:31:52 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2007-10-15 15:26:39 -0400
commit		7eb95156d9dce2f59794264db336ce007d71638b (patch)
tree		c283a095f1a9d530edb1a7058454ba30b4f7d028
parent		5ab11c98d3a950faf6922b6166e5f8fc874590e7 (diff)
[INET]: Collect frag queues management objects together
There are some objects that are common in all the places which are used to
keep track of frag queues, they are:

 * hash table
 * LRU list
 * rw lock
 * rnd number for hash function
 * the number of queues
 * the amount of memory occupied by queues
 * secret timer

Move all this stuff into one structure (struct inet_frags) to make it
possible to use them uniformly in the future. Like with the previous patch,
this mostly consists of hunks like

    -	write_lock(&ipfrag_lock);
    +	write_lock(&ip4_frags.lock);

To address the issue with exporting the number of queues and the amount of
memory occupied by queues outside the .c file they are declared in, I
introduce a couple of helpers.

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
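The resulting usage pattern is the same in all three reassemblers. As an
illustrative sketch only (distilled from the hunks below; example_unlink is a
stand-in name for the __ipq_unlink/ipq_unlink pair in ip_fragment.c, not a
function this patch adds), a call site now goes through the shared structure
and its helpers:

	static struct inet_frags ip4_frags;

	static void example_unlink(struct ipq *qp)
	{
		write_lock(&ip4_frags.lock);	/* was: ipfrag_lock */
		hlist_del(&qp->q.list);		/* hash table */
		list_del(&qp->q.lru_list);	/* LRU list */
		ip4_frags.nqueues--;		/* was: ip_frag_nqueues */
		write_unlock(&ip4_frags.lock);
	}

	/* Helper exporting the queue count outside ip_fragment.c. */
	int ip_frag_nqueues(void)
	{
		return ip4_frags.nqueues;
	}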
-rw-r--r--	include/net/inet_frag.h				 15
-rw-r--r--	include/net/ip.h				  4
-rw-r--r--	include/net/ipv6.h				  4
-rw-r--r--	net/ipv4/Makefile				  3
-rw-r--r--	net/ipv4/inet_fragment.c			 44
-rw-r--r--	net/ipv4/ip_fragment.c				109
-rw-r--r--	net/ipv4/proc.c					  4
-rw-r--r--	net/ipv6/netfilter/nf_conntrack_reasm.c		108
-rw-r--r--	net/ipv6/proc.c					  2
-rw-r--r--	net/ipv6/reassembly.c				110
10 files changed, 224 insertions, 179 deletions
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 74e9cb9b6943..d51f23873da9 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -18,4 +18,19 @@ struct inet_frag_queue {
 #define LAST_IN			1
 };
 
+#define INETFRAGS_HASHSZ	64
+
+struct inet_frags {
+	struct list_head	lru_list;
+	struct hlist_head	hash[INETFRAGS_HASHSZ];
+	rwlock_t		lock;
+	u32			rnd;
+	int			nqueues;
+	atomic_t		mem;
+	struct timer_list	secret_timer;
+};
+
+void inet_frags_init(struct inet_frags *);
+void inet_frags_fini(struct inet_frags *);
+
 #endif
diff --git a/include/net/ip.h b/include/net/ip.h
index 875c5ed53343..c08c59e2384c 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -333,8 +333,8 @@ enum ip_defrag_users
 };
 
 int ip_defrag(struct sk_buff *skb, u32 user);
-extern int ip_frag_nqueues;
-extern atomic_t ip_frag_mem;
+int ip_frag_mem(void);
+int ip_frag_nqueues(void);
 
 /*
  * Functions provided by ip_forward.c
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 31b3f1b45a2b..77cdab3ce160 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -252,8 +252,8 @@ struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
 
 extern int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb);
 
-extern int ip6_frag_nqueues;
-extern atomic_t ip6_frag_mem;
+int ip6_frag_nqueues(void);
+int ip6_frag_mem(void);
 
 #define IPV6_FRAG_TIMEOUT	(60*HZ)		/* 60 seconds */
 
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index a02c36d0a13e..93fe3966805d 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -10,7 +10,8 @@ obj-y := route.o inetpeer.o protocol.o \
 	     tcp_minisocks.o tcp_cong.o \
 	     datagram.o raw.o udp.o udplite.o \
 	     arp.o icmp.o devinet.o af_inet.o igmp.o \
-	     sysctl_net_ipv4.o fib_frontend.o fib_semantics.o
+	     sysctl_net_ipv4.o fib_frontend.o fib_semantics.o \
+	     inet_fragment.o
 
 obj-$(CONFIG_IP_FIB_HASH) += fib_hash.o
 obj-$(CONFIG_IP_FIB_TRIE) += fib_trie.o
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
new file mode 100644
index 000000000000..69623ff4e4c6
--- /dev/null
+++ b/net/ipv4/inet_fragment.c
@@ -0,0 +1,44 @@
+/*
+ * inet fragments management
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * 		Authors:	Pavel Emelyanov <xemul@openvz.org>
+ *				Started as consolidation of ipv4/ip_fragment.c,
+ *				ipv6/reassembly. and ipv6 nf conntrack reassembly
+ */
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+
+#include <net/inet_frag.h>
+
+void inet_frags_init(struct inet_frags *f)
+{
+	int i;
+
+	for (i = 0; i < INETFRAGS_HASHSZ; i++)
+		INIT_HLIST_HEAD(&f->hash[i]);
+
+	INIT_LIST_HEAD(&f->lru_list);
+	rwlock_init(&f->lock);
+
+	f->rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
+				 (jiffies ^ (jiffies >> 6)));
+
+	f->nqueues = 0;
+	atomic_set(&f->mem, 0);
+
+}
+EXPORT_SYMBOL(inet_frags_init);
+
+void inet_frags_fini(struct inet_frags *f)
+{
+}
+EXPORT_SYMBOL(inet_frags_fini);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 428eaa502ec2..321e694b72e8 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -87,16 +87,17 @@ struct ipq {
 	struct inet_peer *peer;
 };
 
-/* Hash table. */
+static struct inet_frags ip4_frags;
 
-#define IPQ_HASHSZ	64
+int ip_frag_nqueues(void)
+{
+	return ip4_frags.nqueues;
+}
 
-/* Per-bucket lock is easy to add now. */
-static struct hlist_head ipq_hash[IPQ_HASHSZ];
-static DEFINE_RWLOCK(ipfrag_lock);
-static u32 ipfrag_hash_rnd;
-static LIST_HEAD(ipq_lru_list);
-int ip_frag_nqueues = 0;
+int ip_frag_mem(void)
+{
+	return atomic_read(&ip4_frags.mem);
+}
 
 static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 			 struct net_device *dev);
@@ -105,24 +106,23 @@ static __inline__ void __ipq_unlink(struct ipq *qp)
 {
 	hlist_del(&qp->q.list);
 	list_del(&qp->q.lru_list);
-	ip_frag_nqueues--;
+	ip4_frags.nqueues--;
 }
 
 static __inline__ void ipq_unlink(struct ipq *ipq)
 {
-	write_lock(&ipfrag_lock);
+	write_lock(&ip4_frags.lock);
 	__ipq_unlink(ipq);
-	write_unlock(&ipfrag_lock);
+	write_unlock(&ip4_frags.lock);
 }
 
 static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
 {
 	return jhash_3words((__force u32)id << 16 | prot,
 			    (__force u32)saddr, (__force u32)daddr,
-			    ipfrag_hash_rnd) & (IPQ_HASHSZ - 1);
+			    ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
 }
 
-static struct timer_list ipfrag_secret_timer;
 int sysctl_ipfrag_secret_interval __read_mostly = 10 * 60 * HZ;
 
 static void ipfrag_secret_rebuild(unsigned long dummy)
@@ -130,13 +130,13 @@ static void ipfrag_secret_rebuild(unsigned long dummy)
 	unsigned long now = jiffies;
 	int i;
 
-	write_lock(&ipfrag_lock);
-	get_random_bytes(&ipfrag_hash_rnd, sizeof(u32));
-	for (i = 0; i < IPQ_HASHSZ; i++) {
+	write_lock(&ip4_frags.lock);
+	get_random_bytes(&ip4_frags.rnd, sizeof(u32));
+	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
 		struct ipq *q;
 		struct hlist_node *p, *n;
 
-		hlist_for_each_entry_safe(q, p, n, &ipq_hash[i], q.list) {
+		hlist_for_each_entry_safe(q, p, n, &ip4_frags.hash[i], q.list) {
 			unsigned int hval = ipqhashfn(q->id, q->saddr,
 						      q->daddr, q->protocol);
 
@@ -144,23 +144,21 @@ static void ipfrag_secret_rebuild(unsigned long dummy)
 				hlist_del(&q->q.list);
 
 				/* Relink to new hash chain. */
-				hlist_add_head(&q->q.list, &ipq_hash[hval]);
+				hlist_add_head(&q->q.list, &ip4_frags.hash[hval]);
 			}
 		}
 	}
-	write_unlock(&ipfrag_lock);
+	write_unlock(&ip4_frags.lock);
 
-	mod_timer(&ipfrag_secret_timer, now + sysctl_ipfrag_secret_interval);
+	mod_timer(&ip4_frags.secret_timer, now + sysctl_ipfrag_secret_interval);
 }
 
-atomic_t ip_frag_mem = ATOMIC_INIT(0);	/* Memory used for fragments */
-
 /* Memory Tracking Functions. */
 static __inline__ void frag_kfree_skb(struct sk_buff *skb, int *work)
 {
 	if (work)
 		*work -= skb->truesize;
-	atomic_sub(skb->truesize, &ip_frag_mem);
+	atomic_sub(skb->truesize, &ip4_frags.mem);
 	kfree_skb(skb);
 }
 
@@ -168,7 +166,7 @@ static __inline__ void frag_free_queue(struct ipq *qp, int *work)
 {
 	if (work)
 		*work -= sizeof(struct ipq);
-	atomic_sub(sizeof(struct ipq), &ip_frag_mem);
+	atomic_sub(sizeof(struct ipq), &ip4_frags.mem);
 	kfree(qp);
 }
 
@@ -178,7 +176,7 @@ static __inline__ struct ipq *frag_alloc_queue(void)
 
 	if (!qp)
 		return NULL;
-	atomic_add(sizeof(struct ipq), &ip_frag_mem);
+	atomic_add(sizeof(struct ipq), &ip4_frags.mem);
 	return qp;
 }
 
@@ -239,20 +237,20 @@ static void ip_evictor(void)
 	struct list_head *tmp;
 	int work;
 
-	work = atomic_read(&ip_frag_mem) - sysctl_ipfrag_low_thresh;
+	work = atomic_read(&ip4_frags.mem) - sysctl_ipfrag_low_thresh;
 	if (work <= 0)
 		return;
 
 	while (work > 0) {
-		read_lock(&ipfrag_lock);
-		if (list_empty(&ipq_lru_list)) {
-			read_unlock(&ipfrag_lock);
+		read_lock(&ip4_frags.lock);
+		if (list_empty(&ip4_frags.lru_list)) {
+			read_unlock(&ip4_frags.lock);
 			return;
 		}
-		tmp = ipq_lru_list.next;
+		tmp = ip4_frags.lru_list.next;
 		qp = list_entry(tmp, struct ipq, q.lru_list);
 		atomic_inc(&qp->q.refcnt);
-		read_unlock(&ipfrag_lock);
+		read_unlock(&ip4_frags.lock);
 
 		spin_lock(&qp->q.lock);
 		if (!(qp->q.last_in&COMPLETE))
@@ -304,7 +302,7 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in)
 #endif
 	unsigned int hash;
 
-	write_lock(&ipfrag_lock);
+	write_lock(&ip4_frags.lock);
 	hash = ipqhashfn(qp_in->id, qp_in->saddr, qp_in->daddr,
 			 qp_in->protocol);
 #ifdef CONFIG_SMP
@@ -312,14 +310,14 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in)
 	 * such entry could be created on other cpu, while we
 	 * promoted read lock to write lock.
 	 */
-	hlist_for_each_entry(qp, n, &ipq_hash[hash], q.list) {
+	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
 		if (qp->id == qp_in->id &&
 		    qp->saddr == qp_in->saddr &&
 		    qp->daddr == qp_in->daddr &&
 		    qp->protocol == qp_in->protocol &&
 		    qp->user == qp_in->user) {
 			atomic_inc(&qp->q.refcnt);
-			write_unlock(&ipfrag_lock);
+			write_unlock(&ip4_frags.lock);
 			qp_in->q.last_in |= COMPLETE;
 			ipq_put(qp_in, NULL);
 			return qp;
@@ -332,11 +330,11 @@ static struct ipq *ip_frag_intern(struct ipq *qp_in)
 	atomic_inc(&qp->q.refcnt);
 
 	atomic_inc(&qp->q.refcnt);
-	hlist_add_head(&qp->q.list, &ipq_hash[hash]);
+	hlist_add_head(&qp->q.list, &ip4_frags.hash[hash]);
 	INIT_LIST_HEAD(&qp->q.lru_list);
-	list_add_tail(&qp->q.lru_list, &ipq_lru_list);
-	ip_frag_nqueues++;
-	write_unlock(&ipfrag_lock);
+	list_add_tail(&qp->q.lru_list, &ip4_frags.lru_list);
+	ip4_frags.nqueues++;
+	write_unlock(&ip4_frags.lock);
 	return qp;
 }
 
@@ -387,20 +385,20 @@ static inline struct ipq *ip_find(struct iphdr *iph, u32 user)
 	struct ipq *qp;
 	struct hlist_node *n;
 
-	read_lock(&ipfrag_lock);
+	read_lock(&ip4_frags.lock);
 	hash = ipqhashfn(id, saddr, daddr, protocol);
-	hlist_for_each_entry(qp, n, &ipq_hash[hash], q.list) {
+	hlist_for_each_entry(qp, n, &ip4_frags.hash[hash], q.list) {
 		if (qp->id == id &&
 		    qp->saddr == saddr &&
 		    qp->daddr == daddr &&
 		    qp->protocol == protocol &&
 		    qp->user == user) {
 			atomic_inc(&qp->q.refcnt);
-			read_unlock(&ipfrag_lock);
+			read_unlock(&ip4_frags.lock);
 			return qp;
 		}
 	}
-	read_unlock(&ipfrag_lock);
+	read_unlock(&ip4_frags.lock);
 
 	return ip_frag_create(iph, user);
 }
@@ -599,16 +597,16 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
 	}
 	qp->q.stamp = skb->tstamp;
 	qp->q.meat += skb->len;
-	atomic_add(skb->truesize, &ip_frag_mem);
+	atomic_add(skb->truesize, &ip4_frags.mem);
 	if (offset == 0)
 		qp->q.last_in |= FIRST_IN;
 
 	if (qp->q.last_in == (FIRST_IN | LAST_IN) && qp->q.meat == qp->q.len)
 		return ip_frag_reasm(qp, prev, dev);
 
-	write_lock(&ipfrag_lock);
-	list_move_tail(&qp->q.lru_list, &ipq_lru_list);
-	write_unlock(&ipfrag_lock);
+	write_lock(&ip4_frags.lock);
+	list_move_tail(&qp->q.lru_list, &ip4_frags.lru_list);
+	write_unlock(&ip4_frags.lock);
 	return -EINPROGRESS;
 
 err:
@@ -684,12 +682,12 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		head->len -= clone->len;
 		clone->csum = 0;
 		clone->ip_summed = head->ip_summed;
-		atomic_add(clone->truesize, &ip_frag_mem);
+		atomic_add(clone->truesize, &ip4_frags.mem);
 	}
 
 	skb_shinfo(head)->frag_list = head->next;
 	skb_push(head, head->data - skb_network_header(head));
-	atomic_sub(head->truesize, &ip_frag_mem);
+	atomic_sub(head->truesize, &ip4_frags.mem);
 
 	for (fp=head->next; fp; fp = fp->next) {
 		head->data_len += fp->len;
@@ -699,7 +697,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
-		atomic_sub(fp->truesize, &ip_frag_mem);
+		atomic_sub(fp->truesize, &ip4_frags.mem);
 	}
 
 	head->next = NULL;
@@ -735,7 +733,7 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 	IP_INC_STATS_BH(IPSTATS_MIB_REASMREQDS);
 
 	/* Start by cleaning up the memory. */
-	if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
+	if (atomic_read(&ip4_frags.mem) > sysctl_ipfrag_high_thresh)
 		ip_evictor();
 
 	/* Lookup (or create) queue header */
@@ -758,13 +756,12 @@ int ip_defrag(struct sk_buff *skb, u32 user)
 
 void __init ipfrag_init(void)
 {
-	ipfrag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
-				 (jiffies ^ (jiffies >> 6)));
+	init_timer(&ip4_frags.secret_timer);
+	ip4_frags.secret_timer.function = ipfrag_secret_rebuild;
+	ip4_frags.secret_timer.expires = jiffies + sysctl_ipfrag_secret_interval;
+	add_timer(&ip4_frags.secret_timer);
 
-	init_timer(&ipfrag_secret_timer);
-	ipfrag_secret_timer.function = ipfrag_secret_rebuild;
-	ipfrag_secret_timer.expires = jiffies + sysctl_ipfrag_secret_interval;
-	add_timer(&ipfrag_secret_timer);
+	inet_frags_init(&ip4_frags);
 }
 
 EXPORT_SYMBOL(ip_defrag);
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index e5b05b039101..fd16cb8f8abe 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -70,8 +70,8 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 	seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot));
 	seq_printf(seq, "UDPLITE: inuse %d\n", fold_prot_inuse(&udplite_prot));
 	seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot));
-	seq_printf(seq, "FRAG: inuse %d memory %d\n", ip_frag_nqueues,
-		   atomic_read(&ip_frag_mem));
+	seq_printf(seq, "FRAG: inuse %d memory %d\n",
+		   ip_frag_nqueues(), ip_frag_mem());
 	return 0;
 }
 
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 52e9f6a3995d..eb2ca1b7ddab 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -74,28 +74,20 @@ struct nf_ct_frag6_queue
 	__u16			nhoffset;
 };
 
-/* Hash table. */
-
-#define FRAG6Q_HASHSZ	64
-
-static struct hlist_head nf_ct_frag6_hash[FRAG6Q_HASHSZ];
-static DEFINE_RWLOCK(nf_ct_frag6_lock);
-static u32 nf_ct_frag6_hash_rnd;
-static LIST_HEAD(nf_ct_frag6_lru_list);
-int nf_ct_frag6_nqueues = 0;
+static struct inet_frags nf_frags;
 
 static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq)
 {
 	hlist_del(&fq->q.list);
 	list_del(&fq->q.lru_list);
-	nf_ct_frag6_nqueues--;
+	nf_frags.nqueues--;
 }
 
 static __inline__ void fq_unlink(struct nf_ct_frag6_queue *fq)
 {
-	write_lock(&nf_ct_frag6_lock);
+	write_lock(&nf_frags.lock);
 	__fq_unlink(fq);
-	write_unlock(&nf_ct_frag6_lock);
+	write_unlock(&nf_frags.lock);
 }
 
 static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
@@ -109,7 +101,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
 
 	a += JHASH_GOLDEN_RATIO;
 	b += JHASH_GOLDEN_RATIO;
-	c += nf_ct_frag6_hash_rnd;
+	c += nf_frags.rnd;
 	__jhash_mix(a, b, c);
 
 	a += (__force u32)saddr->s6_addr32[3];
@@ -122,10 +114,9 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
 	c += (__force u32)id;
 	__jhash_mix(a, b, c);
 
-	return c & (FRAG6Q_HASHSZ - 1);
+	return c & (INETFRAGS_HASHSZ - 1);
 }
 
-static struct timer_list nf_ct_frag6_secret_timer;
 int nf_ct_frag6_secret_interval = 10 * 60 * HZ;
 
 static void nf_ct_frag6_secret_rebuild(unsigned long dummy)
@@ -133,13 +124,13 @@ static void nf_ct_frag6_secret_rebuild(unsigned long dummy)
 	unsigned long now = jiffies;
 	int i;
 
-	write_lock(&nf_ct_frag6_lock);
-	get_random_bytes(&nf_ct_frag6_hash_rnd, sizeof(u32));
-	for (i = 0; i < FRAG6Q_HASHSZ; i++) {
+	write_lock(&nf_frags.lock);
+	get_random_bytes(&nf_frags.rnd, sizeof(u32));
+	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
 		struct nf_ct_frag6_queue *q;
 		struct hlist_node *p, *n;
 
-		hlist_for_each_entry_safe(q, p, n, &nf_ct_frag6_hash[i], q.list) {
+		hlist_for_each_entry_safe(q, p, n, &nf_frags.hash[i], q.list) {
 			unsigned int hval = ip6qhashfn(q->id,
 						       &q->saddr,
 						       &q->daddr);
@@ -147,23 +138,21 @@ static void nf_ct_frag6_secret_rebuild(unsigned long dummy)
 				hlist_del(&q->q.list);
 				/* Relink to new hash chain. */
 				hlist_add_head(&q->q.list,
-					       &nf_ct_frag6_hash[hval]);
+					       &nf_frags.hash[hval]);
 			}
 		}
 	}
-	write_unlock(&nf_ct_frag6_lock);
+	write_unlock(&nf_frags.lock);
 
-	mod_timer(&nf_ct_frag6_secret_timer, now + nf_ct_frag6_secret_interval);
+	mod_timer(&nf_frags.secret_timer, now + nf_ct_frag6_secret_interval);
 }
 
-atomic_t nf_ct_frag6_mem = ATOMIC_INIT(0);
-
 /* Memory Tracking Functions. */
 static inline void frag_kfree_skb(struct sk_buff *skb, unsigned int *work)
 {
 	if (work)
 		*work -= skb->truesize;
-	atomic_sub(skb->truesize, &nf_ct_frag6_mem);
+	atomic_sub(skb->truesize, &nf_frags.mem);
 	if (NFCT_FRAG6_CB(skb)->orig)
 		kfree_skb(NFCT_FRAG6_CB(skb)->orig);
 
@@ -175,7 +164,7 @@ static inline void frag_free_queue(struct nf_ct_frag6_queue *fq,
 {
 	if (work)
 		*work -= sizeof(struct nf_ct_frag6_queue);
-	atomic_sub(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem);
+	atomic_sub(sizeof(struct nf_ct_frag6_queue), &nf_frags.mem);
 	kfree(fq);
 }
 
@@ -185,7 +174,7 @@ static inline struct nf_ct_frag6_queue *frag_alloc_queue(void)
 
 	if (!fq)
 		return NULL;
-	atomic_add(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem);
+	atomic_add(sizeof(struct nf_ct_frag6_queue), &nf_frags.mem);
 	return fq;
 }
 
@@ -239,22 +228,22 @@ static void nf_ct_frag6_evictor(void)
 	struct list_head *tmp;
 	unsigned int work;
 
-	work = atomic_read(&nf_ct_frag6_mem);
+	work = atomic_read(&nf_frags.mem);
 	if (work <= nf_ct_frag6_low_thresh)
 		return;
 
 	work -= nf_ct_frag6_low_thresh;
 	while (work > 0) {
-		read_lock(&nf_ct_frag6_lock);
-		if (list_empty(&nf_ct_frag6_lru_list)) {
-			read_unlock(&nf_ct_frag6_lock);
+		read_lock(&nf_frags.lock);
+		if (list_empty(&nf_frags.lru_list)) {
+			read_unlock(&nf_frags.lock);
 			return;
 		}
-		tmp = nf_ct_frag6_lru_list.next;
+		tmp = nf_frags.lru_list.next;
 		BUG_ON(tmp == NULL);
 		fq = list_entry(tmp, struct nf_ct_frag6_queue, q.lru_list);
 		atomic_inc(&fq->q.refcnt);
-		read_unlock(&nf_ct_frag6_lock);
+		read_unlock(&nf_frags.lock);
 
 		spin_lock(&fq->q.lock);
 		if (!(fq->q.last_in&COMPLETE))
@@ -291,14 +280,14 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
 	struct hlist_node *n;
 #endif
 
-	write_lock(&nf_ct_frag6_lock);
+	write_lock(&nf_frags.lock);
 #ifdef CONFIG_SMP
-	hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], q.list) {
+	hlist_for_each_entry(fq, n, &nf_frags.hash[hash], q.list) {
 		if (fq->id == fq_in->id &&
 		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
 		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
 			atomic_inc(&fq->q.refcnt);
-			write_unlock(&nf_ct_frag6_lock);
+			write_unlock(&nf_frags.lock);
 			fq_in->q.last_in |= COMPLETE;
 			fq_put(fq_in, NULL);
 			return fq;
@@ -311,11 +300,11 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
 	atomic_inc(&fq->q.refcnt);
 
 	atomic_inc(&fq->q.refcnt);
-	hlist_add_head(&fq->q.list, &nf_ct_frag6_hash[hash]);
+	hlist_add_head(&fq->q.list, &nf_frags.hash[hash]);
 	INIT_LIST_HEAD(&fq->q.lru_list);
-	list_add_tail(&fq->q.lru_list, &nf_ct_frag6_lru_list);
-	nf_ct_frag6_nqueues++;
-	write_unlock(&nf_ct_frag6_lock);
+	list_add_tail(&fq->q.lru_list, &nf_frags.lru_list);
+	nf_frags.nqueues++;
+	write_unlock(&nf_frags.lock);
 	return fq;
 }
 
@@ -353,17 +342,17 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
 	struct hlist_node *n;
 	unsigned int hash = ip6qhashfn(id, src, dst);
 
-	read_lock(&nf_ct_frag6_lock);
-	hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], q.list) {
+	read_lock(&nf_frags.lock);
+	hlist_for_each_entry(fq, n, &nf_frags.hash[hash], q.list) {
 		if (fq->id == id &&
 		    ipv6_addr_equal(src, &fq->saddr) &&
 		    ipv6_addr_equal(dst, &fq->daddr)) {
 			atomic_inc(&fq->q.refcnt);
-			read_unlock(&nf_ct_frag6_lock);
+			read_unlock(&nf_frags.lock);
 			return fq;
 		}
 	}
-	read_unlock(&nf_ct_frag6_lock);
+	read_unlock(&nf_frags.lock);
 
 	return nf_ct_frag6_create(hash, id, src, dst);
 }
@@ -526,7 +515,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 	skb->dev = NULL;
 	fq->q.stamp = skb->tstamp;
 	fq->q.meat += skb->len;
-	atomic_add(skb->truesize, &nf_ct_frag6_mem);
+	atomic_add(skb->truesize, &nf_frags.mem);
 
 	/* The first fragment.
 	 * nhoffset is obtained from the first fragment, of course.
@@ -535,9 +524,9 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 		fq->nhoffset = nhoff;
 		fq->q.last_in |= FIRST_IN;
 	}
-	write_lock(&nf_ct_frag6_lock);
-	list_move_tail(&fq->q.lru_list, &nf_ct_frag6_lru_list);
-	write_unlock(&nf_ct_frag6_lock);
+	write_lock(&nf_frags.lock);
+	list_move_tail(&fq->q.lru_list, &nf_frags.lru_list);
+	write_unlock(&nf_frags.lock);
 	return 0;
 
 err:
@@ -603,7 +592,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 		clone->ip_summed = head->ip_summed;
 
 		NFCT_FRAG6_CB(clone)->orig = NULL;
-		atomic_add(clone->truesize, &nf_ct_frag6_mem);
+		atomic_add(clone->truesize, &nf_frags.mem);
 	}
 
 	/* We have to remove fragment header from datagram and to relocate
@@ -617,7 +606,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 	skb_shinfo(head)->frag_list = head->next;
 	skb_reset_transport_header(head);
 	skb_push(head, head->data - skb_network_header(head));
-	atomic_sub(head->truesize, &nf_ct_frag6_mem);
+	atomic_sub(head->truesize, &nf_frags.mem);
 
 	for (fp=head->next; fp; fp = fp->next) {
 		head->data_len += fp->len;
@@ -627,7 +616,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
-		atomic_sub(fp->truesize, &nf_ct_frag6_mem);
+		atomic_sub(fp->truesize, &nf_frags.mem);
 	}
 
 	head->next = NULL;
@@ -777,7 +766,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
 		goto ret_orig;
 	}
 
-	if (atomic_read(&nf_ct_frag6_mem) > nf_ct_frag6_high_thresh)
+	if (atomic_read(&nf_frags.mem) > nf_ct_frag6_high_thresh)
 		nf_ct_frag6_evictor();
 
 	fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
@@ -848,20 +837,21 @@ int nf_ct_frag6_kfree_frags(struct sk_buff *skb)
 
 int nf_ct_frag6_init(void)
 {
-	nf_ct_frag6_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
-				   (jiffies ^ (jiffies >> 6)));
-
-	setup_timer(&nf_ct_frag6_secret_timer, nf_ct_frag6_secret_rebuild, 0);
-	nf_ct_frag6_secret_timer.expires = jiffies
+	setup_timer(&nf_frags.secret_timer, nf_ct_frag6_secret_rebuild, 0);
+	nf_frags.secret_timer.expires = jiffies
 					   + nf_ct_frag6_secret_interval;
-	add_timer(&nf_ct_frag6_secret_timer);
+	add_timer(&nf_frags.secret_timer);
+
+	inet_frags_init(&nf_frags);
 
 	return 0;
 }
 
 void nf_ct_frag6_cleanup(void)
 {
-	del_timer(&nf_ct_frag6_secret_timer);
+	inet_frags_fini(&nf_frags);
+
+	del_timer(&nf_frags.secret_timer);
 	nf_ct_frag6_low_thresh = 0;
 	nf_ct_frag6_evictor();
 }
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index db945018579e..be526ad92543 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -54,7 +54,7 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v)
 	seq_printf(seq, "RAW6: inuse %d\n",
 		       fold_prot_inuse(&rawv6_prot));
 	seq_printf(seq, "FRAG6: inuse %d memory %d\n",
-		       ip6_frag_nqueues, atomic_read(&ip6_frag_mem));
+		       ip6_frag_nqueues(), ip6_frag_mem());
 	return 0;
 }
 
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index aef5dd1ebc8a..ecf340047cde 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -87,15 +87,17 @@ struct frag_queue
 	__u16			nhoffset;
 };
 
-/* Hash table. */
+static struct inet_frags ip6_frags;
 
-#define IP6Q_HASHSZ	64
+int ip6_frag_nqueues(void)
+{
+	return ip6_frags.nqueues;
+}
 
-static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
-static DEFINE_RWLOCK(ip6_frag_lock);
-static u32 ip6_frag_hash_rnd;
-static LIST_HEAD(ip6_frag_lru_list);
-int ip6_frag_nqueues = 0;
+int ip6_frag_mem(void)
+{
+	return atomic_read(&ip6_frags.mem);
+}
 
 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 			  struct net_device *dev);
@@ -104,14 +106,14 @@ static __inline__ void __fq_unlink(struct frag_queue *fq)
 {
 	hlist_del(&fq->q.list);
 	list_del(&fq->q.lru_list);
-	ip6_frag_nqueues--;
+	ip6_frags.nqueues--;
 }
 
 static __inline__ void fq_unlink(struct frag_queue *fq)
 {
-	write_lock(&ip6_frag_lock);
+	write_lock(&ip6_frags.lock);
 	__fq_unlink(fq);
-	write_unlock(&ip6_frag_lock);
+	write_unlock(&ip6_frags.lock);
 }
 
 /*
@@ -129,7 +131,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
 
 	a += JHASH_GOLDEN_RATIO;
 	b += JHASH_GOLDEN_RATIO;
-	c += ip6_frag_hash_rnd;
+	c += ip6_frags.rnd;
 	__jhash_mix(a, b, c);
 
 	a += (__force u32)saddr->s6_addr32[3];
@@ -142,10 +144,9 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
 	c += (__force u32)id;
 	__jhash_mix(a, b, c);
 
-	return c & (IP6Q_HASHSZ - 1);
+	return c & (INETFRAGS_HASHSZ - 1);
 }
 
-static struct timer_list ip6_frag_secret_timer;
 int sysctl_ip6frag_secret_interval __read_mostly = 10 * 60 * HZ;
 
 static void ip6_frag_secret_rebuild(unsigned long dummy)
@@ -153,13 +154,13 @@ static void ip6_frag_secret_rebuild(unsigned long dummy)
 	unsigned long now = jiffies;
 	int i;
 
-	write_lock(&ip6_frag_lock);
-	get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
-	for (i = 0; i < IP6Q_HASHSZ; i++) {
+	write_lock(&ip6_frags.lock);
+	get_random_bytes(&ip6_frags.rnd, sizeof(u32));
+	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
 		struct frag_queue *q;
 		struct hlist_node *p, *n;
 
-		hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], q.list) {
+		hlist_for_each_entry_safe(q, p, n, &ip6_frags.hash[i], q.list) {
 			unsigned int hval = ip6qhashfn(q->id,
 						       &q->saddr,
 						       &q->daddr);
@@ -169,24 +170,22 @@ static void ip6_frag_secret_rebuild(unsigned long dummy)
 
 				/* Relink to new hash chain. */
 				hlist_add_head(&q->q.list,
-					       &ip6_frag_hash[hval]);
+					       &ip6_frags.hash[hval]);
 
 			}
 		}
 	}
-	write_unlock(&ip6_frag_lock);
+	write_unlock(&ip6_frags.lock);
 
-	mod_timer(&ip6_frag_secret_timer, now + sysctl_ip6frag_secret_interval);
+	mod_timer(&ip6_frags.secret_timer, now + sysctl_ip6frag_secret_interval);
 }
 
-atomic_t ip6_frag_mem = ATOMIC_INIT(0);
-
 /* Memory Tracking Functions. */
 static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
 {
 	if (work)
 		*work -= skb->truesize;
-	atomic_sub(skb->truesize, &ip6_frag_mem);
+	atomic_sub(skb->truesize, &ip6_frags.mem);
 	kfree_skb(skb);
 }
 
@@ -194,7 +193,7 @@ static inline void frag_free_queue(struct frag_queue *fq, int *work)
 {
 	if (work)
 		*work -= sizeof(struct frag_queue);
-	atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem);
+	atomic_sub(sizeof(struct frag_queue), &ip6_frags.mem);
 	kfree(fq);
 }
 
@@ -204,7 +203,7 @@ static inline struct frag_queue *frag_alloc_queue(void)
 
 	if(!fq)
 		return NULL;
-	atomic_add(sizeof(struct frag_queue), &ip6_frag_mem);
+	atomic_add(sizeof(struct frag_queue), &ip6_frags.mem);
 	return fq;
 }
 
@@ -257,20 +256,20 @@ static void ip6_evictor(struct inet6_dev *idev)
 	struct list_head *tmp;
 	int work;
 
-	work = atomic_read(&ip6_frag_mem) - sysctl_ip6frag_low_thresh;
+	work = atomic_read(&ip6_frags.mem) - sysctl_ip6frag_low_thresh;
 	if (work <= 0)
 		return;
 
 	while(work > 0) {
-		read_lock(&ip6_frag_lock);
-		if (list_empty(&ip6_frag_lru_list)) {
-			read_unlock(&ip6_frag_lock);
+		read_lock(&ip6_frags.lock);
+		if (list_empty(&ip6_frags.lru_list)) {
+			read_unlock(&ip6_frags.lock);
 			return;
 		}
-		tmp = ip6_frag_lru_list.next;
+		tmp = ip6_frags.lru_list.next;
 		fq = list_entry(tmp, struct frag_queue, q.lru_list);
 		atomic_inc(&fq->q.refcnt);
-		read_unlock(&ip6_frag_lock);
+		read_unlock(&ip6_frags.lock);
 
 		spin_lock(&fq->q.lock);
 		if (!(fq->q.last_in&COMPLETE))
@@ -332,15 +331,15 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
 	struct hlist_node *n;
 #endif
 
-	write_lock(&ip6_frag_lock);
+	write_lock(&ip6_frags.lock);
 	hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
 #ifdef CONFIG_SMP
-	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], q.list) {
+	hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
 		if (fq->id == fq_in->id &&
 		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
 		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
 			atomic_inc(&fq->q.refcnt);
-			write_unlock(&ip6_frag_lock);
+			write_unlock(&ip6_frags.lock);
 			fq_in->q.last_in |= COMPLETE;
 			fq_put(fq_in, NULL);
 			return fq;
@@ -353,11 +352,11 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
 	atomic_inc(&fq->q.refcnt);
 
 	atomic_inc(&fq->q.refcnt);
-	hlist_add_head(&fq->q.list, &ip6_frag_hash[hash]);
+	hlist_add_head(&fq->q.list, &ip6_frags.hash[hash]);
 	INIT_LIST_HEAD(&fq->q.lru_list);
-	list_add_tail(&fq->q.lru_list, &ip6_frag_lru_list);
-	ip6_frag_nqueues++;
-	write_unlock(&ip6_frag_lock);
+	list_add_tail(&fq->q.lru_list, &ip6_frags.lru_list);
+	ip6_frags.nqueues++;
+	write_unlock(&ip6_frags.lock);
 	return fq;
 }
 
@@ -396,18 +395,18 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
 	struct hlist_node *n;
 	unsigned int hash;
 
-	read_lock(&ip6_frag_lock);
+	read_lock(&ip6_frags.lock);
 	hash = ip6qhashfn(id, src, dst);
-	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], q.list) {
+	hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
 		if (fq->id == id &&
 		    ipv6_addr_equal(src, &fq->saddr) &&
 		    ipv6_addr_equal(dst, &fq->daddr)) {
 			atomic_inc(&fq->q.refcnt);
-			read_unlock(&ip6_frag_lock);
+			read_unlock(&ip6_frags.lock);
 			return fq;
 		}
 	}
-	read_unlock(&ip6_frag_lock);
+	read_unlock(&ip6_frags.lock);
 
 	return ip6_frag_create(id, src, dst, idev);
 }
@@ -565,7 +564,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	}
 	fq->q.stamp = skb->tstamp;
 	fq->q.meat += skb->len;
-	atomic_add(skb->truesize, &ip6_frag_mem);
+	atomic_add(skb->truesize, &ip6_frags.mem);
 
 	/* The first fragment.
 	 * nhoffset is obtained from the first fragment, of course.
@@ -578,9 +577,9 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len)
 		return ip6_frag_reasm(fq, prev, dev);
 
-	write_lock(&ip6_frag_lock);
-	list_move_tail(&fq->q.lru_list, &ip6_frag_lru_list);
-	write_unlock(&ip6_frag_lock);
+	write_lock(&ip6_frags.lock);
+	list_move_tail(&fq->q.lru_list, &ip6_frags.lru_list);
+	write_unlock(&ip6_frags.lock);
 	return -1;
 
 err:
@@ -659,7 +658,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 		head->len -= clone->len;
 		clone->csum = 0;
 		clone->ip_summed = head->ip_summed;
-		atomic_add(clone->truesize, &ip6_frag_mem);
+		atomic_add(clone->truesize, &ip6_frags.mem);
 	}
 
 	/* We have to remove fragment header from datagram and to relocate
@@ -674,7 +673,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	skb_shinfo(head)->frag_list = head->next;
 	skb_reset_transport_header(head);
 	skb_push(head, head->data - skb_network_header(head));
-	atomic_sub(head->truesize, &ip6_frag_mem);
+	atomic_sub(head->truesize, &ip6_frags.mem);
 
 	for (fp=head->next; fp; fp = fp->next) {
 		head->data_len += fp->len;
@@ -684,7 +683,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 		else if (head->ip_summed == CHECKSUM_COMPLETE)
 			head->csum = csum_add(head->csum, fp->csum);
 		head->truesize += fp->truesize;
-		atomic_sub(fp->truesize, &ip6_frag_mem);
+		atomic_sub(fp->truesize, &ip6_frags.mem);
 	}
 
 	head->next = NULL;
@@ -755,7 +754,7 @@ static int ipv6_frag_rcv(struct sk_buff **skbp)
 		return 1;
 	}
 
-	if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
+	if (atomic_read(&ip6_frags.mem) > sysctl_ip6frag_high_thresh)
 		ip6_evictor(ip6_dst_idev(skb->dst));
 
 	if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
@@ -787,11 +786,10 @@ void __init ipv6_frag_init(void)
 	if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
 		printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");
 
-	ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
-				   (jiffies ^ (jiffies >> 6)));
+	init_timer(&ip6_frags.secret_timer);
+	ip6_frags.secret_timer.function = ip6_frag_secret_rebuild;
+	ip6_frags.secret_timer.expires = jiffies + sysctl_ip6frag_secret_interval;
+	add_timer(&ip6_frags.secret_timer);
 
-	init_timer(&ip6_frag_secret_timer);
-	ip6_frag_secret_timer.function = ip6_frag_secret_rebuild;
-	ip6_frag_secret_timer.expires = jiffies + sysctl_ip6frag_secret_interval;
-	add_timer(&ip6_frag_secret_timer);
+	inet_frags_init(&ip6_frags);
 }