Diffstat (limited to 'net/ipv6/reassembly.c')
-rw-r--r--      net/ipv6/reassembly.c      361
1 file changed, 140 insertions(+), 221 deletions(-)
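This diff ports IPv6 fragment reassembly onto the shared inet_frag infrastructure. The file's private hash table, LRU list, secret-rebuild timer, and sysctl globals are deleted; struct frag_queue instead embeds struct inet_frag_queue, the tuning knobs move into ip6_frags_ctl, and queue lifetime management is delegated to inet_frag_kill(), inet_frag_put(), and inet_frag_evictor(). ip6_frag_queue() also gains an int return value so the fragment that completes a datagram is reassembled on the spot by ip6_frag_reasm() rather than re-checked in ipv6_frag_rcv().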
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 31601c993541..6ad19cfc2025 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -42,6 +42,7 @@
 #include <linux/icmpv6.h>
 #include <linux/random.h>
 #include <linux/jhash.h>
+#include <linux/skbuff.h>
 
 #include <net/sock.h>
 #include <net/snmp.h>
@@ -53,11 +54,7 @@
 #include <net/rawv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
-
-int sysctl_ip6frag_high_thresh __read_mostly = 256*1024;
-int sysctl_ip6frag_low_thresh __read_mostly = 192*1024;
-
-int sysctl_ip6frag_time __read_mostly = IPV6_FRAG_TIMEOUT;
+#include <net/inet_frag.h>
 
 struct ip6frag_skb_cb
 {
@@ -74,53 +71,39 @@ struct ip6frag_skb_cb
 
 struct frag_queue
 {
-        struct hlist_node       list;
-        struct list_head        lru_list;       /* lru list member */
+        struct inet_frag_queue  q;
 
         __be32                  id;             /* fragment id */
         struct in6_addr         saddr;
         struct in6_addr         daddr;
 
-        spinlock_t              lock;
-        atomic_t                refcnt;
-        struct timer_list       timer;          /* expire timer */
-        struct sk_buff          *fragments;
-        int                     len;
-        int                     meat;
         int                     iif;
-        ktime_t                 stamp;
         unsigned int            csum;
-        __u8                    last_in;        /* has first/last segment arrived? */
-#define COMPLETE                4
-#define FIRST_IN                2
-#define LAST_IN                 1
         __u16                   nhoffset;
 };
 
-/* Hash table. */
-
-#define IP6Q_HASHSZ     64
+struct inet_frags_ctl ip6_frags_ctl __read_mostly = {
+        .high_thresh     = 256 * 1024,
+        .low_thresh      = 192 * 1024,
+        .timeout         = IPV6_FRAG_TIMEOUT,
+        .secret_interval = 10 * 60 * HZ,
+};
 
-static struct hlist_head ip6_frag_hash[IP6Q_HASHSZ];
-static DEFINE_RWLOCK(ip6_frag_lock);
-static u32 ip6_frag_hash_rnd;
-static LIST_HEAD(ip6_frag_lru_list);
-int ip6_frag_nqueues = 0;
+static struct inet_frags ip6_frags;
 
-static __inline__ void __fq_unlink(struct frag_queue *fq)
+int ip6_frag_nqueues(void)
 {
-        hlist_del(&fq->list);
-        list_del(&fq->lru_list);
-        ip6_frag_nqueues--;
+        return ip6_frags.nqueues;
 }
 
-static __inline__ void fq_unlink(struct frag_queue *fq)
+int ip6_frag_mem(void)
 {
-        write_lock(&ip6_frag_lock);
-        __fq_unlink(fq);
-        write_unlock(&ip6_frag_lock);
+        return atomic_read(&ip6_frags.mem);
 }
 
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
+                          struct net_device *dev);
+
 /*
  * callers should be careful not to use the hash value outside the ipfrag_lock
  * as doing so could race with ipfrag_hash_rnd being recalculated.
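The hunk above is the heart of the conversion: the shared struct inet_frag_queue is embedded as a member of the protocol-private struct frag_queue, so generic code can work on &fq->q while IPv6 code recovers its wrapper with container_of(). A minimal sketch of that pattern (the helper name fq_from_generic is illustrative, not part of the patch):

#include <linux/kernel.h>       /* container_of() */
#include <net/inet_frag.h>

/* Recover the IPv6-private queue from the generic handle that the
 * shared inet_frag code passes around. ip6_hashfn() and ip6_frag_free()
 * below do exactly this inline; the helper exists only for illustration.
 */
static inline struct frag_queue *fq_from_generic(struct inet_frag_queue *q)
{
        return container_of(q, struct frag_queue, q);
}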
@@ -136,7 +119,7 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
 
         a += JHASH_GOLDEN_RATIO;
         b += JHASH_GOLDEN_RATIO;
-        c += ip6_frag_hash_rnd;
+        c += ip6_frags.rnd;
         __jhash_mix(a, b, c);
 
         a += (__force u32)saddr->s6_addr32[3];
@@ -149,60 +132,29 @@ static unsigned int ip6qhashfn(__be32 id, struct in6_addr *saddr,
         c += (__force u32)id;
         __jhash_mix(a, b, c);
 
-        return c & (IP6Q_HASHSZ - 1);
+        return c & (INETFRAGS_HASHSZ - 1);
 }
 
-static struct timer_list ip6_frag_secret_timer;
-int sysctl_ip6frag_secret_interval __read_mostly = 10 * 60 * HZ;
-
-static void ip6_frag_secret_rebuild(unsigned long dummy)
+static unsigned int ip6_hashfn(struct inet_frag_queue *q)
 {
-        unsigned long now = jiffies;
-        int i;
-
-        write_lock(&ip6_frag_lock);
-        get_random_bytes(&ip6_frag_hash_rnd, sizeof(u32));
-        for (i = 0; i < IP6Q_HASHSZ; i++) {
-                struct frag_queue *q;
-                struct hlist_node *p, *n;
-
-                hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
-                        unsigned int hval = ip6qhashfn(q->id,
-                                                       &q->saddr,
-                                                       &q->daddr);
-
-                        if (hval != i) {
-                                hlist_del(&q->list);
-
-                                /* Relink to new hash chain. */
-                                hlist_add_head(&q->list,
-                                               &ip6_frag_hash[hval]);
-
-                        }
-                }
-        }
-        write_unlock(&ip6_frag_lock);
+        struct frag_queue *fq;
 
-        mod_timer(&ip6_frag_secret_timer, now + sysctl_ip6frag_secret_interval);
+        fq = container_of(q, struct frag_queue, q);
+        return ip6qhashfn(fq->id, &fq->saddr, &fq->daddr);
 }
 
-atomic_t ip6_frag_mem = ATOMIC_INIT(0);
-
 /* Memory Tracking Functions. */
 static inline void frag_kfree_skb(struct sk_buff *skb, int *work)
 {
         if (work)
                 *work -= skb->truesize;
-        atomic_sub(skb->truesize, &ip6_frag_mem);
+        atomic_sub(skb->truesize, &ip6_frags.mem);
         kfree_skb(skb);
 }
 
-static inline void frag_free_queue(struct frag_queue *fq, int *work)
+static void ip6_frag_free(struct inet_frag_queue *fq)
 {
-        if (work)
-                *work -= sizeof(struct frag_queue);
-        atomic_sub(sizeof(struct frag_queue), &ip6_frag_mem);
-        kfree(fq);
+        kfree(container_of(fq, struct frag_queue, q));
 }
 
 static inline struct frag_queue *frag_alloc_queue(void)
@@ -211,36 +163,15 @@ static inline struct frag_queue *frag_alloc_queue(void)
 
         if(!fq)
                 return NULL;
-        atomic_add(sizeof(struct frag_queue), &ip6_frag_mem);
+        atomic_add(sizeof(struct frag_queue), &ip6_frags.mem);
         return fq;
 }
 
 /* Destruction primitives. */
 
-/* Complete destruction of fq. */
-static void ip6_frag_destroy(struct frag_queue *fq, int *work)
-{
-        struct sk_buff *fp;
-
-        BUG_TRAP(fq->last_in&COMPLETE);
-        BUG_TRAP(del_timer(&fq->timer) == 0);
-
-        /* Release all fragment data. */
-        fp = fq->fragments;
-        while (fp) {
-                struct sk_buff *xp = fp->next;
-
-                frag_kfree_skb(fp, work);
-                fp = xp;
-        }
-
-        frag_free_queue(fq, work);
-}
-
-static __inline__ void fq_put(struct frag_queue *fq, int *work)
+static __inline__ void fq_put(struct frag_queue *fq)
 {
-        if (atomic_dec_and_test(&fq->refcnt))
-                ip6_frag_destroy(fq, work);
+        inet_frag_put(&fq->q, &ip6_frags);
 }
 
 /* Kill fq entry. It is not destroyed immediately,
@@ -248,45 +179,16 @@ static __inline__ void fq_put(struct frag_queue *fq, int *work)
  */
 static __inline__ void fq_kill(struct frag_queue *fq)
 {
-        if (del_timer(&fq->timer))
-                atomic_dec(&fq->refcnt);
-
-        if (!(fq->last_in & COMPLETE)) {
-                fq_unlink(fq);
-                atomic_dec(&fq->refcnt);
-                fq->last_in |= COMPLETE;
-        }
+        inet_frag_kill(&fq->q, &ip6_frags);
 }
 
 static void ip6_evictor(struct inet6_dev *idev)
 {
-        struct frag_queue *fq;
-        struct list_head *tmp;
-        int work;
-
-        work = atomic_read(&ip6_frag_mem) - sysctl_ip6frag_low_thresh;
-        if (work <= 0)
-                return;
-
-        while(work > 0) {
-                read_lock(&ip6_frag_lock);
-                if (list_empty(&ip6_frag_lru_list)) {
-                        read_unlock(&ip6_frag_lock);
-                        return;
-                }
-                tmp = ip6_frag_lru_list.next;
-                fq = list_entry(tmp, struct frag_queue, lru_list);
-                atomic_inc(&fq->refcnt);
-                read_unlock(&ip6_frag_lock);
-
-                spin_lock(&fq->lock);
-                if (!(fq->last_in&COMPLETE))
-                        fq_kill(fq);
-                spin_unlock(&fq->lock);
-
-                fq_put(fq, &work);
-                IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
-        }
+        int evicted;
+
+        evicted = inet_frag_evictor(&ip6_frags);
+        if (evicted)
+                IP6_ADD_STATS_BH(idev, IPSTATS_MIB_REASMFAILS, evicted);
 }
 
 static void ip6_frag_expire(unsigned long data)
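fq_kill() and fq_put() are now thin wrappers, so the refcount discipline the old code enforced by hand (one reference for the pending timer, one for the hash/LRU linkage, one per lookup) lives in the generic layer. A sketch of how a caller is expected to drop a queue under that assumed discipline:

/* Assumed lifetime rules, inferred from the old code this hunk deletes:
 * inet_frag_kill() unlinks the queue and releases the timer and hash
 * references; fq_put() then drops the reference the caller took when it
 * looked the queue up. drop_queue() is illustrative, not from the patch.
 */
static void drop_queue(struct frag_queue *fq)
{
        inet_frag_kill(&fq->q, &ip6_frags);
        fq_put(fq);
}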
@@ -294,9 +196,9 @@ static void ip6_frag_expire(unsigned long data)
         struct frag_queue *fq = (struct frag_queue *) data;
         struct net_device *dev = NULL;
 
-        spin_lock(&fq->lock);
+        spin_lock(&fq->q.lock);
 
-        if (fq->last_in & COMPLETE)
+        if (fq->q.last_in & COMPLETE)
                 goto out;
 
         fq_kill(fq);
@@ -311,7 +213,7 @@ static void ip6_frag_expire(unsigned long data)
         rcu_read_unlock();
 
         /* Don't send error if the first segment did not arrive. */
-        if (!(fq->last_in&FIRST_IN) || !fq->fragments)
+        if (!(fq->q.last_in&FIRST_IN) || !fq->q.fragments)
                 goto out;
 
         /*
@@ -319,13 +221,13 @@ static void ip6_frag_expire(unsigned long data)
            segment was received. And do not use fq->dev
            pointer directly, device might already disappeared.
          */
-        fq->fragments->dev = dev;
-        icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
+        fq->q.fragments->dev = dev;
+        icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
 out:
         if (dev)
                 dev_put(dev);
-        spin_unlock(&fq->lock);
-        fq_put(fq, NULL);
+        spin_unlock(&fq->q.lock);
+        fq_put(fq);
 }
 
 /* Creation primitives. */
@@ -339,32 +241,32 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
         struct hlist_node *n;
 #endif
 
-        write_lock(&ip6_frag_lock);
+        write_lock(&ip6_frags.lock);
         hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
 #ifdef CONFIG_SMP
-        hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
+        hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
                 if (fq->id == fq_in->id &&
                     ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
                     ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
-                        atomic_inc(&fq->refcnt);
-                        write_unlock(&ip6_frag_lock);
-                        fq_in->last_in |= COMPLETE;
-                        fq_put(fq_in, NULL);
+                        atomic_inc(&fq->q.refcnt);
+                        write_unlock(&ip6_frags.lock);
+                        fq_in->q.last_in |= COMPLETE;
+                        fq_put(fq_in);
                         return fq;
                 }
         }
 #endif
         fq = fq_in;
 
-        if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
-                atomic_inc(&fq->refcnt);
+        if (!mod_timer(&fq->q.timer, jiffies + ip6_frags_ctl.timeout))
+                atomic_inc(&fq->q.refcnt);
 
-        atomic_inc(&fq->refcnt);
-        hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
-        INIT_LIST_HEAD(&fq->lru_list);
-        list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
-        ip6_frag_nqueues++;
-        write_unlock(&ip6_frag_lock);
+        atomic_inc(&fq->q.refcnt);
+        hlist_add_head(&fq->q.list, &ip6_frags.hash[hash]);
+        INIT_LIST_HEAD(&fq->q.lru_list);
+        list_add_tail(&fq->q.lru_list, &ip6_frags.lru_list);
+        ip6_frags.nqueues++;
+        write_unlock(&ip6_frags.lock);
         return fq;
 }
 
@@ -382,11 +284,11 @@ ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst,
         ipv6_addr_copy(&fq->saddr, src);
         ipv6_addr_copy(&fq->daddr, dst);
 
-        init_timer(&fq->timer);
-        fq->timer.function = ip6_frag_expire;
-        fq->timer.data = (long) fq;
-        spin_lock_init(&fq->lock);
-        atomic_set(&fq->refcnt, 1);
+        init_timer(&fq->q.timer);
+        fq->q.timer.function = ip6_frag_expire;
+        fq->q.timer.data = (long) fq;
+        spin_lock_init(&fq->q.lock);
+        atomic_set(&fq->q.refcnt, 1);
 
         return ip6_frag_intern(fq);
 
@@ -403,30 +305,31 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
         struct hlist_node *n;
         unsigned int hash;
 
-        read_lock(&ip6_frag_lock);
+        read_lock(&ip6_frags.lock);
         hash = ip6qhashfn(id, src, dst);
-        hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
+        hlist_for_each_entry(fq, n, &ip6_frags.hash[hash], q.list) {
                 if (fq->id == id &&
                     ipv6_addr_equal(src, &fq->saddr) &&
                     ipv6_addr_equal(dst, &fq->daddr)) {
-                        atomic_inc(&fq->refcnt);
-                        read_unlock(&ip6_frag_lock);
+                        atomic_inc(&fq->q.refcnt);
+                        read_unlock(&ip6_frags.lock);
                         return fq;
                 }
         }
-        read_unlock(&ip6_frag_lock);
+        read_unlock(&ip6_frags.lock);
 
         return ip6_frag_create(id, src, dst, idev);
 }
 
 
-static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
+static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                            struct frag_hdr *fhdr, int nhoff)
 {
         struct sk_buff *prev, *next;
+        struct net_device *dev;
         int offset, end;
 
-        if (fq->last_in & COMPLETE)
+        if (fq->q.last_in & COMPLETE)
                 goto err;
 
         offset = ntohs(fhdr->frag_off) & ~0x7;
@@ -439,7 +342,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                   ((u8 *)&fhdr->frag_off -
                                    skb_network_header(skb)));
-                return;
+                return -1;
         }
 
         if (skb->ip_summed == CHECKSUM_COMPLETE) {
@@ -454,11 +357,11 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                 /* If we already have some bits beyond end
                  * or have different end, the segment is corrupted.
                  */
-                if (end < fq->len ||
-                    ((fq->last_in & LAST_IN) && end != fq->len))
+                if (end < fq->q.len ||
+                    ((fq->q.last_in & LAST_IN) && end != fq->q.len))
                         goto err;
-                fq->last_in |= LAST_IN;
-                fq->len = end;
+                fq->q.last_in |= LAST_IN;
+                fq->q.len = end;
         } else {
                 /* Check if the fragment is rounded to 8 bytes.
                  * Required by the RFC.
@@ -471,13 +374,13 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                                           IPSTATS_MIB_INHDRERRORS);
                         icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
                                           offsetof(struct ipv6hdr, payload_len));
-                        return;
+                        return -1;
                 }
-                if (end > fq->len) {
+                if (end > fq->q.len) {
                         /* Some bits beyond end -> corruption. */
-                        if (fq->last_in & LAST_IN)
+                        if (fq->q.last_in & LAST_IN)
                                 goto err;
-                        fq->len = end;
+                        fq->q.len = end;
                 }
         }
 
@@ -496,7 +399,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
          * this fragment, right?
          */
         prev = NULL;
-        for(next = fq->fragments; next != NULL; next = next->next) {
+        for(next = fq->q.fragments; next != NULL; next = next->next) {
                 if (FRAG6_CB(next)->offset >= offset)
                         break;  /* bingo! */
                 prev = next;
@@ -533,7 +436,7 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                         if (!pskb_pull(next, i))
                                 goto err;
                         FRAG6_CB(next)->offset += i;    /* next fragment */
-                        fq->meat -= i;
+                        fq->q.meat -= i;
                         if (next->ip_summed != CHECKSUM_UNNECESSARY)
                                 next->ip_summed = CHECKSUM_NONE;
                         break;
@@ -548,9 +451,9 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                         if (prev)
                                 prev->next = next;
                         else
-                                fq->fragments = next;
+                                fq->q.fragments = next;
 
-                        fq->meat -= free_it->len;
+                        fq->q.meat -= free_it->len;
                         frag_kfree_skb(free_it, NULL);
                 }
         }
@@ -562,30 +465,37 @@ static void ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
         if (prev)
                 prev->next = skb;
         else
-                fq->fragments = skb;
+                fq->q.fragments = skb;
 
-        if (skb->dev)
-                fq->iif = skb->dev->ifindex;
-        skb->dev = NULL;
-        fq->stamp = skb->tstamp;
-        fq->meat += skb->len;
-        atomic_add(skb->truesize, &ip6_frag_mem);
+        dev = skb->dev;
+        if (dev) {
+                fq->iif = dev->ifindex;
+                skb->dev = NULL;
+        }
+        fq->q.stamp = skb->tstamp;
+        fq->q.meat += skb->len;
+        atomic_add(skb->truesize, &ip6_frags.mem);
 
         /* The first fragment.
          * nhoffset is obtained from the first fragment, of course.
          */
         if (offset == 0) {
                 fq->nhoffset = nhoff;
-                fq->last_in |= FIRST_IN;
+                fq->q.last_in |= FIRST_IN;
         }
-        write_lock(&ip6_frag_lock);
-        list_move_tail(&fq->lru_list, &ip6_frag_lru_list);
-        write_unlock(&ip6_frag_lock);
-        return;
+
+        if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len)
+                return ip6_frag_reasm(fq, prev, dev);
+
+        write_lock(&ip6_frags.lock);
+        list_move_tail(&fq->q.lru_list, &ip6_frags.lru_list);
+        write_unlock(&ip6_frags.lock);
+        return -1;
 
 err:
         IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMFAILS);
         kfree_skb(skb);
+        return -1;
 }
 
 /*
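Two things happen in this hunk: the receiving device is captured in dev before skb->dev is cleared, and the completion test moves here from ipv6_frag_rcv(), with ip6_frag_queue() now returning -1 for a merely-queued (or dropped) fragment and the ip6_frag_reasm() result for the fragment that completes the datagram. Completion means both FIRST_IN and LAST_IN have been seen and meat, the payload bytes queued so far, equals len, the total length fixed by the final fragment. A toy, standalone illustration of that accounting (user-space C, invented fragment sizes):

#include <stdio.h>

#define FIRST_IN 2
#define LAST_IN  1

int main(void)
{
        int last_in = 0, meat = 0, len = 0;

        /* first fragment: offset 0, 1240 payload bytes */
        last_in |= FIRST_IN;
        meat += 1240;

        /* final fragment (MF clear): 520 bytes ending at offset 1760 */
        last_in |= LAST_IN;
        meat += 520;
        len = 1760;

        if (last_in == (FIRST_IN | LAST_IN) && meat == len)
                printf("all bits arrived - reassemble now\n");
        return 0;
}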
@@ -597,21 +507,39 @@ err:
  *      queue is eligible for reassembly i.e. it is not COMPLETE,
  *      the last and the first frames arrived and all the bits are here.
  */
-static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
+static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
                           struct net_device *dev)
 {
-        struct sk_buff *fp, *head = fq->fragments;
+        struct sk_buff *fp, *head = fq->q.fragments;
         int payload_len;
         unsigned int nhoff;
 
         fq_kill(fq);
 
+        /* Make the one we just received the head. */
+        if (prev) {
+                head = prev->next;
+                fp = skb_clone(head, GFP_ATOMIC);
+
+                if (!fp)
+                        goto out_oom;
+
+                fp->next = head->next;
+                prev->next = fp;
+
+                skb_morph(head, fq->q.fragments);
+                head->next = fq->q.fragments->next;
+
+                kfree_skb(fq->q.fragments);
+                fq->q.fragments = head;
+        }
+
         BUG_TRAP(head != NULL);
         BUG_TRAP(FRAG6_CB(head)->offset == 0);
 
         /* Unfragmented part is taken from the first segment. */
         payload_len = ((head->data - skb_network_header(head)) -
-                       sizeof(struct ipv6hdr) + fq->len -
+                       sizeof(struct ipv6hdr) + fq->q.len -
                        sizeof(struct frag_hdr));
         if (payload_len > IPV6_MAXPLEN)
                 goto out_oversize;
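The skb_morph() dance above makes the fragment that just arrived the head of the chain: a clone takes the old head's place in the list, then the newcomer assumes the old head's identity (its ownership and accounting), so once ip6_frag_reasm() succeeds, the skb that ipv6_frag_rcv() is already processing is the reassembled datagram. That reading of skb_morph() is inferred from its use here; the diff itself does not spell it out.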
@@ -640,7 +568,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
                 head->len -= clone->len;
                 clone->csum = 0;
                 clone->ip_summed = head->ip_summed;
-                atomic_add(clone->truesize, &ip6_frag_mem);
+                atomic_add(clone->truesize, &ip6_frags.mem);
         }
 
         /* We have to remove fragment header from datagram and to relocate
@@ -655,7 +583,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
         skb_shinfo(head)->frag_list = head->next;
         skb_reset_transport_header(head);
         skb_push(head, head->data - skb_network_header(head));
-        atomic_sub(head->truesize, &ip6_frag_mem);
+        atomic_sub(head->truesize, &ip6_frags.mem);
 
         for (fp=head->next; fp; fp = fp->next) {
                 head->data_len += fp->len;
@@ -665,17 +593,15 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
                 else if (head->ip_summed == CHECKSUM_COMPLETE)
                         head->csum = csum_add(head->csum, fp->csum);
                 head->truesize += fp->truesize;
-                atomic_sub(fp->truesize, &ip6_frag_mem);
+                atomic_sub(fp->truesize, &ip6_frags.mem);
         }
 
         head->next = NULL;
         head->dev = dev;
-        head->tstamp = fq->stamp;
+        head->tstamp = fq->q.stamp;
         ipv6_hdr(head)->payload_len = htons(payload_len);
         IP6CB(head)->nhoff = nhoff;
 
-        *skb_in = head;
-
         /* Yes, and fold redundant checksum back. 8) */
         if (head->ip_summed == CHECKSUM_COMPLETE)
                 head->csum = csum_partial(skb_network_header(head),
@@ -685,7 +611,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
         rcu_read_lock();
         IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
         rcu_read_unlock();
-        fq->fragments = NULL;
+        fq->q.fragments = NULL;
         return 1;
 
 out_oversize:
@@ -702,10 +628,8 @@ out_fail:
         return -1;
 }
 
-static int ipv6_frag_rcv(struct sk_buff **skbp)
+static int ipv6_frag_rcv(struct sk_buff *skb)
 {
-        struct sk_buff *skb = *skbp;
-        struct net_device *dev = skb->dev;
         struct frag_hdr *fhdr;
         struct frag_queue *fq;
         struct ipv6hdr *hdr = ipv6_hdr(skb);
@@ -739,23 +663,19 @@ static int ipv6_frag_rcv(struct sk_buff **skbp)
                 return 1;
         }
 
-        if (atomic_read(&ip6_frag_mem) > sysctl_ip6frag_high_thresh)
+        if (atomic_read(&ip6_frags.mem) > ip6_frags_ctl.high_thresh)
                 ip6_evictor(ip6_dst_idev(skb->dst));
 
         if ((fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr,
                           ip6_dst_idev(skb->dst))) != NULL) {
-                int ret = -1;
+                int ret;
 
-                spin_lock(&fq->lock);
+                spin_lock(&fq->q.lock);
 
-                ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
+                ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
 
-                if (fq->last_in == (FIRST_IN|LAST_IN) &&
-                    fq->meat == fq->len)
-                        ret = ip6_frag_reasm(fq, skbp, dev);
-
-                spin_unlock(&fq->lock);
-                fq_put(fq, NULL);
+                spin_unlock(&fq->q.lock);
+                fq_put(fq);
                 return ret;
         }
 
@@ -775,11 +695,10 @@ void __init ipv6_frag_init(void)
         if (inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT) < 0)
                 printk(KERN_ERR "ipv6_frag_init: Could not register protocol\n");
 
-        ip6_frag_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
-                                   (jiffies ^ (jiffies >> 6)));
-
-        init_timer(&ip6_frag_secret_timer);
-        ip6_frag_secret_timer.function = ip6_frag_secret_rebuild;
-        ip6_frag_secret_timer.expires = jiffies + sysctl_ip6frag_secret_interval;
-        add_timer(&ip6_frag_secret_timer);
+        ip6_frags.ctl = &ip6_frags_ctl;
+        ip6_frags.hashfn = ip6_hashfn;
+        ip6_frags.destructor = ip6_frag_free;
+        ip6_frags.skb_free = NULL;
+        ip6_frags.qsize = sizeof(struct frag_queue);
+        inet_frags_init(&ip6_frags);
 }
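After the conversion, ipv6_frag_init() only registers a descriptor with the shared layer; the hash secret and its periodic rebuild now live behind inet_frags_init(). The same registration, condensed and annotated (comments are one reading of the fields; only what is visible in this diff is set, and the function name is illustrative):

static struct inet_frags ip6_frags;

void __init ip6_frags_register(void)
{
        ip6_frags.ctl        = &ip6_frags_ctl;          /* thresholds, timeout, secret interval */
        ip6_frags.hashfn     = ip6_hashfn;              /* hashes (id, saddr, daddr) */
        ip6_frags.destructor = ip6_frag_free;           /* kfree() of the embedding struct */
        ip6_frags.skb_free   = NULL;                    /* IPv6 needs no per-skb cleanup */
        ip6_frags.qsize      = sizeof(struct frag_queue);
        inet_frags_init(&ip6_frags);                    /* generic layer takes it from here */
}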