author    Pavel Emelyanov <xemul@openvz.org>  2007-10-15 05:24:19 -0400
committer David S. Miller <davem@sunset.davemloft.net>  2007-10-15 15:26:38 -0400
commit    5ab11c98d3a950faf6922b6166e5f8fc874590e7
tree      ef9ab897361f106309df37b6d4f2e95fdecdb240 /net/ipv6
parent    114342f2d38439cb1a54f1f724fa38729b093c48
[INET]: Move common fields from frag_queues in one place.
Introduce struct inet_frag_queue in the include/net/inet_frag.h
file and place there all the fields common to three structs:

 * struct ipq in ipv4/ip_fragment.c
 * struct nf_ct_frag6_queue in nf_conntrack_reasm.c
 * struct frag_queue in ipv6/reassembly.c

After this, replace these fields in the appropriate structures
with an instance of this struct, and fix the users to use the
correct names, i.e. hunks like
- atomic_dec(&fq->refcnt);
+ atomic_dec(&fq->q.refcnt);
(these occupy most of the patch)
Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
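The fields being consolidated are visible in the deletions below. The new
include/net/inet_frag.h itself is outside this diffstat (limited to
'net/ipv6'), so the following is only a sketch of the shared structure,
reconstructed from the members removed from the two IPv6 queue structs,
not the actual header hunk:

	struct inet_frag_queue {
		struct hlist_node	list;
		struct list_head	lru_list;	/* lru list member */
		spinlock_t		lock;
		atomic_t		refcnt;
		struct timer_list	timer;		/* expire timer */
		struct sk_buff		*fragments;
		int			len;
		int			meat;
		ktime_t			stamp;
		__u8			last_in;	/* has first/last segment arrived? */
	#define COMPLETE		4
	#define FIRST_IN		2
	#define LAST_IN			1
	};

Each protocol-specific queue keeps its own lookup and bookkeeping fields
(id, saddr, daddr, csum, nhoffset, and, in reassembly.c, iif) and embeds
the shared struct as its first member, presumably so that later patches
can move the hash/LRU/timer handling into common code.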
Diffstat (limited to 'net/ipv6')
 net/ipv6/netfilter/nf_conntrack_reasm.c | 137
 net/ipv6/reassembly.c                   | 153
 2 files changed, 134 insertions(+), 156 deletions(-)
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 25442a8c1ba8..52e9f6a3995d 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -31,6 +31,7 @@
 
 #include <net/sock.h>
 #include <net/snmp.h>
+#include <net/inet_frag.h>
 
 #include <net/ipv6.h>
 #include <net/protocol.h>
@@ -63,25 +64,13 @@ struct nf_ct_frag6_skb_cb
 
 struct nf_ct_frag6_queue
 {
-	struct hlist_node	list;
-	struct list_head	lru_list;	/* lru list member */
+	struct inet_frag_queue	q;
 
 	__be32			id;		/* fragment id		*/
 	struct in6_addr		saddr;
 	struct in6_addr		daddr;
 
-	spinlock_t		lock;
-	atomic_t		refcnt;
-	struct timer_list	timer;		/* expire timer		*/
-	struct sk_buff		*fragments;
-	int			len;
-	int			meat;
-	ktime_t			stamp;
 	unsigned int		csum;
-	__u8			last_in;	/* has first/last segment arrived? */
-#define COMPLETE		4
-#define FIRST_IN		2
-#define LAST_IN			1
 	__u16			nhoffset;
 };
 
@@ -97,8 +86,8 @@ int nf_ct_frag6_nqueues = 0;
 
 static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq)
 {
-	hlist_del(&fq->list);
-	list_del(&fq->lru_list);
+	hlist_del(&fq->q.list);
+	list_del(&fq->q.lru_list);
 	nf_ct_frag6_nqueues--;
 }
 
@@ -150,14 +139,14 @@ static void nf_ct_frag6_secret_rebuild(unsigned long dummy)
 		struct nf_ct_frag6_queue *q;
 		struct hlist_node *p, *n;
 
-		hlist_for_each_entry_safe(q, p, n, &nf_ct_frag6_hash[i], list) {
+		hlist_for_each_entry_safe(q, p, n, &nf_ct_frag6_hash[i], q.list) {
 			unsigned int hval = ip6qhashfn(q->id,
 						       &q->saddr,
 						       &q->daddr);
 			if (hval != i) {
-				hlist_del(&q->list);
+				hlist_del(&q->q.list);
 				/* Relink to new hash chain. */
-				hlist_add_head(&q->list,
+				hlist_add_head(&q->q.list,
 					       &nf_ct_frag6_hash[hval]);
 			}
 		}
@@ -208,11 +197,11 @@ static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq,
 {
 	struct sk_buff *fp;
 
-	BUG_TRAP(fq->last_in&COMPLETE);
-	BUG_TRAP(del_timer(&fq->timer) == 0);
+	BUG_TRAP(fq->q.last_in&COMPLETE);
+	BUG_TRAP(del_timer(&fq->q.timer) == 0);
 
 	/* Release all fragment data. */
-	fp = fq->fragments;
+	fp = fq->q.fragments;
 	while (fp) {
 		struct sk_buff *xp = fp->next;
 
@@ -225,7 +214,7 @@ static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq,
 
 static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work)
 {
-	if (atomic_dec_and_test(&fq->refcnt))
+	if (atomic_dec_and_test(&fq->q.refcnt))
 		nf_ct_frag6_destroy(fq, work);
 }
 
@@ -234,13 +223,13 @@ static __inline__ void fq_put(struct nf_ct_frag6_queue *fq, unsigned int *work)
  */
 static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
 {
-	if (del_timer(&fq->timer))
-		atomic_dec(&fq->refcnt);
+	if (del_timer(&fq->q.timer))
+		atomic_dec(&fq->q.refcnt);
 
-	if (!(fq->last_in & COMPLETE)) {
+	if (!(fq->q.last_in & COMPLETE)) {
 		fq_unlink(fq);
-		atomic_dec(&fq->refcnt);
-		fq->last_in |= COMPLETE;
+		atomic_dec(&fq->q.refcnt);
+		fq->q.last_in |= COMPLETE;
 	}
 }
 
@@ -263,14 +252,14 @@ static void nf_ct_frag6_evictor(void)
 		}
 		tmp = nf_ct_frag6_lru_list.next;
 		BUG_ON(tmp == NULL);
-		fq = list_entry(tmp, struct nf_ct_frag6_queue, lru_list);
-		atomic_inc(&fq->refcnt);
+		fq = list_entry(tmp, struct nf_ct_frag6_queue, q.lru_list);
+		atomic_inc(&fq->q.refcnt);
 		read_unlock(&nf_ct_frag6_lock);
 
-		spin_lock(&fq->lock);
-		if (!(fq->last_in&COMPLETE))
+		spin_lock(&fq->q.lock);
+		if (!(fq->q.last_in&COMPLETE))
 			fq_kill(fq);
-		spin_unlock(&fq->lock);
+		spin_unlock(&fq->q.lock);
 
 		fq_put(fq, &work);
 	}
@@ -280,15 +269,15 @@ static void nf_ct_frag6_expire(unsigned long data)
 {
 	struct nf_ct_frag6_queue *fq = (struct nf_ct_frag6_queue *) data;
 
-	spin_lock(&fq->lock);
+	spin_lock(&fq->q.lock);
 
-	if (fq->last_in & COMPLETE)
+	if (fq->q.last_in & COMPLETE)
 		goto out;
 
 	fq_kill(fq);
 
 out:
-	spin_unlock(&fq->lock);
+	spin_unlock(&fq->q.lock);
 	fq_put(fq, NULL);
 }
 
@@ -304,13 +293,13 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
 
 	write_lock(&nf_ct_frag6_lock);
 #ifdef CONFIG_SMP
-	hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) {
+	hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], q.list) {
 		if (fq->id == fq_in->id &&
 		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
 		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
-			atomic_inc(&fq->refcnt);
+			atomic_inc(&fq->q.refcnt);
 			write_unlock(&nf_ct_frag6_lock);
-			fq_in->last_in |= COMPLETE;
+			fq_in->q.last_in |= COMPLETE;
 			fq_put(fq_in, NULL);
 			return fq;
 		}
@@ -318,13 +307,13 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
 #endif
 	fq = fq_in;
 
-	if (!mod_timer(&fq->timer, jiffies + nf_ct_frag6_timeout))
-		atomic_inc(&fq->refcnt);
+	if (!mod_timer(&fq->q.timer, jiffies + nf_ct_frag6_timeout))
+		atomic_inc(&fq->q.refcnt);
 
-	atomic_inc(&fq->refcnt);
-	hlist_add_head(&fq->list, &nf_ct_frag6_hash[hash]);
-	INIT_LIST_HEAD(&fq->lru_list);
-	list_add_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
+	atomic_inc(&fq->q.refcnt);
+	hlist_add_head(&fq->q.list, &nf_ct_frag6_hash[hash]);
+	INIT_LIST_HEAD(&fq->q.lru_list);
+	list_add_tail(&fq->q.lru_list, &nf_ct_frag6_lru_list);
 	nf_ct_frag6_nqueues++;
 	write_unlock(&nf_ct_frag6_lock);
 	return fq;
@@ -347,9 +336,9 @@ nf_ct_frag6_create(unsigned int hash, __be32 id, struct in6_addr *src, str
 	ipv6_addr_copy(&fq->saddr, src);
 	ipv6_addr_copy(&fq->daddr, dst);
 
-	setup_timer(&fq->timer, nf_ct_frag6_expire, (unsigned long)fq);
-	spin_lock_init(&fq->lock);
-	atomic_set(&fq->refcnt, 1);
+	setup_timer(&fq->q.timer, nf_ct_frag6_expire, (unsigned long)fq);
+	spin_lock_init(&fq->q.lock);
+	atomic_set(&fq->q.refcnt, 1);
 
 	return nf_ct_frag6_intern(hash, fq);
 
@@ -365,11 +354,11 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst)
 	unsigned int hash = ip6qhashfn(id, src, dst);
 
 	read_lock(&nf_ct_frag6_lock);
-	hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) {
+	hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], q.list) {
 		if (fq->id == id &&
 		    ipv6_addr_equal(src, &fq->saddr) &&
 		    ipv6_addr_equal(dst, &fq->daddr)) {
-			atomic_inc(&fq->refcnt);
+			atomic_inc(&fq->q.refcnt);
 			read_unlock(&nf_ct_frag6_lock);
 			return fq;
 		}
@@ -386,7 +375,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 	struct sk_buff *prev, *next;
 	int offset, end;
 
-	if (fq->last_in & COMPLETE) {
+	if (fq->q.last_in & COMPLETE) {
 		pr_debug("Allready completed\n");
 		goto err;
 	}
@@ -412,13 +401,13 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 		/* If we already have some bits beyond end
 		 * or have different end, the segment is corrupted.
 		 */
-		if (end < fq->len ||
-		    ((fq->last_in & LAST_IN) && end != fq->len)) {
+		if (end < fq->q.len ||
+		    ((fq->q.last_in & LAST_IN) && end != fq->q.len)) {
 			pr_debug("already received last fragment\n");
 			goto err;
 		}
-		fq->last_in |= LAST_IN;
-		fq->len = end;
+		fq->q.last_in |= LAST_IN;
+		fq->q.len = end;
 	} else {
 		/* Check if the fragment is rounded to 8 bytes.
 		 * Required by the RFC.
@@ -430,13 +419,13 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 			pr_debug("end of fragment not rounded to 8 bytes.\n");
 			return -1;
 		}
-		if (end > fq->len) {
+		if (end > fq->q.len) {
 			/* Some bits beyond end -> corruption. */
-			if (fq->last_in & LAST_IN) {
+			if (fq->q.last_in & LAST_IN) {
 				pr_debug("last packet already reached.\n");
 				goto err;
 			}
-			fq->len = end;
+			fq->q.len = end;
 		}
 	}
 
@@ -458,7 +447,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 	 * this fragment, right?
 	 */
 	prev = NULL;
-	for (next = fq->fragments; next != NULL; next = next->next) {
+	for (next = fq->q.fragments; next != NULL; next = next->next) {
 		if (NFCT_FRAG6_CB(next)->offset >= offset)
 			break;	/* bingo! */
 		prev = next;
@@ -503,7 +492,7 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 
 			/* next fragment */
 			NFCT_FRAG6_CB(next)->offset += i;
-			fq->meat -= i;
+			fq->q.meat -= i;
 			if (next->ip_summed != CHECKSUM_UNNECESSARY)
 				next->ip_summed = CHECKSUM_NONE;
 			break;
@@ -518,9 +507,9 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 			if (prev)
 				prev->next = next;
 			else
-				fq->fragments = next;
+				fq->q.fragments = next;
 
-			fq->meat -= free_it->len;
+			fq->q.meat -= free_it->len;
 			frag_kfree_skb(free_it, NULL);
 		}
 	}
@@ -532,11 +521,11 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 	if (prev)
 		prev->next = skb;
 	else
-		fq->fragments = skb;
+		fq->q.fragments = skb;
 
 	skb->dev = NULL;
-	fq->stamp = skb->tstamp;
-	fq->meat += skb->len;
+	fq->q.stamp = skb->tstamp;
+	fq->q.meat += skb->len;
 	atomic_add(skb->truesize, &nf_ct_frag6_mem);
 
 	/* The first fragment.
@@ -544,10 +533,10 @@ static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
 	 */
 	if (offset == 0) {
 		fq->nhoffset = nhoff;
-		fq->last_in |= FIRST_IN;
+		fq->q.last_in |= FIRST_IN;
 	}
 	write_lock(&nf_ct_frag6_lock);
-	list_move_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
+	list_move_tail(&fq->q.lru_list, &nf_ct_frag6_lru_list);
 	write_unlock(&nf_ct_frag6_lock);
 	return 0;
 
@@ -567,7 +556,7 @@ err:
 static struct sk_buff *
 nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 {
-	struct sk_buff *fp, *op, *head = fq->fragments;
+	struct sk_buff *fp, *op, *head = fq->q.fragments;
 	int payload_len;
 
 	fq_kill(fq);
@@ -577,7 +566,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 
 	/* Unfragmented part is taken from the first segment. */
 	payload_len = ((head->data - skb_network_header(head)) -
-		       sizeof(struct ipv6hdr) + fq->len -
+		       sizeof(struct ipv6hdr) + fq->q.len -
 		       sizeof(struct frag_hdr));
 	if (payload_len > IPV6_MAXPLEN) {
 		pr_debug("payload len is too large.\n");
@@ -643,7 +632,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 
 	head->next = NULL;
 	head->dev = dev;
-	head->tstamp = fq->stamp;
+	head->tstamp = fq->q.stamp;
 	ipv6_hdr(head)->payload_len = htons(payload_len);
 
 	/* Yes, and fold redundant checksum back. 8) */
@@ -652,7 +641,7 @@ nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
 					  skb_network_header_len(head),
 					  head->csum);
 
-	fq->fragments = NULL;
+	fq->q.fragments = NULL;
 
 	/* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
 	fp = skb_shinfo(head)->frag_list;
@@ -797,21 +786,21 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
 		goto ret_orig;
 	}
 
-	spin_lock(&fq->lock);
+	spin_lock(&fq->q.lock);
 
 	if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
-		spin_unlock(&fq->lock);
+		spin_unlock(&fq->q.lock);
 		pr_debug("Can't insert skb to queue\n");
 		fq_put(fq, NULL);
 		goto ret_orig;
 	}
 
-	if (fq->last_in == (FIRST_IN|LAST_IN) && fq->meat == fq->len) {
+	if (fq->q.last_in == (FIRST_IN|LAST_IN) && fq->q.meat == fq->q.len) {
 		ret_skb = nf_ct_frag6_reasm(fq, dev);
 		if (ret_skb == NULL)
 			pr_debug("Can't reassemble fragmented packets\n");
 	}
-	spin_unlock(&fq->lock);
+	spin_unlock(&fq->q.lock);
 
 	fq_put(fq, NULL);
 	return ret_skb;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index fa1055b669d1..aef5dd1ebc8a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -54,6 +54,7 @@
 #include <net/rawv6.h>
 #include <net/ndisc.h>
 #include <net/addrconf.h>
+#include <net/inet_frag.h>
 
 int sysctl_ip6frag_high_thresh __read_mostly = 256*1024;
 int sysctl_ip6frag_low_thresh __read_mostly = 192*1024;
@@ -75,26 +76,14 @@ struct ip6frag_skb_cb
 
 struct frag_queue
 {
-	struct hlist_node	list;
-	struct list_head	lru_list;	/* lru list member */
+	struct inet_frag_queue	q;
 
 	__be32			id;		/* fragment id		*/
 	struct in6_addr		saddr;
 	struct in6_addr		daddr;
 
-	spinlock_t		lock;
-	atomic_t		refcnt;
-	struct timer_list	timer;		/* expire timer		*/
-	struct sk_buff		*fragments;
-	int			len;
-	int			meat;
 	int			iif;
-	ktime_t			stamp;
 	unsigned int		csum;
-	__u8			last_in;	/* has first/last segment arrived? */
-#define COMPLETE		4
-#define FIRST_IN		2
-#define LAST_IN			1
 	__u16			nhoffset;
 };
 
@@ -113,8 +102,8 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 
 static __inline__ void __fq_unlink(struct frag_queue *fq)
 {
-	hlist_del(&fq->list);
-	list_del(&fq->lru_list);
+	hlist_del(&fq->q.list);
+	list_del(&fq->q.lru_list);
 	ip6_frag_nqueues--;
 }
 
@@ -170,16 +159,16 @@ static void ip6_frag_secret_rebuild(unsigned long dummy)
 		struct frag_queue *q;
 		struct hlist_node *p, *n;
 
-		hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], list) {
+		hlist_for_each_entry_safe(q, p, n, &ip6_frag_hash[i], q.list) {
 			unsigned int hval = ip6qhashfn(q->id,
 						       &q->saddr,
 						       &q->daddr);
 
 			if (hval != i) {
-				hlist_del(&q->list);
+				hlist_del(&q->q.list);
 
 				/* Relink to new hash chain. */
-				hlist_add_head(&q->list,
+				hlist_add_head(&q->q.list,
 					       &ip6_frag_hash[hval]);
 
 			}
@@ -226,11 +215,11 @@ static void ip6_frag_destroy(struct frag_queue *fq, int *work)
 {
 	struct sk_buff *fp;
 
-	BUG_TRAP(fq->last_in&COMPLETE);
-	BUG_TRAP(del_timer(&fq->timer) == 0);
+	BUG_TRAP(fq->q.last_in&COMPLETE);
+	BUG_TRAP(del_timer(&fq->q.timer) == 0);
 
 	/* Release all fragment data. */
-	fp = fq->fragments;
+	fp = fq->q.fragments;
 	while (fp) {
 		struct sk_buff *xp = fp->next;
 
@@ -243,7 +232,7 @@ static void ip6_frag_destroy(struct frag_queue *fq, int *work)
 
 static __inline__ void fq_put(struct frag_queue *fq, int *work)
 {
-	if (atomic_dec_and_test(&fq->refcnt))
+	if (atomic_dec_and_test(&fq->q.refcnt))
 		ip6_frag_destroy(fq, work);
 }
 
@@ -252,13 +241,13 @@ static __inline__ void fq_put(struct frag_queue *fq, int *work)
  */
 static __inline__ void fq_kill(struct frag_queue *fq)
 {
-	if (del_timer(&fq->timer))
-		atomic_dec(&fq->refcnt);
+	if (del_timer(&fq->q.timer))
+		atomic_dec(&fq->q.refcnt);
 
-	if (!(fq->last_in & COMPLETE)) {
+	if (!(fq->q.last_in & COMPLETE)) {
 		fq_unlink(fq);
-		atomic_dec(&fq->refcnt);
-		fq->last_in |= COMPLETE;
+		atomic_dec(&fq->q.refcnt);
+		fq->q.last_in |= COMPLETE;
 	}
 }
 
@@ -279,14 +268,14 @@ static void ip6_evictor(struct inet6_dev *idev)
 			return;
 		}
 		tmp = ip6_frag_lru_list.next;
-		fq = list_entry(tmp, struct frag_queue, lru_list);
-		atomic_inc(&fq->refcnt);
+		fq = list_entry(tmp, struct frag_queue, q.lru_list);
+		atomic_inc(&fq->q.refcnt);
 		read_unlock(&ip6_frag_lock);
 
-		spin_lock(&fq->lock);
-		if (!(fq->last_in&COMPLETE))
+		spin_lock(&fq->q.lock);
+		if (!(fq->q.last_in&COMPLETE))
 			fq_kill(fq);
-		spin_unlock(&fq->lock);
+		spin_unlock(&fq->q.lock);
 
 		fq_put(fq, &work);
 		IP6_INC_STATS_BH(idev, IPSTATS_MIB_REASMFAILS);
@@ -298,9 +287,9 @@ static void ip6_frag_expire(unsigned long data)
 	struct frag_queue *fq = (struct frag_queue *) data;
 	struct net_device *dev = NULL;
 
-	spin_lock(&fq->lock);
+	spin_lock(&fq->q.lock);
 
-	if (fq->last_in & COMPLETE)
+	if (fq->q.last_in & COMPLETE)
 		goto out;
 
 	fq_kill(fq);
@@ -315,7 +304,7 @@ static void ip6_frag_expire(unsigned long data)
 	rcu_read_unlock();
 
 	/* Don't send error if the first segment did not arrive. */
-	if (!(fq->last_in&FIRST_IN) || !fq->fragments)
+	if (!(fq->q.last_in&FIRST_IN) || !fq->q.fragments)
 		goto out;
 
 	/*
@@ -323,12 +312,12 @@ static void ip6_frag_expire(unsigned long data)
 	   segment was received. And do not use fq->dev
 	   pointer directly, device might already disappeared.
 	 */
-	fq->fragments->dev = dev;
-	icmpv6_send(fq->fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
+	fq->q.fragments->dev = dev;
+	icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0, dev);
 out:
 	if (dev)
 		dev_put(dev);
-	spin_unlock(&fq->lock);
+	spin_unlock(&fq->q.lock);
 	fq_put(fq, NULL);
 }
 
@@ -346,13 +335,13 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
 	write_lock(&ip6_frag_lock);
 	hash = ip6qhashfn(fq_in->id, &fq_in->saddr, &fq_in->daddr);
 #ifdef CONFIG_SMP
-	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
+	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], q.list) {
 		if (fq->id == fq_in->id &&
 		    ipv6_addr_equal(&fq_in->saddr, &fq->saddr) &&
 		    ipv6_addr_equal(&fq_in->daddr, &fq->daddr)) {
-			atomic_inc(&fq->refcnt);
+			atomic_inc(&fq->q.refcnt);
 			write_unlock(&ip6_frag_lock);
-			fq_in->last_in |= COMPLETE;
+			fq_in->q.last_in |= COMPLETE;
 			fq_put(fq_in, NULL);
 			return fq;
 		}
@@ -360,13 +349,13 @@ static struct frag_queue *ip6_frag_intern(struct frag_queue *fq_in)
 #endif
 	fq = fq_in;
 
-	if (!mod_timer(&fq->timer, jiffies + sysctl_ip6frag_time))
-		atomic_inc(&fq->refcnt);
+	if (!mod_timer(&fq->q.timer, jiffies + sysctl_ip6frag_time))
+		atomic_inc(&fq->q.refcnt);
 
-	atomic_inc(&fq->refcnt);
-	hlist_add_head(&fq->list, &ip6_frag_hash[hash]);
-	INIT_LIST_HEAD(&fq->lru_list);
-	list_add_tail(&fq->lru_list, &ip6_frag_lru_list);
+	atomic_inc(&fq->q.refcnt);
+	hlist_add_head(&fq->q.list, &ip6_frag_hash[hash]);
+	INIT_LIST_HEAD(&fq->q.lru_list);
+	list_add_tail(&fq->q.lru_list, &ip6_frag_lru_list);
 	ip6_frag_nqueues++;
 	write_unlock(&ip6_frag_lock);
 	return fq;
@@ -386,11 +375,11 @@ ip6_frag_create(__be32 id, struct in6_addr *src, struct in6_addr *dst,
 	ipv6_addr_copy(&fq->saddr, src);
 	ipv6_addr_copy(&fq->daddr, dst);
 
-	init_timer(&fq->timer);
-	fq->timer.function = ip6_frag_expire;
-	fq->timer.data = (long) fq;
-	spin_lock_init(&fq->lock);
-	atomic_set(&fq->refcnt, 1);
+	init_timer(&fq->q.timer);
+	fq->q.timer.function = ip6_frag_expire;
+	fq->q.timer.data = (long) fq;
+	spin_lock_init(&fq->q.lock);
+	atomic_set(&fq->q.refcnt, 1);
 
 	return ip6_frag_intern(fq);
 
@@ -409,11 +398,11 @@ fq_find(__be32 id, struct in6_addr *src, struct in6_addr *dst,
 
 	read_lock(&ip6_frag_lock);
 	hash = ip6qhashfn(id, src, dst);
-	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], list) {
+	hlist_for_each_entry(fq, n, &ip6_frag_hash[hash], q.list) {
 		if (fq->id == id &&
 		    ipv6_addr_equal(src, &fq->saddr) &&
 		    ipv6_addr_equal(dst, &fq->daddr)) {
-			atomic_inc(&fq->refcnt);
+			atomic_inc(&fq->q.refcnt);
 			read_unlock(&ip6_frag_lock);
 			return fq;
 		}
@@ -431,7 +420,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	struct net_device *dev;
 	int offset, end;
 
-	if (fq->last_in & COMPLETE)
+	if (fq->q.last_in & COMPLETE)
 		goto err;
 
 	offset = ntohs(fhdr->frag_off) & ~0x7;
@@ -459,11 +448,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 		/* If we already have some bits beyond end
 		 * or have different end, the segment is corrupted.
 		 */
-		if (end < fq->len ||
-		    ((fq->last_in & LAST_IN) && end != fq->len))
+		if (end < fq->q.len ||
+		    ((fq->q.last_in & LAST_IN) && end != fq->q.len))
 			goto err;
-		fq->last_in |= LAST_IN;
-		fq->len = end;
+		fq->q.last_in |= LAST_IN;
+		fq->q.len = end;
 	} else {
 		/* Check if the fragment is rounded to 8 bytes.
 		 * Required by the RFC.
@@ -478,11 +467,11 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 					  offsetof(struct ipv6hdr, payload_len));
 			return -1;
 		}
-		if (end > fq->len) {
+		if (end > fq->q.len) {
 			/* Some bits beyond end -> corruption. */
-			if (fq->last_in & LAST_IN)
+			if (fq->q.last_in & LAST_IN)
 				goto err;
-			fq->len = end;
+			fq->q.len = end;
 		}
 	}
 
@@ -501,7 +490,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	 * this fragment, right?
 	 */
 	prev = NULL;
-	for(next = fq->fragments; next != NULL; next = next->next) {
+	for(next = fq->q.fragments; next != NULL; next = next->next) {
 		if (FRAG6_CB(next)->offset >= offset)
 			break;	/* bingo! */
 		prev = next;
@@ -538,7 +527,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			if (!pskb_pull(next, i))
 				goto err;
 			FRAG6_CB(next)->offset += i;	/* next fragment */
-			fq->meat -= i;
+			fq->q.meat -= i;
 			if (next->ip_summed != CHECKSUM_UNNECESSARY)
 				next->ip_summed = CHECKSUM_NONE;
 			break;
@@ -553,9 +542,9 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			if (prev)
 				prev->next = next;
 			else
-				fq->fragments = next;
+				fq->q.fragments = next;
 
-			fq->meat -= free_it->len;
+			fq->q.meat -= free_it->len;
 			frag_kfree_skb(free_it, NULL);
 		}
 	}
@@ -567,15 +556,15 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	if (prev)
 		prev->next = skb;
 	else
-		fq->fragments = skb;
+		fq->q.fragments = skb;
 
 	dev = skb->dev;
 	if (dev) {
 		fq->iif = dev->ifindex;
 		skb->dev = NULL;
 	}
-	fq->stamp = skb->tstamp;
-	fq->meat += skb->len;
+	fq->q.stamp = skb->tstamp;
+	fq->q.meat += skb->len;
 	atomic_add(skb->truesize, &ip6_frag_mem);
 
 	/* The first fragment.
@@ -583,14 +572,14 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 	 */
 	if (offset == 0) {
 		fq->nhoffset = nhoff;
-		fq->last_in |= FIRST_IN;
+		fq->q.last_in |= FIRST_IN;
 	}
 
-	if (fq->last_in == (FIRST_IN | LAST_IN) && fq->meat == fq->len)
+	if (fq->q.last_in == (FIRST_IN | LAST_IN) && fq->q.meat == fq->q.len)
 		return ip6_frag_reasm(fq, prev, dev);
 
 	write_lock(&ip6_frag_lock);
-	list_move_tail(&fq->lru_list, &ip6_frag_lru_list);
+	list_move_tail(&fq->q.lru_list, &ip6_frag_lru_list);
 	write_unlock(&ip6_frag_lock);
 	return -1;
 
@@ -612,7 +601,7 @@ err:
 static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 			  struct net_device *dev)
 {
-	struct sk_buff *fp, *head = fq->fragments;
+	struct sk_buff *fp, *head = fq->q.fragments;
 	int payload_len;
 	unsigned int nhoff;
 
@@ -629,11 +618,11 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 		fp->next = head->next;
 		prev->next = fp;
 
-		skb_morph(head, fq->fragments);
-		head->next = fq->fragments->next;
+		skb_morph(head, fq->q.fragments);
+		head->next = fq->q.fragments->next;
 
-		kfree_skb(fq->fragments);
-		fq->fragments = head;
+		kfree_skb(fq->q.fragments);
+		fq->q.fragments = head;
 	}
 
 	BUG_TRAP(head != NULL);
@@ -641,7 +630,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 
 	/* Unfragmented part is taken from the first segment. */
 	payload_len = ((head->data - skb_network_header(head)) -
-		       sizeof(struct ipv6hdr) + fq->len -
+		       sizeof(struct ipv6hdr) + fq->q.len -
 		       sizeof(struct frag_hdr));
 	if (payload_len > IPV6_MAXPLEN)
 		goto out_oversize;
@@ -700,7 +689,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 
 	head->next = NULL;
 	head->dev = dev;
-	head->tstamp = fq->stamp;
+	head->tstamp = fq->q.stamp;
 	ipv6_hdr(head)->payload_len = htons(payload_len);
 	IP6CB(head)->nhoff = nhoff;
 
@@ -713,7 +702,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 	rcu_read_lock();
 	IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
 	rcu_read_unlock();
-	fq->fragments = NULL;
+	fq->q.fragments = NULL;
 	return 1;
 
 out_oversize:
@@ -773,11 +762,11 @@ static int ipv6_frag_rcv(struct sk_buff **skbp)
 			  ip6_dst_idev(skb->dst))) != NULL) {
 		int ret;
 
-		spin_lock(&fq->lock);
+		spin_lock(&fq->q.lock);
 
 		ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
 
-		spin_unlock(&fq->lock);
+		spin_unlock(&fq->q.lock);
 		fq_put(fq, NULL);
 		return ret;
 	}