diff options
Diffstat (limited to 'net/ipv4/ip_fragment.c')
| -rw-r--r-- | net/ipv4/ip_fragment.c | 67 |
1 file changed, 50 insertions(+), 17 deletions(-)
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 86964b353c31..b7c41654dde5 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
| @@ -32,6 +32,9 @@ | |||
| 32 | #include <linux/netdevice.h> | 32 | #include <linux/netdevice.h> |
| 33 | #include <linux/jhash.h> | 33 | #include <linux/jhash.h> |
| 34 | #include <linux/random.h> | 34 | #include <linux/random.h> |
| 35 | #include <linux/slab.h> | ||
| 36 | #include <net/route.h> | ||
| 37 | #include <net/dst.h> | ||
| 35 | #include <net/sock.h> | 38 | #include <net/sock.h> |
| 36 | #include <net/ip.h> | 39 | #include <net/ip.h> |
| 37 | #include <net/icmp.h> | 40 | #include <net/icmp.h> |
| @@ -121,11 +124,8 @@ static int ip4_frag_match(struct inet_frag_queue *q, void *a) | |||
| 121 | } | 124 | } |
| 122 | 125 | ||
| 123 | /* Memory Tracking Functions. */ | 126 | /* Memory Tracking Functions. */ |
| 124 | static __inline__ void frag_kfree_skb(struct netns_frags *nf, | 127 | static void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb) |
| 125 | struct sk_buff *skb, int *work) | ||
| 126 | { | 128 | { |
| 127 | if (work) | ||
| 128 | *work -= skb->truesize; | ||
| 129 | atomic_sub(skb->truesize, &nf->mem); | 129 | atomic_sub(skb->truesize, &nf->mem); |
| 130 | kfree_skb(skb); | 130 | kfree_skb(skb); |
| 131 | } | 131 | } |
| @@ -205,11 +205,34 @@ static void ip_expire(unsigned long arg) | |||
| 205 | if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { | 205 | if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) { |
| 206 | struct sk_buff *head = qp->q.fragments; | 206 | struct sk_buff *head = qp->q.fragments; |
| 207 | 207 | ||
| 208 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ | ||
| 209 | rcu_read_lock(); | 208 | rcu_read_lock(); |
| 210 | head->dev = dev_get_by_index_rcu(net, qp->iif); | 209 | head->dev = dev_get_by_index_rcu(net, qp->iif); |
| 211 | if (head->dev) | 210 | if (!head->dev) |
| 212 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); | 211 | goto out_rcu_unlock; |
| 212 | |||
| 213 | /* | ||
| 214 | * Only search router table for the head fragment, | ||
| 215 | * when defraging timeout at PRE_ROUTING HOOK. | ||
| 216 | */ | ||
| 217 | if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) { | ||
| 218 | const struct iphdr *iph = ip_hdr(head); | ||
| 219 | int err = ip_route_input(head, iph->daddr, iph->saddr, | ||
| 220 | iph->tos, head->dev); | ||
| 221 | if (unlikely(err)) | ||
| 222 | goto out_rcu_unlock; | ||
| 223 | |||
| 224 | /* | ||
| 225 | * Only an end host needs to send an ICMP | ||
| 226 | * "Fragment Reassembly Timeout" message, per RFC792. | ||
| 227 | */ | ||
| 228 | if (skb_rtable(head)->rt_type != RTN_LOCAL) | ||
| 229 | goto out_rcu_unlock; | ||
| 230 | |||
| 231 | } | ||
| 232 | |||
| 233 | /* Send an ICMP "Fragment Reassembly Timeout" message. */ | ||
| 234 | icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0); | ||
| 235 | out_rcu_unlock: | ||
| 213 | rcu_read_unlock(); | 236 | rcu_read_unlock(); |
| 214 | } | 237 | } |
| 215 | out: | 238 | out: |
| @@ -283,7 +306,7 @@ static int ip_frag_reinit(struct ipq *qp) | |||
| 283 | fp = qp->q.fragments; | 306 | fp = qp->q.fragments; |
| 284 | do { | 307 | do { |
| 285 | struct sk_buff *xp = fp->next; | 308 | struct sk_buff *xp = fp->next; |
| 286 | frag_kfree_skb(qp->q.net, fp, NULL); | 309 | frag_kfree_skb(qp->q.net, fp); |
| 287 | fp = xp; | 310 | fp = xp; |
| 288 | } while (fp); | 311 | } while (fp); |
| 289 | 312 | ||
| @@ -291,6 +314,7 @@ static int ip_frag_reinit(struct ipq *qp) | |||
| 291 | qp->q.len = 0; | 314 | qp->q.len = 0; |
| 292 | qp->q.meat = 0; | 315 | qp->q.meat = 0; |
| 293 | qp->q.fragments = NULL; | 316 | qp->q.fragments = NULL; |
| 317 | qp->q.fragments_tail = NULL; | ||
| 294 | qp->iif = 0; | 318 | qp->iif = 0; |
| 295 | 319 | ||
| 296 | return 0; | 320 | return 0; |
| @@ -363,6 +387,11 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
| 363 | * in the chain of fragments so far. We must know where to put | 387 | * in the chain of fragments so far. We must know where to put |
| 364 | * this fragment, right? | 388 | * this fragment, right? |
| 365 | */ | 389 | */ |
| 390 | prev = qp->q.fragments_tail; | ||
| 391 | if (!prev || FRAG_CB(prev)->offset < offset) { | ||
| 392 | next = NULL; | ||
| 393 | goto found; | ||
| 394 | } | ||
| 366 | prev = NULL; | 395 | prev = NULL; |
| 367 | for (next = qp->q.fragments; next != NULL; next = next->next) { | 396 | for (next = qp->q.fragments; next != NULL; next = next->next) { |
| 368 | if (FRAG_CB(next)->offset >= offset) | 397 | if (FRAG_CB(next)->offset >= offset) |
| @@ -370,6 +399,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
| 370 | prev = next; | 399 | prev = next; |
| 371 | } | 400 | } |
| 372 | 401 | ||
| 402 | found: | ||
| 373 | /* We found where to put this one. Check for overlap with | 403 | /* We found where to put this one. Check for overlap with |
| 374 | * preceding fragment, and, if needed, align things so that | 404 | * preceding fragment, and, if needed, align things so that |
| 375 | * any overlaps are eliminated. | 405 | * any overlaps are eliminated. |
| @@ -420,7 +450,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
| 420 | qp->q.fragments = next; | 450 | qp->q.fragments = next; |
| 421 | 451 | ||
| 422 | qp->q.meat -= free_it->len; | 452 | qp->q.meat -= free_it->len; |
| 423 | frag_kfree_skb(qp->q.net, free_it, NULL); | 453 | frag_kfree_skb(qp->q.net, free_it); |
| 424 | } | 454 | } |
| 425 | } | 455 | } |
| 426 | 456 | ||
| @@ -428,6 +458,8 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
| 428 | 458 | ||
| 429 | /* Insert this fragment in the chain of fragments. */ | 459 | /* Insert this fragment in the chain of fragments. */ |
| 430 | skb->next = next; | 460 | skb->next = next; |
| 461 | if (!next) | ||
| 462 | qp->q.fragments_tail = skb; | ||
| 431 | if (prev) | 463 | if (prev) |
| 432 | prev->next = skb; | 464 | prev->next = skb; |
| 433 | else | 465 | else |
| @@ -481,6 +513,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
| 481 | goto out_nomem; | 513 | goto out_nomem; |
| 482 | 514 | ||
| 483 | fp->next = head->next; | 515 | fp->next = head->next; |
| 516 | if (!fp->next) | ||
| 517 | qp->q.fragments_tail = fp; | ||
| 484 | prev->next = fp; | 518 | prev->next = fp; |
| 485 | 519 | ||
| 486 | skb_morph(head, qp->q.fragments); | 520 | skb_morph(head, qp->q.fragments); |
| @@ -530,7 +564,6 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
| 530 | 564 | ||
| 531 | skb_shinfo(head)->frag_list = head->next; | 565 | skb_shinfo(head)->frag_list = head->next; |
| 532 | skb_push(head, head->data - skb_network_header(head)); | 566 | skb_push(head, head->data - skb_network_header(head)); |
| 533 | atomic_sub(head->truesize, &qp->q.net->mem); | ||
| 534 | 567 | ||
| 535 | for (fp=head->next; fp; fp = fp->next) { | 568 | for (fp=head->next; fp; fp = fp->next) { |
| 536 | head->data_len += fp->len; | 569 | head->data_len += fp->len; |
| @@ -540,8 +573,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
| 540 | else if (head->ip_summed == CHECKSUM_COMPLETE) | 573 | else if (head->ip_summed == CHECKSUM_COMPLETE) |
| 541 | head->csum = csum_add(head->csum, fp->csum); | 574 | head->csum = csum_add(head->csum, fp->csum); |
| 542 | head->truesize += fp->truesize; | 575 | head->truesize += fp->truesize; |
| 543 | atomic_sub(fp->truesize, &qp->q.net->mem); | ||
| 544 | } | 576 | } |
| 577 | atomic_sub(head->truesize, &qp->q.net->mem); | ||
| 545 | 578 | ||
| 546 | head->next = NULL; | 579 | head->next = NULL; |
| 547 | head->dev = dev; | 580 | head->dev = dev; |
| @@ -552,6 +585,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
| 552 | iph->tot_len = htons(len); | 585 | iph->tot_len = htons(len); |
| 553 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); | 586 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); |
| 554 | qp->q.fragments = NULL; | 587 | qp->q.fragments = NULL; |
| 588 | qp->q.fragments_tail = NULL; | ||
| 555 | return 0; | 589 | return 0; |
| 556 | 590 | ||
| 557 | out_nomem: | 591 | out_nomem: |
| @@ -598,6 +632,7 @@ int ip_defrag(struct sk_buff *skb, u32 user) | |||
| 598 | kfree_skb(skb); | 632 | kfree_skb(skb); |
| 599 | return -ENOMEM; | 633 | return -ENOMEM; |
| 600 | } | 634 | } |
| 635 | EXPORT_SYMBOL(ip_defrag); | ||
| 601 | 636 | ||
| 602 | #ifdef CONFIG_SYSCTL | 637 | #ifdef CONFIG_SYSCTL |
| 603 | static int zero; | 638 | static int zero; |
| @@ -646,7 +681,7 @@ static struct ctl_table ip4_frags_ctl_table[] = { | |||
| 646 | { } | 681 | { } |
| 647 | }; | 682 | }; |
| 648 | 683 | ||
| 649 | static int ip4_frags_ns_ctl_register(struct net *net) | 684 | static int __net_init ip4_frags_ns_ctl_register(struct net *net) |
| 650 | { | 685 | { |
| 651 | struct ctl_table *table; | 686 | struct ctl_table *table; |
| 652 | struct ctl_table_header *hdr; | 687 | struct ctl_table_header *hdr; |
| @@ -676,7 +711,7 @@ err_alloc: | |||
| 676 | return -ENOMEM; | 711 | return -ENOMEM; |
| 677 | } | 712 | } |
| 678 | 713 | ||
| 679 | static void ip4_frags_ns_ctl_unregister(struct net *net) | 714 | static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net) |
| 680 | { | 715 | { |
| 681 | struct ctl_table *table; | 716 | struct ctl_table *table; |
| 682 | 717 | ||
| @@ -704,7 +739,7 @@ static inline void ip4_frags_ctl_register(void) | |||
| 704 | } | 739 | } |
| 705 | #endif | 740 | #endif |
| 706 | 741 | ||
| 707 | static int ipv4_frags_init_net(struct net *net) | 742 | static int __net_init ipv4_frags_init_net(struct net *net) |
| 708 | { | 743 | { |
| 709 | /* | 744 | /* |
| 710 | * Fragment cache limits. We will commit 256K at one time. Should we | 745 | * Fragment cache limits. We will commit 256K at one time. Should we |
| @@ -726,7 +761,7 @@ static int ipv4_frags_init_net(struct net *net) | |||
| 726 | return ip4_frags_ns_ctl_register(net); | 761 | return ip4_frags_ns_ctl_register(net); |
| 727 | } | 762 | } |
| 728 | 763 | ||
| 729 | static void ipv4_frags_exit_net(struct net *net) | 764 | static void __net_exit ipv4_frags_exit_net(struct net *net) |
| 730 | { | 765 | { |
| 731 | ip4_frags_ns_ctl_unregister(net); | 766 | ip4_frags_ns_ctl_unregister(net); |
| 732 | inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); | 767 | inet_frags_exit_net(&net->ipv4.frags, &ip4_frags); |
| @@ -751,5 +786,3 @@ void __init ipfrag_init(void) | |||
| 751 | ip4_frags.secret_interval = 10 * 60 * HZ; | 786 | ip4_frags.secret_interval = 10 * 60 * HZ; |
| 752 | inet_frags_init(&ip4_frags); | 787 | inet_frags_init(&ip4_frags); |
| 753 | } | 788 | } |
| 754 | |||
| 755 | EXPORT_SYMBOL(ip_defrag); | ||
