author    | Patrick McHardy <kaber@trash.net> | 2009-06-11 10:00:49 -0400
committer | Patrick McHardy <kaber@trash.net> | 2009-06-11 10:00:49 -0400
commit    | 36432dae73cf2c90a59b39c8df9fd8219272b005
tree      | 660b9104305a809ec4fdeb295ca13d6e90790ecc /net/ipv4
parent    | 440f0d588555892601cfe511728a0fc0c8204063
parent    | bb400801c2f40bbd9a688818323ad09abfc4e581
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next-2.6
Diffstat (limited to 'net/ipv4')
30 files changed, 287 insertions, 223 deletions
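Most hunks in this merge are a mechanical conversion: direct `skb->dst` and `skb->rtable` accesses become calls to the `skb_dst()`, `skb_dst_set()`, `skb_dst_drop()` and `skb_rtable()` helpers. A minimal before/after sketch of that pattern, assuming those accessors as they appear in the hunks below; the function is hypothetical, not taken from the patch:

```c
/*
 * Illustrative sketch only: shows the accessor conversion applied
 * throughout this merge. example_attach_route() is hypothetical.
 */
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>

static __be32 example_attach_route(struct sk_buff *skb, struct rtable *rt)
{
	/*
	 * Old style, removed by these hunks:
	 *	dst_release(skb->dst);
	 *	skb->dst = &rt->u.dst;
	 *	return skb->rtable->rt_gateway;
	 */
	skb_dst_drop(skb);		/* release whatever route was attached */
	skb_dst_set(skb, &rt->u.dst);	/* attach the new dst_entry */

	return skb_rtable(skb)->rt_gateway;	/* typed view of skb_dst(skb) */
}
```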
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 5abee4c97449..566ea6c4321d 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -116,7 +116,6 @@ | |||
116 | #include <linux/mroute.h> | 116 | #include <linux/mroute.h> |
117 | #endif | 117 | #endif |
118 | 118 | ||
119 | extern void ip_mc_drop_socket(struct sock *sk); | ||
120 | 119 | ||
121 | /* The inetsw table contains everything that inet_create needs to | 120 | /* The inetsw table contains everything that inet_create needs to |
122 | * build a new socket. | 121 | * build a new socket. |
@@ -375,6 +374,7 @@ lookup_protocol: | |||
375 | inet->uc_ttl = -1; | 374 | inet->uc_ttl = -1; |
376 | inet->mc_loop = 1; | 375 | inet->mc_loop = 1; |
377 | inet->mc_ttl = 1; | 376 | inet->mc_ttl = 1; |
377 | inet->mc_all = 1; | ||
378 | inet->mc_index = 0; | 378 | inet->mc_index = 0; |
379 | inet->mc_list = NULL; | 379 | inet->mc_list = NULL; |
380 | 380 | ||
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index f11931c18381..8a3881e28aca 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -468,13 +468,13 @@ int arp_find(unsigned char *haddr, struct sk_buff *skb) | |||
468 | __be32 paddr; | 468 | __be32 paddr; |
469 | struct neighbour *n; | 469 | struct neighbour *n; |
470 | 470 | ||
471 | if (!skb->dst) { | 471 | if (!skb_dst(skb)) { |
472 | printk(KERN_DEBUG "arp_find is called with dst==NULL\n"); | 472 | printk(KERN_DEBUG "arp_find is called with dst==NULL\n"); |
473 | kfree_skb(skb); | 473 | kfree_skb(skb); |
474 | return 1; | 474 | return 1; |
475 | } | 475 | } |
476 | 476 | ||
477 | paddr = skb->rtable->rt_gateway; | 477 | paddr = skb_rtable(skb)->rt_gateway; |
478 | 478 | ||
479 | if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev)) | 479 | if (arp_set_predefined(inet_addr_type(dev_net(dev), paddr), haddr, paddr, dev)) |
480 | return 0; | 480 | return 0; |
@@ -817,7 +817,7 @@ static int arp_process(struct sk_buff *skb) | |||
817 | if (arp->ar_op == htons(ARPOP_REQUEST) && | 817 | if (arp->ar_op == htons(ARPOP_REQUEST) && |
818 | ip_route_input(skb, tip, sip, 0, dev) == 0) { | 818 | ip_route_input(skb, tip, sip, 0, dev) == 0) { |
819 | 819 | ||
820 | rt = skb->rtable; | 820 | rt = skb_rtable(skb); |
821 | addr_type = rt->rt_type; | 821 | addr_type = rt->rt_type; |
822 | 822 | ||
823 | if (addr_type == RTN_LOCAL) { | 823 | if (addr_type == RTN_LOCAL) { |
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 3f50807237e0..97c410e84388 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -356,7 +356,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, | |||
356 | static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) | 356 | static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb) |
357 | { | 357 | { |
358 | struct ipcm_cookie ipc; | 358 | struct ipcm_cookie ipc; |
359 | struct rtable *rt = skb->rtable; | 359 | struct rtable *rt = skb_rtable(skb); |
360 | struct net *net = dev_net(rt->u.dst.dev); | 360 | struct net *net = dev_net(rt->u.dst.dev); |
361 | struct sock *sk; | 361 | struct sock *sk; |
362 | struct inet_sock *inet; | 362 | struct inet_sock *inet; |
@@ -416,7 +416,7 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
416 | struct iphdr *iph; | 416 | struct iphdr *iph; |
417 | int room; | 417 | int room; |
418 | struct icmp_bxm icmp_param; | 418 | struct icmp_bxm icmp_param; |
419 | struct rtable *rt = skb_in->rtable; | 419 | struct rtable *rt = skb_rtable(skb_in); |
420 | struct ipcm_cookie ipc; | 420 | struct ipcm_cookie ipc; |
421 | __be32 saddr; | 421 | __be32 saddr; |
422 | u8 tos; | 422 | u8 tos; |
@@ -591,13 +591,13 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) | |||
591 | goto relookup_failed; | 591 | goto relookup_failed; |
592 | 592 | ||
593 | /* Ugh! */ | 593 | /* Ugh! */ |
594 | odst = skb_in->dst; | 594 | odst = skb_dst(skb_in); |
595 | err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, | 595 | err = ip_route_input(skb_in, fl.fl4_dst, fl.fl4_src, |
596 | RT_TOS(tos), rt2->u.dst.dev); | 596 | RT_TOS(tos), rt2->u.dst.dev); |
597 | 597 | ||
598 | dst_release(&rt2->u.dst); | 598 | dst_release(&rt2->u.dst); |
599 | rt2 = skb_in->rtable; | 599 | rt2 = skb_rtable(skb_in); |
600 | skb_in->dst = odst; | 600 | skb_dst_set(skb_in, odst); |
601 | } | 601 | } |
602 | 602 | ||
603 | if (err) | 603 | if (err) |
@@ -659,7 +659,7 @@ static void icmp_unreach(struct sk_buff *skb) | |||
659 | u32 info = 0; | 659 | u32 info = 0; |
660 | struct net *net; | 660 | struct net *net; |
661 | 661 | ||
662 | net = dev_net(skb->dst->dev); | 662 | net = dev_net(skb_dst(skb)->dev); |
663 | 663 | ||
664 | /* | 664 | /* |
665 | * Incomplete header ? | 665 | * Incomplete header ? |
@@ -822,7 +822,7 @@ static void icmp_echo(struct sk_buff *skb) | |||
822 | { | 822 | { |
823 | struct net *net; | 823 | struct net *net; |
824 | 824 | ||
825 | net = dev_net(skb->dst->dev); | 825 | net = dev_net(skb_dst(skb)->dev); |
826 | if (!net->ipv4.sysctl_icmp_echo_ignore_all) { | 826 | if (!net->ipv4.sysctl_icmp_echo_ignore_all) { |
827 | struct icmp_bxm icmp_param; | 827 | struct icmp_bxm icmp_param; |
828 | 828 | ||
@@ -873,7 +873,7 @@ static void icmp_timestamp(struct sk_buff *skb) | |||
873 | out: | 873 | out: |
874 | return; | 874 | return; |
875 | out_err: | 875 | out_err: |
876 | ICMP_INC_STATS_BH(dev_net(skb->dst->dev), ICMP_MIB_INERRORS); | 876 | ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS); |
877 | goto out; | 877 | goto out; |
878 | } | 878 | } |
879 | 879 | ||
@@ -926,7 +926,7 @@ static void icmp_address(struct sk_buff *skb) | |||
926 | 926 | ||
927 | static void icmp_address_reply(struct sk_buff *skb) | 927 | static void icmp_address_reply(struct sk_buff *skb) |
928 | { | 928 | { |
929 | struct rtable *rt = skb->rtable; | 929 | struct rtable *rt = skb_rtable(skb); |
930 | struct net_device *dev = skb->dev; | 930 | struct net_device *dev = skb->dev; |
931 | struct in_device *in_dev; | 931 | struct in_device *in_dev; |
932 | struct in_ifaddr *ifa; | 932 | struct in_ifaddr *ifa; |
@@ -970,7 +970,7 @@ static void icmp_discard(struct sk_buff *skb) | |||
970 | int icmp_rcv(struct sk_buff *skb) | 970 | int icmp_rcv(struct sk_buff *skb) |
971 | { | 971 | { |
972 | struct icmphdr *icmph; | 972 | struct icmphdr *icmph; |
973 | struct rtable *rt = skb->rtable; | 973 | struct rtable *rt = skb_rtable(skb); |
974 | struct net *net = dev_net(rt->u.dst.dev); | 974 | struct net *net = dev_net(rt->u.dst.dev); |
975 | 975 | ||
976 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { | 976 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 9eb6219af615..01b4284ed694 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -311,7 +311,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) | |||
311 | return NULL; | 311 | return NULL; |
312 | } | 312 | } |
313 | 313 | ||
314 | skb->dst = &rt->u.dst; | 314 | skb_dst_set(skb, &rt->u.dst); |
315 | skb->dev = dev; | 315 | skb->dev = dev; |
316 | 316 | ||
317 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | 317 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); |
@@ -659,7 +659,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, | |||
659 | return -1; | 659 | return -1; |
660 | } | 660 | } |
661 | 661 | ||
662 | skb->dst = &rt->u.dst; | 662 | skb_dst_set(skb, &rt->u.dst); |
663 | 663 | ||
664 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); | 664 | skb_reserve(skb, LL_RESERVED_SPACE(dev)); |
665 | 665 | ||
@@ -948,7 +948,7 @@ int igmp_rcv(struct sk_buff *skb) | |||
948 | case IGMPV2_HOST_MEMBERSHIP_REPORT: | 948 | case IGMPV2_HOST_MEMBERSHIP_REPORT: |
949 | case IGMPV3_HOST_MEMBERSHIP_REPORT: | 949 | case IGMPV3_HOST_MEMBERSHIP_REPORT: |
950 | /* Is it our report looped back? */ | 950 | /* Is it our report looped back? */ |
951 | if (skb->rtable->fl.iif == 0) | 951 | if (skb_rtable(skb)->fl.iif == 0) |
952 | break; | 952 | break; |
953 | /* don't rely on MC router hearing unicast reports */ | 953 | /* don't rely on MC router hearing unicast reports */ |
954 | if (skb->pkt_type == PACKET_MULTICAST || | 954 | if (skb->pkt_type == PACKET_MULTICAST || |
@@ -2196,7 +2196,7 @@ int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif) | |||
2196 | break; | 2196 | break; |
2197 | } | 2197 | } |
2198 | if (!pmc) | 2198 | if (!pmc) |
2199 | return 1; | 2199 | return inet->mc_all; |
2200 | psl = pmc->sflist; | 2200 | psl = pmc->sflist; |
2201 | if (!psl) | 2201 | if (!psl) |
2202 | return pmc->sfmode == MCAST_EXCLUDE; | 2202 | return pmc->sfmode == MCAST_EXCLUDE; |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index df3fe50bbf0d..a2991bc8e32e 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -42,7 +42,7 @@ static int ip_forward_finish(struct sk_buff *skb) | |||
42 | { | 42 | { |
43 | struct ip_options * opt = &(IPCB(skb)->opt); | 43 | struct ip_options * opt = &(IPCB(skb)->opt); |
44 | 44 | ||
45 | IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); | 45 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
46 | 46 | ||
47 | if (unlikely(opt->optlen)) | 47 | if (unlikely(opt->optlen)) |
48 | ip_forward_options(skb); | 48 | ip_forward_options(skb); |
@@ -81,7 +81,7 @@ int ip_forward(struct sk_buff *skb) | |||
81 | if (!xfrm4_route_forward(skb)) | 81 | if (!xfrm4_route_forward(skb)) |
82 | goto drop; | 82 | goto drop; |
83 | 83 | ||
84 | rt = skb->rtable; | 84 | rt = skb_rtable(skb); |
85 | 85 | ||
86 | if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) | 86 | if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway) |
87 | goto sr_failed; | 87 | goto sr_failed; |
@@ -123,7 +123,7 @@ sr_failed: | |||
123 | 123 | ||
124 | too_many_hops: | 124 | too_many_hops: |
125 | /* Tell the sender its packet died... */ | 125 | /* Tell the sender its packet died... */ |
126 | IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_INHDRERRORS); | 126 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_INHDRERRORS); |
127 | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); | 127 | icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0); |
128 | drop: | 128 | drop: |
129 | kfree_skb(skb); | 129 | kfree_skb(skb); |
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 7985346653bd..575f9bd51ccd 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -507,7 +507,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
507 | /* If the first fragment is fragmented itself, we split | 507 | /* If the first fragment is fragmented itself, we split |
508 | * it to two chunks: the first with data and paged part | 508 | * it to two chunks: the first with data and paged part |
509 | * and the second, holding only fragments. */ | 509 | * and the second, holding only fragments. */ |
510 | if (skb_shinfo(head)->frag_list) { | 510 | if (skb_has_frags(head)) { |
511 | struct sk_buff *clone; | 511 | struct sk_buff *clone; |
512 | int i, plen = 0; | 512 | int i, plen = 0; |
513 | 513 | ||
@@ -516,7 +516,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
516 | clone->next = head->next; | 516 | clone->next = head->next; |
517 | head->next = clone; | 517 | head->next = clone; |
518 | skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; | 518 | skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; |
519 | skb_shinfo(head)->frag_list = NULL; | 519 | skb_frag_list_init(head); |
520 | for (i=0; i<skb_shinfo(head)->nr_frags; i++) | 520 | for (i=0; i<skb_shinfo(head)->nr_frags; i++) |
521 | plen += skb_shinfo(head)->frags[i].size; | 521 | plen += skb_shinfo(head)->frags[i].size; |
522 | clone->len = clone->data_len = head->data_len - plen; | 522 | clone->len = clone->data_len = head->data_len - plen; |
@@ -573,7 +573,7 @@ int ip_defrag(struct sk_buff *skb, u32 user) | |||
573 | struct ipq *qp; | 573 | struct ipq *qp; |
574 | struct net *net; | 574 | struct net *net; |
575 | 575 | ||
576 | net = skb->dev ? dev_net(skb->dev) : dev_net(skb->dst->dev); | 576 | net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev); |
577 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); | 577 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS); |
578 | 578 | ||
579 | /* Start by cleaning up the memory. */ | 579 | /* Start by cleaning up the memory. */ |
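Besides the dst accessors, the reassembly path above (and ip_fragment() in ip_output.c below) stops touching `skb_shinfo(skb)->frag_list` directly and uses the `skb_has_frags()`, `skb_walk_frags()` and `skb_frag_list_init()` helpers instead. A minimal sketch of that pattern, assuming only the helpers visible in these hunks; the function itself is hypothetical:

```c
/*
 * Sketch of the frag_list helper pattern adopted in this merge.
 * example_detach_frag_list() is hypothetical, not part of the diff.
 */
#include <linux/skbuff.h>

static struct sk_buff *example_detach_frag_list(struct sk_buff *head)
{
	struct sk_buff *frag, *list;
	unsigned int bytes = 0;

	if (!skb_has_frags(head))	/* was: if (skb_shinfo(head)->frag_list) */
		return NULL;

	skb_walk_frags(head, frag)	/* was: for (frag = skb_shinfo(head)->frag_list; ...) */
		bytes += frag->len;	/* per-fragment work goes here */

	list = skb_shinfo(head)->frag_list;
	skb_frag_list_init(head);	/* was: skb_shinfo(head)->frag_list = NULL */

	head->len -= bytes;		/* keep head's accounting consistent */
	head->data_len -= bytes;
	return list;			/* caller now owns the detached chain */
}
```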
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index e62510d5ea5a..44e2a3d2359a 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -602,7 +602,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
602 | #ifdef CONFIG_NET_IPGRE_BROADCAST | 602 | #ifdef CONFIG_NET_IPGRE_BROADCAST |
603 | if (ipv4_is_multicast(iph->daddr)) { | 603 | if (ipv4_is_multicast(iph->daddr)) { |
604 | /* Looped back packet, drop it! */ | 604 | /* Looped back packet, drop it! */ |
605 | if (skb->rtable->fl.iif == 0) | 605 | if (skb_rtable(skb)->fl.iif == 0) |
606 | goto drop; | 606 | goto drop; |
607 | stats->multicast++; | 607 | stats->multicast++; |
608 | skb->pkt_type = PACKET_BROADCAST; | 608 | skb->pkt_type = PACKET_BROADCAST; |
@@ -643,8 +643,7 @@ static int ipgre_rcv(struct sk_buff *skb) | |||
643 | stats->rx_packets++; | 643 | stats->rx_packets++; |
644 | stats->rx_bytes += len; | 644 | stats->rx_bytes += len; |
645 | skb->dev = tunnel->dev; | 645 | skb->dev = tunnel->dev; |
646 | dst_release(skb->dst); | 646 | skb_dst_drop(skb); |
647 | skb->dst = NULL; | ||
648 | nf_reset(skb); | 647 | nf_reset(skb); |
649 | 648 | ||
650 | skb_reset_network_header(skb); | 649 | skb_reset_network_header(skb); |
@@ -698,13 +697,13 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
698 | if ((dst = tiph->daddr) == 0) { | 697 | if ((dst = tiph->daddr) == 0) { |
699 | /* NBMA tunnel */ | 698 | /* NBMA tunnel */ |
700 | 699 | ||
701 | if (skb->dst == NULL) { | 700 | if (skb_dst(skb) == NULL) { |
702 | stats->tx_fifo_errors++; | 701 | stats->tx_fifo_errors++; |
703 | goto tx_error; | 702 | goto tx_error; |
704 | } | 703 | } |
705 | 704 | ||
706 | if (skb->protocol == htons(ETH_P_IP)) { | 705 | if (skb->protocol == htons(ETH_P_IP)) { |
707 | rt = skb->rtable; | 706 | rt = skb_rtable(skb); |
708 | if ((dst = rt->rt_gateway) == 0) | 707 | if ((dst = rt->rt_gateway) == 0) |
709 | goto tx_error_icmp; | 708 | goto tx_error_icmp; |
710 | } | 709 | } |
@@ -712,7 +711,7 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
712 | else if (skb->protocol == htons(ETH_P_IPV6)) { | 711 | else if (skb->protocol == htons(ETH_P_IPV6)) { |
713 | struct in6_addr *addr6; | 712 | struct in6_addr *addr6; |
714 | int addr_type; | 713 | int addr_type; |
715 | struct neighbour *neigh = skb->dst->neighbour; | 714 | struct neighbour *neigh = skb_dst(skb)->neighbour; |
716 | 715 | ||
717 | if (neigh == NULL) | 716 | if (neigh == NULL) |
718 | goto tx_error; | 717 | goto tx_error; |
@@ -766,10 +765,10 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
766 | if (df) | 765 | if (df) |
767 | mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen; | 766 | mtu = dst_mtu(&rt->u.dst) - dev->hard_header_len - tunnel->hlen; |
768 | else | 767 | else |
769 | mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; | 768 | mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; |
770 | 769 | ||
771 | if (skb->dst) | 770 | if (skb_dst(skb)) |
772 | skb->dst->ops->update_pmtu(skb->dst, mtu); | 771 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); |
773 | 772 | ||
774 | if (skb->protocol == htons(ETH_P_IP)) { | 773 | if (skb->protocol == htons(ETH_P_IP)) { |
775 | df |= (old_iph->frag_off&htons(IP_DF)); | 774 | df |= (old_iph->frag_off&htons(IP_DF)); |
@@ -783,14 +782,14 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
783 | } | 782 | } |
784 | #ifdef CONFIG_IPV6 | 783 | #ifdef CONFIG_IPV6 |
785 | else if (skb->protocol == htons(ETH_P_IPV6)) { | 784 | else if (skb->protocol == htons(ETH_P_IPV6)) { |
786 | struct rt6_info *rt6 = (struct rt6_info *)skb->dst; | 785 | struct rt6_info *rt6 = (struct rt6_info *)skb_dst(skb); |
787 | 786 | ||
788 | if (rt6 && mtu < dst_mtu(skb->dst) && mtu >= IPV6_MIN_MTU) { | 787 | if (rt6 && mtu < dst_mtu(skb_dst(skb)) && mtu >= IPV6_MIN_MTU) { |
789 | if ((tunnel->parms.iph.daddr && | 788 | if ((tunnel->parms.iph.daddr && |
790 | !ipv4_is_multicast(tunnel->parms.iph.daddr)) || | 789 | !ipv4_is_multicast(tunnel->parms.iph.daddr)) || |
791 | rt6->rt6i_dst.plen == 128) { | 790 | rt6->rt6i_dst.plen == 128) { |
792 | rt6->rt6i_flags |= RTF_MODIFIED; | 791 | rt6->rt6i_flags |= RTF_MODIFIED; |
793 | skb->dst->metrics[RTAX_MTU-1] = mtu; | 792 | skb_dst(skb)->metrics[RTAX_MTU-1] = mtu; |
794 | } | 793 | } |
795 | } | 794 | } |
796 | 795 | ||
@@ -837,8 +836,8 @@ static int ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
837 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 836 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
838 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | | 837 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
839 | IPSKB_REROUTED); | 838 | IPSKB_REROUTED); |
840 | dst_release(skb->dst); | 839 | skb_dst_drop(skb); |
841 | skb->dst = &rt->u.dst; | 840 | skb_dst_set(skb, &rt->u.dst); |
842 | 841 | ||
843 | /* | 842 | /* |
844 | * Push down and install the IPIP header. | 843 | * Push down and install the IPIP header. |
@@ -1238,6 +1237,7 @@ static void ipgre_tunnel_setup(struct net_device *dev) | |||
1238 | dev->iflink = 0; | 1237 | dev->iflink = 0; |
1239 | dev->addr_len = 4; | 1238 | dev->addr_len = 4; |
1240 | dev->features |= NETIF_F_NETNS_LOCAL; | 1239 | dev->features |= NETIF_F_NETNS_LOCAL; |
1240 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | ||
1241 | } | 1241 | } |
1242 | 1242 | ||
1243 | static int ipgre_tunnel_init(struct net_device *dev) | 1243 | static int ipgre_tunnel_init(struct net_device *dev) |
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 40f6206b2aa9..490ce20faf38 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -329,7 +329,7 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
329 | * Initialise the virtual path cache for the packet. It describes | 329 | * Initialise the virtual path cache for the packet. It describes |
330 | * how the packet travels inside Linux networking. | 330 | * how the packet travels inside Linux networking. |
331 | */ | 331 | */ |
332 | if (skb->dst == NULL) { | 332 | if (skb_dst(skb) == NULL) { |
333 | int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, | 333 | int err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, |
334 | skb->dev); | 334 | skb->dev); |
335 | if (unlikely(err)) { | 335 | if (unlikely(err)) { |
@@ -344,9 +344,9 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
344 | } | 344 | } |
345 | 345 | ||
346 | #ifdef CONFIG_NET_CLS_ROUTE | 346 | #ifdef CONFIG_NET_CLS_ROUTE |
347 | if (unlikely(skb->dst->tclassid)) { | 347 | if (unlikely(skb_dst(skb)->tclassid)) { |
348 | struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id()); | 348 | struct ip_rt_acct *st = per_cpu_ptr(ip_rt_acct, smp_processor_id()); |
349 | u32 idx = skb->dst->tclassid; | 349 | u32 idx = skb_dst(skb)->tclassid; |
350 | st[idx&0xFF].o_packets++; | 350 | st[idx&0xFF].o_packets++; |
351 | st[idx&0xFF].o_bytes += skb->len; | 351 | st[idx&0xFF].o_bytes += skb->len; |
352 | st[(idx>>16)&0xFF].i_packets++; | 352 | st[(idx>>16)&0xFF].i_packets++; |
@@ -357,7 +357,7 @@ static int ip_rcv_finish(struct sk_buff *skb) | |||
357 | if (iph->ihl > 5 && ip_rcv_options(skb)) | 357 | if (iph->ihl > 5 && ip_rcv_options(skb)) |
358 | goto drop; | 358 | goto drop; |
359 | 359 | ||
360 | rt = skb->rtable; | 360 | rt = skb_rtable(skb); |
361 | if (rt->rt_type == RTN_MULTICAST) { | 361 | if (rt->rt_type == RTN_MULTICAST) { |
362 | IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST, | 362 | IP_UPD_PO_STATS_BH(dev_net(rt->u.dst.dev), IPSTATS_MIB_INMCAST, |
363 | skb->len); | 363 | skb->len); |
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 2c88da6e7862..94bf105ef3c9 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -102,7 +102,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) | |||
102 | sptr = skb_network_header(skb); | 102 | sptr = skb_network_header(skb); |
103 | dptr = dopt->__data; | 103 | dptr = dopt->__data; |
104 | 104 | ||
105 | daddr = skb->rtable->rt_spec_dst; | 105 | daddr = skb_rtable(skb)->rt_spec_dst; |
106 | 106 | ||
107 | if (sopt->rr) { | 107 | if (sopt->rr) { |
108 | optlen = sptr[sopt->rr+1]; | 108 | optlen = sptr[sopt->rr+1]; |
@@ -143,7 +143,7 @@ int ip_options_echo(struct ip_options * dopt, struct sk_buff * skb) | |||
143 | __be32 addr; | 143 | __be32 addr; |
144 | 144 | ||
145 | memcpy(&addr, sptr+soffset-1, 4); | 145 | memcpy(&addr, sptr+soffset-1, 4); |
146 | if (inet_addr_type(dev_net(skb->dst->dev), addr) != RTN_LOCAL) { | 146 | if (inet_addr_type(dev_net(skb_dst(skb)->dev), addr) != RTN_LOCAL) { |
147 | dopt->ts_needtime = 1; | 147 | dopt->ts_needtime = 1; |
148 | soffset += 8; | 148 | soffset += 8; |
149 | } | 149 | } |
@@ -257,7 +257,7 @@ int ip_options_compile(struct net *net, | |||
257 | struct rtable *rt = NULL; | 257 | struct rtable *rt = NULL; |
258 | 258 | ||
259 | if (skb != NULL) { | 259 | if (skb != NULL) { |
260 | rt = skb->rtable; | 260 | rt = skb_rtable(skb); |
261 | optptr = (unsigned char *)&(ip_hdr(skb)[1]); | 261 | optptr = (unsigned char *)&(ip_hdr(skb)[1]); |
262 | } else | 262 | } else |
263 | optptr = opt->__data; | 263 | optptr = opt->__data; |
@@ -550,7 +550,7 @@ void ip_forward_options(struct sk_buff *skb) | |||
550 | { | 550 | { |
551 | struct ip_options * opt = &(IPCB(skb)->opt); | 551 | struct ip_options * opt = &(IPCB(skb)->opt); |
552 | unsigned char * optptr; | 552 | unsigned char * optptr; |
553 | struct rtable *rt = skb->rtable; | 553 | struct rtable *rt = skb_rtable(skb); |
554 | unsigned char *raw = skb_network_header(skb); | 554 | unsigned char *raw = skb_network_header(skb); |
555 | 555 | ||
556 | if (opt->rr_needaddr) { | 556 | if (opt->rr_needaddr) { |
@@ -598,7 +598,7 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
598 | __be32 nexthop; | 598 | __be32 nexthop; |
599 | struct iphdr *iph = ip_hdr(skb); | 599 | struct iphdr *iph = ip_hdr(skb); |
600 | unsigned char *optptr = skb_network_header(skb) + opt->srr; | 600 | unsigned char *optptr = skb_network_header(skb) + opt->srr; |
601 | struct rtable *rt = skb->rtable; | 601 | struct rtable *rt = skb_rtable(skb); |
602 | struct rtable *rt2; | 602 | struct rtable *rt2; |
603 | int err; | 603 | int err; |
604 | 604 | ||
@@ -623,13 +623,13 @@ int ip_options_rcv_srr(struct sk_buff *skb) | |||
623 | } | 623 | } |
624 | memcpy(&nexthop, &optptr[srrptr-1], 4); | 624 | memcpy(&nexthop, &optptr[srrptr-1], 4); |
625 | 625 | ||
626 | rt = skb->rtable; | 626 | rt = skb_rtable(skb); |
627 | skb->rtable = NULL; | 627 | skb_dst_set(skb, NULL); |
628 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); | 628 | err = ip_route_input(skb, nexthop, iph->saddr, iph->tos, skb->dev); |
629 | rt2 = skb->rtable; | 629 | rt2 = skb_rtable(skb); |
630 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { | 630 | if (err || (rt2->rt_type != RTN_UNICAST && rt2->rt_type != RTN_LOCAL)) { |
631 | ip_rt_put(rt2); | 631 | ip_rt_put(rt2); |
632 | skb->rtable = rt; | 632 | skb_dst_set(skb, &rt->u.dst); |
633 | return -EINVAL; | 633 | return -EINVAL; |
634 | } | 634 | } |
635 | ip_rt_put(rt); | 635 | ip_rt_put(rt); |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index ea19c37ccc0c..247026282669 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -95,7 +95,7 @@ int __ip_local_out(struct sk_buff *skb) | |||
95 | 95 | ||
96 | iph->tot_len = htons(skb->len); | 96 | iph->tot_len = htons(skb->len); |
97 | ip_send_check(iph); | 97 | ip_send_check(iph); |
98 | return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb->dst->dev, | 98 | return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev, |
99 | dst_output); | 99 | dst_output); |
100 | } | 100 | } |
101 | 101 | ||
@@ -118,7 +118,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb) | |||
118 | __skb_pull(newskb, skb_network_offset(newskb)); | 118 | __skb_pull(newskb, skb_network_offset(newskb)); |
119 | newskb->pkt_type = PACKET_LOOPBACK; | 119 | newskb->pkt_type = PACKET_LOOPBACK; |
120 | newskb->ip_summed = CHECKSUM_UNNECESSARY; | 120 | newskb->ip_summed = CHECKSUM_UNNECESSARY; |
121 | WARN_ON(!newskb->dst); | 121 | WARN_ON(!skb_dst(newskb)); |
122 | netif_rx(newskb); | 122 | netif_rx(newskb); |
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
@@ -140,7 +140,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, | |||
140 | __be32 saddr, __be32 daddr, struct ip_options *opt) | 140 | __be32 saddr, __be32 daddr, struct ip_options *opt) |
141 | { | 141 | { |
142 | struct inet_sock *inet = inet_sk(sk); | 142 | struct inet_sock *inet = inet_sk(sk); |
143 | struct rtable *rt = skb->rtable; | 143 | struct rtable *rt = skb_rtable(skb); |
144 | struct iphdr *iph; | 144 | struct iphdr *iph; |
145 | 145 | ||
146 | /* Build the IP header. */ | 146 | /* Build the IP header. */ |
@@ -176,7 +176,7 @@ EXPORT_SYMBOL_GPL(ip_build_and_send_pkt); | |||
176 | 176 | ||
177 | static inline int ip_finish_output2(struct sk_buff *skb) | 177 | static inline int ip_finish_output2(struct sk_buff *skb) |
178 | { | 178 | { |
179 | struct dst_entry *dst = skb->dst; | 179 | struct dst_entry *dst = skb_dst(skb); |
180 | struct rtable *rt = (struct rtable *)dst; | 180 | struct rtable *rt = (struct rtable *)dst; |
181 | struct net_device *dev = dst->dev; | 181 | struct net_device *dev = dst->dev; |
182 | unsigned int hh_len = LL_RESERVED_SPACE(dev); | 182 | unsigned int hh_len = LL_RESERVED_SPACE(dev); |
@@ -217,14 +217,14 @@ static inline int ip_skb_dst_mtu(struct sk_buff *skb) | |||
217 | struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; | 217 | struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL; |
218 | 218 | ||
219 | return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? | 219 | return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ? |
220 | skb->dst->dev->mtu : dst_mtu(skb->dst); | 220 | skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb)); |
221 | } | 221 | } |
222 | 222 | ||
223 | static int ip_finish_output(struct sk_buff *skb) | 223 | static int ip_finish_output(struct sk_buff *skb) |
224 | { | 224 | { |
225 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) | 225 | #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM) |
226 | /* Policy lookup after SNAT yielded a new policy */ | 226 | /* Policy lookup after SNAT yielded a new policy */ |
227 | if (skb->dst->xfrm != NULL) { | 227 | if (skb_dst(skb)->xfrm != NULL) { |
228 | IPCB(skb)->flags |= IPSKB_REROUTED; | 228 | IPCB(skb)->flags |= IPSKB_REROUTED; |
229 | return dst_output(skb); | 229 | return dst_output(skb); |
230 | } | 230 | } |
@@ -238,7 +238,7 @@ static int ip_finish_output(struct sk_buff *skb) | |||
238 | int ip_mc_output(struct sk_buff *skb) | 238 | int ip_mc_output(struct sk_buff *skb) |
239 | { | 239 | { |
240 | struct sock *sk = skb->sk; | 240 | struct sock *sk = skb->sk; |
241 | struct rtable *rt = skb->rtable; | 241 | struct rtable *rt = skb_rtable(skb); |
242 | struct net_device *dev = rt->u.dst.dev; | 242 | struct net_device *dev = rt->u.dst.dev; |
243 | 243 | ||
244 | /* | 244 | /* |
@@ -296,7 +296,7 @@ int ip_mc_output(struct sk_buff *skb) | |||
296 | 296 | ||
297 | int ip_output(struct sk_buff *skb) | 297 | int ip_output(struct sk_buff *skb) |
298 | { | 298 | { |
299 | struct net_device *dev = skb->dst->dev; | 299 | struct net_device *dev = skb_dst(skb)->dev; |
300 | 300 | ||
301 | IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len); | 301 | IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len); |
302 | 302 | ||
@@ -319,7 +319,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | |||
319 | /* Skip all of this if the packet is already routed, | 319 | /* Skip all of this if the packet is already routed, |
320 | * f.e. by something like SCTP. | 320 | * f.e. by something like SCTP. |
321 | */ | 321 | */ |
322 | rt = skb->rtable; | 322 | rt = skb_rtable(skb); |
323 | if (rt != NULL) | 323 | if (rt != NULL) |
324 | goto packet_routed; | 324 | goto packet_routed; |
325 | 325 | ||
@@ -355,7 +355,7 @@ int ip_queue_xmit(struct sk_buff *skb, int ipfragok) | |||
355 | } | 355 | } |
356 | sk_setup_caps(sk, &rt->u.dst); | 356 | sk_setup_caps(sk, &rt->u.dst); |
357 | } | 357 | } |
358 | skb->dst = dst_clone(&rt->u.dst); | 358 | skb_dst_set(skb, dst_clone(&rt->u.dst)); |
359 | 359 | ||
360 | packet_routed: | 360 | packet_routed: |
361 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) | 361 | if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) |
@@ -401,8 +401,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from) | |||
401 | to->pkt_type = from->pkt_type; | 401 | to->pkt_type = from->pkt_type; |
402 | to->priority = from->priority; | 402 | to->priority = from->priority; |
403 | to->protocol = from->protocol; | 403 | to->protocol = from->protocol; |
404 | dst_release(to->dst); | 404 | skb_dst_drop(to); |
405 | to->dst = dst_clone(from->dst); | 405 | skb_dst_set(to, dst_clone(skb_dst(from))); |
406 | to->dev = from->dev; | 406 | to->dev = from->dev; |
407 | to->mark = from->mark; | 407 | to->mark = from->mark; |
408 | 408 | ||
@@ -440,7 +440,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
440 | unsigned int mtu, hlen, left, len, ll_rs, pad; | 440 | unsigned int mtu, hlen, left, len, ll_rs, pad; |
441 | int offset; | 441 | int offset; |
442 | __be16 not_last_frag; | 442 | __be16 not_last_frag; |
443 | struct rtable *rt = skb->rtable; | 443 | struct rtable *rt = skb_rtable(skb); |
444 | int err = 0; | 444 | int err = 0; |
445 | 445 | ||
446 | dev = rt->u.dst.dev; | 446 | dev = rt->u.dst.dev; |
@@ -474,7 +474,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
474 | * LATER: this step can be merged to real generation of fragments, | 474 | * LATER: this step can be merged to real generation of fragments, |
475 | * we can switch to copy when see the first bad fragment. | 475 | * we can switch to copy when see the first bad fragment. |
476 | */ | 476 | */ |
477 | if (skb_shinfo(skb)->frag_list) { | 477 | if (skb_has_frags(skb)) { |
478 | struct sk_buff *frag; | 478 | struct sk_buff *frag; |
479 | int first_len = skb_pagelen(skb); | 479 | int first_len = skb_pagelen(skb); |
480 | int truesizes = 0; | 480 | int truesizes = 0; |
@@ -485,7 +485,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
485 | skb_cloned(skb)) | 485 | skb_cloned(skb)) |
486 | goto slow_path; | 486 | goto slow_path; |
487 | 487 | ||
488 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { | 488 | skb_walk_frags(skb, frag) { |
489 | /* Correct geometry. */ | 489 | /* Correct geometry. */ |
490 | if (frag->len > mtu || | 490 | if (frag->len > mtu || |
491 | ((frag->len & 7) && frag->next) || | 491 | ((frag->len & 7) && frag->next) || |
@@ -498,7 +498,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
498 | 498 | ||
499 | BUG_ON(frag->sk); | 499 | BUG_ON(frag->sk); |
500 | if (skb->sk) { | 500 | if (skb->sk) { |
501 | sock_hold(skb->sk); | ||
502 | frag->sk = skb->sk; | 501 | frag->sk = skb->sk; |
503 | frag->destructor = sock_wfree; | 502 | frag->destructor = sock_wfree; |
504 | truesizes += frag->truesize; | 503 | truesizes += frag->truesize; |
@@ -510,7 +509,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *)) | |||
510 | err = 0; | 509 | err = 0; |
511 | offset = 0; | 510 | offset = 0; |
512 | frag = skb_shinfo(skb)->frag_list; | 511 | frag = skb_shinfo(skb)->frag_list; |
513 | skb_shinfo(skb)->frag_list = NULL; | 512 | skb_frag_list_init(skb); |
514 | skb->data_len = first_len - skb_headlen(skb); | 513 | skb->data_len = first_len - skb_headlen(skb); |
515 | skb->truesize -= truesizes; | 514 | skb->truesize -= truesizes; |
516 | skb->len = first_len; | 515 | skb->len = first_len; |
@@ -1294,7 +1293,7 @@ int ip_push_pending_frames(struct sock *sk) | |||
1294 | * on dst refcount | 1293 | * on dst refcount |
1295 | */ | 1294 | */ |
1296 | inet->cork.dst = NULL; | 1295 | inet->cork.dst = NULL; |
1297 | skb->dst = &rt->u.dst; | 1296 | skb_dst_set(skb, &rt->u.dst); |
1298 | 1297 | ||
1299 | if (iph->protocol == IPPROTO_ICMP) | 1298 | if (iph->protocol == IPPROTO_ICMP) |
1300 | icmp_out_count(net, ((struct icmphdr *) | 1299 | icmp_out_count(net, ((struct icmphdr *) |
@@ -1362,7 +1361,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar | |||
1362 | } replyopts; | 1361 | } replyopts; |
1363 | struct ipcm_cookie ipc; | 1362 | struct ipcm_cookie ipc; |
1364 | __be32 daddr; | 1363 | __be32 daddr; |
1365 | struct rtable *rt = skb->rtable; | 1364 | struct rtable *rt = skb_rtable(skb); |
1366 | 1365 | ||
1367 | if (ip_options_echo(&replyopts.opt, skb)) | 1366 | if (ip_options_echo(&replyopts.opt, skb)) |
1368 | return; | 1367 | return; |
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 43c05854d752..fc7993e9061f 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -57,7 +57,7 @@ | |||
57 | static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) | 57 | static void ip_cmsg_recv_pktinfo(struct msghdr *msg, struct sk_buff *skb) |
58 | { | 58 | { |
59 | struct in_pktinfo info; | 59 | struct in_pktinfo info; |
60 | struct rtable *rt = skb->rtable; | 60 | struct rtable *rt = skb_rtable(skb); |
61 | 61 | ||
62 | info.ipi_addr.s_addr = ip_hdr(skb)->daddr; | 62 | info.ipi_addr.s_addr = ip_hdr(skb)->daddr; |
63 | if (rt) { | 63 | if (rt) { |
@@ -157,38 +157,39 @@ void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb) | |||
157 | /* Ordered by supposed usage frequency */ | 157 | /* Ordered by supposed usage frequency */ |
158 | if (flags & 1) | 158 | if (flags & 1) |
159 | ip_cmsg_recv_pktinfo(msg, skb); | 159 | ip_cmsg_recv_pktinfo(msg, skb); |
160 | if ((flags>>=1) == 0) | 160 | if ((flags >>= 1) == 0) |
161 | return; | 161 | return; |
162 | 162 | ||
163 | if (flags & 1) | 163 | if (flags & 1) |
164 | ip_cmsg_recv_ttl(msg, skb); | 164 | ip_cmsg_recv_ttl(msg, skb); |
165 | if ((flags>>=1) == 0) | 165 | if ((flags >>= 1) == 0) |
166 | return; | 166 | return; |
167 | 167 | ||
168 | if (flags & 1) | 168 | if (flags & 1) |
169 | ip_cmsg_recv_tos(msg, skb); | 169 | ip_cmsg_recv_tos(msg, skb); |
170 | if ((flags>>=1) == 0) | 170 | if ((flags >>= 1) == 0) |
171 | return; | 171 | return; |
172 | 172 | ||
173 | if (flags & 1) | 173 | if (flags & 1) |
174 | ip_cmsg_recv_opts(msg, skb); | 174 | ip_cmsg_recv_opts(msg, skb); |
175 | if ((flags>>=1) == 0) | 175 | if ((flags >>= 1) == 0) |
176 | return; | 176 | return; |
177 | 177 | ||
178 | if (flags & 1) | 178 | if (flags & 1) |
179 | ip_cmsg_recv_retopts(msg, skb); | 179 | ip_cmsg_recv_retopts(msg, skb); |
180 | if ((flags>>=1) == 0) | 180 | if ((flags >>= 1) == 0) |
181 | return; | 181 | return; |
182 | 182 | ||
183 | if (flags & 1) | 183 | if (flags & 1) |
184 | ip_cmsg_recv_security(msg, skb); | 184 | ip_cmsg_recv_security(msg, skb); |
185 | 185 | ||
186 | if ((flags>>=1) == 0) | 186 | if ((flags >>= 1) == 0) |
187 | return; | 187 | return; |
188 | if (flags & 1) | 188 | if (flags & 1) |
189 | ip_cmsg_recv_dstaddr(msg, skb); | 189 | ip_cmsg_recv_dstaddr(msg, skb); |
190 | 190 | ||
191 | } | 191 | } |
192 | EXPORT_SYMBOL(ip_cmsg_recv); | ||
192 | 193 | ||
193 | int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) | 194 | int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) |
194 | { | 195 | { |
@@ -203,7 +204,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) | |||
203 | switch (cmsg->cmsg_type) { | 204 | switch (cmsg->cmsg_type) { |
204 | case IP_RETOPTS: | 205 | case IP_RETOPTS: |
205 | err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); | 206 | err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr)); |
206 | err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), err < 40 ? err : 40); | 207 | err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg), |
208 | err < 40 ? err : 40); | ||
207 | if (err) | 209 | if (err) |
208 | return err; | 210 | return err; |
209 | break; | 211 | break; |
@@ -238,7 +240,8 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc) | |||
238 | struct ip_ra_chain *ip_ra_chain; | 240 | struct ip_ra_chain *ip_ra_chain; |
239 | DEFINE_RWLOCK(ip_ra_lock); | 241 | DEFINE_RWLOCK(ip_ra_lock); |
240 | 242 | ||
241 | int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct sock *)) | 243 | int ip_ra_control(struct sock *sk, unsigned char on, |
244 | void (*destructor)(struct sock *)) | ||
242 | { | 245 | { |
243 | struct ip_ra_chain *ra, *new_ra, **rap; | 246 | struct ip_ra_chain *ra, *new_ra, **rap; |
244 | 247 | ||
@@ -248,7 +251,7 @@ int ip_ra_control(struct sock *sk, unsigned char on, void (*destructor)(struct s | |||
248 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; | 251 | new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; |
249 | 252 | ||
250 | write_lock_bh(&ip_ra_lock); | 253 | write_lock_bh(&ip_ra_lock); |
251 | for (rap = &ip_ra_chain; (ra=*rap) != NULL; rap = &ra->next) { | 254 | for (rap = &ip_ra_chain; (ra = *rap) != NULL; rap = &ra->next) { |
252 | if (ra->sk == sk) { | 255 | if (ra->sk == sk) { |
253 | if (on) { | 256 | if (on) { |
254 | write_unlock_bh(&ip_ra_lock); | 257 | write_unlock_bh(&ip_ra_lock); |
@@ -416,7 +419,8 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len) | |||
416 | /* Reset and regenerate socket error */ | 419 | /* Reset and regenerate socket error */ |
417 | spin_lock_bh(&sk->sk_error_queue.lock); | 420 | spin_lock_bh(&sk->sk_error_queue.lock); |
418 | sk->sk_err = 0; | 421 | sk->sk_err = 0; |
419 | if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) { | 422 | skb2 = skb_peek(&sk->sk_error_queue); |
423 | if (skb2 != NULL) { | ||
420 | sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; | 424 | sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno; |
421 | spin_unlock_bh(&sk->sk_error_queue.lock); | 425 | spin_unlock_bh(&sk->sk_error_queue.lock); |
422 | sk->sk_error_report(sk); | 426 | sk->sk_error_report(sk); |
@@ -431,8 +435,8 @@ out: | |||
431 | 435 | ||
432 | 436 | ||
433 | /* | 437 | /* |
434 | * Socket option code for IP. This is the end of the line after any TCP,UDP etc options on | 438 | * Socket option code for IP. This is the end of the line after any |
435 | * an IP socket. | 439 | * TCP,UDP etc options on an IP socket. |
436 | */ | 440 | */ |
437 | 441 | ||
438 | static int do_ip_setsockopt(struct sock *sk, int level, | 442 | static int do_ip_setsockopt(struct sock *sk, int level, |
@@ -449,6 +453,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
449 | (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | | 453 | (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | |
450 | (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) || | 454 | (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT))) || |
451 | optname == IP_MULTICAST_TTL || | 455 | optname == IP_MULTICAST_TTL || |
456 | optname == IP_MULTICAST_ALL || | ||
452 | optname == IP_MULTICAST_LOOP || | 457 | optname == IP_MULTICAST_LOOP || |
453 | optname == IP_RECVORIGDSTADDR) { | 458 | optname == IP_RECVORIGDSTADDR) { |
454 | if (optlen >= sizeof(int)) { | 459 | if (optlen >= sizeof(int)) { |
@@ -474,7 +479,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
474 | switch (optname) { | 479 | switch (optname) { |
475 | case IP_OPTIONS: | 480 | case IP_OPTIONS: |
476 | { | 481 | { |
477 | struct ip_options * opt = NULL; | 482 | struct ip_options *opt = NULL; |
478 | if (optlen > 40 || optlen < 0) | 483 | if (optlen > 40 || optlen < 0) |
479 | goto e_inval; | 484 | goto e_inval; |
480 | err = ip_options_get_from_user(sock_net(sk), &opt, | 485 | err = ip_options_get_from_user(sock_net(sk), &opt, |
@@ -556,9 +561,9 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
556 | } | 561 | } |
557 | break; | 562 | break; |
558 | case IP_TTL: | 563 | case IP_TTL: |
559 | if (optlen<1) | 564 | if (optlen < 1) |
560 | goto e_inval; | 565 | goto e_inval; |
561 | if (val != -1 && (val < 1 || val>255)) | 566 | if (val != -1 && (val < 0 || val > 255)) |
562 | goto e_inval; | 567 | goto e_inval; |
563 | inet->uc_ttl = val; | 568 | inet->uc_ttl = val; |
564 | break; | 569 | break; |
@@ -570,7 +575,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
570 | inet->hdrincl = val ? 1 : 0; | 575 | inet->hdrincl = val ? 1 : 0; |
571 | break; | 576 | break; |
572 | case IP_MTU_DISCOVER: | 577 | case IP_MTU_DISCOVER: |
573 | if (val<0 || val>3) | 578 | if (val < 0 || val > 3) |
574 | goto e_inval; | 579 | goto e_inval; |
575 | inet->pmtudisc = val; | 580 | inet->pmtudisc = val; |
576 | break; | 581 | break; |
@@ -582,7 +587,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
582 | case IP_MULTICAST_TTL: | 587 | case IP_MULTICAST_TTL: |
583 | if (sk->sk_type == SOCK_STREAM) | 588 | if (sk->sk_type == SOCK_STREAM) |
584 | goto e_inval; | 589 | goto e_inval; |
585 | if (optlen<1) | 590 | if (optlen < 1) |
586 | goto e_inval; | 591 | goto e_inval; |
587 | if (val == -1) | 592 | if (val == -1) |
588 | val = 1; | 593 | val = 1; |
@@ -591,7 +596,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
591 | inet->mc_ttl = val; | 596 | inet->mc_ttl = val; |
592 | break; | 597 | break; |
593 | case IP_MULTICAST_LOOP: | 598 | case IP_MULTICAST_LOOP: |
594 | if (optlen<1) | 599 | if (optlen < 1) |
595 | goto e_inval; | 600 | goto e_inval; |
596 | inet->mc_loop = !!val; | 601 | inet->mc_loop = !!val; |
597 | break; | 602 | break; |
@@ -613,7 +618,8 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
613 | } else { | 618 | } else { |
614 | memset(&mreq, 0, sizeof(mreq)); | 619 | memset(&mreq, 0, sizeof(mreq)); |
615 | if (optlen >= sizeof(struct in_addr) && | 620 | if (optlen >= sizeof(struct in_addr) && |
616 | copy_from_user(&mreq.imr_address, optval, sizeof(struct in_addr))) | 621 | copy_from_user(&mreq.imr_address, optval, |
622 | sizeof(struct in_addr))) | ||
617 | break; | 623 | break; |
618 | } | 624 | } |
619 | 625 | ||
@@ -677,7 +683,6 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
677 | } | 683 | } |
678 | case IP_MSFILTER: | 684 | case IP_MSFILTER: |
679 | { | 685 | { |
680 | extern int sysctl_igmp_max_msf; | ||
681 | struct ip_msfilter *msf; | 686 | struct ip_msfilter *msf; |
682 | 687 | ||
683 | if (optlen < IP_MSFILTER_SIZE(0)) | 688 | if (optlen < IP_MSFILTER_SIZE(0)) |
@@ -831,7 +836,6 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
831 | } | 836 | } |
832 | case MCAST_MSFILTER: | 837 | case MCAST_MSFILTER: |
833 | { | 838 | { |
834 | extern int sysctl_igmp_max_msf; | ||
835 | struct sockaddr_in *psin; | 839 | struct sockaddr_in *psin; |
836 | struct ip_msfilter *msf = NULL; | 840 | struct ip_msfilter *msf = NULL; |
837 | struct group_filter *gsf = NULL; | 841 | struct group_filter *gsf = NULL; |
@@ -849,9 +853,9 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
849 | break; | 853 | break; |
850 | } | 854 | } |
851 | err = -EFAULT; | 855 | err = -EFAULT; |
852 | if (copy_from_user(gsf, optval, optlen)) { | 856 | if (copy_from_user(gsf, optval, optlen)) |
853 | goto mc_msf_out; | 857 | goto mc_msf_out; |
854 | } | 858 | |
855 | /* numsrc >= (4G-140)/128 overflow in 32 bits */ | 859 | /* numsrc >= (4G-140)/128 overflow in 32 bits */ |
856 | if (gsf->gf_numsrc >= 0x1ffffff || | 860 | if (gsf->gf_numsrc >= 0x1ffffff || |
857 | gsf->gf_numsrc > sysctl_igmp_max_msf) { | 861 | gsf->gf_numsrc > sysctl_igmp_max_msf) { |
@@ -879,7 +883,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
879 | msf->imsf_fmode = gsf->gf_fmode; | 883 | msf->imsf_fmode = gsf->gf_fmode; |
880 | msf->imsf_numsrc = gsf->gf_numsrc; | 884 | msf->imsf_numsrc = gsf->gf_numsrc; |
881 | err = -EADDRNOTAVAIL; | 885 | err = -EADDRNOTAVAIL; |
882 | for (i=0; i<gsf->gf_numsrc; ++i) { | 886 | for (i = 0; i < gsf->gf_numsrc; ++i) { |
883 | psin = (struct sockaddr_in *)&gsf->gf_slist[i]; | 887 | psin = (struct sockaddr_in *)&gsf->gf_slist[i]; |
884 | 888 | ||
885 | if (psin->sin_family != AF_INET) | 889 | if (psin->sin_family != AF_INET) |
@@ -890,17 +894,24 @@ static int do_ip_setsockopt(struct sock *sk, int level, | |||
890 | gsf = NULL; | 894 | gsf = NULL; |
891 | 895 | ||
892 | err = ip_mc_msfilter(sk, msf, ifindex); | 896 | err = ip_mc_msfilter(sk, msf, ifindex); |
893 | mc_msf_out: | 897 | mc_msf_out: |
894 | kfree(msf); | 898 | kfree(msf); |
895 | kfree(gsf); | 899 | kfree(gsf); |
896 | break; | 900 | break; |
897 | } | 901 | } |
902 | case IP_MULTICAST_ALL: | ||
903 | if (optlen < 1) | ||
904 | goto e_inval; | ||
905 | if (val != 0 && val != 1) | ||
906 | goto e_inval; | ||
907 | inet->mc_all = val; | ||
908 | break; | ||
898 | case IP_ROUTER_ALERT: | 909 | case IP_ROUTER_ALERT: |
899 | err = ip_ra_control(sk, val ? 1 : 0, NULL); | 910 | err = ip_ra_control(sk, val ? 1 : 0, NULL); |
900 | break; | 911 | break; |
901 | 912 | ||
902 | case IP_FREEBIND: | 913 | case IP_FREEBIND: |
903 | if (optlen<1) | 914 | if (optlen < 1) |
904 | goto e_inval; | 915 | goto e_inval; |
905 | inet->freebind = !!val; | 916 | inet->freebind = !!val; |
906 | break; | 917 | break; |
@@ -957,6 +968,7 @@ int ip_setsockopt(struct sock *sk, int level, | |||
957 | #endif | 968 | #endif |
958 | return err; | 969 | return err; |
959 | } | 970 | } |
971 | EXPORT_SYMBOL(ip_setsockopt); | ||
960 | 972 | ||
961 | #ifdef CONFIG_COMPAT | 973 | #ifdef CONFIG_COMPAT |
962 | int compat_ip_setsockopt(struct sock *sk, int level, int optname, | 974 | int compat_ip_setsockopt(struct sock *sk, int level, int optname, |
@@ -986,13 +998,12 @@ int compat_ip_setsockopt(struct sock *sk, int level, int optname, | |||
986 | #endif | 998 | #endif |
987 | return err; | 999 | return err; |
988 | } | 1000 | } |
989 | |||
990 | EXPORT_SYMBOL(compat_ip_setsockopt); | 1001 | EXPORT_SYMBOL(compat_ip_setsockopt); |
991 | #endif | 1002 | #endif |
992 | 1003 | ||
993 | /* | 1004 | /* |
994 | * Get the options. Note for future reference. The GET of IP options gets the | 1005 | * Get the options. Note for future reference. The GET of IP options gets |
995 | * _received_ ones. The set sets the _sent_ ones. | 1006 | * the _received_ ones. The set sets the _sent_ ones. |
996 | */ | 1007 | */ |
997 | 1008 | ||
998 | static int do_ip_getsockopt(struct sock *sk, int level, int optname, | 1009 | static int do_ip_getsockopt(struct sock *sk, int level, int optname, |
@@ -1143,10 +1154,14 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1143 | return -EFAULT; | 1154 | return -EFAULT; |
1144 | } | 1155 | } |
1145 | err = ip_mc_gsfget(sk, &gsf, | 1156 | err = ip_mc_gsfget(sk, &gsf, |
1146 | (struct group_filter __user *)optval, optlen); | 1157 | (struct group_filter __user *)optval, |
1158 | optlen); | ||
1147 | release_sock(sk); | 1159 | release_sock(sk); |
1148 | return err; | 1160 | return err; |
1149 | } | 1161 | } |
1162 | case IP_MULTICAST_ALL: | ||
1163 | val = inet->mc_all; | ||
1164 | break; | ||
1150 | case IP_PKTOPTIONS: | 1165 | case IP_PKTOPTIONS: |
1151 | { | 1166 | { |
1152 | struct msghdr msg; | 1167 | struct msghdr msg; |
@@ -1187,7 +1202,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1187 | } | 1202 | } |
1188 | release_sock(sk); | 1203 | release_sock(sk); |
1189 | 1204 | ||
1190 | if (len < sizeof(int) && len > 0 && val>=0 && val<=255) { | 1205 | if (len < sizeof(int) && len > 0 && val >= 0 && val <= 255) { |
1191 | unsigned char ucval = (unsigned char)val; | 1206 | unsigned char ucval = (unsigned char)val; |
1192 | len = 1; | 1207 | len = 1; |
1193 | if (put_user(len, optlen)) | 1208 | if (put_user(len, optlen)) |
@@ -1230,6 +1245,7 @@ int ip_getsockopt(struct sock *sk, int level, | |||
1230 | #endif | 1245 | #endif |
1231 | return err; | 1246 | return err; |
1232 | } | 1247 | } |
1248 | EXPORT_SYMBOL(ip_getsockopt); | ||
1233 | 1249 | ||
1234 | #ifdef CONFIG_COMPAT | 1250 | #ifdef CONFIG_COMPAT |
1235 | int compat_ip_getsockopt(struct sock *sk, int level, int optname, | 1251 | int compat_ip_getsockopt(struct sock *sk, int level, int optname, |
@@ -1262,11 +1278,5 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname, | |||
1262 | #endif | 1278 | #endif |
1263 | return err; | 1279 | return err; |
1264 | } | 1280 | } |
1265 | |||
1266 | EXPORT_SYMBOL(compat_ip_getsockopt); | 1281 | EXPORT_SYMBOL(compat_ip_getsockopt); |
1267 | #endif | 1282 | #endif |
1268 | |||
1269 | EXPORT_SYMBOL(ip_cmsg_recv); | ||
1270 | |||
1271 | EXPORT_SYMBOL(ip_getsockopt); | ||
1272 | EXPORT_SYMBOL(ip_setsockopt); | ||
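The af_inet.c, igmp.c and ip_sockglue.c hunks together add a per-socket `mc_all` flag (initialised to 1) and expose it as the `IP_MULTICAST_ALL` socket option: with the flag cleared, `ip_mc_sf_allow()` no longer accepts datagrams for groups the socket did not join itself. A minimal userspace sketch; the option value is an assumption taken from the uapi header added alongside this series, and the program flow is illustrative only:

```c
/* Hypothetical userspace sketch for the new IP_MULTICAST_ALL option. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IP_MULTICAST_ALL
#define IP_MULTICAST_ALL 49	/* assumed value; check your <linux/in.h> */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int off = 0;	/* 0: deliver only groups this socket joined itself */

	if (fd < 0)
		return 1;
	if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_ALL, &off, sizeof(off)) < 0)
		perror("setsockopt(IP_MULTICAST_ALL)");
	return 0;
}
```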
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 9054139795af..93e2b787da20 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -370,8 +370,7 @@ static int ipip_rcv(struct sk_buff *skb) | |||
370 | tunnel->dev->stats.rx_packets++; | 370 | tunnel->dev->stats.rx_packets++; |
371 | tunnel->dev->stats.rx_bytes += skb->len; | 371 | tunnel->dev->stats.rx_bytes += skb->len; |
372 | skb->dev = tunnel->dev; | 372 | skb->dev = tunnel->dev; |
373 | dst_release(skb->dst); | 373 | skb_dst_drop(skb); |
374 | skb->dst = NULL; | ||
375 | nf_reset(skb); | 374 | nf_reset(skb); |
376 | ipip_ecn_decapsulate(iph, skb); | 375 | ipip_ecn_decapsulate(iph, skb); |
377 | netif_rx(skb); | 376 | netif_rx(skb); |
@@ -416,7 +415,7 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
416 | 415 | ||
417 | if (!dst) { | 416 | if (!dst) { |
418 | /* NBMA tunnel */ | 417 | /* NBMA tunnel */ |
419 | if ((rt = skb->rtable) == NULL) { | 418 | if ((rt = skb_rtable(skb)) == NULL) { |
420 | stats->tx_fifo_errors++; | 419 | stats->tx_fifo_errors++; |
421 | goto tx_error; | 420 | goto tx_error; |
422 | } | 421 | } |
@@ -447,15 +446,15 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
447 | if (tiph->frag_off) | 446 | if (tiph->frag_off) |
448 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); | 447 | mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); |
449 | else | 448 | else |
450 | mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; | 449 | mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; |
451 | 450 | ||
452 | if (mtu < 68) { | 451 | if (mtu < 68) { |
453 | stats->collisions++; | 452 | stats->collisions++; |
454 | ip_rt_put(rt); | 453 | ip_rt_put(rt); |
455 | goto tx_error; | 454 | goto tx_error; |
456 | } | 455 | } |
457 | if (skb->dst) | 456 | if (skb_dst(skb)) |
458 | skb->dst->ops->update_pmtu(skb->dst, mtu); | 457 | skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu); |
459 | 458 | ||
460 | df |= (old_iph->frag_off&htons(IP_DF)); | 459 | df |= (old_iph->frag_off&htons(IP_DF)); |
461 | 460 | ||
@@ -502,8 +501,8 @@ static int ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) | |||
502 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 501 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
503 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | | 502 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | |
504 | IPSKB_REROUTED); | 503 | IPSKB_REROUTED); |
505 | dst_release(skb->dst); | 504 | skb_dst_drop(skb); |
506 | skb->dst = &rt->u.dst; | 505 | skb_dst_set(skb, &rt->u.dst); |
507 | 506 | ||
508 | /* | 507 | /* |
509 | * Push down and install the IPIP header. | 508 | * Push down and install the IPIP header. |
@@ -713,6 +712,7 @@ static void ipip_tunnel_setup(struct net_device *dev) | |||
713 | dev->iflink = 0; | 712 | dev->iflink = 0; |
714 | dev->addr_len = 4; | 713 | dev->addr_len = 4; |
715 | dev->features |= NETIF_F_NETNS_LOCAL; | 714 | dev->features |= NETIF_F_NETNS_LOCAL; |
715 | dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; | ||
716 | } | 716 | } |
717 | 717 | ||
718 | static void ipip_tunnel_init(struct net_device *dev) | 718 | static void ipip_tunnel_init(struct net_device *dev) |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 13e9dd3012b3..ffd986104468 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -651,7 +651,7 @@ static int ipmr_cache_report(struct net *net, | |||
651 | ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ | 651 | ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ |
652 | msg = (struct igmpmsg *)skb_network_header(skb); | 652 | msg = (struct igmpmsg *)skb_network_header(skb); |
653 | msg->im_vif = vifi; | 653 | msg->im_vif = vifi; |
654 | skb->dst = dst_clone(pkt->dst); | 654 | skb_dst_set(skb, dst_clone(skb_dst(pkt))); |
655 | 655 | ||
656 | /* | 656 | /* |
657 | * Add our header | 657 | * Add our header |
@@ -1201,7 +1201,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) | |||
1201 | iph->protocol = IPPROTO_IPIP; | 1201 | iph->protocol = IPPROTO_IPIP; |
1202 | iph->ihl = 5; | 1202 | iph->ihl = 5; |
1203 | iph->tot_len = htons(skb->len); | 1203 | iph->tot_len = htons(skb->len); |
1204 | ip_select_ident(iph, skb->dst, NULL); | 1204 | ip_select_ident(iph, skb_dst(skb), NULL); |
1205 | ip_send_check(iph); | 1205 | ip_send_check(iph); |
1206 | 1206 | ||
1207 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | 1207 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); |
@@ -1212,7 +1212,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb) | |||
1212 | { | 1212 | { |
1213 | struct ip_options * opt = &(IPCB(skb)->opt); | 1213 | struct ip_options * opt = &(IPCB(skb)->opt); |
1214 | 1214 | ||
1215 | IP_INC_STATS_BH(dev_net(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); | 1215 | IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); |
1216 | 1216 | ||
1217 | if (unlikely(opt->optlen)) | 1217 | if (unlikely(opt->optlen)) |
1218 | ip_forward_options(skb); | 1218 | ip_forward_options(skb); |
@@ -1290,8 +1290,8 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi) | |||
1290 | vif->pkt_out++; | 1290 | vif->pkt_out++; |
1291 | vif->bytes_out += skb->len; | 1291 | vif->bytes_out += skb->len; |
1292 | 1292 | ||
1293 | dst_release(skb->dst); | 1293 | skb_dst_drop(skb); |
1294 | skb->dst = &rt->u.dst; | 1294 | skb_dst_set(skb, &rt->u.dst); |
1295 | ip_decrease_ttl(ip_hdr(skb)); | 1295 | ip_decrease_ttl(ip_hdr(skb)); |
1296 | 1296 | ||
1297 | /* FIXME: forward and output firewalls used to be called here. | 1297 | /* FIXME: forward and output firewalls used to be called here. |
@@ -1354,7 +1354,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local | |||
1354 | if (net->ipv4.vif_table[vif].dev != skb->dev) { | 1354 | if (net->ipv4.vif_table[vif].dev != skb->dev) { |
1355 | int true_vifi; | 1355 | int true_vifi; |
1356 | 1356 | ||
1357 | if (skb->rtable->fl.iif == 0) { | 1357 | if (skb_rtable(skb)->fl.iif == 0) { |
1358 | /* It is our own packet, looped back. | 1358 | /* It is our own packet, looped back. |
1359 | Very complicated situation... | 1359 | Very complicated situation... |
1360 | 1360 | ||
@@ -1430,7 +1430,7 @@ int ip_mr_input(struct sk_buff *skb) | |||
1430 | { | 1430 | { |
1431 | struct mfc_cache *cache; | 1431 | struct mfc_cache *cache; |
1432 | struct net *net = dev_net(skb->dev); | 1432 | struct net *net = dev_net(skb->dev); |
1433 | int local = skb->rtable->rt_flags&RTCF_LOCAL; | 1433 | int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL; |
1434 | 1434 | ||
1435 | /* Packet is looped back after forward, it should not be | 1435 | /* Packet is looped back after forward, it should not be |
1436 | forwarded second time, but still can be delivered locally. | 1436 | forwarded second time, but still can be delivered locally. |
@@ -1543,8 +1543,7 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen) | |||
1543 | skb->protocol = htons(ETH_P_IP); | 1543 | skb->protocol = htons(ETH_P_IP); |
1544 | skb->ip_summed = 0; | 1544 | skb->ip_summed = 0; |
1545 | skb->pkt_type = PACKET_HOST; | 1545 | skb->pkt_type = PACKET_HOST; |
1546 | dst_release(skb->dst); | 1546 | skb_dst_drop(skb); |
1547 | skb->dst = NULL; | ||
1548 | reg_dev->stats.rx_bytes += skb->len; | 1547 | reg_dev->stats.rx_bytes += skb->len; |
1549 | reg_dev->stats.rx_packets++; | 1548 | reg_dev->stats.rx_packets++; |
1550 | nf_reset(skb); | 1549 | nf_reset(skb); |
@@ -1646,7 +1645,7 @@ int ipmr_get_route(struct net *net, | |||
1646 | { | 1645 | { |
1647 | int err; | 1646 | int err; |
1648 | struct mfc_cache *cache; | 1647 | struct mfc_cache *cache; |
1649 | struct rtable *rt = skb->rtable; | 1648 | struct rtable *rt = skb_rtable(skb); |
1650 | 1649 | ||
1651 | read_lock(&mrt_lock); | 1650 | read_lock(&mrt_lock); |
1652 | cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); | 1651 | cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst); |
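[Editor's note] The ipmr.c hunks above, and the many one-line conversions in the files that follow, are part of a tree-wide switch from poking skb->dst and skb->rtable directly to going through accessor helpers. A minimal sketch of the before/after usage, assuming only the helper names used throughout this merge (skb_dst, skb_dst_set, skb_dst_drop, skb_rtable); how the field is actually stored inside struct sk_buff is deliberately not shown here:

	/* old style: the route is a plain pointer member of the skb */
	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	paddr = skb->rtable->rt_gateway;

	/* new style: the same operations through accessors */
	skb_dst_drop(skb);                        /* drop the old reference */
	skb_dst_set(skb, &rt->u.dst);             /* attach the new route */
	paddr = skb_rtable(skb)->rt_gateway;      /* skb_rtable() is skb_dst() viewed as a struct rtable */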
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index fdf6811c31a2..1725dc0ef688 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c | |||
@@ -12,7 +12,7 @@ | |||
12 | /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ | 12 | /* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue */ |
13 | int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | 13 | int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) |
14 | { | 14 | { |
15 | struct net *net = dev_net(skb->dst->dev); | 15 | struct net *net = dev_net(skb_dst(skb)->dev); |
16 | const struct iphdr *iph = ip_hdr(skb); | 16 | const struct iphdr *iph = ip_hdr(skb); |
17 | struct rtable *rt; | 17 | struct rtable *rt; |
18 | struct flowi fl = {}; | 18 | struct flowi fl = {}; |
@@ -41,8 +41,8 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
41 | return -1; | 41 | return -1; |
42 | 42 | ||
43 | /* Drop old route. */ | 43 | /* Drop old route. */ |
44 | dst_release(skb->dst); | 44 | skb_dst_drop(skb); |
45 | skb->dst = &rt->u.dst; | 45 | skb_dst_set(skb, &rt->u.dst); |
46 | } else { | 46 | } else { |
47 | /* non-local src, find valid iif to satisfy | 47 | /* non-local src, find valid iif to satisfy |
48 | * rp-filter when calling ip_route_input. */ | 48 | * rp-filter when calling ip_route_input. */ |
@@ -50,7 +50,7 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
50 | if (ip_route_output_key(net, &rt, &fl) != 0) | 50 | if (ip_route_output_key(net, &rt, &fl) != 0) |
51 | return -1; | 51 | return -1; |
52 | 52 | ||
53 | odst = skb->dst; | 53 | odst = skb_dst(skb); |
54 | if (ip_route_input(skb, iph->daddr, iph->saddr, | 54 | if (ip_route_input(skb, iph->daddr, iph->saddr, |
55 | RT_TOS(iph->tos), rt->u.dst.dev) != 0) { | 55 | RT_TOS(iph->tos), rt->u.dst.dev) != 0) { |
56 | dst_release(&rt->u.dst); | 56 | dst_release(&rt->u.dst); |
@@ -60,18 +60,22 @@ int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type) | |||
60 | dst_release(odst); | 60 | dst_release(odst); |
61 | } | 61 | } |
62 | 62 | ||
63 | if (skb->dst->error) | 63 | if (skb_dst(skb)->error) |
64 | return -1; | 64 | return -1; |
65 | 65 | ||
66 | #ifdef CONFIG_XFRM | 66 | #ifdef CONFIG_XFRM |
67 | if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && | 67 | if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) && |
68 | xfrm_decode_session(skb, &fl, AF_INET) == 0) | 68 | xfrm_decode_session(skb, &fl, AF_INET) == 0) { |
69 | if (xfrm_lookup(net, &skb->dst, &fl, skb->sk, 0)) | 69 | struct dst_entry *dst = skb_dst(skb); |
70 | skb_dst_set(skb, NULL); | ||
71 | if (xfrm_lookup(net, &dst, &fl, skb->sk, 0)) | ||
70 | return -1; | 72 | return -1; |
73 | skb_dst_set(skb, dst); | ||
74 | } | ||
71 | #endif | 75 | #endif |
72 | 76 | ||
73 | /* Change in oif may mean change in hh_len. */ | 77 | /* Change in oif may mean change in hh_len. */ |
74 | hh_len = skb->dst->dev->hard_header_len; | 78 | hh_len = skb_dst(skb)->dev->hard_header_len; |
75 | if (skb_headroom(skb) < hh_len && | 79 | if (skb_headroom(skb) < hh_len && |
76 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) | 80 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) |
77 | return -1; | 81 | return -1; |
@@ -92,7 +96,7 @@ int ip_xfrm_me_harder(struct sk_buff *skb) | |||
92 | if (xfrm_decode_session(skb, &fl, AF_INET) < 0) | 96 | if (xfrm_decode_session(skb, &fl, AF_INET) < 0) |
93 | return -1; | 97 | return -1; |
94 | 98 | ||
95 | dst = skb->dst; | 99 | dst = skb_dst(skb); |
96 | if (dst->xfrm) | 100 | if (dst->xfrm) |
97 | dst = ((struct xfrm_dst *)dst)->route; | 101 | dst = ((struct xfrm_dst *)dst)->route; |
98 | dst_hold(dst); | 102 | dst_hold(dst); |
@@ -100,11 +104,11 @@ int ip_xfrm_me_harder(struct sk_buff *skb) | |||
100 | if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0) | 104 | if (xfrm_lookup(dev_net(dst->dev), &dst, &fl, skb->sk, 0) < 0) |
101 | return -1; | 105 | return -1; |
102 | 106 | ||
103 | dst_release(skb->dst); | 107 | skb_dst_drop(skb); |
104 | skb->dst = dst; | 108 | skb_dst_set(skb, dst); |
105 | 109 | ||
106 | /* Change in oif may mean change in hh_len. */ | 110 | /* Change in oif may mean change in hh_len. */ |
107 | hh_len = skb->dst->dev->hard_header_len; | 111 | hh_len = skb_dst(skb)->dev->hard_header_len; |
108 | if (skb_headroom(skb) < hh_len && | 112 | if (skb_headroom(skb) < hh_len && |
109 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) | 113 | pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC)) |
110 | return -1; | 114 | return -1; |
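[Editor's note] xfrm_lookup() hands back the transformed bundle through a struct dst_entry **, which ip_route_me_harder() used to satisfy by passing &skb->dst. Since the field can no longer be addressed directly, the call now goes through a local variable: the route is detached from the skb, the lookup runs, and whatever comes back is reattached; presumably the detach also keeps the skb from pointing at a dst that xfrm_lookup() may have released on failure. A hedged sketch of the pattern, with the flow-key setup omitted:

	struct dst_entry *dst = skb_dst(skb);    /* take over the skb's current route */

	skb_dst_set(skb, NULL);                  /* skb no longer references it */
	if (xfrm_lookup(net, &dst, &fl, skb->sk, 0))
		return -1;                       /* lookup failed */
	skb_dst_set(skb, dst);                   /* reattach bundle (or original dst) */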
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c index 855505d480d2..dada0863946d 100644 --- a/net/ipv4/netfilter/ipt_MASQUERADE.c +++ b/net/ipv4/netfilter/ipt_MASQUERADE.c | |||
@@ -69,7 +69,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_target_param *par) | |||
69 | return NF_ACCEPT; | 69 | return NF_ACCEPT; |
70 | 70 | ||
71 | mr = par->targinfo; | 71 | mr = par->targinfo; |
72 | rt = skb->rtable; | 72 | rt = skb_rtable(skb); |
73 | newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE); | 73 | newsrc = inet_select_addr(par->out, rt->rt_gateway, RT_SCOPE_UNIVERSE); |
74 | if (!newsrc) { | 74 | if (!newsrc) { |
75 | printk("MASQUERADE: %s ate my IP address\n", par->out->name); | 75 | printk("MASQUERADE: %s ate my IP address\n", par->out->name); |
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c index 0b4b6e0ff2b9..c93ae44bff2a 100644 --- a/net/ipv4/netfilter/ipt_REJECT.c +++ b/net/ipv4/netfilter/ipt_REJECT.c | |||
@@ -108,17 +108,16 @@ static void send_reset(struct sk_buff *oldskb, int hook) | |||
108 | addr_type = RTN_LOCAL; | 108 | addr_type = RTN_LOCAL; |
109 | 109 | ||
110 | /* ip_route_me_harder expects skb->dst to be set */ | 110 | /* ip_route_me_harder expects skb->dst to be set */ |
111 | dst_hold(oldskb->dst); | 111 | skb_dst_set(nskb, dst_clone(skb_dst(oldskb))); |
112 | nskb->dst = oldskb->dst; | ||
113 | 112 | ||
114 | if (ip_route_me_harder(nskb, addr_type)) | 113 | if (ip_route_me_harder(nskb, addr_type)) |
115 | goto free_nskb; | 114 | goto free_nskb; |
116 | 115 | ||
117 | niph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT); | 116 | niph->ttl = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT); |
118 | nskb->ip_summed = CHECKSUM_NONE; | 117 | nskb->ip_summed = CHECKSUM_NONE; |
119 | 118 | ||
120 | /* "Never happens" */ | 119 | /* "Never happens" */ |
121 | if (nskb->len > dst_mtu(nskb->dst)) | 120 | if (nskb->len > dst_mtu(skb_dst(nskb))) |
122 | goto free_nskb; | 121 | goto free_nskb; |
123 | 122 | ||
124 | nf_ct_attach(nskb, oldskb); | 123 | nf_ct_attach(nskb, oldskb); |
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c index cf7a42bf9820..155c008626c8 100644 --- a/net/ipv4/netfilter/nf_nat_helper.c +++ b/net/ipv4/netfilter/nf_nat_helper.c | |||
@@ -140,7 +140,7 @@ nf_nat_mangle_tcp_packet(struct sk_buff *skb, | |||
140 | const char *rep_buffer, | 140 | const char *rep_buffer, |
141 | unsigned int rep_len) | 141 | unsigned int rep_len) |
142 | { | 142 | { |
143 | struct rtable *rt = skb->rtable; | 143 | struct rtable *rt = skb_rtable(skb); |
144 | struct iphdr *iph; | 144 | struct iphdr *iph; |
145 | struct tcphdr *tcph; | 145 | struct tcphdr *tcph; |
146 | int oldlen, datalen; | 146 | int oldlen, datalen; |
@@ -218,7 +218,7 @@ nf_nat_mangle_udp_packet(struct sk_buff *skb, | |||
218 | const char *rep_buffer, | 218 | const char *rep_buffer, |
219 | unsigned int rep_len) | 219 | unsigned int rep_len) |
220 | { | 220 | { |
221 | struct rtable *rt = skb->rtable; | 221 | struct rtable *rt = skb_rtable(skb); |
222 | struct iphdr *iph; | 222 | struct iphdr *iph; |
223 | struct udphdr *udph; | 223 | struct udphdr *udph; |
224 | int datalen, oldlen; | 224 | int datalen, oldlen; |
diff --git a/net/ipv4/netfilter/nf_nat_proto_sctp.c b/net/ipv4/netfilter/nf_nat_proto_sctp.c index 65e470bc6123..3fc598eeeb1a 100644 --- a/net/ipv4/netfilter/nf_nat_proto_sctp.c +++ b/net/ipv4/netfilter/nf_nat_proto_sctp.c | |||
@@ -33,6 +33,7 @@ sctp_manip_pkt(struct sk_buff *skb, | |||
33 | enum nf_nat_manip_type maniptype) | 33 | enum nf_nat_manip_type maniptype) |
34 | { | 34 | { |
35 | const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); | 35 | const struct iphdr *iph = (struct iphdr *)(skb->data + iphdroff); |
36 | struct sk_buff *frag; | ||
36 | sctp_sctphdr_t *hdr; | 37 | sctp_sctphdr_t *hdr; |
37 | unsigned int hdroff = iphdroff + iph->ihl*4; | 38 | unsigned int hdroff = iphdroff + iph->ihl*4; |
38 | __be32 oldip, newip; | 39 | __be32 oldip, newip; |
@@ -57,8 +58,8 @@ sctp_manip_pkt(struct sk_buff *skb, | |||
57 | } | 58 | } |
58 | 59 | ||
59 | crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff); | 60 | crc32 = sctp_start_cksum((u8 *)hdr, skb_headlen(skb) - hdroff); |
60 | for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) | 61 | skb_walk_frags(skb, frag) |
61 | crc32 = sctp_update_cksum((u8 *)skb->data, skb_headlen(skb), | 62 | crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag), |
62 | crc32); | 63 | crc32); |
63 | crc32 = sctp_end_cksum(crc32); | 64 | crc32 = sctp_end_cksum(crc32); |
64 | hdr->checksum = crc32; | 65 | hdr->checksum = crc32; |
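[Editor's note] The SCTP NAT helper used to walk the fragment list by reusing the skb variable itself as the loop cursor; skb_walk_frags() keeps the head skb intact and makes the intent explicit. The iterator is roughly the following macro (a sketch from memory, not a verbatim copy of skbuff.h):

	#define skb_walk_frags(skb, iter) \
		for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

	/* usage as in the hunk above: fold each fragment into the running checksum */
	skb_walk_frags(skb, frag)
		crc32 = sctp_update_cksum((u8 *)frag->data, skb_headlen(frag), crc32);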
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c index b7dd695691a0..5567bd0d0750 100644 --- a/net/ipv4/netfilter/nf_nat_standalone.c +++ b/net/ipv4/netfilter/nf_nat_standalone.c | |||
@@ -167,10 +167,9 @@ nf_nat_in(unsigned int hooknum, | |||
167 | 167 | ||
168 | ret = nf_nat_fn(hooknum, skb, in, out, okfn); | 168 | ret = nf_nat_fn(hooknum, skb, in, out, okfn); |
169 | if (ret != NF_DROP && ret != NF_STOLEN && | 169 | if (ret != NF_DROP && ret != NF_STOLEN && |
170 | daddr != ip_hdr(skb)->daddr) { | 170 | daddr != ip_hdr(skb)->daddr) |
171 | dst_release(skb->dst); | 171 | skb_dst_drop(skb); |
172 | skb->dst = NULL; | 172 | |
173 | } | ||
174 | return ret; | 173 | return ret; |
175 | } | 174 | } |
176 | 175 | ||
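[Editor's note] Several hunks in this merge replace the two-step dst_release(skb->dst); skb->dst = NULL; with a single skb_dst_drop() call, which is also why nf_nat_in() above loses its braces. Conceptually the helper is just the old pair hidden behind the accessor, along the lines of (illustrative, not the exact header definition):

	static inline void skb_dst_drop(struct sk_buff *skb)
	{
		dst_release(skb_dst(skb));   /* no-op when no route is attached */
		skb_dst_set(skb, NULL);
	}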
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index f774651f0a47..3dc9171a272f 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
@@ -343,7 +343,7 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length, | |||
343 | 343 | ||
344 | skb->priority = sk->sk_priority; | 344 | skb->priority = sk->sk_priority; |
345 | skb->mark = sk->sk_mark; | 345 | skb->mark = sk->sk_mark; |
346 | skb->dst = dst_clone(&rt->u.dst); | 346 | skb_dst_set(skb, dst_clone(&rt->u.dst)); |
347 | 347 | ||
348 | skb_reset_network_header(skb); | 348 | skb_reset_network_header(skb); |
349 | iph = ip_hdr(skb); | 349 | iph = ip_hdr(skb); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 28205e5bfa9b..a849bb15d864 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -1064,7 +1064,8 @@ work_done: | |||
1064 | out: return 0; | 1064 | out: return 0; |
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | static int rt_intern_hash(unsigned hash, struct rtable *rt, struct rtable **rp) | 1067 | static int rt_intern_hash(unsigned hash, struct rtable *rt, |
1068 | struct rtable **rp, struct sk_buff *skb) | ||
1068 | { | 1069 | { |
1069 | struct rtable *rth, **rthp; | 1070 | struct rtable *rth, **rthp; |
1070 | unsigned long now; | 1071 | unsigned long now; |
@@ -1114,7 +1115,10 @@ restart: | |||
1114 | spin_unlock_bh(rt_hash_lock_addr(hash)); | 1115 | spin_unlock_bh(rt_hash_lock_addr(hash)); |
1115 | 1116 | ||
1116 | rt_drop(rt); | 1117 | rt_drop(rt); |
1117 | *rp = rth; | 1118 | if (rp) |
1119 | *rp = rth; | ||
1120 | else | ||
1121 | skb_dst_set(skb, &rth->u.dst); | ||
1118 | return 0; | 1122 | return 0; |
1119 | } | 1123 | } |
1120 | 1124 | ||
@@ -1210,7 +1214,10 @@ restart: | |||
1210 | rcu_assign_pointer(rt_hash_table[hash].chain, rt); | 1214 | rcu_assign_pointer(rt_hash_table[hash].chain, rt); |
1211 | 1215 | ||
1212 | spin_unlock_bh(rt_hash_lock_addr(hash)); | 1216 | spin_unlock_bh(rt_hash_lock_addr(hash)); |
1213 | *rp = rt; | 1217 | if (rp) |
1218 | *rp = rt; | ||
1219 | else | ||
1220 | skb_dst_set(skb, &rt->u.dst); | ||
1214 | return 0; | 1221 | return 0; |
1215 | } | 1222 | } |
1216 | 1223 | ||
@@ -1407,7 +1414,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw, | |||
1407 | &netevent); | 1414 | &netevent); |
1408 | 1415 | ||
1409 | rt_del(hash, rth); | 1416 | rt_del(hash, rth); |
1410 | if (!rt_intern_hash(hash, rt, &rt)) | 1417 | if (!rt_intern_hash(hash, rt, &rt, NULL)) |
1411 | ip_rt_put(rt); | 1418 | ip_rt_put(rt); |
1412 | goto do_next; | 1419 | goto do_next; |
1413 | } | 1420 | } |
@@ -1473,7 +1480,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) | |||
1473 | 1480 | ||
1474 | void ip_rt_send_redirect(struct sk_buff *skb) | 1481 | void ip_rt_send_redirect(struct sk_buff *skb) |
1475 | { | 1482 | { |
1476 | struct rtable *rt = skb->rtable; | 1483 | struct rtable *rt = skb_rtable(skb); |
1477 | struct in_device *in_dev = in_dev_get(rt->u.dst.dev); | 1484 | struct in_device *in_dev = in_dev_get(rt->u.dst.dev); |
1478 | 1485 | ||
1479 | if (!in_dev) | 1486 | if (!in_dev) |
@@ -1521,7 +1528,7 @@ out: | |||
1521 | 1528 | ||
1522 | static int ip_error(struct sk_buff *skb) | 1529 | static int ip_error(struct sk_buff *skb) |
1523 | { | 1530 | { |
1524 | struct rtable *rt = skb->rtable; | 1531 | struct rtable *rt = skb_rtable(skb); |
1525 | unsigned long now; | 1532 | unsigned long now; |
1526 | int code; | 1533 | int code; |
1527 | 1534 | ||
@@ -1698,7 +1705,7 @@ static void ipv4_link_failure(struct sk_buff *skb) | |||
1698 | 1705 | ||
1699 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); | 1706 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); |
1700 | 1707 | ||
1701 | rt = skb->rtable; | 1708 | rt = skb_rtable(skb); |
1702 | if (rt) | 1709 | if (rt) |
1703 | dst_set_expires(&rt->u.dst, 0); | 1710 | dst_set_expires(&rt->u.dst, 0); |
1704 | } | 1711 | } |
@@ -1858,7 +1865,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
1858 | 1865 | ||
1859 | in_dev_put(in_dev); | 1866 | in_dev_put(in_dev); |
1860 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); | 1867 | hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); |
1861 | return rt_intern_hash(hash, rth, &skb->rtable); | 1868 | return rt_intern_hash(hash, rth, NULL, skb); |
1862 | 1869 | ||
1863 | e_nobufs: | 1870 | e_nobufs: |
1864 | in_dev_put(in_dev); | 1871 | in_dev_put(in_dev); |
@@ -2019,7 +2026,7 @@ static int ip_mkroute_input(struct sk_buff *skb, | |||
2019 | /* put it into the cache */ | 2026 | /* put it into the cache */ |
2020 | hash = rt_hash(daddr, saddr, fl->iif, | 2027 | hash = rt_hash(daddr, saddr, fl->iif, |
2021 | rt_genid(dev_net(rth->u.dst.dev))); | 2028 | rt_genid(dev_net(rth->u.dst.dev))); |
2022 | return rt_intern_hash(hash, rth, &skb->rtable); | 2029 | return rt_intern_hash(hash, rth, NULL, skb); |
2023 | } | 2030 | } |
2024 | 2031 | ||
2025 | /* | 2032 | /* |
@@ -2175,7 +2182,7 @@ local_input: | |||
2175 | } | 2182 | } |
2176 | rth->rt_type = res.type; | 2183 | rth->rt_type = res.type; |
2177 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); | 2184 | hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net)); |
2178 | err = rt_intern_hash(hash, rth, &skb->rtable); | 2185 | err = rt_intern_hash(hash, rth, NULL, skb); |
2179 | goto done; | 2186 | goto done; |
2180 | 2187 | ||
2181 | no_route: | 2188 | no_route: |
@@ -2244,7 +2251,7 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr, | |||
2244 | dst_use(&rth->u.dst, jiffies); | 2251 | dst_use(&rth->u.dst, jiffies); |
2245 | RT_CACHE_STAT_INC(in_hit); | 2252 | RT_CACHE_STAT_INC(in_hit); |
2246 | rcu_read_unlock(); | 2253 | rcu_read_unlock(); |
2247 | skb->rtable = rth; | 2254 | skb_dst_set(skb, &rth->u.dst); |
2248 | return 0; | 2255 | return 0; |
2249 | } | 2256 | } |
2250 | RT_CACHE_STAT_INC(in_hlist_search); | 2257 | RT_CACHE_STAT_INC(in_hlist_search); |
@@ -2420,7 +2427,7 @@ static int ip_mkroute_output(struct rtable **rp, | |||
2420 | if (err == 0) { | 2427 | if (err == 0) { |
2421 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, | 2428 | hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif, |
2422 | rt_genid(dev_net(dev_out))); | 2429 | rt_genid(dev_net(dev_out))); |
2423 | err = rt_intern_hash(hash, rth, rp); | 2430 | err = rt_intern_hash(hash, rth, rp, NULL); |
2424 | } | 2431 | } |
2425 | 2432 | ||
2426 | return err; | 2433 | return err; |
@@ -2763,7 +2770,7 @@ static int rt_fill_info(struct net *net, | |||
2763 | struct sk_buff *skb, u32 pid, u32 seq, int event, | 2770 | struct sk_buff *skb, u32 pid, u32 seq, int event, |
2764 | int nowait, unsigned int flags) | 2771 | int nowait, unsigned int flags) |
2765 | { | 2772 | { |
2766 | struct rtable *rt = skb->rtable; | 2773 | struct rtable *rt = skb_rtable(skb); |
2767 | struct rtmsg *r; | 2774 | struct rtmsg *r; |
2768 | struct nlmsghdr *nlh; | 2775 | struct nlmsghdr *nlh; |
2769 | long expires; | 2776 | long expires; |
@@ -2907,7 +2914,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2907 | err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); | 2914 | err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev); |
2908 | local_bh_enable(); | 2915 | local_bh_enable(); |
2909 | 2916 | ||
2910 | rt = skb->rtable; | 2917 | rt = skb_rtable(skb); |
2911 | if (err == 0 && rt->u.dst.error) | 2918 | if (err == 0 && rt->u.dst.error) |
2912 | err = -rt->u.dst.error; | 2919 | err = -rt->u.dst.error; |
2913 | } else { | 2920 | } else { |
@@ -2927,7 +2934,7 @@ static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void | |||
2927 | if (err) | 2934 | if (err) |
2928 | goto errout_free; | 2935 | goto errout_free; |
2929 | 2936 | ||
2930 | skb->rtable = rt; | 2937 | skb_dst_set(skb, &rt->u.dst); |
2931 | if (rtm->rtm_flags & RTM_F_NOTIFY) | 2938 | if (rtm->rtm_flags & RTM_F_NOTIFY) |
2932 | rt->rt_flags |= RTCF_NOTIFY; | 2939 | rt->rt_flags |= RTCF_NOTIFY; |
2933 | 2940 | ||
@@ -2968,15 +2975,15 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2968 | continue; | 2975 | continue; |
2969 | if (rt_is_expired(rt)) | 2976 | if (rt_is_expired(rt)) |
2970 | continue; | 2977 | continue; |
2971 | skb->dst = dst_clone(&rt->u.dst); | 2978 | skb_dst_set(skb, dst_clone(&rt->u.dst)); |
2972 | if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, | 2979 | if (rt_fill_info(net, skb, NETLINK_CB(cb->skb).pid, |
2973 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, | 2980 | cb->nlh->nlmsg_seq, RTM_NEWROUTE, |
2974 | 1, NLM_F_MULTI) <= 0) { | 2981 | 1, NLM_F_MULTI) <= 0) { |
2975 | dst_release(xchg(&skb->dst, NULL)); | 2982 | skb_dst_drop(skb); |
2976 | rcu_read_unlock_bh(); | 2983 | rcu_read_unlock_bh(); |
2977 | goto done; | 2984 | goto done; |
2978 | } | 2985 | } |
2979 | dst_release(xchg(&skb->dst, NULL)); | 2986 | skb_dst_drop(skb); |
2980 | } | 2987 | } |
2981 | rcu_read_unlock_bh(); | 2988 | rcu_read_unlock_bh(); |
2982 | } | 2989 | } |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 0fb8b441f1f9..17b89c523f9d 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -439,12 +439,14 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg) | |||
439 | !tp->urg_data || | 439 | !tp->urg_data || |
440 | before(tp->urg_seq, tp->copied_seq) || | 440 | before(tp->urg_seq, tp->copied_seq) || |
441 | !before(tp->urg_seq, tp->rcv_nxt)) { | 441 | !before(tp->urg_seq, tp->rcv_nxt)) { |
442 | struct sk_buff *skb; | ||
443 | |||
442 | answ = tp->rcv_nxt - tp->copied_seq; | 444 | answ = tp->rcv_nxt - tp->copied_seq; |
443 | 445 | ||
444 | /* Subtract 1, if FIN is in queue. */ | 446 | /* Subtract 1, if FIN is in queue. */ |
445 | if (answ && !skb_queue_empty(&sk->sk_receive_queue)) | 447 | skb = skb_peek_tail(&sk->sk_receive_queue); |
446 | answ -= | 448 | if (answ && skb) |
447 | tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin; | 449 | answ -= tcp_hdr(skb)->fin; |
448 | } else | 450 | } else |
449 | answ = tp->urg_seq - tp->copied_seq; | 451 | answ = tp->urg_seq - tp->copied_seq; |
450 | release_sock(sk); | 452 | release_sock(sk); |
@@ -1382,11 +1384,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1382 | 1384 | ||
1383 | /* Next get a buffer. */ | 1385 | /* Next get a buffer. */ |
1384 | 1386 | ||
1385 | skb = skb_peek(&sk->sk_receive_queue); | 1387 | skb_queue_walk(&sk->sk_receive_queue, skb) { |
1386 | do { | ||
1387 | if (!skb) | ||
1388 | break; | ||
1389 | |||
1390 | /* Now that we have two receive queues this | 1388 | /* Now that we have two receive queues this |
1391 | * shouldn't happen. | 1389 | * shouldn't happen. |
1392 | */ | 1390 | */ |
@@ -1403,8 +1401,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |||
1403 | if (tcp_hdr(skb)->fin) | 1401 | if (tcp_hdr(skb)->fin) |
1404 | goto found_fin_ok; | 1402 | goto found_fin_ok; |
1405 | WARN_ON(!(flags & MSG_PEEK)); | 1403 | WARN_ON(!(flags & MSG_PEEK)); |
1406 | skb = skb->next; | 1404 | } |
1407 | } while (skb != (struct sk_buff *)&sk->sk_receive_queue); | ||
1408 | 1405 | ||
1409 | /* Well, if we have backlog, try to process it now yet. */ | 1406 | /* Well, if we have backlog, try to process it now yet. */ |
1410 | 1407 | ||
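[Editor's note] The tcp.c hunks replace two places that treated the sk_receive_queue head as if it were an skb (casting sk->sk_receive_queue.prev, or walking skb->next until the cursor wrapped back to the head) with the standard sk_buff_head helpers, which hide the sentinel entirely. A sketch of the two idioms; variable names are illustrative:

	/* last queued segment, or NULL when the queue is empty */
	struct sk_buff *tail = skb_peek_tail(&sk->sk_receive_queue);
	if (answ && tail)
		answ -= tcp_hdr(tail)->fin;

	/* forward walk that never touches the queue-head sentinel */
	skb_queue_walk(&sk->sk_receive_queue, skb) {
		/* ... examine skb, break out once a usable segment is found ... */
	}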
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index eeb8a92aa416..2bdb0da237e6 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -4426,7 +4426,7 @@ drop: | |||
4426 | } | 4426 | } |
4427 | __skb_queue_head(&tp->out_of_order_queue, skb); | 4427 | __skb_queue_head(&tp->out_of_order_queue, skb); |
4428 | } else { | 4428 | } else { |
4429 | struct sk_buff *skb1 = tp->out_of_order_queue.prev; | 4429 | struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue); |
4430 | u32 seq = TCP_SKB_CB(skb)->seq; | 4430 | u32 seq = TCP_SKB_CB(skb)->seq; |
4431 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; | 4431 | u32 end_seq = TCP_SKB_CB(skb)->end_seq; |
4432 | 4432 | ||
@@ -4443,15 +4443,18 @@ drop: | |||
4443 | } | 4443 | } |
4444 | 4444 | ||
4445 | /* Find place to insert this segment. */ | 4445 | /* Find place to insert this segment. */ |
4446 | do { | 4446 | while (1) { |
4447 | if (!after(TCP_SKB_CB(skb1)->seq, seq)) | 4447 | if (!after(TCP_SKB_CB(skb1)->seq, seq)) |
4448 | break; | 4448 | break; |
4449 | } while ((skb1 = skb1->prev) != | 4449 | if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) { |
4450 | (struct sk_buff *)&tp->out_of_order_queue); | 4450 | skb1 = NULL; |
4451 | break; | ||
4452 | } | ||
4453 | skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1); | ||
4454 | } | ||
4451 | 4455 | ||
4452 | /* Do skb overlap to previous one? */ | 4456 | /* Do skb overlap to previous one? */ |
4453 | if (skb1 != (struct sk_buff *)&tp->out_of_order_queue && | 4457 | if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) { |
4454 | before(seq, TCP_SKB_CB(skb1)->end_seq)) { | ||
4455 | if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { | 4458 | if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) { |
4456 | /* All the bits are present. Drop. */ | 4459 | /* All the bits are present. Drop. */ |
4457 | __kfree_skb(skb); | 4460 | __kfree_skb(skb); |
@@ -4463,15 +4466,26 @@ drop: | |||
4463 | tcp_dsack_set(sk, seq, | 4466 | tcp_dsack_set(sk, seq, |
4464 | TCP_SKB_CB(skb1)->end_seq); | 4467 | TCP_SKB_CB(skb1)->end_seq); |
4465 | } else { | 4468 | } else { |
4466 | skb1 = skb1->prev; | 4469 | if (skb_queue_is_first(&tp->out_of_order_queue, |
4470 | skb1)) | ||
4471 | skb1 = NULL; | ||
4472 | else | ||
4473 | skb1 = skb_queue_prev( | ||
4474 | &tp->out_of_order_queue, | ||
4475 | skb1); | ||
4467 | } | 4476 | } |
4468 | } | 4477 | } |
4469 | __skb_queue_after(&tp->out_of_order_queue, skb1, skb); | 4478 | if (!skb1) |
4479 | __skb_queue_head(&tp->out_of_order_queue, skb); | ||
4480 | else | ||
4481 | __skb_queue_after(&tp->out_of_order_queue, skb1, skb); | ||
4470 | 4482 | ||
4471 | /* And clean segments covered by new one as whole. */ | 4483 | /* And clean segments covered by new one as whole. */ |
4472 | while ((skb1 = skb->next) != | 4484 | while (!skb_queue_is_last(&tp->out_of_order_queue, skb)) { |
4473 | (struct sk_buff *)&tp->out_of_order_queue && | 4485 | skb1 = skb_queue_next(&tp->out_of_order_queue, skb); |
4474 | after(end_seq, TCP_SKB_CB(skb1)->seq)) { | 4486 | |
4487 | if (!after(end_seq, TCP_SKB_CB(skb1)->seq)) | ||
4488 | break; | ||
4475 | if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { | 4489 | if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) { |
4476 | tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, | 4490 | tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq, |
4477 | end_seq); | 4491 | end_seq); |
@@ -4492,7 +4506,10 @@ add_sack: | |||
4492 | static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, | 4506 | static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, |
4493 | struct sk_buff_head *list) | 4507 | struct sk_buff_head *list) |
4494 | { | 4508 | { |
4495 | struct sk_buff *next = skb->next; | 4509 | struct sk_buff *next = NULL; |
4510 | |||
4511 | if (!skb_queue_is_last(list, skb)) | ||
4512 | next = skb_queue_next(list, skb); | ||
4496 | 4513 | ||
4497 | __skb_unlink(skb, list); | 4514 | __skb_unlink(skb, list); |
4498 | __kfree_skb(skb); | 4515 | __kfree_skb(skb); |
@@ -4503,6 +4520,9 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb, | |||
4503 | 4520 | ||
4504 | /* Collapse contiguous sequence of skbs head..tail with | 4521 | /* Collapse contiguous sequence of skbs head..tail with |
4505 | * sequence numbers start..end. | 4522 | * sequence numbers start..end. |
4523 | * | ||
4524 | * If tail is NULL, this means until the end of the list. | ||
4525 | * | ||
4506 | * Segments with FIN/SYN are not collapsed (only because this | 4526 | * Segments with FIN/SYN are not collapsed (only because this |
4507 | * simplifies code) | 4527 | * simplifies code) |
4508 | */ | 4528 | */ |
@@ -4511,15 +4531,23 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4511 | struct sk_buff *head, struct sk_buff *tail, | 4531 | struct sk_buff *head, struct sk_buff *tail, |
4512 | u32 start, u32 end) | 4532 | u32 start, u32 end) |
4513 | { | 4533 | { |
4514 | struct sk_buff *skb; | 4534 | struct sk_buff *skb, *n; |
4535 | bool end_of_skbs; | ||
4515 | 4536 | ||
4516 | /* First, check that queue is collapsible and find | 4537 | /* First, check that queue is collapsible and find |
4517 | * the point where collapsing can be useful. */ | 4538 | * the point where collapsing can be useful. */ |
4518 | for (skb = head; skb != tail;) { | 4539 | skb = head; |
4540 | restart: | ||
4541 | end_of_skbs = true; | ||
4542 | skb_queue_walk_from_safe(list, skb, n) { | ||
4543 | if (skb == tail) | ||
4544 | break; | ||
4519 | /* No new bits? It is possible on ofo queue. */ | 4545 | /* No new bits? It is possible on ofo queue. */ |
4520 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { | 4546 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { |
4521 | skb = tcp_collapse_one(sk, skb, list); | 4547 | skb = tcp_collapse_one(sk, skb, list); |
4522 | continue; | 4548 | if (!skb) |
4549 | break; | ||
4550 | goto restart; | ||
4523 | } | 4551 | } |
4524 | 4552 | ||
4525 | /* The first skb to collapse is: | 4553 | /* The first skb to collapse is: |
@@ -4529,16 +4557,24 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4529 | */ | 4557 | */ |
4530 | if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && | 4558 | if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin && |
4531 | (tcp_win_from_space(skb->truesize) > skb->len || | 4559 | (tcp_win_from_space(skb->truesize) > skb->len || |
4532 | before(TCP_SKB_CB(skb)->seq, start) || | 4560 | before(TCP_SKB_CB(skb)->seq, start))) { |
4533 | (skb->next != tail && | 4561 | end_of_skbs = false; |
4534 | TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb->next)->seq))) | ||
4535 | break; | 4562 | break; |
4563 | } | ||
4564 | |||
4565 | if (!skb_queue_is_last(list, skb)) { | ||
4566 | struct sk_buff *next = skb_queue_next(list, skb); | ||
4567 | if (next != tail && | ||
4568 | TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(next)->seq) { | ||
4569 | end_of_skbs = false; | ||
4570 | break; | ||
4571 | } | ||
4572 | } | ||
4536 | 4573 | ||
4537 | /* Decided to skip this, advance start seq. */ | 4574 | /* Decided to skip this, advance start seq. */ |
4538 | start = TCP_SKB_CB(skb)->end_seq; | 4575 | start = TCP_SKB_CB(skb)->end_seq; |
4539 | skb = skb->next; | ||
4540 | } | 4576 | } |
4541 | if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) | 4577 | if (end_of_skbs || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin) |
4542 | return; | 4578 | return; |
4543 | 4579 | ||
4544 | while (before(start, end)) { | 4580 | while (before(start, end)) { |
@@ -4583,7 +4619,8 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list, | |||
4583 | } | 4619 | } |
4584 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { | 4620 | if (!before(start, TCP_SKB_CB(skb)->end_seq)) { |
4585 | skb = tcp_collapse_one(sk, skb, list); | 4621 | skb = tcp_collapse_one(sk, skb, list); |
4586 | if (skb == tail || | 4622 | if (!skb || |
4623 | skb == tail || | ||
4587 | tcp_hdr(skb)->syn || | 4624 | tcp_hdr(skb)->syn || |
4588 | tcp_hdr(skb)->fin) | 4625 | tcp_hdr(skb)->fin) |
4589 | return; | 4626 | return; |
@@ -4610,17 +4647,21 @@ static void tcp_collapse_ofo_queue(struct sock *sk) | |||
4610 | head = skb; | 4647 | head = skb; |
4611 | 4648 | ||
4612 | for (;;) { | 4649 | for (;;) { |
4613 | skb = skb->next; | 4650 | struct sk_buff *next = NULL; |
4651 | |||
4652 | if (!skb_queue_is_last(&tp->out_of_order_queue, skb)) | ||
4653 | next = skb_queue_next(&tp->out_of_order_queue, skb); | ||
4654 | skb = next; | ||
4614 | 4655 | ||
4615 | /* Segment is terminated when we see gap or when | 4656 | /* Segment is terminated when we see gap or when |
4616 | * we are at the end of all the queue. */ | 4657 | * we are at the end of all the queue. */ |
4617 | if (skb == (struct sk_buff *)&tp->out_of_order_queue || | 4658 | if (!skb || |
4618 | after(TCP_SKB_CB(skb)->seq, end) || | 4659 | after(TCP_SKB_CB(skb)->seq, end) || |
4619 | before(TCP_SKB_CB(skb)->end_seq, start)) { | 4660 | before(TCP_SKB_CB(skb)->end_seq, start)) { |
4620 | tcp_collapse(sk, &tp->out_of_order_queue, | 4661 | tcp_collapse(sk, &tp->out_of_order_queue, |
4621 | head, skb, start, end); | 4662 | head, skb, start, end); |
4622 | head = skb; | 4663 | head = skb; |
4623 | if (skb == (struct sk_buff *)&tp->out_of_order_queue) | 4664 | if (!skb) |
4624 | break; | 4665 | break; |
4625 | /* Start new segment */ | 4666 | /* Start new segment */ |
4626 | start = TCP_SKB_CB(skb)->seq; | 4667 | start = TCP_SKB_CB(skb)->seq; |
@@ -4681,10 +4722,11 @@ static int tcp_prune_queue(struct sock *sk) | |||
4681 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); | 4722 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss); |
4682 | 4723 | ||
4683 | tcp_collapse_ofo_queue(sk); | 4724 | tcp_collapse_ofo_queue(sk); |
4684 | tcp_collapse(sk, &sk->sk_receive_queue, | 4725 | if (!skb_queue_empty(&sk->sk_receive_queue)) |
4685 | sk->sk_receive_queue.next, | 4726 | tcp_collapse(sk, &sk->sk_receive_queue, |
4686 | (struct sk_buff *)&sk->sk_receive_queue, | 4727 | skb_peek(&sk->sk_receive_queue), |
4687 | tp->copied_seq, tp->rcv_nxt); | 4728 | NULL, |
4729 | tp->copied_seq, tp->rcv_nxt); | ||
4688 | sk_mem_reclaim(sk); | 4730 | sk_mem_reclaim(sk); |
4689 | 4731 | ||
4690 | if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) | 4732 | if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf) |
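[Editor's note] tcp_input.c is the most invasive part of the conversion: every out-of-order-queue and receive-queue traversal that compared its cursor against (struct sk_buff *)&queue now uses skb_queue_is_first()/is_last(), skb_queue_prev()/next() and NULL as the "ran off the end" marker, and tcp_collapse() learns that tail == NULL means "collapse through the end of the list". A condensed sketch of the backward scan in the enqueue path above, with the overlap/DSACK handling left out:

	struct sk_buff *skb1 = skb_peek_tail(&tp->out_of_order_queue);

	/* walk backwards until a segment starting at or before seq is found */
	while (after(TCP_SKB_CB(skb1)->seq, seq)) {
		if (skb_queue_is_first(&tp->out_of_order_queue, skb1)) {
			skb1 = NULL;                  /* new segment belongs at the head */
			break;
		}
		skb1 = skb_queue_prev(&tp->out_of_order_queue, skb1);
	}

	if (!skb1)
		__skb_queue_head(&tp->out_of_order_queue, skb);
	else
		__skb_queue_after(&tp->out_of_order_queue, skb1, skb);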
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index fc79e3416288..5a1ca2698c88 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -546,7 +546,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
546 | if (th->rst) | 546 | if (th->rst) |
547 | return; | 547 | return; |
548 | 548 | ||
549 | if (skb->rtable->rt_type != RTN_LOCAL) | 549 | if (skb_rtable(skb)->rt_type != RTN_LOCAL) |
550 | return; | 550 | return; |
551 | 551 | ||
552 | /* Swap the send and the receive. */ | 552 | /* Swap the send and the receive. */ |
@@ -590,7 +590,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) | |||
590 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; | 590 | arg.csumoffset = offsetof(struct tcphdr, check) / 2; |
591 | arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; | 591 | arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; |
592 | 592 | ||
593 | net = dev_net(skb->dst->dev); | 593 | net = dev_net(skb_dst(skb)->dev); |
594 | ip_send_reply(net->ipv4.tcp_sock, skb, | 594 | ip_send_reply(net->ipv4.tcp_sock, skb, |
595 | &arg, arg.iov[0].iov_len); | 595 | &arg, arg.iov[0].iov_len); |
596 | 596 | ||
@@ -617,7 +617,7 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack, | |||
617 | ]; | 617 | ]; |
618 | } rep; | 618 | } rep; |
619 | struct ip_reply_arg arg; | 619 | struct ip_reply_arg arg; |
620 | struct net *net = dev_net(skb->dst->dev); | 620 | struct net *net = dev_net(skb_dst(skb)->dev); |
621 | 621 | ||
622 | memset(&rep.th, 0, sizeof(struct tcphdr)); | 622 | memset(&rep.th, 0, sizeof(struct tcphdr)); |
623 | memset(&arg, 0, sizeof(arg)); | 623 | memset(&arg, 0, sizeof(arg)); |
@@ -1185,7 +1185,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) | |||
1185 | #endif | 1185 | #endif |
1186 | 1186 | ||
1187 | /* Never answer to SYNs send to broadcast or multicast */ | 1187 | /* Never answer to SYNs send to broadcast or multicast */ |
1188 | if (skb->rtable->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) | 1188 | if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) |
1189 | goto drop; | 1189 | goto drop; |
1190 | 1190 | ||
1191 | /* TW buckets are converted to open requests without | 1191 | /* TW buckets are converted to open requests without |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 79c39dc9b01c..416fc4c2e7eb 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2202,7 +2202,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
2202 | /* Reserve space for headers. */ | 2202 | /* Reserve space for headers. */ |
2203 | skb_reserve(skb, MAX_TCP_HEADER); | 2203 | skb_reserve(skb, MAX_TCP_HEADER); |
2204 | 2204 | ||
2205 | skb->dst = dst_clone(dst); | 2205 | skb_dst_set(skb, dst_clone(dst)); |
2206 | 2206 | ||
2207 | mss = dst_metric(dst, RTAX_ADVMSS); | 2207 | mss = dst_metric(dst, RTAX_ADVMSS); |
2208 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) | 2208 | if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) |
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c index a453aac91bd3..c6743eec9b7d 100644 --- a/net/ipv4/tcp_vegas.c +++ b/net/ipv4/tcp_vegas.c | |||
@@ -158,6 +158,11 @@ void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event) | |||
158 | } | 158 | } |
159 | EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); | 159 | EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event); |
160 | 160 | ||
161 | static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) | ||
162 | { | ||
163 | return min(tp->snd_ssthresh, tp->snd_cwnd-1); | ||
164 | } | ||
165 | |||
161 | static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | 166 | static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) |
162 | { | 167 | { |
163 | struct tcp_sock *tp = tcp_sk(sk); | 168 | struct tcp_sock *tp = tcp_sk(sk); |
@@ -221,11 +226,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
221 | */ | 226 | */ |
222 | diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT; | 227 | diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT; |
223 | 228 | ||
224 | if (diff > gamma && tp->snd_ssthresh > 2 ) { | 229 | if (diff > gamma && tp->snd_cwnd <= tp->snd_ssthresh) { |
225 | /* Going too fast. Time to slow down | 230 | /* Going too fast. Time to slow down |
226 | * and switch to congestion avoidance. | 231 | * and switch to congestion avoidance. |
227 | */ | 232 | */ |
228 | tp->snd_ssthresh = 2; | ||
229 | 233 | ||
230 | /* Set cwnd to match the actual rate | 234 | /* Set cwnd to match the actual rate |
231 | * exactly: | 235 | * exactly: |
@@ -235,6 +239,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
235 | * utilization. | 239 | * utilization. |
236 | */ | 240 | */ |
237 | tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1); | 241 | tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1); |
242 | tp->snd_ssthresh = tcp_vegas_ssthresh(tp); | ||
238 | 243 | ||
239 | } else if (tp->snd_cwnd <= tp->snd_ssthresh) { | 244 | } else if (tp->snd_cwnd <= tp->snd_ssthresh) { |
240 | /* Slow start. */ | 245 | /* Slow start. */ |
@@ -250,6 +255,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight) | |||
250 | * we slow down. | 255 | * we slow down. |
251 | */ | 256 | */ |
252 | tp->snd_cwnd--; | 257 | tp->snd_cwnd--; |
258 | tp->snd_ssthresh | ||
259 | = tcp_vegas_ssthresh(tp); | ||
253 | } else if (diff < alpha) { | 260 | } else if (diff < alpha) { |
254 | /* We don't have enough extra packets | 261 | /* We don't have enough extra packets |
255 | * in the network, so speed up. | 262 | * in the network, so speed up. |
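[Editor's note] The Vegas change is behavioural rather than mechanical: when Vegas decides the flow is pushing too hard, it used to slam snd_ssthresh to 2, whereas it now clamps it to just below the current window, which still drops the flow out of slow start but keeps a meaningful threshold for later. With the helper from the hunk above, a quick worked example (numbers are illustrative):

	static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
	{
		return min(tp->snd_ssthresh, tp->snd_cwnd - 1);
	}

	/* e.g. snd_cwnd = 20, snd_ssthresh = 0x7fff (never reduced before):
	 *   old behaviour: snd_ssthresh = 2
	 *   new behaviour: snd_ssthresh = min(0x7fff, 20 - 1) = 19
	 */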
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 7a1d1ce22e66..8f4158d7c9a6 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -328,7 +328,7 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb, | |||
328 | if (unlikely(sk = skb_steal_sock(skb))) | 328 | if (unlikely(sk = skb_steal_sock(skb))) |
329 | return sk; | 329 | return sk; |
330 | else | 330 | else |
331 | return __udp4_lib_lookup(dev_net(skb->dst->dev), iph->saddr, sport, | 331 | return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport, |
332 | iph->daddr, dport, inet_iif(skb), | 332 | iph->daddr, dport, inet_iif(skb), |
333 | udptable); | 333 | udptable); |
334 | } | 334 | } |
@@ -1237,7 +1237,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, | |||
1237 | struct sock *sk; | 1237 | struct sock *sk; |
1238 | struct udphdr *uh; | 1238 | struct udphdr *uh; |
1239 | unsigned short ulen; | 1239 | unsigned short ulen; |
1240 | struct rtable *rt = (struct rtable*)skb->dst; | 1240 | struct rtable *rt = skb_rtable(skb); |
1241 | __be32 saddr, daddr; | 1241 | __be32 saddr, daddr; |
1242 | struct net *net = dev_net(skb->dev); | 1242 | struct net *net = dev_net(skb->dev); |
1243 | 1243 | ||
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c index 4ec2162a437e..f9f922a0ba88 100644 --- a/net/ipv4/xfrm4_input.c +++ b/net/ipv4/xfrm4_input.c | |||
@@ -23,7 +23,7 @@ int xfrm4_extract_input(struct xfrm_state *x, struct sk_buff *skb) | |||
23 | 23 | ||
24 | static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) | 24 | static inline int xfrm4_rcv_encap_finish(struct sk_buff *skb) |
25 | { | 25 | { |
26 | if (skb->dst == NULL) { | 26 | if (skb_dst(skb) == NULL) { |
27 | const struct iphdr *iph = ip_hdr(skb); | 27 | const struct iphdr *iph = ip_hdr(skb); |
28 | 28 | ||
29 | if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, | 29 | if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, |
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 7135279f3f84..3444f3b34eca 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c | |||
@@ -28,7 +28,7 @@ static inline void ipip_ecn_decapsulate(struct sk_buff *skb) | |||
28 | */ | 28 | */ |
29 | static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) | 29 | static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) |
30 | { | 30 | { |
31 | struct dst_entry *dst = skb->dst; | 31 | struct dst_entry *dst = skb_dst(skb); |
32 | struct iphdr *top_iph; | 32 | struct iphdr *top_iph; |
33 | int flags; | 33 | int flags; |
34 | 34 | ||
@@ -41,7 +41,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) | |||
41 | top_iph->ihl = 5; | 41 | top_iph->ihl = 5; |
42 | top_iph->version = 4; | 42 | top_iph->version = 4; |
43 | 43 | ||
44 | top_iph->protocol = xfrm_af2proto(skb->dst->ops->family); | 44 | top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family); |
45 | 45 | ||
46 | /* DS disclosed */ | 46 | /* DS disclosed */ |
47 | top_iph->tos = INET_ECN_encapsulate(XFRM_MODE_SKB_CB(skb)->tos, | 47 | top_iph->tos = INET_ECN_encapsulate(XFRM_MODE_SKB_CB(skb)->tos, |
diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 8c3180adddbf..c908bd99bcba 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c | |||
@@ -29,7 +29,7 @@ static int xfrm4_tunnel_check_size(struct sk_buff *skb) | |||
29 | if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) | 29 | if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->local_df) |
30 | goto out; | 30 | goto out; |
31 | 31 | ||
32 | dst = skb->dst; | 32 | dst = skb_dst(skb); |
33 | mtu = dst_mtu(dst); | 33 | mtu = dst_mtu(dst); |
34 | if (skb->len > mtu) { | 34 | if (skb->len > mtu) { |
35 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); | 35 | icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu)); |
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(xfrm4_prepare_output); | |||
72 | static int xfrm4_output_finish(struct sk_buff *skb) | 72 | static int xfrm4_output_finish(struct sk_buff *skb) |
73 | { | 73 | { |
74 | #ifdef CONFIG_NETFILTER | 74 | #ifdef CONFIG_NETFILTER |
75 | if (!skb->dst->xfrm) { | 75 | if (!skb_dst(skb)->xfrm) { |
76 | IPCB(skb)->flags |= IPSKB_REROUTED; | 76 | IPCB(skb)->flags |= IPSKB_REROUTED; |
77 | return dst_output(skb); | 77 | return dst_output(skb); |
78 | } | 78 | } |
@@ -87,6 +87,6 @@ static int xfrm4_output_finish(struct sk_buff *skb) | |||
87 | int xfrm4_output(struct sk_buff *skb) | 87 | int xfrm4_output(struct sk_buff *skb) |
88 | { | 88 | { |
89 | return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, | 89 | return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, |
90 | NULL, skb->dst->dev, xfrm4_output_finish, | 90 | NULL, skb_dst(skb)->dev, xfrm4_output_finish, |
91 | !(IPCB(skb)->flags & IPSKB_REROUTED)); | 91 | !(IPCB(skb)->flags & IPSKB_REROUTED)); |
92 | } | 92 | } |