author     David Woodhouse <dwmw2@shinybook.infradead.org>   2005-05-05 08:59:37 -0400
committer  David Woodhouse <dwmw2@shinybook.infradead.org>   2005-05-05 08:59:37 -0400
commit     bfd4bda097f8758d28e632ff2035e25577f6b060
tree       022276b3625a432c7132e39776e7e448445087ac /net
parent     488f2eaca1b0831a5a5e6a66e33bad2cdeff7238
parent     b2d84f078a8be40f5ae3b4d2ac001e2a7f45fe4f

Merge with master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Diffstat (limited to 'net')
-rw-r--r--  net/core/link_watch.c                           7
-rw-r--r--  net/core/netfilter.c                           17
-rw-r--r--  net/core/rtnetlink.c                           67
-rw-r--r--  net/decnet/dn_dev.c                            25
-rw-r--r--  net/decnet/netfilter/dn_rtmsg.c                 3
-rw-r--r--  net/ipv4/devinet.c                             21
-rw-r--r--  net/ipv4/ip_output.c                            1
-rw-r--r--  net/ipv4/netfilter/ip_conntrack_proto_tcp.c     1
-rw-r--r--  net/ipv4/netfilter/ip_queue.c                  20
-rw-r--r--  net/ipv4/netfilter/iptable_raw.c                6
-rw-r--r--  net/ipv4/tcp_diag.c                             3
-rw-r--r--  net/ipv4/tcp_ipv4.c                             7
-rw-r--r--  net/ipv4/xfrm4_policy.c                        42
-rw-r--r--  net/ipv6/addrconf.c                             2
-rw-r--r--  net/ipv6/netfilter/ip6_queue.c                 20
-rw-r--r--  net/ipv6/raw.c                                  7
-rw-r--r--  net/ipv6/tcp_ipv6.c                             7
-rw-r--r--  net/ipv6/xfrm6_policy.c                        43
-rw-r--r--  net/irda/irda_device.c                          2
-rw-r--r--  net/netlink/af_netlink.c                        3
-rw-r--r--  net/sched/Kconfig                               2
-rw-r--r--  net/sched/act_api.c                             4
-rw-r--r--  net/sched/sch_api.c                             1
-rw-r--r--  net/sched/sch_generic.c                         5
-rw-r--r--  net/sched/sch_htb.c                             4
-rw-r--r--  net/sched/sch_netem.c                         131
-rw-r--r--  net/xfrm/xfrm_policy.c                         25
-rw-r--r--  net/xfrm/xfrm_user.c                           87
28 files changed, 344 insertions, 219 deletions
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4859b7446c6f..d43d1201275c 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -16,6 +16,7 @@
 #include <linux/netdevice.h>
 #include <linux/if.h>
 #include <net/sock.h>
+#include <net/pkt_sched.h>
 #include <linux/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
@@ -74,6 +75,12 @@ void linkwatch_run_queue(void)
                 clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
 
                 if (dev->flags & IFF_UP) {
+                        if (netif_carrier_ok(dev)) {
+                                WARN_ON(dev->qdisc_sleeping == &noop_qdisc);
+                                dev_activate(dev);
+                        } else
+                                dev_deactivate(dev);
+
                         netdev_state_change(dev);
                 }
 
diff --git a/net/core/netfilter.c b/net/core/netfilter.c
index e51cfa46950c..22a8f127c4aa 100644
--- a/net/core/netfilter.c
+++ b/net/core/netfilter.c
@@ -217,21 +217,10 @@ void nf_debug_ip_local_deliver(struct sk_buff *skb)
          * NF_IP_RAW_INPUT and NF_IP_PRE_ROUTING. */
         if (!skb->dev) {
                 printk("ip_local_deliver: skb->dev is NULL.\n");
-        }
-        else if (strcmp(skb->dev->name, "lo") == 0) {
-                if (skb->nf_debug != ((1 << NF_IP_LOCAL_OUT)
-                                      | (1 << NF_IP_POST_ROUTING)
-                                      | (1 << NF_IP_PRE_ROUTING)
-                                      | (1 << NF_IP_LOCAL_IN))) {
-                        printk("ip_local_deliver: bad loopback skb: ");
-                        debug_print_hooks_ip(skb->nf_debug);
-                        nf_dump_skb(PF_INET, skb);
-                }
-        }
-        else {
+        } else {
                 if (skb->nf_debug != ((1<<NF_IP_PRE_ROUTING)
                                       | (1<<NF_IP_LOCAL_IN))) {
-                        printk("ip_local_deliver: bad non-lo skb: ");
+                        printk("ip_local_deliver: bad skb: ");
                         debug_print_hooks_ip(skb->nf_debug);
                         nf_dump_skb(PF_INET, skb);
                 }
@@ -247,8 +236,6 @@ void nf_debug_ip_loopback_xmit(struct sk_buff *newskb)
                 debug_print_hooks_ip(newskb->nf_debug);
                 nf_dump_skb(PF_INET, newskb);
         }
-        /* Clear to avoid confusing input check */
-        newskb->nf_debug = 0;
 }
 
 void nf_debug_ip_finish_output2(struct sk_buff *skb)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index d8c198e42f90..00caf4b318b2 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -86,30 +86,33 @@ struct sock *rtnl;
 
 struct rtnetlink_link * rtnetlink_links[NPROTO];
 
-static const int rtm_min[(RTM_MAX+1-RTM_BASE)/4] =
+static const int rtm_min[RTM_NR_FAMILIES] =
 {
-        NLMSG_LENGTH(sizeof(struct ifinfomsg)),
-        NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
-        NLMSG_LENGTH(sizeof(struct rtmsg)),
-        NLMSG_LENGTH(sizeof(struct ndmsg)),
-        NLMSG_LENGTH(sizeof(struct rtmsg)),
-        NLMSG_LENGTH(sizeof(struct tcmsg)),
-        NLMSG_LENGTH(sizeof(struct tcmsg)),
-        NLMSG_LENGTH(sizeof(struct tcmsg)),
-        NLMSG_LENGTH(sizeof(struct tcamsg))
+        [RTM_FAM(RTM_NEWLINK)]      = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+        [RTM_FAM(RTM_NEWADDR)]      = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
+        [RTM_FAM(RTM_NEWROUTE)]     = NLMSG_LENGTH(sizeof(struct rtmsg)),
+        [RTM_FAM(RTM_NEWNEIGH)]     = NLMSG_LENGTH(sizeof(struct ndmsg)),
+        [RTM_FAM(RTM_NEWRULE)]      = NLMSG_LENGTH(sizeof(struct rtmsg)),
+        [RTM_FAM(RTM_NEWQDISC)]     = NLMSG_LENGTH(sizeof(struct tcmsg)),
+        [RTM_FAM(RTM_NEWTCLASS)]    = NLMSG_LENGTH(sizeof(struct tcmsg)),
+        [RTM_FAM(RTM_NEWTFILTER)]   = NLMSG_LENGTH(sizeof(struct tcmsg)),
+        [RTM_FAM(RTM_NEWACTION)]    = NLMSG_LENGTH(sizeof(struct tcamsg)),
+        [RTM_FAM(RTM_NEWPREFIX)]    = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
+        [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
+        [RTM_FAM(RTM_GETANYCAST)]   = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
 };
 
-static const int rta_max[(RTM_MAX+1-RTM_BASE)/4] =
+static const int rta_max[RTM_NR_FAMILIES] =
 {
-        IFLA_MAX,
-        IFA_MAX,
-        RTA_MAX,
-        NDA_MAX,
-        RTA_MAX,
-        TCA_MAX,
-        TCA_MAX,
-        TCA_MAX,
-        TCAA_MAX
+        [RTM_FAM(RTM_NEWLINK)]      = IFLA_MAX,
+        [RTM_FAM(RTM_NEWADDR)]      = IFA_MAX,
+        [RTM_FAM(RTM_NEWROUTE)]     = RTA_MAX,
+        [RTM_FAM(RTM_NEWNEIGH)]     = NDA_MAX,
+        [RTM_FAM(RTM_NEWRULE)]      = RTA_MAX,
+        [RTM_FAM(RTM_NEWQDISC)]     = TCA_MAX,
+        [RTM_FAM(RTM_NEWTCLASS)]    = TCA_MAX,
+        [RTM_FAM(RTM_NEWTFILTER)]   = TCA_MAX,
+        [RTM_FAM(RTM_NEWACTION)]    = TCAA_MAX,
 };
 
 void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
@@ -606,27 +609,33 @@ static inline int rtnetlink_rcv_skb(struct sk_buff *skb)
 
 /*
  * rtnetlink input queue processing routine:
- *	- try to acquire shared lock. If it is failed, defer processing.
+ *	- process as much as there was in the queue upon entry.
  *	- feed skbs to rtnetlink_rcv_skb, until it refuse a message,
- *	  that will occur, when a dump started and/or acquisition of
- *	  exclusive lock failed.
+ *	  that will occur, when a dump started.
  */
 
 static void rtnetlink_rcv(struct sock *sk, int len)
 {
+        unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+
         do {
                 struct sk_buff *skb;
 
-                if (rtnl_shlock_nowait())
-                        return;
+                rtnl_lock();
+
+                if (qlen > skb_queue_len(&sk->sk_receive_queue))
+                        qlen = skb_queue_len(&sk->sk_receive_queue);
 
-                while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+                for (; qlen; qlen--) {
+                        skb = skb_dequeue(&sk->sk_receive_queue);
                         if (rtnetlink_rcv_skb(skb)) {
                                 if (skb->len)
                                         skb_queue_head(&sk->sk_receive_queue,
                                                        skb);
-                                else
+                                else {
                                         kfree_skb(skb);
+                                        qlen--;
+                                }
                                 break;
                         }
                         kfree_skb(skb);
@@ -635,10 +644,10 @@ static void rtnetlink_rcv(struct sock *sk, int len)
                 up(&rtnl_sem);
 
                 netdev_run_todo();
-        } while (rtnl && rtnl->sk_receive_queue.qlen);
+        } while (qlen);
 }
 
-static struct rtnetlink_link link_rtnetlink_table[RTM_MAX-RTM_BASE+1] =
+static struct rtnetlink_link link_rtnetlink_table[RTM_NR_MSGTYPES] =
 {
         [RTM_GETLINK - RTM_BASE] = { .dumpit = rtnetlink_dump_ifinfo },
         [RTM_SETLINK - RTM_BASE] = { .doit = do_setlink },
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index c2a0346f423b..e6e23eb14428 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -1411,21 +1411,22 @@ static struct file_operations dn_dev_seq_fops = {
 
 #endif /* CONFIG_PROC_FS */
 
-static struct rtnetlink_link dnet_rtnetlink_table[RTM_MAX-RTM_BASE+1] =
+static struct rtnetlink_link dnet_rtnetlink_table[RTM_NR_MSGTYPES] =
 {
-        [4] = { .doit = dn_dev_rtm_newaddr, },
-        [5] = { .doit = dn_dev_rtm_deladdr, },
-        [6] = { .dumpit = dn_dev_dump_ifaddr, },
-
+        [RTM_NEWADDR - RTM_BASE] = { .doit = dn_dev_rtm_newaddr, },
+        [RTM_DELADDR - RTM_BASE] = { .doit = dn_dev_rtm_deladdr, },
+        [RTM_GETADDR - RTM_BASE] = { .dumpit = dn_dev_dump_ifaddr, },
 #ifdef CONFIG_DECNET_ROUTER
-        [8] = { .doit = dn_fib_rtm_newroute, },
-        [9] = { .doit = dn_fib_rtm_delroute, },
-        [10] = { .doit = dn_cache_getroute, .dumpit = dn_fib_dump, },
-        [16] = { .doit = dn_fib_rtm_newrule, },
-        [17] = { .doit = dn_fib_rtm_delrule, },
-        [18] = { .dumpit = dn_fib_dump_rules, },
+        [RTM_NEWROUTE - RTM_BASE] = { .doit = dn_fib_rtm_newroute, },
+        [RTM_DELROUTE - RTM_BASE] = { .doit = dn_fib_rtm_delroute, },
+        [RTM_GETROUTE - RTM_BASE] = { .doit = dn_cache_getroute,
+                                      .dumpit = dn_fib_dump, },
+        [RTM_NEWRULE - RTM_BASE] = { .doit = dn_fib_rtm_newrule, },
+        [RTM_DELRULE - RTM_BASE] = { .doit = dn_fib_rtm_delrule, },
+        [RTM_GETRULE - RTM_BASE] = { .dumpit = dn_fib_dump_rules, },
 #else
-        [10] = { .doit = dn_cache_getroute, .dumpit = dn_cache_dump, },
+        [RTM_GETROUTE - RTM_BASE] = { .doit = dn_cache_getroute,
+                                      .dumpit = dn_cache_dump,
 #endif
 
 };
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index f86a6259fd12..284a9998e53d 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -119,8 +119,9 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
 static void dnrmg_receive_user_sk(struct sock *sk, int len)
 {
         struct sk_buff *skb;
+        unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
 
-        while((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+        for (; qlen && (skb = skb_dequeue(&sk->sk_receive_queue)); qlen--) {
                 dnrmg_receive_user_skb(skb);
                 kfree_skb(skb);
         }
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index eea7ef010776..abbc6d5c183e 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1107,17 +1107,18 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa)
         }
 }
 
-static struct rtnetlink_link inet_rtnetlink_table[RTM_MAX - RTM_BASE + 1] = {
-        [4] = { .doit = inet_rtm_newaddr, },
-        [5] = { .doit = inet_rtm_deladdr, },
-        [6] = { .dumpit = inet_dump_ifaddr, },
-        [8] = { .doit = inet_rtm_newroute, },
-        [9] = { .doit = inet_rtm_delroute, },
-        [10] = { .doit = inet_rtm_getroute, .dumpit = inet_dump_fib, },
+static struct rtnetlink_link inet_rtnetlink_table[RTM_NR_MSGTYPES] = {
+        [RTM_NEWADDR - RTM_BASE] = { .doit = inet_rtm_newaddr, },
+        [RTM_DELADDR - RTM_BASE] = { .doit = inet_rtm_deladdr, },
+        [RTM_GETADDR - RTM_BASE] = { .dumpit = inet_dump_ifaddr, },
+        [RTM_NEWROUTE - RTM_BASE] = { .doit = inet_rtm_newroute, },
+        [RTM_DELROUTE - RTM_BASE] = { .doit = inet_rtm_delroute, },
+        [RTM_GETROUTE - RTM_BASE] = { .doit = inet_rtm_getroute,
+                                      .dumpit = inet_dump_fib, },
 #ifdef CONFIG_IP_MULTIPLE_TABLES
-        [16] = { .doit = inet_rtm_newrule, },
-        [17] = { .doit = inet_rtm_delrule, },
-        [18] = { .dumpit = inet_dump_rules, },
+        [RTM_NEWRULE - RTM_BASE] = { .doit = inet_rtm_newrule, },
+        [RTM_DELRULE - RTM_BASE] = { .doit = inet_rtm_delrule, },
+        [RTM_GETRULE - RTM_BASE] = { .dumpit = inet_dump_rules, },
 #endif
 };
 
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 38f69532a029..24fe3e00b42b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -111,6 +111,7 @@ static int ip_dev_loopback_xmit(struct sk_buff *newskb)
 #ifdef CONFIG_NETFILTER_DEBUG
         nf_debug_ip_loopback_xmit(newskb);
 #endif
+        nf_reset(newskb);
         netif_rx(newskb);
         return 0;
 }
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index 2b87c1974be6..721ddbf522b4 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -819,6 +819,7 @@ static int tcp_error(struct sk_buff *skb,
          */
         /* FIXME: Source route IP option packets --RR */
         if (hooknum == NF_IP_PRE_ROUTING
+            && skb->ip_summed != CHECKSUM_UNNECESSARY
             && csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen, IPPROTO_TCP,
                                  skb->ip_summed == CHECKSUM_HW ? skb->csum
                                  : skb_checksum(skb, iph->ihl*4, tcplen, 0))) {
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index 9e40dffc204f..e5746b674413 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -546,20 +546,18 @@ ipq_rcv_skb(struct sk_buff *skb)
 static void
 ipq_rcv_sk(struct sock *sk, int len)
 {
-        do {
-                struct sk_buff *skb;
+        struct sk_buff *skb;
+        unsigned int qlen;
 
-                if (down_trylock(&ipqnl_sem))
-                        return;
+        down(&ipqnl_sem);
 
-                while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-                        ipq_rcv_skb(skb);
-                        kfree_skb(skb);
-                }
+        for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
+                skb = skb_dequeue(&sk->sk_receive_queue);
+                ipq_rcv_skb(skb);
+                kfree_skb(skb);
+        }
 
-                up(&ipqnl_sem);
-
-        } while (ipqnl && ipqnl->sk_receive_queue.qlen);
+        up(&ipqnl_sem);
 }
 
 static int
diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c
index 01b4a3c814d3..47449ba83eb9 100644
--- a/net/ipv4/netfilter/iptable_raw.c
+++ b/net/ipv4/netfilter/iptable_raw.c
@@ -103,13 +103,15 @@ static struct nf_hook_ops ipt_ops[] = {
                 .hook = ipt_hook,
                 .pf = PF_INET,
                 .hooknum = NF_IP_PRE_ROUTING,
-                .priority = NF_IP_PRI_RAW
+                .priority = NF_IP_PRI_RAW,
+                .owner = THIS_MODULE,
         },
         {
                 .hook = ipt_hook,
                 .pf = PF_INET,
                 .hooknum = NF_IP_LOCAL_OUT,
-                .priority = NF_IP_PRI_RAW
+                .priority = NF_IP_PRI_RAW,
+                .owner = THIS_MODULE,
         },
 };
 
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 313c1408da33..8faa8948f75c 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -777,8 +777,9 @@ static inline void tcpdiag_rcv_skb(struct sk_buff *skb)
 static void tcpdiag_rcv(struct sock *sk, int len)
 {
         struct sk_buff *skb;
+        unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
 
-        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+        while (qlen-- && (skb = skb_dequeue(&sk->sk_receive_queue))) {
                 tcpdiag_rcv_skb(skb);
                 kfree_skb(skb);
         }
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3ac6659869c4..dad98e4a5043 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -222,10 +222,13 @@ static int tcp_v4_get_port(struct sock *sk, unsigned short snum)
                 int rover;
 
                 spin_lock(&tcp_portalloc_lock);
-                rover = tcp_port_rover;
+                if (tcp_port_rover < low)
+                        rover = low;
+                else
+                        rover = tcp_port_rover;
                 do {
                         rover++;
-                        if (rover < low || rover > high)
+                        if (rover > high)
                                 rover = low;
                         head = &tcp_bhash[tcp_bhashfn(rover)];
                         spin_lock(&head->lock);
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 7fe2afd2e669..b2b60f3e9cdd 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -8,7 +8,10 @@
  *
  */
 
+#include <asm/bug.h>
+#include <linux/compiler.h>
 #include <linux/config.h>
+#include <linux/inetdevice.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
 
@@ -152,6 +155,8 @@ __xfrm4_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
                 x->u.rt.rt_dst = rt0->rt_dst;
                 x->u.rt.rt_gateway = rt->rt_gateway;
                 x->u.rt.rt_spec_dst = rt0->rt_spec_dst;
+                x->u.rt.idev = rt0->idev;
+                in_dev_hold(rt0->idev);
                 header_len -= x->u.dst.xfrm->props.header_len;
                 trailer_len -= x->u.dst.xfrm->props.trailer_len;
         }
@@ -243,11 +248,48 @@ static void xfrm4_update_pmtu(struct dst_entry *dst, u32 mtu)
         path->ops->update_pmtu(path, mtu);
 }
 
+static void xfrm4_dst_destroy(struct dst_entry *dst)
+{
+        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+
+        if (likely(xdst->u.rt.idev))
+                in_dev_put(xdst->u.rt.idev);
+        xfrm_dst_destroy(xdst);
+}
+
+static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+                             int unregister)
+{
+        struct xfrm_dst *xdst;
+
+        if (!unregister)
+                return;
+
+        xdst = (struct xfrm_dst *)dst;
+        if (xdst->u.rt.idev->dev == dev) {
+                struct in_device *loopback_idev = in_dev_get(&loopback_dev);
+                BUG_ON(!loopback_idev);
+
+                do {
+                        in_dev_put(xdst->u.rt.idev);
+                        xdst->u.rt.idev = loopback_idev;
+                        in_dev_hold(loopback_idev);
+                        xdst = (struct xfrm_dst *)xdst->u.dst.child;
+                } while (xdst->u.dst.xfrm);
+
+                __in_dev_put(loopback_idev);
+        }
+
+        xfrm_dst_ifdown(dst, dev);
+}
+
 static struct dst_ops xfrm4_dst_ops = {
         .family =               AF_INET,
         .protocol =             __constant_htons(ETH_P_IP),
         .gc =                   xfrm4_garbage_collect,
         .update_pmtu =          xfrm4_update_pmtu,
+        .destroy =              xfrm4_dst_destroy,
+        .ifdown =               xfrm4_dst_ifdown,
         .gc_thresh =            1024,
         .entry_size =           sizeof(struct xfrm_dst),
 };
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7196ac2f2d16..7744a2592693 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3076,7 +3076,7 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,
         netlink_broadcast(rtnl, skb, 0, RTMGRP_IPV6_PREFIX, GFP_ATOMIC);
 }
 
-static struct rtnetlink_link inet6_rtnetlink_table[RTM_MAX - RTM_BASE + 1] = {
+static struct rtnetlink_link inet6_rtnetlink_table[RTM_NR_MSGTYPES] = {
         [RTM_GETLINK - RTM_BASE] = { .dumpit = inet6_dump_ifinfo, },
         [RTM_NEWADDR - RTM_BASE] = { .doit = inet6_rtm_newaddr, },
         [RTM_DELADDR - RTM_BASE] = { .doit = inet6_rtm_deladdr, },
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index c54830b89593..750943e2d34e 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -549,20 +549,18 @@ ipq_rcv_skb(struct sk_buff *skb)
 static void
 ipq_rcv_sk(struct sock *sk, int len)
 {
-        do {
-                struct sk_buff *skb;
+        struct sk_buff *skb;
+        unsigned int qlen;
 
-                if (down_trylock(&ipqnl_sem))
-                        return;
+        down(&ipqnl_sem);
 
-                while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
-                        ipq_rcv_skb(skb);
-                        kfree_skb(skb);
-                }
+        for (qlen = skb_queue_len(&sk->sk_receive_queue); qlen; qlen--) {
+                skb = skb_dequeue(&sk->sk_receive_queue);
+                ipq_rcv_skb(skb);
+                kfree_skb(skb);
+        }
 
-                up(&ipqnl_sem);
-
-        } while (ipqnl && ipqnl->sk_receive_queue.qlen);
+        up(&ipqnl_sem);
 }
 
 static int
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 1352c1d9bf4d..617645bc5ed6 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -455,11 +455,11 @@ csum_copy_err:
 static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
                                      struct raw6_sock *rp)
 {
-        struct inet_sock *inet = inet_sk(sk);
         struct sk_buff *skb;
         int err = 0;
         int offset;
         int len;
+        int total_len;
         u32 tmp_csum;
         u16 csum;
 
@@ -470,7 +470,8 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
                 goto out;
 
         offset = rp->offset;
-        if (offset >= inet->cork.length - 1) {
+        total_len = inet_sk(sk)->cork.length - (skb->nh.raw - skb->data);
+        if (offset >= total_len - 1) {
                 err = -EINVAL;
                 ip6_flush_pending_frames(sk);
                 goto out;
@@ -514,7 +515,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi *fl,
 
         tmp_csum = csum_ipv6_magic(&fl->fl6_src,
                                    &fl->fl6_dst,
-                                   inet->cork.length, fl->proto, tmp_csum);
+                                   total_len, fl->proto, tmp_csum);
 
         if (tmp_csum == 0)
                 tmp_csum = -1;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 4760c85e19db..0f69e800a0ad 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -139,9 +139,12 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
                 int rover;
 
                 spin_lock(&tcp_portalloc_lock);
-                rover = tcp_port_rover;
+                if (tcp_port_rover < low)
+                        rover = low;
+                else
+                        rover = tcp_port_rover;
                 do { rover++;
-                        if ((rover < low) || (rover > high))
+                        if (rover > high)
                                 rover = low;
                         head = &tcp_bhash[tcp_bhashfn(rover)];
                         spin_lock(&head->lock);
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8a4f37de4d2d..4429b1a1fe5f 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -11,7 +11,11 @@
  *
  */
 
+#include <asm/bug.h>
+#include <linux/compiler.h>
 #include <linux/config.h>
+#include <linux/netdevice.h>
+#include <net/addrconf.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -166,6 +170,8 @@ __xfrm6_bundle_create(struct xfrm_policy *policy, struct xfrm_state **xfrm, int
                 memcpy(&x->u.rt6.rt6i_gateway, &rt0->rt6i_gateway, sizeof(x->u.rt6.rt6i_gateway));
                 x->u.rt6.rt6i_dst = rt0->rt6i_dst;
                 x->u.rt6.rt6i_src = rt0->rt6i_src;
+                x->u.rt6.rt6i_idev = rt0->rt6i_idev;
+                in6_dev_hold(rt0->rt6i_idev);
                 header_len -= x->u.dst.xfrm->props.header_len;
                 trailer_len -= x->u.dst.xfrm->props.trailer_len;
         }
@@ -251,11 +257,48 @@ static void xfrm6_update_pmtu(struct dst_entry *dst, u32 mtu)
         path->ops->update_pmtu(path, mtu);
 }
 
+static void xfrm6_dst_destroy(struct dst_entry *dst)
+{
+        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
+
+        if (likely(xdst->u.rt6.rt6i_idev))
+                in6_dev_put(xdst->u.rt6.rt6i_idev);
+        xfrm_dst_destroy(xdst);
+}
+
+static void xfrm6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+                             int unregister)
+{
+        struct xfrm_dst *xdst;
+
+        if (!unregister)
+                return;
+
+        xdst = (struct xfrm_dst *)dst;
+        if (xdst->u.rt6.rt6i_idev->dev == dev) {
+                struct inet6_dev *loopback_idev = in6_dev_get(&loopback_dev);
+                BUG_ON(!loopback_idev);
+
+                do {
+                        in6_dev_put(xdst->u.rt6.rt6i_idev);
+                        xdst->u.rt6.rt6i_idev = loopback_idev;
+                        in6_dev_hold(loopback_idev);
+                        xdst = (struct xfrm_dst *)xdst->u.dst.child;
+                } while (xdst->u.dst.xfrm);
+
+                __in6_dev_put(loopback_idev);
+        }
+
+        xfrm_dst_ifdown(dst, dev);
+}
+
 static struct dst_ops xfrm6_dst_ops = {
         .family =               AF_INET6,
         .protocol =             __constant_htons(ETH_P_IPV6),
         .gc =                   xfrm6_garbage_collect,
         .update_pmtu =          xfrm6_update_pmtu,
+        .destroy =              xfrm6_dst_destroy,
+        .ifdown =               xfrm6_dst_ifdown,
         .gc_thresh =            1024,
         .entry_size =           sizeof(struct xfrm_dst),
 };
diff --git a/net/irda/irda_device.c b/net/irda/irda_device.c
index d6ccd3239dcf..70543d89438b 100644
--- a/net/irda/irda_device.c
+++ b/net/irda/irda_device.c
@@ -470,6 +470,7 @@ void irda_device_unregister_dongle(struct dongle_reg *dongle)
 }
 EXPORT_SYMBOL(irda_device_unregister_dongle);
 
+#ifdef CONFIG_ISA_DMA_API
 /*
  * Function setup_dma (idev, buffer, count, mode)
  *
@@ -492,3 +493,4 @@ void irda_setup_dma(int channel, dma_addr_t buffer, int count, int mode)
         release_dma_lock(flags);
 }
 EXPORT_SYMBOL(irda_setup_dma);
+#endif
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 379ed06e60a3..733bf52cef3e 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -375,7 +375,6 @@ static int netlink_release(struct socket *sock)
                 nlk->cb->done(nlk->cb);
                 netlink_destroy_callback(nlk->cb);
                 nlk->cb = NULL;
-                __sock_put(sk);
         }
         spin_unlock(&nlk->cb_lock);
 
@@ -1102,7 +1101,6 @@ static int netlink_dump(struct sock *sk)
         spin_unlock(&nlk->cb_lock);
 
         netlink_destroy_callback(cb);
-        __sock_put(sk);
         return 0;
 }
 
@@ -1141,7 +1139,6 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
                 return -EBUSY;
         }
         nlk->cb = cb;
-        sock_hold(sk);
         spin_unlock(&nlk->cb_lock);
 
         netlink_dump(sk);
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 9c118baed9dc..b0941186f867 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -185,7 +185,7 @@ config NET_SCH_GRED
         depends on NET_SCHED
         help
           Say Y here if you want to use the Generic Random Early Detection
-          (RED) packet scheduling algorithm for some of your network devices
+          (GRED) packet scheduling algorithm for some of your network devices
           (see the top of <file:net/sched/sch_red.c> for details and
           references about the algorithm).
 
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 5e6cc371b39e..cafcb084098d 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -171,10 +171,10 @@ repeat:
                         skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
                         skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
                 }
-                if (ret != TC_ACT_PIPE)
-                        goto exec_done;
                 if (ret == TC_ACT_REPEAT)
                         goto repeat;    /* we need a ttl - JHS */
+                if (ret != TC_ACT_PIPE)
+                        goto exec_done;
         }
         act = a->next;
 }
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 4323a74eea30..07977f8f2679 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1289,6 +1289,7 @@ static int __init pktsched_init(void)
 
 subsys_initcall(pktsched_init);
 
+EXPORT_SYMBOL(qdisc_lookup);
 EXPORT_SYMBOL(qdisc_get_rtab);
 EXPORT_SYMBOL(qdisc_put_rtab);
 EXPORT_SYMBOL(register_qdisc);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 8c01e023f02e..87e48a4e1051 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -179,6 +179,7 @@ requeue:
                 netif_schedule(dev);
                 return 1;
         }
+        BUG_ON((int) q->q.qlen < 0);
         return q->q.qlen;
 }
 
@@ -539,6 +540,10 @@ void dev_activate(struct net_device *dev)
                 write_unlock_bh(&qdisc_tree_lock);
         }
 
+        if (!netif_carrier_ok(dev))
+                /* Delay activation until next carrier-on event */
+                return;
+
         spin_lock_bh(&dev->queue_lock);
         rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
         if (dev->qdisc != &noqueue_qdisc) {
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index a85935e7d53d..558cc087e602 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -717,6 +717,10 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 if (q->direct_queue.qlen < q->direct_qlen) {
                         __skb_queue_tail(&q->direct_queue, skb);
                         q->direct_pkts++;
+                } else {
+                        kfree_skb(skb);
+                        sch->qstats.drops++;
+                        return NET_XMIT_DROP;
                 }
 #ifdef CONFIG_NET_CLS_ACT
         } else if (!cl) {
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 31c29deb139d..e0c9fbe73b15 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -138,38 +138,77 @@ static long tabledist(unsigned long mu, long sigma,
 }
 
 /* Put skb in the private delayed queue. */
-static int delay_skb(struct Qdisc *sch, struct sk_buff *skb)
+static int netem_delay(struct Qdisc *sch, struct sk_buff *skb)
 {
         struct netem_sched_data *q = qdisc_priv(sch);
-        struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
         psched_tdiff_t td;
         psched_time_t now;
 
         PSCHED_GET_TIME(now);
         td = tabledist(q->latency, q->jitter, &q->delay_cor, q->delay_dist);
-        PSCHED_TADD2(now, td, cb->time_to_send);
 
         /* Always queue at tail to keep packets in order */
         if (likely(q->delayed.qlen < q->limit)) {
+                struct netem_skb_cb *cb = (struct netem_skb_cb *)skb->cb;
+
+                PSCHED_TADD2(now, td, cb->time_to_send);
+
+                pr_debug("netem_delay: skb=%p now=%llu tosend=%llu\n", skb,
+                         now, cb->time_to_send);
+
                 __skb_queue_tail(&q->delayed, skb);
-                if (!timer_pending(&q->timer)) {
-                        q->timer.expires = jiffies + PSCHED_US2JIFFIE(td);
-                        add_timer(&q->timer);
-                }
                 return NET_XMIT_SUCCESS;
         }
 
+        pr_debug("netem_delay: queue over limit %d\n", q->limit);
+        sch->qstats.overlimits++;
         kfree_skb(skb);
         return NET_XMIT_DROP;
 }
 
+/*
+ *  Move a packet that is ready to send from the delay holding
+ *  list to the underlying qdisc.
+ */
+static int netem_run(struct Qdisc *sch)
+{
+        struct netem_sched_data *q = qdisc_priv(sch);
+        struct sk_buff *skb;
+        psched_time_t now;
+
+        PSCHED_GET_TIME(now);
+
+        skb = skb_peek(&q->delayed);
+        if (skb) {
+                const struct netem_skb_cb *cb
+                        = (const struct netem_skb_cb *)skb->cb;
+                long delay
+                        = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
+                pr_debug("netem_run: skb=%p delay=%ld\n", skb, delay);
+
+                /* if more time remaining? */
+                if (delay > 0) {
+                        mod_timer(&q->timer, jiffies + delay);
+                        return 1;
+                }
+
+                __skb_unlink(skb, &q->delayed);
+
+                if (q->qdisc->enqueue(skb, q->qdisc)) {
+                        sch->q.qlen--;
+                        sch->qstats.drops++;
+                }
+        }
+
+        return 0;
+}
+
 static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
         struct netem_sched_data *q = qdisc_priv(sch);
-        struct sk_buff *skb2;
         int ret;
 
-        pr_debug("netem_enqueue skb=%p @%lu\n", skb, jiffies);
+        pr_debug("netem_enqueue skb=%p\n", skb);
 
         /* Random packet drop 0 => none, ~0 => all */
         if (q->loss && q->loss >= get_crandom(&q->loss_cor)) {
@@ -180,11 +219,21 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         }
 
         /* Random duplication */
-        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)
-            && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
-                pr_debug("netem_enqueue: dup %p\n", skb2);
+        if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor)) {
+                struct sk_buff *skb2;
+
+                skb2 = skb_clone(skb, GFP_ATOMIC);
+                if (skb2 && netem_delay(sch, skb2) == NET_XMIT_SUCCESS) {
+                        struct Qdisc *qp;
+
+                        /* Since one packet can generate two packets in the
+                         * queue, the parent's qlen accounting gets confused,
+                         * so fix it.
+                         */
+                        qp = qdisc_lookup(sch->dev, TC_H_MAJ(sch->parent));
+                        if (qp)
+                                qp->q.qlen++;
 
-                if (delay_skb(sch, skb2)) {
                         sch->q.qlen++;
                         sch->bstats.bytes += skb2->len;
                         sch->bstats.packets++;
@@ -202,7 +251,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                 ret = q->qdisc->enqueue(skb, q->qdisc);
         } else {
                 q->counter = 0;
-                ret = delay_skb(sch, skb);
+                ret = netem_delay(sch, skb);
+                netem_run(sch);
         }
 
         if (likely(ret == NET_XMIT_SUCCESS)) {
@@ -212,6 +262,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         } else
                 sch->qstats.drops++;
 
+        pr_debug("netem: enqueue ret %d\n", ret);
         return ret;
 }
 
@@ -241,56 +292,35 @@ static unsigned int netem_drop(struct Qdisc* sch)
         return len;
 }
 
-/* Dequeue packet.
- *  Move all packets that are ready to send from the delay holding
- *  list to the underlying qdisc, then just call dequeue
- */
 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
         struct netem_sched_data *q = qdisc_priv(sch);
         struct sk_buff *skb;
+        int pending;
+
+        pending = netem_run(sch);
 
         skb = q->qdisc->dequeue(q->qdisc);
-        if (skb)
+        if (skb) {
+                pr_debug("netem_dequeue: return skb=%p\n", skb);
                 sch->q.qlen--;
+                sch->flags &= ~TCQ_F_THROTTLED;
+        }
+        else if (pending) {
+                pr_debug("netem_dequeue: throttling\n");
+                sch->flags |= TCQ_F_THROTTLED;
+        }
+
         return skb;
 }
 
 static void netem_watchdog(unsigned long arg)
 {
         struct Qdisc *sch = (struct Qdisc *)arg;
-        struct netem_sched_data *q = qdisc_priv(sch);
-        struct net_device *dev = sch->dev;
-        struct sk_buff *skb;
-        psched_time_t now;
-
-        pr_debug("netem_watchdog: fired @%lu\n", jiffies);
-
-        spin_lock_bh(&dev->queue_lock);
-        PSCHED_GET_TIME(now);
-
-        while ((skb = skb_peek(&q->delayed)) != NULL) {
-                const struct netem_skb_cb *cb
-                        = (const struct netem_skb_cb *)skb->cb;
-                long delay
-                        = PSCHED_US2JIFFIE(PSCHED_TDIFF(cb->time_to_send, now));
-                pr_debug("netem_watchdog: skb %p@%lu %ld\n",
-                         skb, jiffies, delay);
 
-                /* if more time remaining? */
-                if (delay > 0) {
-                        mod_timer(&q->timer, jiffies + delay);
-                        break;
-                }
-                __skb_unlink(skb, &q->delayed);
-
-                if (q->qdisc->enqueue(skb, q->qdisc)) {
-                        sch->q.qlen--;
-                        sch->qstats.drops++;
-                }
-        }
-        qdisc_run(dev);
-        spin_unlock_bh(&dev->queue_lock);
+        pr_debug("netem_watchdog qlen=%d\n", sch->q.qlen);
+        sch->flags &= ~TCQ_F_THROTTLED;
+        netif_schedule(sch->dev);
 }
 
 static void netem_reset(struct Qdisc *sch)
@@ -301,6 +331,7 @@ static void netem_reset(struct Qdisc *sch)
         skb_queue_purge(&q->delayed);
 
         sch->q.qlen = 0;
+        sch->flags &= ~TCQ_F_THROTTLED;
         del_timer_sync(&q->timer);
 }
 
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 80828078733d..55ed979db144 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1028,30 +1028,15 @@ static int stale_bundle(struct dst_entry *dst)
         return !xfrm_bundle_ok((struct xfrm_dst *)dst, NULL, AF_UNSPEC);
 }
 
-static void xfrm_dst_destroy(struct dst_entry *dst)
+void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
 {
-        struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
-
-        dst_release(xdst->route);
-
-        if (!dst->xfrm)
-                return;
-        xfrm_state_put(dst->xfrm);
-        dst->xfrm = NULL;
-}
-
-static void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
-                            int unregister)
-{
-        if (!unregister)
-                return;
-
         while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
                 dst->dev = &loopback_dev;
                 dev_hold(&loopback_dev);
                 dev_put(dev);
         }
 }
+EXPORT_SYMBOL(xfrm_dst_ifdown);
 
 static void xfrm_link_failure(struct sk_buff *skb)
 {
@@ -1262,10 +1247,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
         dst_ops->kmem_cachep = xfrm_dst_cache;
         if (likely(dst_ops->check == NULL))
                 dst_ops->check = xfrm_dst_check;
-        if (likely(dst_ops->destroy == NULL))
-                dst_ops->destroy = xfrm_dst_destroy;
-        if (likely(dst_ops->ifdown == NULL))
-                dst_ops->ifdown = xfrm_dst_ifdown;
         if (likely(dst_ops->negative_advice == NULL))
                 dst_ops->negative_advice = xfrm_negative_advice;
         if (likely(dst_ops->link_failure == NULL))
@@ -1297,8 +1278,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
         xfrm_policy_afinfo[afinfo->family] = NULL;
         dst_ops->kmem_cachep = NULL;
         dst_ops->check = NULL;
-        dst_ops->destroy = NULL;
-        dst_ops->ifdown = NULL;
         dst_ops->negative_advice = NULL;
         dst_ops->link_failure = NULL;
         dst_ops->get_mss = NULL;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 63661b0fd736..5ddda2c98af9 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -855,47 +855,44 @@ static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh, void **x
         return 0;
 }
 
-static const int xfrm_msg_min[(XFRM_MSG_MAX + 1 - XFRM_MSG_BASE)] = {
-        NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)),    /* NEW SA */
-        NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)),      /* DEL SA */
-        NLMSG_LENGTH(sizeof(struct xfrm_usersa_id)),      /* GET SA */
-        NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* NEW POLICY */
-        NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)),  /* DEL POLICY */
-        NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_id)),  /* GET POLICY */
-        NLMSG_LENGTH(sizeof(struct xfrm_userspi_info)),   /* ALLOC SPI */
-        NLMSG_LENGTH(sizeof(struct xfrm_user_acquire)),   /* ACQUIRE */
-        NLMSG_LENGTH(sizeof(struct xfrm_user_expire)),    /* EXPIRE */
-        NLMSG_LENGTH(sizeof(struct xfrm_userpolicy_info)),/* UPD POLICY */
-        NLMSG_LENGTH(sizeof(struct xfrm_usersa_info)),    /* UPD SA */
-        NLMSG_LENGTH(sizeof(struct xfrm_user_polexpire)), /* POLEXPIRE */
-        NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)),   /* FLUSH SA */
-        NLMSG_LENGTH(0),                                  /* FLUSH POLICY */
+#define XMSGSIZE(type) NLMSG_LENGTH(sizeof(struct type))
+
+static const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
+        [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
+        [XFRM_MSG_DELSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
+        [XFRM_MSG_GETSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
+        [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
+        [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
+        [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
+        [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
+        [XFRM_MSG_ACQUIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
+        [XFRM_MSG_EXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
+        [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
+        [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
+        [XFRM_MSG_POLEXPIRE - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
+        [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
+        [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = NLMSG_LENGTH(0),
 };
 
+#undef XMSGSIZE
+
 static struct xfrm_link {
         int (*doit)(struct sk_buff *, struct nlmsghdr *, void **);
         int (*dump)(struct sk_buff *, struct netlink_callback *);
-} xfrm_dispatch[] = {
-        { .doit = xfrm_add_sa, },
-        { .doit = xfrm_del_sa, },
-        {
-                .doit = xfrm_get_sa,
-                .dump = xfrm_dump_sa,
-        },
-        { .doit = xfrm_add_policy },
-        { .doit = xfrm_get_policy },
-        {
-                .doit = xfrm_get_policy,
-                .dump = xfrm_dump_policy,
-        },
-        { .doit = xfrm_alloc_userspi },
-        {},
-        {},
-        { .doit = xfrm_add_policy },
-        { .doit = xfrm_add_sa, },
-        {},
-        { .doit = xfrm_flush_sa },
-        { .doit = xfrm_flush_policy },
+} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
+        [XFRM_MSG_NEWSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
+        [XFRM_MSG_DELSA - XFRM_MSG_BASE] = { .doit = xfrm_del_sa },
+        [XFRM_MSG_GETSA - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
+                                             .dump = xfrm_dump_sa },
+        [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
+        [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy },
+        [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
+                                                 .dump = xfrm_dump_policy },
+        [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
+        [XFRM_MSG_UPDPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy },
+        [XFRM_MSG_UPDSA - XFRM_MSG_BASE] = { .doit = xfrm_add_sa },
+        [XFRM_MSG_FLUSHSA - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa },
+        [XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
 };
 
 static int xfrm_done(struct netlink_callback *cb)
@@ -931,7 +928,9 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err
                 return -1;
         }
 
-        if ((type == 2 || type == 5) && (nlh->nlmsg_flags & NLM_F_DUMP)) {
+        if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
+             type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
+            (nlh->nlmsg_flags & NLM_F_DUMP)) {
                 u32 rlen;
 
                 if (link->dump == NULL)
@@ -1009,18 +1008,26 @@ static int xfrm_user_rcv_skb(struct sk_buff *skb)
 
 static void xfrm_netlink_rcv(struct sock *sk, int len)
 {
+        unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+
         do {
                 struct sk_buff *skb;
 
                 down(&xfrm_cfg_sem);
 
-                while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
+                if (qlen > skb_queue_len(&sk->sk_receive_queue))
+                        qlen = skb_queue_len(&sk->sk_receive_queue);
+
+                for (; qlen; qlen--) {
+                        skb = skb_dequeue(&sk->sk_receive_queue);
                         if (xfrm_user_rcv_skb(skb)) {
                                 if (skb->len)
                                         skb_queue_head(&sk->sk_receive_queue,
                                                        skb);
-                                else
+                                else {
                                         kfree_skb(skb);
+                                        qlen--;
+                                }
                                 break;
                         }
                         kfree_skb(skb);
@@ -1028,7 +1035,7 @@ static void xfrm_netlink_rcv(struct sock *sk, int len)
 
                 up(&xfrm_cfg_sem);
 
-        } while (xfrm_nl && xfrm_nl->sk_receive_queue.qlen);
+        } while (qlen);
 }
 
 static int build_expire(struct sk_buff *skb, struct xfrm_state *x, int hard)