author     David S. Miller <davem@davemloft.net>  2012-03-09 17:34:20 -0500
committer  David S. Miller <davem@davemloft.net>  2012-03-09 17:34:20 -0500
commit     b2d3298e0916fa059712691c85a0e97becc4ab9f (patch)
tree       c7d5ea46a9dbf9cebdb122df4aaf0beda6e7621e /net
parent     1a0bdadb4e36abac63b0a9787f372aac30c11a9e (diff)
parent     a7f4255f906f60f72e00aad2fb000939449ff32e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net')
-rw-r--r--  net/bridge/br_netfilter.c            | 32
-rw-r--r--  net/bridge/br_stp.c                  |  2
-rw-r--r--  net/bridge/br_stp_if.c               |  3
-rw-r--r--  net/bridge/netfilter/ebtables.c      | 16
-rw-r--r--  net/ipv4/inetpeer.c                  | 81
-rw-r--r--  net/ipv4/route.c                     | 12
-rw-r--r--  net/ipv4/tcp_input.c                 |  4
-rw-r--r--  net/ipv6/addrconf.c                  |  4
-rw-r--r--  net/netfilter/nf_conntrack_core.c    |  8
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c |  3
-rw-r--r--  net/openvswitch/actions.c            | 44
-rw-r--r--  net/openvswitch/datapath.c           |  3

12 files changed, 164 insertions(+), 48 deletions(-)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 84122472656c..dec4f3817133 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -62,6 +62,15 @@ static int brnf_filter_pppoe_tagged __read_mostly = 0;
 #define brnf_filter_pppoe_tagged 0
 #endif
 
+#define IS_IP(skb) \
+        (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
+
+#define IS_IPV6(skb) \
+        (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
+
+#define IS_ARP(skb) \
+        (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+
 static inline __be16 vlan_proto(const struct sk_buff *skb)
 {
         if (vlan_tx_tag_present(skb))
@@ -639,8 +648,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
                 return NF_DROP;
         br = p->br;
 
-        if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
-            IS_PPPOE_IPV6(skb)) {
+        if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
                 if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
                         return NF_ACCEPT;
 
@@ -651,8 +659,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
         if (!brnf_call_iptables && !br->nf_call_iptables)
                 return NF_ACCEPT;
 
-        if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
-            !IS_PPPOE_IP(skb))
+        if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
                 return NF_ACCEPT;
 
         nf_bridge_pull_encap_header_rcsum(skb);
@@ -701,7 +708,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
         struct nf_bridge_info *nf_bridge = skb->nf_bridge;
         struct net_device *in;
 
-        if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
+        if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
                 in = nf_bridge->physindev;
                 if (nf_bridge->mask & BRNF_PKT_TYPE) {
                         skb->pkt_type = PACKET_OTHERHOST;
@@ -718,6 +725,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
         return 0;
 }
 
+
 /* This is the 'purely bridged' case. For IP, we pass the packet to
  * netfilter with indev and outdev set to the bridge device,
  * but we are still able to filter on the 'real' indev/outdev
@@ -744,11 +752,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
         if (!parent)
                 return NF_DROP;
 
-        if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
-            IS_PPPOE_IP(skb))
+        if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
                 pf = PF_INET;
-        else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
-                 IS_PPPOE_IPV6(skb))
+        else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
                 pf = PF_INET6;
         else
                 return NF_ACCEPT;
@@ -795,7 +801,7 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
         if (!brnf_call_arptables && !br->nf_call_arptables)
                 return NF_ACCEPT;
 
-        if (skb->protocol != htons(ETH_P_ARP)) {
+        if (!IS_ARP(skb)) {
                 if (!IS_VLAN_ARP(skb))
                         return NF_ACCEPT;
                 nf_bridge_pull_encap_header(skb);
@@ -853,11 +859,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
         if (!realoutdev)
                 return NF_DROP;
 
-        if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
-            IS_PPPOE_IP(skb))
+        if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
                 pf = PF_INET;
-        else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
-                 IS_PPPOE_IPV6(skb))
+        else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
                 pf = PF_INET6;
         else
                 return NF_ACCEPT;
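The br_netfilter hunks above replace open-coded skb->protocol tests with IS_IP()/IS_IPV6()/IS_ARP() macros that additionally require the absence of a hardware-accelerated VLAN tag, so a frame whose 802.1Q tag was stripped into skb metadata is handled by the VLAN paths instead of being mistaken for a plain IP/IPv6/ARP frame. A minimal userspace C sketch of that classification idea follows; it is an illustration only, not part of the patch, and the struct and field names are invented.

/*
 * Userspace illustration (not kernel code): a frame only counts as plain
 * IPv4 when no hardware-stripped VLAN tag is attached out of band, even
 * though the recorded ethertype would still read ETH_P_IP in that case.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_IP 0x0800

struct fake_skb {
        uint16_t protocol;      /* ethertype as seen by the stack */
        bool vlan_tag_present;  /* tag stripped by the NIC, kept out of band */
};

static bool is_plain_ip(const struct fake_skb *skb)
{
        return !skb->vlan_tag_present && skb->protocol == ETH_P_IP;
}

int main(void)
{
        struct fake_skb plain = { ETH_P_IP, false };
        struct fake_skb tagged = { ETH_P_IP, true };    /* hw-accelerated VLAN */

        printf("plain:  %d\n", is_plain_ip(&plain));    /* 1 */
        printf("tagged: %d\n", is_plain_ip(&tagged));   /* 0: take the VLAN path */
        return 0;
}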
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 6751ed4e0c07..8c836d96ba76 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -31,7 +31,7 @@ static const char *const br_port_state_names[] = {
 
 void br_log_state(const struct net_bridge_port *p)
 {
-        br_info(p->br, "port %u(%s) entering %s state\n",
+        br_info(p->br, "port %u(%s) entered %s state\n",
                 (unsigned) p->port_no, p->dev->name,
                 br_port_state_names[p->state]);
 }
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 19308e305d85..f494496373d6 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -98,14 +98,13 @@ void br_stp_disable_port(struct net_bridge_port *p)
         struct net_bridge *br = p->br;
         int wasroot;
 
-        br_log_state(p);
-
         wasroot = br_is_root_bridge(br);
         br_become_designated_port(p);
         p->state = BR_STATE_DISABLED;
         p->topology_change_ack = 0;
         p->config_pending = 0;
 
+        br_log_state(p);
         br_ifinfo_notify(RTM_NEWLINK, p);
 
         del_timer(&p->message_age_timer);
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 8aa4ad0e06af..5fe2ff3b01ef 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -1335,7 +1335,12 @@ static inline int ebt_make_matchname(const struct ebt_entry_match *m,
         const char *base, char __user *ubase)
 {
         char __user *hlp = ubase + ((char *)m - base);
-        if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
+        char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+        /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
+           long. Copy 29 bytes and fill remaining bytes with zeroes. */
+        strncpy(name, m->u.match->name, sizeof(name));
+        if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
                 return -EFAULT;
         return 0;
 }
@@ -1344,7 +1349,10 @@ static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
         const char *base, char __user *ubase)
 {
         char __user *hlp = ubase + ((char *)w - base);
-        if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
+        char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+        strncpy(name, w->u.watcher->name, sizeof(name));
+        if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
                 return -EFAULT;
         return 0;
 }
@@ -1355,6 +1363,7 @@ ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
         int ret;
         char __user *hlp;
         const struct ebt_entry_target *t;
+        char name[EBT_FUNCTION_MAXNAMELEN] = {};
 
         if (e->bitmask == 0)
                 return 0;
@@ -1368,7 +1377,8 @@ ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
         ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
         if (ret != 0)
                 return ret;
-        if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
+        strncpy(name, t->u.target->name, sizeof(name));
+        if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
                 return -EFAULT;
         return 0;
 }
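The ebtables hunks above stage the 29-byte xt match/watcher/target names in a zero-initialized 32-byte (EBT_FUNCTION_MAXNAMELEN) buffer before copy_to_user(), so the bytes past the short string that userspace reads back are zeroes rather than leftover kernel memory. The userspace sketch below shows the same padding pattern; NAME_LEN and export_name() are invented stand-ins, not kernel interfaces.

/*
 * Userspace sketch of the padding pattern in the ebtables fix: copy a
 * possibly-shorter source name into a zeroed, fixed-size buffer before
 * exposing the full buffer, so no stray bytes past the string leak out.
 */
#include <stdio.h>
#include <string.h>

#define NAME_LEN 32     /* stand-in for EBT_FUNCTION_MAXNAMELEN */

static void export_name(char dst[NAME_LEN], const char *src)
{
        char name[NAME_LEN] = {0};              /* zero-fill first */

        strncpy(name, src, sizeof(name) - 1);   /* copy the short name, keep a NUL */
        memcpy(dst, name, NAME_LEN);            /* every exported byte is defined */
}

int main(void)
{
        char out[NAME_LEN];

        export_name(out, "ip");                 /* short xt_match-style name */
        for (int i = 0; i < NAME_LEN; i++)
                printf("%02x%c", (unsigned char)out[i],
                       (i + 1) % 16 ? ' ' : '\n');
        return 0;
}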
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index bf4a9c4808e1..d4d61b694fab 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/net.h>
+#include <linux/workqueue.h>
 #include <net/ip.h>
 #include <net/inetpeer.h>
 #include <net/secure_seq.h>
@@ -66,6 +67,11 @@
 
 static struct kmem_cache *peer_cachep __read_mostly;
 
+static LIST_HEAD(gc_list);
+static const int gc_delay = 60 * HZ;
+static struct delayed_work gc_work;
+static DEFINE_SPINLOCK(gc_lock);
+
 #define node_height(x) x->avl_height
 
 #define peer_avl_empty ((struct inet_peer *)&peer_fake_node)
@@ -102,6 +108,50 @@ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries m
 int inet_peer_minttl __read_mostly = 120 * HZ;  /* TTL under high load: 120 sec */
 int inet_peer_maxttl __read_mostly = 10 * 60 * HZ;      /* usual time to live: 10 min */
 
+static void inetpeer_gc_worker(struct work_struct *work)
+{
+        struct inet_peer *p, *n;
+        LIST_HEAD(list);
+
+        spin_lock_bh(&gc_lock);
+        list_replace_init(&gc_list, &list);
+        spin_unlock_bh(&gc_lock);
+
+        if (list_empty(&list))
+                return;
+
+        list_for_each_entry_safe(p, n, &list, gc_list) {
+
+                if(need_resched())
+                        cond_resched();
+
+                if (p->avl_left != peer_avl_empty) {
+                        list_add_tail(&p->avl_left->gc_list, &list);
+                        p->avl_left = peer_avl_empty;
+                }
+
+                if (p->avl_right != peer_avl_empty) {
+                        list_add_tail(&p->avl_right->gc_list, &list);
+                        p->avl_right = peer_avl_empty;
+                }
+
+                n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
+
+                if (!atomic_read(&p->refcnt)) {
+                        list_del(&p->gc_list);
+                        kmem_cache_free(peer_cachep, p);
+                }
+        }
+
+        if (list_empty(&list))
+                return;
+
+        spin_lock_bh(&gc_lock);
+        list_splice(&list, &gc_list);
+        spin_unlock_bh(&gc_lock);
+
+        schedule_delayed_work(&gc_work, gc_delay);
+}
 
 /* Called from ip_output.c:ip_init */
 void __init inet_initpeers(void)
@@ -126,6 +176,7 @@ void __init inet_initpeers(void)
                         0, SLAB_HWCACHE_ALIGN | SLAB_PANIC,
                         NULL);
 
+        INIT_DELAYED_WORK_DEFERRABLE(&gc_work, inetpeer_gc_worker);
 }
 
 static int addr_compare(const struct inetpeer_addr *a,
@@ -447,9 +498,8 @@ relookup:
                 p->rate_last = 0;
                 p->pmtu_expires = 0;
                 p->pmtu_orig = 0;
-                p->redirect_genid = 0;
                 memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
-
+                INIT_LIST_HEAD(&p->gc_list);
 
                 /* Link the node. */
                 link_to_pool(p, base);
@@ -509,3 +559,30 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
         return rc;
 }
 EXPORT_SYMBOL(inet_peer_xrlim_allow);
+
+void inetpeer_invalidate_tree(int family)
+{
+        struct inet_peer *old, *new, *prev;
+        struct inet_peer_base *base = family_to_base(family);
+
+        write_seqlock_bh(&base->lock);
+
+        old = base->root;
+        if (old == peer_avl_empty_rcu)
+                goto out;
+
+        new = peer_avl_empty_rcu;
+
+        prev = cmpxchg(&base->root, old, new);
+        if (prev == old) {
+                base->total = 0;
+                spin_lock(&gc_lock);
+                list_add_tail(&prev->gc_list, &gc_list);
+                spin_unlock(&gc_lock);
+                schedule_delayed_work(&gc_work, gc_delay);
+        }
+
+out:
+        write_sequnlock_bh(&base->lock);
+}
+EXPORT_SYMBOL(inetpeer_invalidate_tree);
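inetpeer_invalidate_tree() above swaps the whole AVL root for the empty sentinel under the base seqlock and hands the detached tree to inetpeer_gc_worker(), which frees it without recursion: nodes are pulled off a work list, their children are pushed back onto the same list, and anything still referenced is re-queued for a later pass. The userspace sketch below reproduces only the iterative flattening part; the node layout and refcount handling are simplified stand-ins, not the kernel's inet_peer structures.

/*
 * Userspace sketch: free a detached binary tree iteratively by flattening
 * children onto a work list instead of recursing.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *left, *right;
        struct node *next;      /* work-list link, like gc_list in the patch */
        int refcnt;             /* always 0 here; the kernel re-queues busy nodes */
};

static void teardown(struct node *root)
{
        struct node *work = root;       /* singly linked work list */

        while (work) {
                struct node *n = work;

                work = n->next;
                /* push each child onto the work list exactly once */
                if (n->left) {
                        n->left->next = work;
                        work = n->left;
                        n->left = NULL;
                }
                if (n->right) {
                        n->right->next = work;
                        work = n->right;
                        n->right = NULL;
                }
                if (n->refcnt == 0)
                        free(n);
        }
}

int main(void)
{
        struct node *root = calloc(1, sizeof(*root));

        root->left = calloc(1, sizeof(*root));
        root->right = calloc(1, sizeof(*root));
        root->right->left = calloc(1, sizeof(*root));
        teardown(root);
        puts("tree freed iteratively, no recursion");
        return 0;
}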
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 0489cedc1671..815989b90dea 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -132,7 +132,6 @@ static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
 static int ip_rt_min_advmss __read_mostly = 256;
 static int rt_chain_length_max __read_mostly = 20;
-static int redirect_genid;
 
 static struct delayed_work expires_work;
 static unsigned long expires_ljiffies;
@@ -937,7 +936,7 @@ static void rt_cache_invalidate(struct net *net)
 
         get_random_bytes(&shuffle, sizeof(shuffle));
         atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
-        redirect_genid++;
+        inetpeer_invalidate_tree(AF_INET);
 }
 
 /*
@@ -1490,10 +1489,8 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 
         peer = rt->peer;
         if (peer) {
-                if (peer->redirect_learned.a4 != new_gw ||
-                    peer->redirect_genid != redirect_genid) {
+                if (peer->redirect_learned.a4 != new_gw) {
                         peer->redirect_learned.a4 = new_gw;
-                        peer->redirect_genid = redirect_genid;
                         atomic_inc(&__rt_peer_genid);
                 }
                 check_peer_redir(&rt->dst, peer);
@@ -1798,8 +1795,6 @@ static void ipv4_validate_peer(struct rtable *rt)
         if (peer) {
                 check_peer_pmtu(&rt->dst, peer);
 
-                if (peer->redirect_genid != redirect_genid)
-                        peer->redirect_learned.a4 = 0;
                 if (peer->redirect_learned.a4 &&
                     peer->redirect_learned.a4 != rt->rt_gateway)
                         check_peer_redir(&rt->dst, peer);
@@ -1963,8 +1958,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
                 dst_init_metrics(&rt->dst, peer->metrics, false);
 
                 check_peer_pmtu(&rt->dst, peer);
-                if (peer->redirect_genid != redirect_genid)
-                        peer->redirect_learned.a4 = 0;
+
                 if (peer->redirect_learned.a4 &&
                     peer->redirect_learned.a4 != rt->rt_gateway) {
                         rt->rt_gateway = peer->redirect_learned.a4;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d9b83d198c3d..b5e315f13641 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1585,6 +1585,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
                 }
         }
 
+        /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
+        if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
+                goto fallback;
+
         if (!skb_shift(prev, skb, len))
                 goto fallback;
         if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
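The tcp_input.c hunk above declines to shift SACKed data into the previous skb when the candidate range does not extend past snd_una, since tcp_sacktag_one() will not tag bytes that are already acknowledged. The sketch below only illustrates the wrap-safe sequence-number comparison such a check relies on; after() here mirrors the kernel helper in spirit, and the numbers are invented.

/*
 * Userspace sketch: if seq + len is not after snd_una, the whole range is
 * already acknowledged and there is nothing for SACK processing to do.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool after(uint32_t a, uint32_t b)       /* is a later than b, mod 2^32? */
{
        return (int32_t)(a - b) > 0;
}

int main(void)
{
        uint32_t snd_una = 0xFFFFFFF0u;         /* close to the wrap point */
        uint32_t seq = 0xFFFFFFE0u;
        uint32_t len = 0x08;                    /* range ends below snd_una */

        if (!after(seq + len, snd_una))
                puts("already acked: skip shifting (goto fallback)");

        len = 0x40;                             /* range crosses the wrap */
        if (after(seq + len, snd_una))
                puts("extends past snd_una: shifting may proceed");
        return 0;
}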
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c02280a4d126..6b8ebc5da0e1 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -434,6 +434,10 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
         /* Join all-node multicast group */
         ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
 
+        /* Join all-router multicast group if forwarding is set */
+        if (ndev->cnf.forwarding && dev && (dev->flags & IFF_MULTICAST))
+                ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
+
         return ndev;
 }
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 81e2aa4ca1fe..7b48035826ee 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -636,8 +636,12 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 
         if (del_timer(&ct->timeout)) {
                 death_by_timeout((unsigned long)ct);
-                dropped = 1;
-                NF_CT_STAT_INC_ATOMIC(net, early_drop);
+                /* Check if we indeed killed this entry. Reliable event
+                   delivery may have inserted it into the dying list. */
+                if (test_bit(IPS_DYING_BIT, &ct->status)) {
+                        dropped = 1;
+                        NF_CT_STAT_INC_ATOMIC(net, early_drop);
+                }
         }
         nf_ct_put(ct);
         return dropped;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index c1ea64c6c70d..2124977ac31d 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1084,16 +1084,13 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
         if (!parse_nat_setup) {
 #ifdef CONFIG_MODULES
                 rcu_read_unlock();
-                spin_unlock_bh(&nf_conntrack_lock);
                 nfnl_unlock();
                 if (request_module("nf-nat-ipv4") < 0) {
                         nfnl_lock();
-                        spin_lock_bh(&nf_conntrack_lock);
                         rcu_read_lock();
                         return -EOPNOTSUPP;
                 }
                 nfnl_lock();
-                spin_lock_bh(&nf_conntrack_lock);
                 rcu_read_lock();
                 if (nfnetlink_parse_nat_setup_hook)
                         return -EAGAIN;
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 2725d1bdf291..48badffaafc1 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira Networks.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -145,9 +145,16 @@ static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                         inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                  *addr, new_addr, 1);
         } else if (nh->protocol == IPPROTO_UDP) {
-                if (likely(transport_len >= sizeof(struct udphdr)))
-                        inet_proto_csum_replace4(&udp_hdr(skb)->check, skb,
-                                                 *addr, new_addr, 1);
+                if (likely(transport_len >= sizeof(struct udphdr))) {
+                        struct udphdr *uh = udp_hdr(skb);
+
+                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+                                inet_proto_csum_replace4(&uh->check, skb,
+                                                         *addr, new_addr, 1);
+                                if (!uh->check)
+                                        uh->check = CSUM_MANGLED_0;
+                        }
+                }
         }
 
         csum_replace4(&nh->check, *addr, new_addr);
@@ -197,8 +204,22 @@ static void set_tp_port(struct sk_buff *skb, __be16 *port,
         skb->rxhash = 0;
 }
 
-static int set_udp_port(struct sk_buff *skb,
-                        const struct ovs_key_udp *udp_port_key)
+static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
+{
+        struct udphdr *uh = udp_hdr(skb);
+
+        if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
+                set_tp_port(skb, port, new_port, &uh->check);
+
+                if (!uh->check)
+                        uh->check = CSUM_MANGLED_0;
+        } else {
+                *port = new_port;
+                skb->rxhash = 0;
+        }
+}
+
+static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
 {
         struct udphdr *uh;
         int err;
@@ -210,16 +231,15 @@ static int set_udp_port(struct sk_buff *skb,
 
         uh = udp_hdr(skb);
         if (udp_port_key->udp_src != uh->source)
-                set_tp_port(skb, &uh->source, udp_port_key->udp_src, &uh->check);
+                set_udp_port(skb, &uh->source, udp_port_key->udp_src);
 
         if (udp_port_key->udp_dst != uh->dest)
-                set_tp_port(skb, &uh->dest, udp_port_key->udp_dst, &uh->check);
+                set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);
 
         return 0;
 }
 
-static int set_tcp_port(struct sk_buff *skb,
-                        const struct ovs_key_tcp *tcp_port_key)
+static int set_tcp(struct sk_buff *skb, const struct ovs_key_tcp *tcp_port_key)
 {
         struct tcphdr *th;
         int err;
@@ -328,11 +348,11 @@ static int execute_set_action(struct sk_buff *skb,
                 break;
 
         case OVS_KEY_ATTR_TCP:
-                err = set_tcp_port(skb, nla_data(nested_attr));
+                err = set_tcp(skb, nla_data(nested_attr));
                 break;
 
         case OVS_KEY_ATTR_UDP:
-                err = set_udp_port(skb, nla_data(nested_attr));
+                err = set_udp(skb, nla_data(nested_attr));
                 break;
         }
 
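Both openvswitch hunks above handle the special meaning of a zero UDP checksum over IPv4: zero means the sender computed no checksum, so a checksum-less datagram is rewritten without gaining one, and an updated checksum that comes out as zero must be stored as CSUM_MANGLED_0 (0xFFFF, the equivalent one's-complement encoding of zero). The userspace sketch below shows that folding rule in isolation; fold_udp_check() is an invented helper, and the CHECKSUM_PARTIAL offload case handled by the kernel code is ignored here.

/*
 * Userspace sketch of the UDP-over-IPv4 checksum rule: 0 is reserved for
 * "no checksum", so a recomputed 0 is stored as 0xFFFF instead, and a
 * datagram that was sent without a checksum is not given one.
 */
#include <stdint.h>
#include <stdio.h>

#define CSUM_MANGLED_0 0xFFFFu

static uint16_t fold_udp_check(uint16_t had_check, uint16_t new_check)
{
        if (!had_check)                 /* sender skipped the checksum */
                return 0;
        if (!new_check)                 /* 0 is reserved for "no checksum" */
                return CSUM_MANGLED_0;  /* one's-complement zero, same value */
        return new_check;
}

int main(void)
{
        printf("%#x\n", fold_udp_check(0x1234, 0x0000));        /* 0xffff */
        printf("%#x\n", fold_udp_check(0x1234, 0xbeef));        /* 0xbeef */
        printf("%#x\n", fold_udp_check(0x0000, 0xbeef));        /* 0: stays checksum-less */
        return 0;
}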
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index ce64c18b8c79..2c030505b335 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1521,6 +1521,9 @@ static struct vport *lookup_vport(struct ovs_header *ovs_header,
                 vport = ovs_vport_locate(nla_data(a[OVS_VPORT_ATTR_NAME]));
                 if (!vport)
                         return ERR_PTR(-ENODEV);
+                if (ovs_header->dp_ifindex &&
+                    ovs_header->dp_ifindex != get_dpifindex(vport->dp))
+                        return ERR_PTR(-ENODEV);
                 return vport;
         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);