diff options
author | David S. Miller <davem@davemloft.net> | 2016-01-20 21:56:44 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-01-20 21:56:44 -0500 |
commit | 8034e1efcb330d2aecef8cbf8a83f206270c1775 (patch) | |
tree | 92d9883ddf4a918b17eb8ed0fb0d64c45ebd5ea1 /net | |
parent | bffae6975e00b78e86bcd694ab4c8c48216572a3 (diff) | |
parent | b16c29191dc89bd877af99a7b04ce4866728a3e0 (diff) |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf
Diffstat (limited to 'net')
-rw-r--r-- | net/netfilter/ipset/ip_set_hash_netiface.c | 4 | ||||
-rw-r--r-- | net/netfilter/nf_conntrack_core.c | 38 | ||||
-rw-r--r-- | net/netfilter/nf_conntrack_helper.c | 2 | ||||
-rw-r--r-- | net/netfilter/nf_conntrack_netlink.c | 2 | ||||
-rw-r--r-- | net/netfilter/nf_tables_netdev.c | 8 | ||||
-rw-r--r-- | net/netfilter/nfnetlink_cttimeout.c | 4 | ||||
-rw-r--r-- | net/netfilter/nft_byteorder.c | 6 | ||||
-rw-r--r-- | net/netfilter/nft_ct.c | 2 | ||||
-rw-r--r-- | net/netfilter/xt_TCPMSS.c | 9 |
9 files changed, 46 insertions, 29 deletions
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 43d8c9896fa3..f0f688db6213 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c | |||
@@ -164,8 +164,6 @@ hash_netiface4_kadt(struct ip_set *set, const struct sk_buff *skb, | |||
164 | }; | 164 | }; |
165 | struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); | 165 | struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); |
166 | 166 | ||
167 | if (e.cidr == 0) | ||
168 | return -EINVAL; | ||
169 | if (adt == IPSET_TEST) | 167 | if (adt == IPSET_TEST) |
170 | e.cidr = HOST_MASK; | 168 | e.cidr = HOST_MASK; |
171 | 169 | ||
@@ -377,8 +375,6 @@ hash_netiface6_kadt(struct ip_set *set, const struct sk_buff *skb, | |||
377 | }; | 375 | }; |
378 | struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); | 376 | struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); |
379 | 377 | ||
380 | if (e.cidr == 0) | ||
381 | return -EINVAL; | ||
382 | if (adt == IPSET_TEST) | 378 | if (adt == IPSET_TEST) |
383 | e.cidr = HOST_MASK; | 379 | e.cidr = HOST_MASK; |
384 | 380 | ||
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 3cb3cb831591..58882de06bd7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -66,6 +66,21 @@ EXPORT_SYMBOL_GPL(nf_conntrack_locks); | |||
66 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); | 66 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock); |
67 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); | 67 | EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock); |
68 | 68 | ||
69 | static __read_mostly spinlock_t nf_conntrack_locks_all_lock; | ||
70 | static __read_mostly bool nf_conntrack_locks_all; | ||
71 | |||
72 | void nf_conntrack_lock(spinlock_t *lock) __acquires(lock) | ||
73 | { | ||
74 | spin_lock(lock); | ||
75 | while (unlikely(nf_conntrack_locks_all)) { | ||
76 | spin_unlock(lock); | ||
77 | spin_lock(&nf_conntrack_locks_all_lock); | ||
78 | spin_unlock(&nf_conntrack_locks_all_lock); | ||
79 | spin_lock(lock); | ||
80 | } | ||
81 | } | ||
82 | EXPORT_SYMBOL_GPL(nf_conntrack_lock); | ||
83 | |||
69 | static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2) | 84 | static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2) |
70 | { | 85 | { |
71 | h1 %= CONNTRACK_LOCKS; | 86 | h1 %= CONNTRACK_LOCKS; |
@@ -82,12 +97,12 @@ static bool nf_conntrack_double_lock(struct net *net, unsigned int h1, | |||
82 | h1 %= CONNTRACK_LOCKS; | 97 | h1 %= CONNTRACK_LOCKS; |
83 | h2 %= CONNTRACK_LOCKS; | 98 | h2 %= CONNTRACK_LOCKS; |
84 | if (h1 <= h2) { | 99 | if (h1 <= h2) { |
85 | spin_lock(&nf_conntrack_locks[h1]); | 100 | nf_conntrack_lock(&nf_conntrack_locks[h1]); |
86 | if (h1 != h2) | 101 | if (h1 != h2) |
87 | spin_lock_nested(&nf_conntrack_locks[h2], | 102 | spin_lock_nested(&nf_conntrack_locks[h2], |
88 | SINGLE_DEPTH_NESTING); | 103 | SINGLE_DEPTH_NESTING); |
89 | } else { | 104 | } else { |
90 | spin_lock(&nf_conntrack_locks[h2]); | 105 | nf_conntrack_lock(&nf_conntrack_locks[h2]); |
91 | spin_lock_nested(&nf_conntrack_locks[h1], | 106 | spin_lock_nested(&nf_conntrack_locks[h1], |
92 | SINGLE_DEPTH_NESTING); | 107 | SINGLE_DEPTH_NESTING); |
93 | } | 108 | } |
@@ -102,16 +117,19 @@ static void nf_conntrack_all_lock(void) | |||
102 | { | 117 | { |
103 | int i; | 118 | int i; |
104 | 119 | ||
105 | for (i = 0; i < CONNTRACK_LOCKS; i++) | 120 | spin_lock(&nf_conntrack_locks_all_lock); |
106 | spin_lock_nested(&nf_conntrack_locks[i], i); | 121 | nf_conntrack_locks_all = true; |
122 | |||
123 | for (i = 0; i < CONNTRACK_LOCKS; i++) { | ||
124 | spin_lock(&nf_conntrack_locks[i]); | ||
125 | spin_unlock(&nf_conntrack_locks[i]); | ||
126 | } | ||
107 | } | 127 | } |
108 | 128 | ||
109 | static void nf_conntrack_all_unlock(void) | 129 | static void nf_conntrack_all_unlock(void) |
110 | { | 130 | { |
111 | int i; | 131 | nf_conntrack_locks_all = false; |
112 | 132 | spin_unlock(&nf_conntrack_locks_all_lock); | |
113 | for (i = 0; i < CONNTRACK_LOCKS; i++) | ||
114 | spin_unlock(&nf_conntrack_locks[i]); | ||
115 | } | 133 | } |
116 | 134 | ||
117 | unsigned int nf_conntrack_htable_size __read_mostly; | 135 | unsigned int nf_conntrack_htable_size __read_mostly; |
@@ -757,7 +775,7 @@ restart: | |||
757 | hash = hash_bucket(_hash, net); | 775 | hash = hash_bucket(_hash, net); |
758 | for (; i < net->ct.htable_size; i++) { | 776 | for (; i < net->ct.htable_size; i++) { |
759 | lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS]; | 777 | lockp = &nf_conntrack_locks[hash % CONNTRACK_LOCKS]; |
760 | spin_lock(lockp); | 778 | nf_conntrack_lock(lockp); |
761 | if (read_seqcount_retry(&net->ct.generation, sequence)) { | 779 | if (read_seqcount_retry(&net->ct.generation, sequence)) { |
762 | spin_unlock(lockp); | 780 | spin_unlock(lockp); |
763 | goto restart; | 781 | goto restart; |
@@ -1382,7 +1400,7 @@ get_next_corpse(struct net *net, int (*iter)(struct nf_conn *i, void *data), | |||
1382 | for (; *bucket < net->ct.htable_size; (*bucket)++) { | 1400 | for (; *bucket < net->ct.htable_size; (*bucket)++) { |
1383 | lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS]; | 1401 | lockp = &nf_conntrack_locks[*bucket % CONNTRACK_LOCKS]; |
1384 | local_bh_disable(); | 1402 | local_bh_disable(); |
1385 | spin_lock(lockp); | 1403 | nf_conntrack_lock(lockp); |
1386 | if (*bucket < net->ct.htable_size) { | 1404 | if (*bucket < net->ct.htable_size) { |
1387 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { | 1405 | hlist_nulls_for_each_entry(h, n, &net->ct.hash[*bucket], hnnode) { |
1388 | if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) | 1406 | if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL) |
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index bd9d31537905..3b40ec575cd5 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -425,7 +425,7 @@ static void __nf_conntrack_helper_unregister(struct nf_conntrack_helper *me, | |||
425 | } | 425 | } |
426 | local_bh_disable(); | 426 | local_bh_disable(); |
427 | for (i = 0; i < net->ct.htable_size; i++) { | 427 | for (i = 0; i < net->ct.htable_size; i++) { |
428 | spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); | 428 | nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); |
429 | if (i < net->ct.htable_size) { | 429 | if (i < net->ct.htable_size) { |
430 | hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) | 430 | hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) |
431 | unhelp(h, me); | 431 | unhelp(h, me); |
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index dbb1bb3edb45..355e8552fd5b 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
@@ -840,7 +840,7 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb) | |||
840 | for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { | 840 | for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) { |
841 | restart: | 841 | restart: |
842 | lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS]; | 842 | lockp = &nf_conntrack_locks[cb->args[0] % CONNTRACK_LOCKS]; |
843 | spin_lock(lockp); | 843 | nf_conntrack_lock(lockp); |
844 | if (cb->args[0] >= net->ct.htable_size) { | 844 | if (cb->args[0] >= net->ct.htable_size) { |
845 | spin_unlock(lockp); | 845 | spin_unlock(lockp); |
846 | goto out; | 846 | goto out; |
diff --git a/net/netfilter/nf_tables_netdev.c b/net/netfilter/nf_tables_netdev.c index b6605e000801..5eefe4a355c6 100644 --- a/net/netfilter/nf_tables_netdev.c +++ b/net/netfilter/nf_tables_netdev.c | |||
@@ -224,12 +224,12 @@ static int __init nf_tables_netdev_init(void) | |||
224 | 224 | ||
225 | nft_register_chain_type(&nft_filter_chain_netdev); | 225 | nft_register_chain_type(&nft_filter_chain_netdev); |
226 | ret = register_pernet_subsys(&nf_tables_netdev_net_ops); | 226 | ret = register_pernet_subsys(&nf_tables_netdev_net_ops); |
227 | if (ret < 0) | 227 | if (ret < 0) { |
228 | nft_unregister_chain_type(&nft_filter_chain_netdev); | 228 | nft_unregister_chain_type(&nft_filter_chain_netdev); |
229 | 229 | return ret; | |
230 | } | ||
230 | register_netdevice_notifier(&nf_tables_netdev_notifier); | 231 | register_netdevice_notifier(&nf_tables_netdev_notifier); |
231 | 232 | return 0; | |
232 | return ret; | ||
233 | } | 233 | } |
234 | 234 | ||
235 | static void __exit nf_tables_netdev_exit(void) | 235 | static void __exit nf_tables_netdev_exit(void) |
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 5d010f27ac01..94837d236ab0 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c | |||
@@ -307,12 +307,12 @@ static void ctnl_untimeout(struct net *net, struct ctnl_timeout *timeout) | |||
307 | 307 | ||
308 | local_bh_disable(); | 308 | local_bh_disable(); |
309 | for (i = 0; i < net->ct.htable_size; i++) { | 309 | for (i = 0; i < net->ct.htable_size; i++) { |
310 | spin_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); | 310 | nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); |
311 | if (i < net->ct.htable_size) { | 311 | if (i < net->ct.htable_size) { |
312 | hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) | 312 | hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode) |
313 | untimeout(h, timeout); | 313 | untimeout(h, timeout); |
314 | } | 314 | } |
315 | spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); | 315 | spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]); |
316 | } | 316 | } |
317 | local_bh_enable(); | 317 | local_bh_enable(); |
318 | } | 318 | } |
diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c index 383c17138399..b78c28ba465f 100644 --- a/net/netfilter/nft_byteorder.c +++ b/net/netfilter/nft_byteorder.c | |||
@@ -46,16 +46,14 @@ static void nft_byteorder_eval(const struct nft_expr *expr, | |||
46 | switch (priv->op) { | 46 | switch (priv->op) { |
47 | case NFT_BYTEORDER_NTOH: | 47 | case NFT_BYTEORDER_NTOH: |
48 | for (i = 0; i < priv->len / 8; i++) { | 48 | for (i = 0; i < priv->len / 8; i++) { |
49 | src64 = get_unaligned_be64(&src[i]); | 49 | src64 = get_unaligned((u64 *)&src[i]); |
50 | src64 = be64_to_cpu((__force __be64)src64); | ||
51 | put_unaligned_be64(src64, &dst[i]); | 50 | put_unaligned_be64(src64, &dst[i]); |
52 | } | 51 | } |
53 | break; | 52 | break; |
54 | case NFT_BYTEORDER_HTON: | 53 | case NFT_BYTEORDER_HTON: |
55 | for (i = 0; i < priv->len / 8; i++) { | 54 | for (i = 0; i < priv->len / 8; i++) { |
56 | src64 = get_unaligned_be64(&src[i]); | 55 | src64 = get_unaligned_be64(&src[i]); |
57 | src64 = (__force u64)cpu_to_be64(src64); | 56 | put_unaligned(src64, (u64 *)&dst[i]); |
58 | put_unaligned_be64(src64, &dst[i]); | ||
59 | } | 57 | } |
60 | break; | 58 | break; |
61 | } | 59 | } |
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index a0eb2161e3ef..d4a4619fcebc 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c | |||
@@ -127,6 +127,7 @@ static void nft_ct_get_eval(const struct nft_expr *expr, | |||
127 | NF_CT_LABELS_MAX_SIZE - size); | 127 | NF_CT_LABELS_MAX_SIZE - size); |
128 | return; | 128 | return; |
129 | } | 129 | } |
130 | #endif | ||
130 | case NFT_CT_BYTES: /* fallthrough */ | 131 | case NFT_CT_BYTES: /* fallthrough */ |
131 | case NFT_CT_PKTS: { | 132 | case NFT_CT_PKTS: { |
132 | const struct nf_conn_acct *acct = nf_conn_acct_find(ct); | 133 | const struct nf_conn_acct *acct = nf_conn_acct_find(ct); |
@@ -138,7 +139,6 @@ static void nft_ct_get_eval(const struct nft_expr *expr, | |||
138 | memcpy(dest, &count, sizeof(count)); | 139 | memcpy(dest, &count, sizeof(count)); |
139 | return; | 140 | return; |
140 | } | 141 | } |
141 | #endif | ||
142 | default: | 142 | default: |
143 | break; | 143 | break; |
144 | } | 144 | } |
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index b7c43def0dc6..e118397254af 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
@@ -228,7 +228,7 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par) | |||
228 | { | 228 | { |
229 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); | 229 | struct ipv6hdr *ipv6h = ipv6_hdr(skb); |
230 | u8 nexthdr; | 230 | u8 nexthdr; |
231 | __be16 frag_off; | 231 | __be16 frag_off, oldlen, newlen; |
232 | int tcphoff; | 232 | int tcphoff; |
233 | int ret; | 233 | int ret; |
234 | 234 | ||
@@ -244,7 +244,12 @@ tcpmss_tg6(struct sk_buff *skb, const struct xt_action_param *par) | |||
244 | return NF_DROP; | 244 | return NF_DROP; |
245 | if (ret > 0) { | 245 | if (ret > 0) { |
246 | ipv6h = ipv6_hdr(skb); | 246 | ipv6h = ipv6_hdr(skb); |
247 | ipv6h->payload_len = htons(ntohs(ipv6h->payload_len) + ret); | 247 | oldlen = ipv6h->payload_len; |
248 | newlen = htons(ntohs(oldlen) + ret); | ||
249 | if (skb->ip_summed == CHECKSUM_COMPLETE) | ||
250 | skb->csum = csum_add(csum_sub(skb->csum, oldlen), | ||
251 | newlen); | ||
252 | ipv6h->payload_len = newlen; | ||
248 | } | 253 | } |
249 | return XT_CONTINUE; | 254 | return XT_CONTINUE; |
250 | } | 255 | } |