author     David S. Miller <davem@davemloft.net>    2017-03-21 17:28:08 -0400
committer  David S. Miller <davem@davemloft.net>    2017-03-21 17:28:08 -0400
commit     41e95736b30833710c1e77a2877c2d71133450f7
tree       cebd559e5eefc9ab9fd83e16355105e3535125ac /net
parent     b9974d76f2f505ab0fe9101766b302511988dece
parent     4485a841be171dbd8d3f0701b00f59d389e94ce6
Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
Pablo Neira Ayuso says:
====================
Netfilter/IPVS updates for net-next
The following patchset contains Netfilter/IPVS updates for your
net-next tree: a couple of new features for nf_tables, plus assorted
cleanups and incremental updates for the Netfilter tree. More
specifically, they are:
1) Allow checking for TCP option presence via nft_exthdr, patch
   from Phil Sutter.
2) Add symmetric hash support to nft_hash, from Laura Garcia Liebana.
3) Use pr_cont() in ebt_log, from Joe Perches.
4) Remove some dead code in arp_tables reported via static analysis
tool, from Colin Ian King.
5) Consolidate nf_tables expression validation, from Liping Zhang.
6) Consolidate set lookup via nft_set_lookup().
7) Remove unnecessary rcu read lock side in bridge netfilter, from
Florian Westphal.
8) Remove unused variable in nf_reject_ipv4, from Tahee Yoo.
9) Pass nft_ctx struct to object initialization indirections, from
Florian Westphal.
10) Add code to integrate conntrack helper into nf_tables, also from
Florian.
11) Allow checking whether an interface index or name exists via
    NFTA_FIB_F_PRESENT, from Phil Sutter.
12) Simplify resolve_normal_ct(), from Florian.
13) Use per-limit spinlock in nft_limit and xt_limit, from Liping Zhang.
14) Use rwlock in nft_set_rbtree set, also from Liping Zhang.
15) One patch to remove a useless printk in the IPVS netns init path,
    and several patches to document the IPVS knobs.
16) Use refcount_t for reference counter in the Netfilter/IPVS code,
from Elena Reshetova.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
47 files changed, 508 insertions, 273 deletions
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
index fa87fbd62bb7..d20b01b8d103 100644
--- a/net/bridge/br_netfilter_hooks.c
+++ b/net/bridge/br_netfilter_hooks.c
@@ -995,13 +995,10 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
 	if (!elem)
 		return okfn(net, sk, skb);
 
-	/* We may already have this, but read-locks nest anyway */
-	rcu_read_lock();
 	nf_hook_state_init(&state, hook, NFPROTO_BRIDGE, indev, outdev,
 			   sk, net, okfn);
 
 	ret = nf_hook_slow(skb, &state, elem);
-	rcu_read_unlock();
 	if (ret == 1)
 		ret = okfn(net, sk, skb);
 
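The two lines removed above were redundant rather than wrong: RCU read-side critical sections nest, and this path already runs inside the caller's read-side section, as the deleted comment notes. A minimal standalone sketch of that nesting property (the function names here are illustrative, not from the patch):

#include <linux/rcupdate.h>

/* Sketch: rcu_read_lock() sections nest, so an inner lock/unlock pair
 * inside a caller that already holds the read lock adds no protection.
 */
static void example_callee(void)
{
	rcu_read_lock();		/* legal, but redundant here */
	/* ... dereference RCU-protected data ... */
	rcu_read_unlock();
}

static void example_caller(void)
{
	rcu_read_lock();
	example_callee();		/* still inside the outer section */
	rcu_read_unlock();
}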
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 98b9c8e8615e..707caea39743 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -62,10 +62,10 @@ print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
 		pptr = skb_header_pointer(skb, offset,
 					  sizeof(_ports), &_ports);
 		if (pptr == NULL) {
-			printk(" INCOMPLETE TCP/UDP header");
+			pr_cont(" INCOMPLETE TCP/UDP header");
 			return;
 		}
-		printk(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst));
+		pr_cont(" SPT=%u DPT=%u", ntohs(pptr->src), ntohs(pptr->dst));
 	}
 }
 
@@ -100,11 +100,11 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
 
 		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
 		if (ih == NULL) {
-			printk(" INCOMPLETE IP header");
+			pr_cont(" INCOMPLETE IP header");
 			goto out;
 		}
-		printk(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d",
+		pr_cont(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d",
 		       &ih->saddr, &ih->daddr, ih->tos, ih->protocol);
 		print_ports(skb, ih->protocol, ih->ihl*4);
 		goto out;
 	}
@@ -120,11 +120,11 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
 
 		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
 		if (ih == NULL) {
-			printk(" INCOMPLETE IPv6 header");
+			pr_cont(" INCOMPLETE IPv6 header");
 			goto out;
 		}
-		printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
+		pr_cont(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
 		       &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
 		nexthdr = ih->nexthdr;
 		offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr, &frag_off);
 		if (offset_ph == -1)
@@ -142,12 +142,12 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
 
 		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
 		if (ah == NULL) {
-			printk(" INCOMPLETE ARP header");
+			pr_cont(" INCOMPLETE ARP header");
 			goto out;
 		}
-		printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
+		pr_cont(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
 		       ntohs(ah->ar_hrd), ntohs(ah->ar_pro),
 		       ntohs(ah->ar_op));
 
 		/* If it's for Ethernet and the lengths are OK,
 		 * then log the ARP payload
@@ -161,17 +161,17 @@ ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
 			ap = skb_header_pointer(skb, sizeof(_arph),
 						sizeof(_arpp), &_arpp);
 			if (ap == NULL) {
-				printk(" INCOMPLETE ARP payload");
+				pr_cont(" INCOMPLETE ARP payload");
 				goto out;
 			}
-			printk(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4",
-			       ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst);
+			pr_cont(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4",
+				ap->mac_src, ap->ip_src,
+				ap->mac_dst, ap->ip_dst);
 		}
 	}
 out:
-	printk("\n");
+	pr_cont("\n");
 	spin_unlock_bh(&ebt_log_lock);
-
 }
 
 static unsigned int
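The printk() to pr_cont() conversion above matters because these calls continue a log line that was started earlier in ebt_log_packet(); without the continuation marker each fragment may be flushed as a separate message with a default loglevel. A small, hedged sketch of the pattern (not taken from the patch):

#include <linux/types.h>
#include <linux/printk.h>

/* Sketch: build one log line from several calls. pr_cont() appends to the
 * message started by the preceding printk instead of opening a new one.
 */
static void example_log_ports(u16 spt, u16 dpt)
{
	pr_info("example packet:");		/* starts the line */
	pr_cont(" SPT=%u DPT=%u", spt, dpt);	/* appends to it */
	pr_cont("\n");				/* terminates the line */
}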
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index 206dc266ecd2..346ef6b00b8f 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -375,11 +375,7 @@ static int nft_reject_bridge_init(const struct nft_ctx *ctx,
 				  const struct nlattr * const tb[])
 {
 	struct nft_reject *priv = nft_expr_priv(expr);
-	int icmp_code, err;
-
-	err = nft_reject_bridge_validate(ctx, expr, NULL);
-	if (err < 0)
-		return err;
+	int icmp_code;
 
 	if (tb[NFTA_REJECT_TYPE] == NULL)
 		return -EINVAL;
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 6241a81fd7f5..f17dab1dee6e 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -562,8 +562,6 @@ static int translate_table(struct xt_table_info *newinfo, void *entry0,
 				   XT_ERROR_TARGET) == 0)
 			++newinfo->stacksize;
 	}
-	if (ret != 0)
-		goto out_free;
 
 	ret = -EINVAL;
 	if (i != repl->num_entries)
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 52f26459efc3..fcbdc0c49b0e 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -22,6 +22,7 @@
 #include <linux/icmp.h>
 #include <linux/if_arp.h>
 #include <linux/seq_file.h>
+#include <linux/refcount.h>
 #include <linux/netfilter_arp.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
@@ -40,8 +41,8 @@ MODULE_DESCRIPTION("Xtables: CLUSTERIP target");
 
 struct clusterip_config {
 	struct list_head list;			/* list of all configs */
-	atomic_t refcount;			/* reference count */
-	atomic_t entries;			/* number of entries/rules
+	refcount_t refcount;			/* reference count */
+	refcount_t entries;			/* number of entries/rules
 						 * referencing us */
 
 	__be32 clusterip;			/* the IP address */
@@ -77,7 +78,7 @@ struct clusterip_net {
 static inline void
 clusterip_config_get(struct clusterip_config *c)
 {
-	atomic_inc(&c->refcount);
+	refcount_inc(&c->refcount);
 }
 
 
@@ -89,7 +90,7 @@ static void clusterip_config_rcu_free(struct rcu_head *head)
 static inline void
 clusterip_config_put(struct clusterip_config *c)
 {
-	if (atomic_dec_and_test(&c->refcount))
+	if (refcount_dec_and_test(&c->refcount))
 		call_rcu_bh(&c->rcu, clusterip_config_rcu_free);
 }
 
@@ -103,7 +104,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
 	struct clusterip_net *cn = net_generic(net, clusterip_net_id);
 
 	local_bh_disable();
-	if (atomic_dec_and_lock(&c->entries, &cn->lock)) {
+	if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
 		list_del_rcu(&c->list);
 		spin_unlock(&cn->lock);
 		local_bh_enable();
@@ -149,10 +150,10 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
 			c = NULL;
 		else
 #endif
-		if (unlikely(!atomic_inc_not_zero(&c->refcount)))
+		if (unlikely(!refcount_inc_not_zero(&c->refcount)))
 			c = NULL;
 		else if (entry)
-			atomic_inc(&c->entries);
+			refcount_inc(&c->entries);
 	}
 	rcu_read_unlock_bh();
 
@@ -188,8 +189,8 @@ clusterip_config_init(const struct ipt_clusterip_tgt_info *i, __be32 ip,
 	clusterip_config_init_nodelist(c, i);
 	c->hash_mode = i->hash_mode;
 	c->hash_initval = i->hash_initval;
-	atomic_set(&c->refcount, 1);
-	atomic_set(&c->entries, 1);
+	refcount_set(&c->refcount, 1);
+	refcount_set(&c->entries, 1);
 
 	spin_lock_bh(&cn->lock);
 	if (__clusterip_config_find(net, ip)) {
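The CLUSTERIP conversion above is representative of the refcount_t changes throughout this series: refcount_t (<linux/refcount.h>) keeps the familiar API shape but saturates instead of wrapping and warns on misuse, with the mappings atomic_set -> refcount_set, atomic_inc -> refcount_inc, atomic_inc_not_zero -> refcount_inc_not_zero, atomic_dec_and_test -> refcount_dec_and_test, and atomic_cmpxchg(&r, 1, 0) == 1 -> refcount_dec_if_one. A standalone sketch of the usual object lifecycle; the struct and function names are hypothetical:

#include <linux/refcount.h>
#include <linux/slab.h>

struct example_obj {
	refcount_t refcount;
};

static struct example_obj *example_alloc(void)
{
	struct example_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj)
		refcount_set(&obj->refcount, 1);	/* initial reference */
	return obj;
}

static bool example_get(struct example_obj *obj)
{
	/* Fails once the count has already dropped to zero. */
	return refcount_inc_not_zero(&obj->refcount);
}

static void example_put(struct example_obj *obj)
{
	if (refcount_dec_and_test(&obj->refcount))
		kfree(obj);
}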
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index c9b52c361da2..ef49989c93b1 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -998,18 +998,6 @@ err_id_free:
  *
  *****************************************************************************/
 
-static void hex_dump(const unsigned char *buf, size_t len)
-{
-	size_t i;
-
-	for (i = 0; i < len; i++) {
-		if (i && !(i % 16))
-			printk("\n");
-		printk("%02x ", *(buf + i));
-	}
-	printk("\n");
-}
-
 /*
  * Parse and mangle SNMP message according to mapping.
  * (And this is the fucking 'basic' method).
@@ -1026,7 +1014,8 @@ static int snmp_parse_mangle(unsigned char *msg,
 	struct snmp_object *obj;
 
 	if (debug > 1)
-		hex_dump(msg, len);
+		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 1,
+			       msg, len, 0);
 
 	asn1_open(&ctx, msg, len);
 
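Dropping the local hex_dump() in favor of print_hex_dump() removes an open-coded printk loop; the arguments used above (KERN_DEBUG, empty prefix, DUMP_PREFIX_NONE, 16 bytes per row, 1-byte groups, no ASCII column) roughly reproduce the old output. A minimal sketch of the helper, which lives in <linux/printk.h>:

#include <linux/types.h>
#include <linux/printk.h>

/* Sketch: dump a buffer at KERN_DEBUG, 16 bytes per row, one byte per
 * group, no offset prefix and no ASCII column.
 */
static void example_dump(const unsigned char *buf, size_t len)
{
	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 16, 1,
		       buf, len, false);
}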
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 146d86105183..7cd8d0d918f8 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -104,7 +104,6 @@ EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
 void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
 {
 	struct sk_buff *nskb;
-	const struct iphdr *oiph;
 	struct iphdr *niph;
 	const struct tcphdr *oth;
 	struct tcphdr _oth;
@@ -116,8 +115,6 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
 	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
 		return;
 
-	oiph = ip_hdr(oldskb);
-
 	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
 			 LL_MAX_HEADER, GFP_ATOMIC);
 	if (!nskb)
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 2981291910dd..f4e4462cb5bb 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -90,7 +90,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
 
 	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
 	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
-		nft_fib_store_result(dest, priv->result, pkt,
+		nft_fib_store_result(dest, priv, pkt,
 				     nft_in(pkt)->ifindex);
 		return;
 	}
@@ -99,7 +99,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
 	if (ipv4_is_zeronet(iph->saddr)) {
 		if (ipv4_is_lbcast(iph->daddr) ||
 		    ipv4_is_local_multicast(iph->daddr)) {
-			nft_fib_store_result(dest, priv->result, pkt,
+			nft_fib_store_result(dest, priv, pkt,
 					     get_ifindex(pkt->skb->dev));
 			return;
 		}
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index 765facf03d45..e8d88d82636b 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -159,7 +159,7 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
 
 	if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
 	    nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
-		nft_fib_store_result(dest, priv->result, pkt,
+		nft_fib_store_result(dest, priv, pkt,
 				     nft_in(pkt)->ifindex);
 		return;
 	}
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index e6a2753dff9e..3d2ac71a83ec 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -181,7 +181,7 @@ static inline int ip_vs_conn_hash(struct ip_vs_conn *cp)
 
 	if (!(cp->flags & IP_VS_CONN_F_HASHED)) {
 		cp->flags |= IP_VS_CONN_F_HASHED;
-		atomic_inc(&cp->refcnt);
+		refcount_inc(&cp->refcnt);
 		hlist_add_head_rcu(&cp->c_list, &ip_vs_conn_tab[hash]);
 		ret = 1;
 	} else {
@@ -215,7 +215,7 @@ static inline int ip_vs_conn_unhash(struct ip_vs_conn *cp)
 	if (cp->flags & IP_VS_CONN_F_HASHED) {
 		hlist_del_rcu(&cp->c_list);
 		cp->flags &= ~IP_VS_CONN_F_HASHED;
-		atomic_dec(&cp->refcnt);
+		refcount_dec(&cp->refcnt);
 		ret = 1;
 	} else
 		ret = 0;
@@ -242,13 +242,13 @@ static inline bool ip_vs_conn_unlink(struct ip_vs_conn *cp)
 	if (cp->flags & IP_VS_CONN_F_HASHED) {
 		ret = false;
 		/* Decrease refcnt and unlink conn only if we are last user */
-		if (atomic_cmpxchg(&cp->refcnt, 1, 0) == 1) {
+		if (refcount_dec_if_one(&cp->refcnt)) {
 			hlist_del_rcu(&cp->c_list);
 			cp->flags &= ~IP_VS_CONN_F_HASHED;
 			ret = true;
 		}
 	} else
-		ret = atomic_read(&cp->refcnt) ? false : true;
+		ret = refcount_read(&cp->refcnt) ? false : true;
 
 	spin_unlock(&cp->lock);
 	ct_write_unlock_bh(hash);
@@ -475,7 +475,7 @@ static void __ip_vs_conn_put_timer(struct ip_vs_conn *cp)
 void ip_vs_conn_put(struct ip_vs_conn *cp)
 {
 	if ((cp->flags & IP_VS_CONN_F_ONE_PACKET) &&
-	    (atomic_read(&cp->refcnt) == 1) &&
+	    (refcount_read(&cp->refcnt) == 1) &&
 	    !timer_pending(&cp->timer))
 		/* expire connection immediately */
 		__ip_vs_conn_put_notimer(cp);
@@ -617,8 +617,8 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
 		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
 		      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
 		      ip_vs_fwd_tag(cp), cp->state,
-		      cp->flags, atomic_read(&cp->refcnt),
-		      atomic_read(&dest->refcnt));
+		      cp->flags, refcount_read(&cp->refcnt),
+		      refcount_read(&dest->refcnt));
 
 	/* Update the connection counters */
 	if (!(flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -714,8 +714,8 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
 		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
 		      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
 		      ip_vs_fwd_tag(cp), cp->state,
-		      cp->flags, atomic_read(&cp->refcnt),
-		      atomic_read(&dest->refcnt));
+		      cp->flags, refcount_read(&cp->refcnt),
+		      refcount_read(&dest->refcnt));
 
 	/* Update the connection counters */
 	if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
@@ -863,10 +863,10 @@ static void ip_vs_conn_expire(unsigned long data)
 
   expire_later:
 	IP_VS_DBG(7, "delayed: conn->refcnt=%d conn->n_control=%d\n",
-		  atomic_read(&cp->refcnt),
+		  refcount_read(&cp->refcnt),
 		  atomic_read(&cp->n_control));
 
-	atomic_inc(&cp->refcnt);
+	refcount_inc(&cp->refcnt);
 	cp->timeout = 60*HZ;
 
 	if (ipvs->sync_state & IP_VS_STATE_MASTER)
@@ -941,7 +941,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
 	 * it in the table, so that other thread run ip_vs_random_dropentry
 	 * but cannot drop this entry.
 	 */
-	atomic_set(&cp->refcnt, 1);
+	refcount_set(&cp->refcnt, 1);
 
 	cp->control = NULL;
 	atomic_set(&cp->n_control, 0);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index db40050f8785..b4a746d0e39b 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -542,7 +542,7 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
 		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
 		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
 		      IP_VS_DBG_ADDR(cp->daf, &cp->daddr), ntohs(cp->dport),
-		      cp->flags, atomic_read(&cp->refcnt));
+		      cp->flags, refcount_read(&cp->refcnt));
 
 	ip_vs_conn_stats(cp, svc);
 	return cp;
@@ -1193,7 +1193,7 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc,
 		      IP_VS_DBG_ADDR(cp->af, &cp->caddr), ntohs(cp->cport),
 		      IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport),
 		      IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport),
-		      cp->flags, atomic_read(&cp->refcnt));
+		      cp->flags, refcount_read(&cp->refcnt));
 	LeaveFunction(12);
 	return cp;
 }
@@ -2231,8 +2231,6 @@ static int __net_init __ip_vs_init(struct net *net)
 	if (ip_vs_sync_net_init(ipvs) < 0)
 		goto sync_fail;
 
-	printk(KERN_INFO "IPVS: Creating netns size=%zu id=%d\n",
-	       sizeof(struct netns_ipvs), ipvs->gen);
 	return 0;
 /*
  * Error handling
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5aeb0dde6ccc..541aa7694775 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -699,7 +699,7 @@ ip_vs_trash_get_dest(struct ip_vs_service *svc, int dest_af,
 			      dest->vfwmark,
 			      IP_VS_DBG_ADDR(dest->af, &dest->addr),
 			      ntohs(dest->port),
-			      atomic_read(&dest->refcnt));
+			      refcount_read(&dest->refcnt));
 		if (dest->af == dest_af &&
 		    ip_vs_addr_equal(dest_af, &dest->addr, daddr) &&
 		    dest->port == dport &&
@@ -934,7 +934,7 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest,
 	atomic_set(&dest->activeconns, 0);
 	atomic_set(&dest->inactconns, 0);
 	atomic_set(&dest->persistconns, 0);
-	atomic_set(&dest->refcnt, 1);
+	refcount_set(&dest->refcnt, 1);
 
 	INIT_HLIST_NODE(&dest->d_list);
 	spin_lock_init(&dest->dst_lock);
@@ -998,7 +998,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
 		IP_VS_DBG_BUF(3, "Get destination %s:%u from trash, "
 			      "dest->refcnt=%d, service %u/%s:%u\n",
 			      IP_VS_DBG_ADDR(udest->af, &daddr), ntohs(dport),
-			      atomic_read(&dest->refcnt),
+			      refcount_read(&dest->refcnt),
 			      dest->vfwmark,
 			      IP_VS_DBG_ADDR(svc->af, &dest->vaddr),
 			      ntohs(dest->vport));
@@ -1074,7 +1074,7 @@ static void __ip_vs_del_dest(struct netns_ipvs *ipvs, struct ip_vs_dest *dest,
 	spin_lock_bh(&ipvs->dest_trash_lock);
 	IP_VS_DBG_BUF(3, "Moving dest %s:%u into trash, dest->refcnt=%d\n",
 		      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
-		      atomic_read(&dest->refcnt));
+		      refcount_read(&dest->refcnt));
 	if (list_empty(&ipvs->dest_trash) && !cleanup)
 		mod_timer(&ipvs->dest_trash_timer,
 			  jiffies + (IP_VS_DEST_TRASH_PERIOD >> 1));
@@ -1157,7 +1157,7 @@ static void ip_vs_dest_trash_expire(unsigned long data)
 
 	spin_lock(&ipvs->dest_trash_lock);
 	list_for_each_entry_safe(dest, next, &ipvs->dest_trash, t_list) {
-		if (atomic_read(&dest->refcnt) > 1)
+		if (refcount_read(&dest->refcnt) > 1)
 			continue;
 		if (dest->idle_start) {
 			if (time_before(now, dest->idle_start +
@@ -1545,7 +1545,7 @@ ip_vs_forget_dev(struct ip_vs_dest *dest, struct net_device *dev)
 			      dev->name,
 			      IP_VS_DBG_ADDR(dest->af, &dest->addr),
 			      ntohs(dest->port),
-			      atomic_read(&dest->refcnt));
+			      refcount_read(&dest->refcnt));
 		__ip_vs_dst_cache_reset(dest);
 	}
 	spin_unlock_bh(&dest->dst_lock);
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 5824927cf8e0..b6aa4a970c6e 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -448,7 +448,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 		      IP_VS_DBG_ADDR(least->af, &least->addr),
 		      ntohs(least->port),
 		      atomic_read(&least->activeconns),
-		      atomic_read(&least->refcnt),
+		      refcount_read(&least->refcnt),
 		      atomic_read(&least->weight), loh);
 
 	return least;
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 703f11877bee..c13ff575f9f7 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -204,7 +204,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
 		      IP_VS_DBG_ADDR(least->af, &least->addr),
 		      ntohs(least->port),
 		      atomic_read(&least->activeconns),
-		      atomic_read(&least->refcnt),
+		      refcount_read(&least->refcnt),
 		      atomic_read(&least->weight), loh);
 	return least;
 }
@@ -249,7 +249,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
 		      __func__,
 		      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
 		      atomic_read(&most->activeconns),
-		      atomic_read(&most->refcnt),
+		      refcount_read(&most->refcnt),
 		      atomic_read(&most->weight), moh);
 	return most;
 }
@@ -612,7 +612,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 		      IP_VS_DBG_ADDR(least->af, &least->addr),
 		      ntohs(least->port),
 		      atomic_read(&least->activeconns),
-		      atomic_read(&least->refcnt),
+		      refcount_read(&least->refcnt),
 		      atomic_read(&least->weight), loh);
 
 	return least;
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index a8b63401e773..7d9d4ac596ca 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -110,7 +110,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
 		      IP_VS_DBG_ADDR(least->af, &least->addr),
 		      ntohs(least->port),
 		      atomic_read(&least->activeconns),
-		      atomic_read(&least->refcnt),
+		      refcount_read(&least->refcnt),
 		      atomic_read(&least->weight), loh);
 
 	return least;
diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c
index d952d67f904d..56f8e4b204ff 100644
--- a/net/netfilter/ipvs/ip_vs_proto_sctp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c
@@ -447,7 +447,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 				ntohs(cp->cport),
 				sctp_state_name(cp->state),
 				sctp_state_name(next_state),
-				atomic_read(&cp->refcnt));
+				refcount_read(&cp->refcnt));
 	if (dest) {
 		if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
 		    (next_state != IP_VS_SCTP_S_ESTABLISHED)) {
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 5117bcb7d2f0..12dc8d5bc37d 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -557,7 +557,7 @@ set_tcp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,
 			      ntohs(cp->cport),
 			      tcp_state_name(cp->state),
 			      tcp_state_name(new_state),
-			      atomic_read(&cp->refcnt));
+			      refcount_read(&cp->refcnt));
 
 	if (dest) {
 		if (!(cp->flags & IP_VS_CONN_F_INACTIVE) &&
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index 58bacfc461ee..ee0530d14c5f 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -97,7 +97,7 @@ stop:
 		      "activeconns %d refcnt %d weight %d\n",
 		      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
 		      atomic_read(&dest->activeconns),
-		      atomic_read(&dest->refcnt), atomic_read(&dest->weight));
+		      refcount_read(&dest->refcnt), atomic_read(&dest->weight));
 
 	return dest;
 }
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index f8e2d00f528b..ab23cf203437 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -111,7 +111,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
 		      IP_VS_DBG_ADDR(least->af, &least->addr),
 		      ntohs(least->port),
 		      atomic_read(&least->activeconns),
-		      atomic_read(&least->refcnt),
+		      refcount_read(&least->refcnt),
 		      atomic_read(&least->weight), loh);
 
 	return least;
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index 6b366fd90554..6add39e0ec20 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -83,7 +83,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb,
 		      IP_VS_DBG_ADDR(least->af, &least->addr),
 		      ntohs(least->port),
 		      atomic_read(&least->activeconns),
-		      atomic_read(&least->refcnt),
+		      refcount_read(&least->refcnt),
 		      atomic_read(&least->weight), loh);
 
 	return least;
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 17e6d4406ca7..62258dd457ac 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -218,7 +218,7 @@ found:
 		      "activeconns %d refcnt %d weight %d\n",
 		      IP_VS_DBG_ADDR(dest->af, &dest->addr), ntohs(dest->port),
 		      atomic_read(&dest->activeconns),
-		      atomic_read(&dest->refcnt),
+		      refcount_read(&dest->refcnt),
 		      atomic_read(&dest->weight));
 	mark->cl = dest;
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 071b97fcbefb..b0f2e8e65084 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1129,7 +1129,7 @@ EXPORT_SYMBOL_GPL(nf_conntrack_free);
 
 /* Allocate a new conntrack: we return -ENOMEM if classification
    failed due to stress.  Otherwise it really is unclassifiable. */
-static struct nf_conntrack_tuple_hash *
+static noinline struct nf_conntrack_tuple_hash *
 init_conntrack(struct net *net, struct nf_conn *tmpl,
 	       const struct nf_conntrack_tuple *tuple,
 	       struct nf_conntrack_l3proto *l3proto,
@@ -1237,21 +1237,20 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
 	return &ct->tuplehash[IP_CT_DIR_ORIGINAL];
 }
 
-/* On success, returns conntrack ptr, sets skb->_nfct | ctinfo */
-static inline struct nf_conn *
+/* On success, returns 0, sets skb->_nfct | ctinfo */
+static int
 resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 		  struct sk_buff *skb,
 		  unsigned int dataoff,
 		  u_int16_t l3num,
 		  u_int8_t protonum,
 		  struct nf_conntrack_l3proto *l3proto,
-		  struct nf_conntrack_l4proto *l4proto,
-		  int *set_reply,
-		  enum ip_conntrack_info *ctinfo)
+		  struct nf_conntrack_l4proto *l4proto)
 {
 	const struct nf_conntrack_zone *zone;
 	struct nf_conntrack_tuple tuple;
 	struct nf_conntrack_tuple_hash *h;
+	enum ip_conntrack_info ctinfo;
 	struct nf_conntrack_zone tmp;
 	struct nf_conn *ct;
 	u32 hash;
@@ -1260,7 +1259,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 			     dataoff, l3num, protonum, net, &tuple, l3proto,
 			     l4proto)) {
 		pr_debug("Can't get tuple\n");
-		return NULL;
+		return 0;
 	}
 
 	/* look for tuple match */
@@ -1271,33 +1270,30 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
 		h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
 				   skb, dataoff, hash);
 		if (!h)
-			return NULL;
+			return 0;
 		if (IS_ERR(h))
-			return (void *)h;
+			return PTR_ERR(h);
 	}
 	ct = nf_ct_tuplehash_to_ctrack(h);
 
 	/* It exists; we have (non-exclusive) reference. */
 	if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
-		*ctinfo = IP_CT_ESTABLISHED_REPLY;
-		/* Please set reply bit if this packet OK */
-		*set_reply = 1;
+		ctinfo = IP_CT_ESTABLISHED_REPLY;
 	} else {
 		/* Once we've had two way comms, always ESTABLISHED. */
 		if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
 			pr_debug("normal packet for %p\n", ct);
-			*ctinfo = IP_CT_ESTABLISHED;
+			ctinfo = IP_CT_ESTABLISHED;
 		} else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
 			pr_debug("related packet for %p\n", ct);
-			*ctinfo = IP_CT_RELATED;
+			ctinfo = IP_CT_RELATED;
 		} else {
 			pr_debug("new packet for %p\n", ct);
-			*ctinfo = IP_CT_NEW;
+			ctinfo = IP_CT_NEW;
 		}
-		*set_reply = 0;
 	}
-	nf_ct_set(skb, ct, *ctinfo);
-	return ct;
+	nf_ct_set(skb, ct, ctinfo);
+	return 0;
 }
 
 unsigned int
@@ -1311,7 +1307,6 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
 	unsigned int *timeouts;
 	unsigned int dataoff;
 	u_int8_t protonum;
-	int set_reply = 0;
 	int ret;
 
 	tmpl = nf_ct_get(skb, &ctinfo);
@@ -1354,23 +1349,22 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
 		goto out;
 	}
 repeat:
-	ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
-			       l3proto, l4proto, &set_reply, &ctinfo);
-	if (!ct) {
-		/* Not valid part of a connection */
-		NF_CT_STAT_INC_ATOMIC(net, invalid);
-		ret = NF_ACCEPT;
-		goto out;
-	}
-
-	if (IS_ERR(ct)) {
+	ret = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
+				l3proto, l4proto);
+	if (ret < 0) {
 		/* Too stressed to deal. */
 		NF_CT_STAT_INC_ATOMIC(net, drop);
 		ret = NF_DROP;
 		goto out;
 	}
 
-	NF_CT_ASSERT(skb_nfct(skb));
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct) {
+		/* Not valid part of a connection */
+		NF_CT_STAT_INC_ATOMIC(net, invalid);
+		ret = NF_ACCEPT;
+		goto out;
+	}
 
 	/* Decide what timeout policy we want to apply to this flow. */
 	timeouts = nf_ct_timeout_lookup(net, ct, l4proto);
@@ -1395,7 +1389,8 @@ repeat:
 		goto out;
 	}
 
-	if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+	if (ctinfo == IP_CT_ESTABLISHED_REPLY &&
+	    !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
 		nf_conntrack_event_cache(IPCT_REPLY, ct);
 out:
 	if (tmpl)
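With resolve_normal_ct() now returning an error code and publishing the conntrack through skb->_nfct, callers re-read it with nf_ct_get() and recover the old set_reply flag from ctinfo, as the hunks above show. A condensed, hedged sketch of that caller-side pattern (the helper name is illustrative, not from the patch):

#include <net/netfilter/nf_conntrack.h>

/* Sketch: after the refactor, the old "set_reply" is simply
 * ctinfo == IP_CT_ESTABLISHED_REPLY on the conntrack attached to the skb.
 */
static bool example_first_reply_seen(struct sk_buff *skb)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;	/* packet not attached to a connection */

	return ctinfo == IP_CT_ESTABLISHED_REPLY &&
	       !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status);
}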
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 4b2e1fb28bb4..cb29e598605f 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -133,7 +133,7 @@ nf_ct_expect_find_get(struct net *net,
 
 	rcu_read_lock();
 	i = __nf_ct_expect_find(net, zone, tuple);
-	if (i && !atomic_inc_not_zero(&i->use))
+	if (i && !refcount_inc_not_zero(&i->use))
 		i = NULL;
 	rcu_read_unlock();
 
@@ -186,7 +186,7 @@ nf_ct_find_expectation(struct net *net,
 		return NULL;
 
 	if (exp->flags & NF_CT_EXPECT_PERMANENT) {
-		atomic_inc(&exp->use);
+		refcount_inc(&exp->use);
 		return exp;
 	} else if (del_timer(&exp->timeout)) {
 		nf_ct_unlink_expect(exp);
@@ -275,7 +275,7 @@ struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
 		return NULL;
 
 	new->master = me;
-	atomic_set(&new->use, 1);
+	refcount_set(&new->use, 1);
 	return new;
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);
@@ -348,7 +348,7 @@ static void nf_ct_expect_free_rcu(struct rcu_head *head)
 
 void nf_ct_expect_put(struct nf_conntrack_expect *exp)
 {
-	if (atomic_dec_and_test(&exp->use))
+	if (refcount_dec_and_test(&exp->use))
 		call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
 }
 EXPORT_SYMBOL_GPL(nf_ct_expect_put);
@@ -361,7 +361,7 @@ static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
 	unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);
 
 	/* two references : one for hash insert, one for the timer */
-	atomic_add(2, &exp->use);
+	refcount_add(2, &exp->use);
 
 	hlist_add_head(&exp->lnode, &master_help->expectations);
 	master_help->expecting[exp->class]++;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6806b5e73567..d49cc1e03c5b 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2693,7 +2693,7 @@ restart:
 					    cb->nlh->nlmsg_seq,
 					    IPCTNL_MSG_EXP_NEW,
 					    exp) < 0) {
-				if (!atomic_inc_not_zero(&exp->use))
+				if (!refcount_inc_not_zero(&exp->use))
 					continue;
 				cb->args[1] = (unsigned long)exp;
 				goto out;
@@ -2739,7 +2739,7 @@ restart:
 					    cb->nlh->nlmsg_seq,
 					    IPCTNL_MSG_EXP_NEW,
 					    exp) < 0) {
-				if (!atomic_inc_not_zero(&exp->use))
+				if (!refcount_inc_not_zero(&exp->use))
 					continue;
 				cb->args[1] = (unsigned long)exp;
 				goto out;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 5e0ccfd5bb37..12cc5218de96 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1772,8 +1772,19 @@ static int nf_tables_newexpr(const struct nft_ctx *ctx,
 		goto err1;
 	}
 
+	if (ops->validate) {
+		const struct nft_data *data = NULL;
+
+		err = ops->validate(ctx, expr, &data);
+		if (err < 0)
+			goto err2;
+	}
+
 	return 0;
 
+err2:
+	if (ops->destroy)
+		ops->destroy(ctx, expr);
err1:
 	expr->ops = NULL;
 	return err;
@@ -2523,8 +2534,8 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx, struct net *net,
 	return 0;
 }
 
-struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
-				     const struct nlattr *nla, u8 genmask)
+static struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
+					    const struct nlattr *nla, u8 genmask)
 {
 	struct nft_set *set;
 
@@ -2538,11 +2549,10 @@ struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
 	}
 	return ERR_PTR(-ENOENT);
 }
-EXPORT_SYMBOL_GPL(nf_tables_set_lookup);
 
-struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
-					  const struct nlattr *nla,
-					  u8 genmask)
+static struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
+						 const struct nlattr *nla,
+						 u8 genmask)
 {
 	struct nft_trans *trans;
 	u32 id = ntohl(nla_get_be32(nla));
@@ -2557,7 +2567,25 @@ struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
 	}
 	return ERR_PTR(-ENOENT);
 }
-EXPORT_SYMBOL_GPL(nf_tables_set_lookup_byid);
+
+struct nft_set *nft_set_lookup(const struct net *net,
+			       const struct nft_table *table,
+			       const struct nlattr *nla_set_name,
+			       const struct nlattr *nla_set_id,
+			       u8 genmask)
+{
+	struct nft_set *set;
+
+	set = nf_tables_set_lookup(table, nla_set_name, genmask);
+	if (IS_ERR(set)) {
+		if (!nla_set_id)
+			return set;
+
+		set = nf_tables_set_lookup_byid(net, nla_set_id, genmask);
+	}
+	return set;
+}
+EXPORT_SYMBOL_GPL(nft_set_lookup);
 
 static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
 				    const char *name)
@@ -4067,7 +4095,8 @@ static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
 	[NFTA_OBJ_DATA] = { .type = NLA_NESTED },
 };
 
-static struct nft_object *nft_obj_init(const struct nft_object_type *type,
+static struct nft_object *nft_obj_init(const struct nft_ctx *ctx,
+				       const struct nft_object_type *type,
 				       const struct nlattr *attr)
 {
 	struct nlattr *tb[type->maxattr + 1];
@@ -4087,7 +4116,7 @@ static struct nft_object *nft_obj_init(const struct nft_object_type *type,
 	if (obj == NULL)
 		goto err1;
 
-	err = type->init((const struct nlattr * const *)tb, obj);
+	err = type->init(ctx, (const struct nlattr * const *)tb, obj);
 	if (err < 0)
 		goto err2;
 
@@ -4195,7 +4224,7 @@ static int nf_tables_newobj(struct net *net, struct sock *nlsk,
 	if (IS_ERR(type))
 		return PTR_ERR(type);
 
-	obj = nft_obj_init(type, nla[NFTA_OBJ_DATA]);
+	obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]);
 	if (IS_ERR(obj)) {
 		err = PTR_ERR(obj);
 		goto err1;
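The new nft_set_lookup() exported above folds the lookup-by-name and lookup-by-transaction-id paths into a single call for expression code. A hedged sketch of a caller under stated assumptions: the expression and the NFTA_EXAMPLE_* attribute numbers are placeholders invented for illustration, and nft_genmask_next() is assumed to be the usual generation-mask helper from nf_tables.h.

#include <net/netfilter/nf_tables.h>

/* Placeholder attribute numbers, for illustration only. */
enum {
	NFTA_EXAMPLE_SET = 1,
	NFTA_EXAMPLE_SET_ID = 2,
};

static int example_expr_init(const struct nft_ctx *ctx,
			     const struct nft_expr *expr,
			     const struct nlattr * const tb[])
{
	struct nft_set *set;

	/* One call covers lookup by name with a fallback to lookup by
	 * set id, replacing the old nf_tables_set_lookup() +
	 * nf_tables_set_lookup_byid() pair.
	 */
	set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_EXAMPLE_SET],
			     tb[NFTA_EXAMPLE_SET_ID],
			     nft_genmask_next(ctx->net));
	if (IS_ERR(set))
		return PTR_ERR(set);

	return 0;
}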
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index d44d89b56127..c86da174a5fc 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
13 | #include <linux/atomic.h> | 13 | #include <linux/atomic.h> |
14 | #include <linux/refcount.h> | ||
14 | #include <linux/netlink.h> | 15 | #include <linux/netlink.h> |
15 | #include <linux/rculist.h> | 16 | #include <linux/rculist.h> |
16 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -32,7 +33,7 @@ struct nf_acct { | |||
32 | atomic64_t bytes; | 33 | atomic64_t bytes; |
33 | unsigned long flags; | 34 | unsigned long flags; |
34 | struct list_head head; | 35 | struct list_head head; |
35 | atomic_t refcnt; | 36 | refcount_t refcnt; |
36 | char name[NFACCT_NAME_MAX]; | 37 | char name[NFACCT_NAME_MAX]; |
37 | struct rcu_head rcu_head; | 38 | struct rcu_head rcu_head; |
38 | char data[0]; | 39 | char data[0]; |
@@ -123,7 +124,7 @@ static int nfnl_acct_new(struct net *net, struct sock *nfnl, | |||
123 | atomic64_set(&nfacct->pkts, | 124 | atomic64_set(&nfacct->pkts, |
124 | be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS]))); | 125 | be64_to_cpu(nla_get_be64(tb[NFACCT_PKTS]))); |
125 | } | 126 | } |
126 | atomic_set(&nfacct->refcnt, 1); | 127 | refcount_set(&nfacct->refcnt, 1); |
127 | list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list); | 128 | list_add_tail_rcu(&nfacct->head, &net->nfnl_acct_list); |
128 | return 0; | 129 | return 0; |
129 | } | 130 | } |
@@ -166,7 +167,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, | |||
166 | NFACCT_PAD) || | 167 | NFACCT_PAD) || |
167 | nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes), | 168 | nla_put_be64(skb, NFACCT_BYTES, cpu_to_be64(bytes), |
168 | NFACCT_PAD) || | 169 | NFACCT_PAD) || |
169 | nla_put_be32(skb, NFACCT_USE, htonl(atomic_read(&acct->refcnt)))) | 170 | nla_put_be32(skb, NFACCT_USE, htonl(refcount_read(&acct->refcnt)))) |
170 | goto nla_put_failure; | 171 | goto nla_put_failure; |
171 | if (acct->flags & NFACCT_F_QUOTA) { | 172 | if (acct->flags & NFACCT_F_QUOTA) { |
172 | u64 *quota = (u64 *)acct->data; | 173 | u64 *quota = (u64 *)acct->data; |
@@ -329,7 +330,7 @@ static int nfnl_acct_try_del(struct nf_acct *cur) | |||
329 | /* We want to avoid races with nfnl_acct_put. So only when the current | 330 | /* We want to avoid races with nfnl_acct_put. So only when the current |
330 | * refcnt is 1, we decrease it to 0. | 331 | * refcnt is 1, we decrease it to 0. |
331 | */ | 332 | */ |
332 | if (atomic_cmpxchg(&cur->refcnt, 1, 0) == 1) { | 333 | if (refcount_dec_if_one(&cur->refcnt)) { |
333 | /* We are protected by nfnl mutex. */ | 334 | /* We are protected by nfnl mutex. */ |
334 | list_del_rcu(&cur->head); | 335 | list_del_rcu(&cur->head); |
335 | kfree_rcu(cur, rcu_head); | 336 | kfree_rcu(cur, rcu_head); |
@@ -413,7 +414,7 @@ struct nf_acct *nfnl_acct_find_get(struct net *net, const char *acct_name) | |||
413 | if (!try_module_get(THIS_MODULE)) | 414 | if (!try_module_get(THIS_MODULE)) |
414 | goto err; | 415 | goto err; |
415 | 416 | ||
416 | if (!atomic_inc_not_zero(&cur->refcnt)) { | 417 | if (!refcount_inc_not_zero(&cur->refcnt)) { |
417 | module_put(THIS_MODULE); | 418 | module_put(THIS_MODULE); |
418 | goto err; | 419 | goto err; |
419 | } | 420 | } |
@@ -429,7 +430,7 @@ EXPORT_SYMBOL_GPL(nfnl_acct_find_get); | |||
429 | 430 | ||
430 | void nfnl_acct_put(struct nf_acct *acct) | 431 | void nfnl_acct_put(struct nf_acct *acct) |
431 | { | 432 | { |
432 | if (atomic_dec_and_test(&acct->refcnt)) | 433 | if (refcount_dec_and_test(&acct->refcnt)) |
433 | kfree_rcu(acct, rcu_head); | 434 | kfree_rcu(acct, rcu_head); |
434 | 435 | ||
435 | module_put(THIS_MODULE); | 436 | module_put(THIS_MODULE); |
@@ -502,7 +503,7 @@ static void __net_exit nfnl_acct_net_exit(struct net *net) | |||
502 | list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) { | 503 | list_for_each_entry_safe(cur, tmp, &net->nfnl_acct_list, head) { |
503 | list_del_rcu(&cur->head); | 504 | list_del_rcu(&cur->head); |
504 | 505 | ||
505 | if (atomic_dec_and_test(&cur->refcnt)) | 506 | if (refcount_dec_and_test(&cur->refcnt)) |
506 | kfree_rcu(cur, rcu_head); | 507 | kfree_rcu(cur, rcu_head); |
507 | } | 508 | } |
508 | } | 509 | } |
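The nfnetlink_acct hunks above, like the cttimeout and nfnetlink_log conversions that follow, replace open-coded atomic_t reference counting with the refcount_t API, which saturates and warns instead of silently wrapping. A minimal sketch of the mapping for a hypothetical RCU-freed object (struct foo and its helpers are invented for illustration; the refcount_*() calls are the ones appearing in the diff):

#include <linux/refcount.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	refcount_t refcnt;
	struct rcu_head rcu_head;
};

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (f)
		refcount_set(&f->refcnt, 1);		/* was atomic_set(..., 1) */
	return f;
}

/* Lookup path: only take a reference if the object is still live. */
static bool foo_get(struct foo *f)
{
	return refcount_inc_not_zero(&f->refcnt);	/* was atomic_inc_not_zero() */
}

static void foo_put(struct foo *f)
{
	if (refcount_dec_and_test(&f->refcnt))		/* was atomic_dec_and_test() */
		kfree_rcu(f, rcu_head);
}

/* Deletion path: succeed only when we hold the last reference,
 * replacing atomic_cmpxchg(&refcnt, 1, 0) == 1. */
static bool foo_try_del(struct foo *f)
{
	if (refcount_dec_if_one(&f->refcnt)) {
		kfree_rcu(f, rcu_head);
		return true;
	}
	return false;
}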
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index 139e0867e56e..baa75f3ab7e7 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c | |||
@@ -138,7 +138,7 @@ static int cttimeout_new_timeout(struct net *net, struct sock *ctnl, | |||
138 | strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME])); | 138 | strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME])); |
139 | timeout->l3num = l3num; | 139 | timeout->l3num = l3num; |
140 | timeout->l4proto = l4proto; | 140 | timeout->l4proto = l4proto; |
141 | atomic_set(&timeout->refcnt, 1); | 141 | refcount_set(&timeout->refcnt, 1); |
142 | list_add_tail_rcu(&timeout->head, &net->nfct_timeout_list); | 142 | list_add_tail_rcu(&timeout->head, &net->nfct_timeout_list); |
143 | 143 | ||
144 | return 0; | 144 | return 0; |
@@ -172,7 +172,7 @@ ctnl_timeout_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type, | |||
172 | nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) || | 172 | nla_put_be16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num)) || |
173 | nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) || | 173 | nla_put_u8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4proto->l4proto) || |
174 | nla_put_be32(skb, CTA_TIMEOUT_USE, | 174 | nla_put_be32(skb, CTA_TIMEOUT_USE, |
175 | htonl(atomic_read(&timeout->refcnt)))) | 175 | htonl(refcount_read(&timeout->refcnt)))) |
176 | goto nla_put_failure; | 176 | goto nla_put_failure; |
177 | 177 | ||
178 | if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) { | 178 | if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) { |
@@ -339,7 +339,7 @@ static int ctnl_timeout_try_del(struct net *net, struct ctnl_timeout *timeout) | |||
339 | /* We want to avoid races with ctnl_timeout_put. So only when the | 339 | /* We want to avoid races with ctnl_timeout_put. So only when the |
340 | * current refcnt is 1, we decrease it to 0. | 340 | * current refcnt is 1, we decrease it to 0. |
341 | */ | 341 | */ |
342 | if (atomic_cmpxchg(&timeout->refcnt, 1, 0) == 1) { | 342 | if (refcount_dec_if_one(&timeout->refcnt)) { |
343 | /* We are protected by nfnl mutex. */ | 343 | /* We are protected by nfnl mutex. */ |
344 | list_del_rcu(&timeout->head); | 344 | list_del_rcu(&timeout->head); |
345 | nf_ct_l4proto_put(timeout->l4proto); | 345 | nf_ct_l4proto_put(timeout->l4proto); |
@@ -536,7 +536,7 @@ ctnl_timeout_find_get(struct net *net, const char *name) | |||
536 | if (!try_module_get(THIS_MODULE)) | 536 | if (!try_module_get(THIS_MODULE)) |
537 | goto err; | 537 | goto err; |
538 | 538 | ||
539 | if (!atomic_inc_not_zero(&timeout->refcnt)) { | 539 | if (!refcount_inc_not_zero(&timeout->refcnt)) { |
540 | module_put(THIS_MODULE); | 540 | module_put(THIS_MODULE); |
541 | goto err; | 541 | goto err; |
542 | } | 542 | } |
@@ -550,7 +550,7 @@ err: | |||
550 | 550 | ||
551 | static void ctnl_timeout_put(struct ctnl_timeout *timeout) | 551 | static void ctnl_timeout_put(struct ctnl_timeout *timeout) |
552 | { | 552 | { |
553 | if (atomic_dec_and_test(&timeout->refcnt)) | 553 | if (refcount_dec_and_test(&timeout->refcnt)) |
554 | kfree_rcu(timeout, rcu_head); | 554 | kfree_rcu(timeout, rcu_head); |
555 | 555 | ||
556 | module_put(THIS_MODULE); | 556 | module_put(THIS_MODULE); |
@@ -601,7 +601,7 @@ static void __net_exit cttimeout_net_exit(struct net *net) | |||
601 | list_del_rcu(&cur->head); | 601 | list_del_rcu(&cur->head); |
602 | nf_ct_l4proto_put(cur->l4proto); | 602 | nf_ct_l4proto_put(cur->l4proto); |
603 | 603 | ||
604 | if (atomic_dec_and_test(&cur->refcnt)) | 604 | if (refcount_dec_and_test(&cur->refcnt)) |
605 | kfree_rcu(cur, rcu_head); | 605 | kfree_rcu(cur, rcu_head); |
606 | } | 606 | } |
607 | } | 607 | } |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 08247bf7d7b8..ecd857b75ffe 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -40,6 +40,8 @@ | |||
40 | #include <net/netfilter/nfnetlink_log.h> | 40 | #include <net/netfilter/nfnetlink_log.h> |
41 | 41 | ||
42 | #include <linux/atomic.h> | 42 | #include <linux/atomic.h> |
43 | #include <linux/refcount.h> | ||
44 | |||
43 | 45 | ||
44 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) | 46 | #if IS_ENABLED(CONFIG_BRIDGE_NETFILTER) |
45 | #include "../bridge/br_private.h" | 47 | #include "../bridge/br_private.h" |
@@ -57,7 +59,7 @@ | |||
57 | struct nfulnl_instance { | 59 | struct nfulnl_instance { |
58 | struct hlist_node hlist; /* global list of instances */ | 60 | struct hlist_node hlist; /* global list of instances */ |
59 | spinlock_t lock; | 61 | spinlock_t lock; |
60 | atomic_t use; /* use count */ | 62 | refcount_t use; /* use count */ |
61 | 63 | ||
62 | unsigned int qlen; /* number of nlmsgs in skb */ | 64 | unsigned int qlen; /* number of nlmsgs in skb */ |
63 | struct sk_buff *skb; /* pre-allocatd skb */ | 65 | struct sk_buff *skb; /* pre-allocatd skb */ |
@@ -115,7 +117,7 @@ __instance_lookup(struct nfnl_log_net *log, u_int16_t group_num) | |||
115 | static inline void | 117 | static inline void |
116 | instance_get(struct nfulnl_instance *inst) | 118 | instance_get(struct nfulnl_instance *inst) |
117 | { | 119 | { |
118 | atomic_inc(&inst->use); | 120 | refcount_inc(&inst->use); |
119 | } | 121 | } |
120 | 122 | ||
121 | static struct nfulnl_instance * | 123 | static struct nfulnl_instance * |
@@ -125,7 +127,7 @@ instance_lookup_get(struct nfnl_log_net *log, u_int16_t group_num) | |||
125 | 127 | ||
126 | rcu_read_lock_bh(); | 128 | rcu_read_lock_bh(); |
127 | inst = __instance_lookup(log, group_num); | 129 | inst = __instance_lookup(log, group_num); |
128 | if (inst && !atomic_inc_not_zero(&inst->use)) | 130 | if (inst && !refcount_inc_not_zero(&inst->use)) |
129 | inst = NULL; | 131 | inst = NULL; |
130 | rcu_read_unlock_bh(); | 132 | rcu_read_unlock_bh(); |
131 | 133 | ||
@@ -145,7 +147,7 @@ static void nfulnl_instance_free_rcu(struct rcu_head *head) | |||
145 | static void | 147 | static void |
146 | instance_put(struct nfulnl_instance *inst) | 148 | instance_put(struct nfulnl_instance *inst) |
147 | { | 149 | { |
148 | if (inst && atomic_dec_and_test(&inst->use)) | 150 | if (inst && refcount_dec_and_test(&inst->use)) |
149 | call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu); | 151 | call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu); |
150 | } | 152 | } |
151 | 153 | ||
@@ -180,7 +182,7 @@ instance_create(struct net *net, u_int16_t group_num, | |||
180 | INIT_HLIST_NODE(&inst->hlist); | 182 | INIT_HLIST_NODE(&inst->hlist); |
181 | spin_lock_init(&inst->lock); | 183 | spin_lock_init(&inst->lock); |
182 | /* needs to be two, since we _put() after creation */ | 184 | /* needs to be two, since we _put() after creation */ |
183 | atomic_set(&inst->use, 2); | 185 | refcount_set(&inst->use, 2); |
184 | 186 | ||
185 | setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst); | 187 | setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst); |
186 | 188 | ||
@@ -1031,7 +1033,7 @@ static int seq_show(struct seq_file *s, void *v) | |||
1031 | inst->group_num, | 1033 | inst->group_num, |
1032 | inst->peer_portid, inst->qlen, | 1034 | inst->peer_portid, inst->qlen, |
1033 | inst->copy_mode, inst->copy_range, | 1035 | inst->copy_mode, inst->copy_range, |
1034 | inst->flushtimeout, atomic_read(&inst->use)); | 1036 | inst->flushtimeout, refcount_read(&inst->use)); |
1035 | 1037 | ||
1036 | return 0; | 1038 | return 0; |
1037 | } | 1039 | } |
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index c21e7eb8dce0..fab6bf3f955e 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c | |||
@@ -230,10 +230,6 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
230 | union nft_entry e = {}; | 230 | union nft_entry e = {}; |
231 | int ret; | 231 | int ret; |
232 | 232 | ||
233 | ret = nft_compat_chain_validate_dependency(target->table, ctx->chain); | ||
234 | if (ret < 0) | ||
235 | goto err; | ||
236 | |||
237 | target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); | 233 | target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); |
238 | 234 | ||
239 | if (ctx->nla[NFTA_RULE_COMPAT]) { | 235 | if (ctx->nla[NFTA_RULE_COMPAT]) { |
@@ -419,10 +415,6 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
419 | union nft_entry e = {}; | 415 | union nft_entry e = {}; |
420 | int ret; | 416 | int ret; |
421 | 417 | ||
422 | ret = nft_compat_chain_validate_dependency(match->table, ctx->chain); | ||
423 | if (ret < 0) | ||
424 | goto err; | ||
425 | |||
426 | match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); | 418 | match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); |
427 | 419 | ||
428 | if (ctx->nla[NFTA_RULE_COMPAT]) { | 420 | if (ctx->nla[NFTA_RULE_COMPAT]) { |
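nft_target_init() and nft_match_init() above no longer call nft_compat_chain_validate_dependency() themselves, and the same init-time ->validate() calls disappear from masq, meta, nat, redir and reject further down. The sketch below is illustrative only, assuming the core now runs each expression's ->validate() callback itself once ->init() has succeeded; the function name and exact placement are assumptions, not the actual nf_tables code:

#include <net/netfilter/nf_tables.h>

static int example_expr_construct(const struct nft_ctx *ctx,
				  const struct nft_expr_ops *ops,
				  struct nft_expr *expr,
				  const struct nlattr * const tb[])
{
	int err;

	if (ops->init) {
		err = ops->init(ctx, expr, tb);
		if (err < 0)
			return err;
	}

	/* One central validation pass makes the per-expression calls
	 * removed in this diff redundant. */
	if (ops->validate) {
		const struct nft_data *data = NULL;

		err = ops->validate(ctx, expr, &data);
		if (err < 0)
			return err;
	}

	return 0;
}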
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c index 7f8422213341..67a710ebde09 100644 --- a/net/netfilter/nft_counter.c +++ b/net/netfilter/nft_counter.c | |||
@@ -82,7 +82,8 @@ static int nft_counter_do_init(const struct nlattr * const tb[], | |||
82 | return 0; | 82 | return 0; |
83 | } | 83 | } |
84 | 84 | ||
85 | static int nft_counter_obj_init(const struct nlattr * const tb[], | 85 | static int nft_counter_obj_init(const struct nft_ctx *ctx, |
86 | const struct nlattr * const tb[], | ||
86 | struct nft_object *obj) | 87 | struct nft_object *obj) |
87 | { | 88 | { |
88 | struct nft_counter_percpu_priv *priv = nft_obj_data(obj); | 89 | struct nft_counter_percpu_priv *priv = nft_obj_data(obj); |
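nft_counter_obj_init(), like nft_quota_obj_init() later in this diff, gains a const struct nft_ctx * first argument to match the type->init(ctx, ...) call added in nf_tables_api.c above. A small sketch of a hypothetical object type using the new signature; struct example_obj and its family field are made up:

#include <net/netfilter/nf_tables.h>

struct example_obj {
	u8 family;
};

static int example_obj_init(const struct nft_ctx *ctx,
			    const struct nlattr * const tb[],
			    struct nft_object *obj)
{
	struct example_obj *priv = nft_obj_data(obj);

	/* The context gives the init callback access to table and
	 * address-family information it could not see before. */
	priv->family = ctx->afi->family;
	return 0;
}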
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index bf548a7a71ec..4144ae845bdd 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c | |||
@@ -32,6 +32,12 @@ struct nft_ct { | |||
32 | }; | 32 | }; |
33 | }; | 33 | }; |
34 | 34 | ||
35 | struct nft_ct_helper_obj { | ||
36 | struct nf_conntrack_helper *helper4; | ||
37 | struct nf_conntrack_helper *helper6; | ||
38 | u8 l4proto; | ||
39 | }; | ||
40 | |||
35 | #ifdef CONFIG_NF_CONNTRACK_ZONES | 41 | #ifdef CONFIG_NF_CONNTRACK_ZONES |
36 | static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template); | 42 | static DEFINE_PER_CPU(struct nf_conn *, nft_ct_pcpu_template); |
37 | static unsigned int nft_ct_pcpu_template_refcnt __read_mostly; | 43 | static unsigned int nft_ct_pcpu_template_refcnt __read_mostly; |
@@ -730,6 +736,162 @@ static struct nft_expr_type nft_notrack_type __read_mostly = { | |||
730 | .owner = THIS_MODULE, | 736 | .owner = THIS_MODULE, |
731 | }; | 737 | }; |
732 | 738 | ||
739 | static int nft_ct_helper_obj_init(const struct nft_ctx *ctx, | ||
740 | const struct nlattr * const tb[], | ||
741 | struct nft_object *obj) | ||
742 | { | ||
743 | struct nft_ct_helper_obj *priv = nft_obj_data(obj); | ||
744 | struct nf_conntrack_helper *help4, *help6; | ||
745 | char name[NF_CT_HELPER_NAME_LEN]; | ||
746 | int family = ctx->afi->family; | ||
747 | |||
748 | if (!tb[NFTA_CT_HELPER_NAME] || !tb[NFTA_CT_HELPER_L4PROTO]) | ||
749 | return -EINVAL; | ||
750 | |||
751 | priv->l4proto = nla_get_u8(tb[NFTA_CT_HELPER_L4PROTO]); | ||
752 | if (!priv->l4proto) | ||
753 | return -ENOENT; | ||
754 | |||
755 | nla_strlcpy(name, tb[NFTA_CT_HELPER_NAME], sizeof(name)); | ||
756 | |||
757 | if (tb[NFTA_CT_HELPER_L3PROTO]) | ||
758 | family = ntohs(nla_get_be16(tb[NFTA_CT_HELPER_L3PROTO])); | ||
759 | |||
760 | help4 = NULL; | ||
761 | help6 = NULL; | ||
762 | |||
763 | switch (family) { | ||
764 | case NFPROTO_IPV4: | ||
765 | if (ctx->afi->family == NFPROTO_IPV6) | ||
766 | return -EINVAL; | ||
767 | |||
768 | help4 = nf_conntrack_helper_try_module_get(name, family, | ||
769 | priv->l4proto); | ||
770 | break; | ||
771 | case NFPROTO_IPV6: | ||
772 | if (ctx->afi->family == NFPROTO_IPV4) | ||
773 | return -EINVAL; | ||
774 | |||
775 | help6 = nf_conntrack_helper_try_module_get(name, family, | ||
776 | priv->l4proto); | ||
777 | break; | ||
778 | case NFPROTO_NETDEV: /* fallthrough */ | ||
779 | case NFPROTO_BRIDGE: /* same */ | ||
780 | case NFPROTO_INET: | ||
781 | help4 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV4, | ||
782 | priv->l4proto); | ||
783 | help6 = nf_conntrack_helper_try_module_get(name, NFPROTO_IPV6, | ||
784 | priv->l4proto); | ||
785 | break; | ||
786 | default: | ||
787 | return -EAFNOSUPPORT; | ||
788 | } | ||
789 | |||
790 | /* && is intentional; only error if INET found neither ipv4 or ipv6 */ | ||
791 | if (!help4 && !help6) | ||
792 | return -ENOENT; | ||
793 | |||
794 | priv->helper4 = help4; | ||
795 | priv->helper6 = help6; | ||
796 | |||
797 | return 0; | ||
798 | } | ||
799 | |||
800 | static void nft_ct_helper_obj_destroy(struct nft_object *obj) | ||
801 | { | ||
802 | struct nft_ct_helper_obj *priv = nft_obj_data(obj); | ||
803 | |||
804 | if (priv->helper4) | ||
805 | module_put(priv->helper4->me); | ||
806 | if (priv->helper6) | ||
807 | module_put(priv->helper6->me); | ||
808 | } | ||
809 | |||
810 | static void nft_ct_helper_obj_eval(struct nft_object *obj, | ||
811 | struct nft_regs *regs, | ||
812 | const struct nft_pktinfo *pkt) | ||
813 | { | ||
814 | const struct nft_ct_helper_obj *priv = nft_obj_data(obj); | ||
815 | struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb); | ||
816 | struct nf_conntrack_helper *to_assign = NULL; | ||
817 | struct nf_conn_help *help; | ||
818 | |||
819 | if (!ct || | ||
820 | nf_ct_is_confirmed(ct) || | ||
821 | nf_ct_is_template(ct) || | ||
822 | priv->l4proto != nf_ct_protonum(ct)) | ||
823 | return; | ||
824 | |||
825 | switch (nf_ct_l3num(ct)) { | ||
826 | case NFPROTO_IPV4: | ||
827 | to_assign = priv->helper4; | ||
828 | break; | ||
829 | case NFPROTO_IPV6: | ||
830 | to_assign = priv->helper6; | ||
831 | break; | ||
832 | default: | ||
833 | WARN_ON_ONCE(1); | ||
834 | return; | ||
835 | } | ||
836 | |||
837 | if (!to_assign) | ||
838 | return; | ||
839 | |||
840 | if (test_bit(IPS_HELPER_BIT, &ct->status)) | ||
841 | return; | ||
842 | |||
843 | help = nf_ct_helper_ext_add(ct, to_assign, GFP_ATOMIC); | ||
844 | if (help) { | ||
845 | rcu_assign_pointer(help->helper, to_assign); | ||
846 | set_bit(IPS_HELPER_BIT, &ct->status); | ||
847 | } | ||
848 | } | ||
849 | |||
850 | static int nft_ct_helper_obj_dump(struct sk_buff *skb, | ||
851 | struct nft_object *obj, bool reset) | ||
852 | { | ||
853 | const struct nft_ct_helper_obj *priv = nft_obj_data(obj); | ||
854 | const struct nf_conntrack_helper *helper = priv->helper4; | ||
855 | u16 family; | ||
856 | |||
857 | if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name)) | ||
858 | return -1; | ||
859 | |||
860 | if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto)) | ||
861 | return -1; | ||
862 | |||
863 | if (priv->helper4 && priv->helper6) | ||
864 | family = NFPROTO_INET; | ||
865 | else if (priv->helper6) | ||
866 | family = NFPROTO_IPV6; | ||
867 | else | ||
868 | family = NFPROTO_IPV4; | ||
869 | |||
870 | if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family))) | ||
871 | return -1; | ||
872 | |||
873 | return 0; | ||
874 | } | ||
875 | |||
876 | static const struct nla_policy nft_ct_helper_policy[NFTA_CT_HELPER_MAX + 1] = { | ||
877 | [NFTA_CT_HELPER_NAME] = { .type = NLA_STRING, | ||
878 | .len = NF_CT_HELPER_NAME_LEN - 1 }, | ||
879 | [NFTA_CT_HELPER_L3PROTO] = { .type = NLA_U16 }, | ||
880 | [NFTA_CT_HELPER_L4PROTO] = { .type = NLA_U8 }, | ||
881 | }; | ||
882 | |||
883 | static struct nft_object_type nft_ct_helper_obj __read_mostly = { | ||
884 | .type = NFT_OBJECT_CT_HELPER, | ||
885 | .size = sizeof(struct nft_ct_helper_obj), | ||
886 | .maxattr = NFTA_CT_HELPER_MAX, | ||
887 | .policy = nft_ct_helper_policy, | ||
888 | .eval = nft_ct_helper_obj_eval, | ||
889 | .init = nft_ct_helper_obj_init, | ||
890 | .destroy = nft_ct_helper_obj_destroy, | ||
891 | .dump = nft_ct_helper_obj_dump, | ||
892 | .owner = THIS_MODULE, | ||
893 | }; | ||
894 | |||
733 | static int __init nft_ct_module_init(void) | 895 | static int __init nft_ct_module_init(void) |
734 | { | 896 | { |
735 | int err; | 897 | int err; |
@@ -744,7 +906,14 @@ static int __init nft_ct_module_init(void) | |||
744 | if (err < 0) | 906 | if (err < 0) |
745 | goto err1; | 907 | goto err1; |
746 | 908 | ||
909 | err = nft_register_obj(&nft_ct_helper_obj); | ||
910 | if (err < 0) | ||
911 | goto err2; | ||
912 | |||
747 | return 0; | 913 | return 0; |
914 | |||
915 | err2: | ||
916 | nft_unregister_expr(&nft_notrack_type); | ||
748 | err1: | 917 | err1: |
749 | nft_unregister_expr(&nft_ct_type); | 918 | nft_unregister_expr(&nft_ct_type); |
750 | return err; | 919 | return err; |
@@ -752,6 +921,7 @@ err1: | |||
752 | 921 | ||
753 | static void __exit nft_ct_module_exit(void) | 922 | static void __exit nft_ct_module_exit(void) |
754 | { | 923 | { |
924 | nft_unregister_obj(&nft_ct_helper_obj); | ||
755 | nft_unregister_expr(&nft_notrack_type); | 925 | nft_unregister_expr(&nft_notrack_type); |
756 | nft_unregister_expr(&nft_ct_type); | 926 | nft_unregister_expr(&nft_ct_type); |
757 | } | 927 | } |
@@ -763,3 +933,4 @@ MODULE_LICENSE("GPL"); | |||
763 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); | 933 | MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); |
764 | MODULE_ALIAS_NFT_EXPR("ct"); | 934 | MODULE_ALIAS_NFT_EXPR("ct"); |
765 | MODULE_ALIAS_NFT_EXPR("notrack"); | 935 | MODULE_ALIAS_NFT_EXPR("notrack"); |
936 | MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_CT_HELPER); | ||
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 049ad2d9ee66..3948da380259 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
@@ -133,16 +133,10 @@ static int nft_dynset_init(const struct nft_ctx *ctx, | |||
133 | priv->invert = true; | 133 | priv->invert = true; |
134 | } | 134 | } |
135 | 135 | ||
136 | set = nf_tables_set_lookup(ctx->table, tb[NFTA_DYNSET_SET_NAME], | 136 | set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_DYNSET_SET_NAME], |
137 | genmask); | 137 | tb[NFTA_DYNSET_SET_ID], genmask); |
138 | if (IS_ERR(set)) { | 138 | if (IS_ERR(set)) |
139 | if (tb[NFTA_DYNSET_SET_ID]) | 139 | return PTR_ERR(set); |
140 | set = nf_tables_set_lookup_byid(ctx->net, | ||
141 | tb[NFTA_DYNSET_SET_ID], | ||
142 | genmask); | ||
143 | if (IS_ERR(set)) | ||
144 | return PTR_ERR(set); | ||
145 | } | ||
146 | 140 | ||
147 | if (set->ops->update == NULL) | 141 | if (set->ops->update == NULL) |
148 | return -EOPNOTSUPP; | 142 | return -EOPNOTSUPP; |
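nft_dynset_init() above, and nft_lookup/nft_objref below, now resolve their set through a single nft_set_lookup() call that takes both the set name attribute and the optional transaction set ID. A sketch of what such a consolidated helper plausibly looks like, reconstructed from the call sites removed here (assumption: the real helper in nf_tables_api.c may differ in detail):

#include <linux/err.h>
#include <net/netfilter/nf_tables.h>

/* Look the set up by name first; if that fails and a transaction-local
 * set ID was supplied, fall back to the by-ID lookup. */
static struct nft_set *example_set_lookup(const struct net *net,
					  const struct nft_table *table,
					  const struct nlattr *nla_set_name,
					  const struct nlattr *nla_set_id,
					  u8 genmask)
{
	struct nft_set *set;

	set = nf_tables_set_lookup(table, nla_set_name, genmask);
	if (IS_ERR(set) && nla_set_id)
		set = nf_tables_set_lookup_byid(net, nla_set_id, genmask);

	return set;
}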
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index c308920b194c..d212a85d2f33 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c | |||
@@ -98,14 +98,21 @@ static void nft_exthdr_tcp_eval(const struct nft_expr *expr, | |||
98 | goto err; | 98 | goto err; |
99 | 99 | ||
100 | offset = i + priv->offset; | 100 | offset = i + priv->offset; |
101 | dest[priv->len / NFT_REG32_SIZE] = 0; | 101 | if (priv->flags & NFT_EXTHDR_F_PRESENT) { |
102 | memcpy(dest, opt + offset, priv->len); | 102 | *dest = 1; |
103 | } else { | ||
104 | dest[priv->len / NFT_REG32_SIZE] = 0; | ||
105 | memcpy(dest, opt + offset, priv->len); | ||
106 | } | ||
103 | 107 | ||
104 | return; | 108 | return; |
105 | } | 109 | } |
106 | 110 | ||
107 | err: | 111 | err: |
108 | regs->verdict.code = NFT_BREAK; | 112 | if (priv->flags & NFT_EXTHDR_F_PRESENT) |
113 | *dest = 0; | ||
114 | else | ||
115 | regs->verdict.code = NFT_BREAK; | ||
109 | } | 116 | } |
110 | 117 | ||
111 | static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { | 118 | static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { |
diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c index 29a4906adc27..21df8cccea65 100644 --- a/net/netfilter/nft_fib.c +++ b/net/netfilter/nft_fib.c | |||
@@ -24,7 +24,8 @@ const struct nla_policy nft_fib_policy[NFTA_FIB_MAX + 1] = { | |||
24 | EXPORT_SYMBOL(nft_fib_policy); | 24 | EXPORT_SYMBOL(nft_fib_policy); |
25 | 25 | ||
26 | #define NFTA_FIB_F_ALL (NFTA_FIB_F_SADDR | NFTA_FIB_F_DADDR | \ | 26 | #define NFTA_FIB_F_ALL (NFTA_FIB_F_SADDR | NFTA_FIB_F_DADDR | \ |
27 | NFTA_FIB_F_MARK | NFTA_FIB_F_IIF | NFTA_FIB_F_OIF) | 27 | NFTA_FIB_F_MARK | NFTA_FIB_F_IIF | NFTA_FIB_F_OIF | \ |
28 | NFTA_FIB_F_PRESENT) | ||
28 | 29 | ||
29 | int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, | 30 | int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, |
30 | const struct nft_data **data) | 31 | const struct nft_data **data) |
@@ -112,7 +113,7 @@ int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
112 | if (err < 0) | 113 | if (err < 0) |
113 | return err; | 114 | return err; |
114 | 115 | ||
115 | return nft_fib_validate(ctx, expr, NULL); | 116 | return 0; |
116 | } | 117 | } |
117 | EXPORT_SYMBOL_GPL(nft_fib_init); | 118 | EXPORT_SYMBOL_GPL(nft_fib_init); |
118 | 119 | ||
@@ -133,19 +134,22 @@ int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
133 | } | 134 | } |
134 | EXPORT_SYMBOL_GPL(nft_fib_dump); | 135 | EXPORT_SYMBOL_GPL(nft_fib_dump); |
135 | 136 | ||
136 | void nft_fib_store_result(void *reg, enum nft_fib_result r, | 137 | void nft_fib_store_result(void *reg, const struct nft_fib *priv, |
137 | const struct nft_pktinfo *pkt, int index) | 138 | const struct nft_pktinfo *pkt, int index) |
138 | { | 139 | { |
139 | struct net_device *dev; | 140 | struct net_device *dev; |
140 | u32 *dreg = reg; | 141 | u32 *dreg = reg; |
141 | 142 | ||
142 | switch (r) { | 143 | switch (priv->result) { |
143 | case NFT_FIB_RESULT_OIF: | 144 | case NFT_FIB_RESULT_OIF: |
144 | *dreg = index; | 145 | *dreg = (priv->flags & NFTA_FIB_F_PRESENT) ? !!index : index; |
145 | break; | 146 | break; |
146 | case NFT_FIB_RESULT_OIFNAME: | 147 | case NFT_FIB_RESULT_OIFNAME: |
147 | dev = dev_get_by_index_rcu(nft_net(pkt), index); | 148 | dev = dev_get_by_index_rcu(nft_net(pkt), index); |
148 | strncpy(reg, dev ? dev->name : "", IFNAMSIZ); | 149 | if (priv->flags & NFTA_FIB_F_PRESENT) |
150 | *dreg = !!dev; | ||
151 | else | ||
152 | strncpy(reg, dev ? dev->name : "", IFNAMSIZ); | ||
149 | break; | 153 | break; |
150 | default: | 154 | default: |
151 | WARN_ON_ONCE(1); | 155 | WARN_ON_ONCE(1); |
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index eb2721af898d..a6a4633725bb 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c | |||
@@ -17,7 +17,7 @@ | |||
17 | #include <net/netfilter/nf_tables_core.h> | 17 | #include <net/netfilter/nf_tables_core.h> |
18 | #include <linux/jhash.h> | 18 | #include <linux/jhash.h> |
19 | 19 | ||
20 | struct nft_hash { | 20 | struct nft_jhash { |
21 | enum nft_registers sreg:8; | 21 | enum nft_registers sreg:8; |
22 | enum nft_registers dreg:8; | 22 | enum nft_registers dreg:8; |
23 | u8 len; | 23 | u8 len; |
@@ -26,11 +26,11 @@ struct nft_hash { | |||
26 | u32 offset; | 26 | u32 offset; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | static void nft_hash_eval(const struct nft_expr *expr, | 29 | static void nft_jhash_eval(const struct nft_expr *expr, |
30 | struct nft_regs *regs, | 30 | struct nft_regs *regs, |
31 | const struct nft_pktinfo *pkt) | 31 | const struct nft_pktinfo *pkt) |
32 | { | 32 | { |
33 | struct nft_hash *priv = nft_expr_priv(expr); | 33 | struct nft_jhash *priv = nft_expr_priv(expr); |
34 | const void *data = ®s->data[priv->sreg]; | 34 | const void *data = ®s->data[priv->sreg]; |
35 | u32 h; | 35 | u32 h; |
36 | 36 | ||
@@ -38,6 +38,25 @@ static void nft_hash_eval(const struct nft_expr *expr, | |||
38 | regs->data[priv->dreg] = h + priv->offset; | 38 | regs->data[priv->dreg] = h + priv->offset; |
39 | } | 39 | } |
40 | 40 | ||
41 | struct nft_symhash { | ||
42 | enum nft_registers dreg:8; | ||
43 | u32 modulus; | ||
44 | u32 offset; | ||
45 | }; | ||
46 | |||
47 | static void nft_symhash_eval(const struct nft_expr *expr, | ||
48 | struct nft_regs *regs, | ||
49 | const struct nft_pktinfo *pkt) | ||
50 | { | ||
51 | struct nft_symhash *priv = nft_expr_priv(expr); | ||
52 | struct sk_buff *skb = pkt->skb; | ||
53 | u32 h; | ||
54 | |||
55 | h = reciprocal_scale(__skb_get_hash_symmetric(skb), priv->modulus); | ||
56 | |||
57 | regs->data[priv->dreg] = h + priv->offset; | ||
58 | } | ||
59 | |||
41 | static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = { | 60 | static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = { |
42 | [NFTA_HASH_SREG] = { .type = NLA_U32 }, | 61 | [NFTA_HASH_SREG] = { .type = NLA_U32 }, |
43 | [NFTA_HASH_DREG] = { .type = NLA_U32 }, | 62 | [NFTA_HASH_DREG] = { .type = NLA_U32 }, |
@@ -45,13 +64,14 @@ static const struct nla_policy nft_hash_policy[NFTA_HASH_MAX + 1] = { | |||
45 | [NFTA_HASH_MODULUS] = { .type = NLA_U32 }, | 64 | [NFTA_HASH_MODULUS] = { .type = NLA_U32 }, |
46 | [NFTA_HASH_SEED] = { .type = NLA_U32 }, | 65 | [NFTA_HASH_SEED] = { .type = NLA_U32 }, |
47 | [NFTA_HASH_OFFSET] = { .type = NLA_U32 }, | 66 | [NFTA_HASH_OFFSET] = { .type = NLA_U32 }, |
67 | [NFTA_HASH_TYPE] = { .type = NLA_U32 }, | ||
48 | }; | 68 | }; |
49 | 69 | ||
50 | static int nft_hash_init(const struct nft_ctx *ctx, | 70 | static int nft_jhash_init(const struct nft_ctx *ctx, |
51 | const struct nft_expr *expr, | 71 | const struct nft_expr *expr, |
52 | const struct nlattr * const tb[]) | 72 | const struct nlattr * const tb[]) |
53 | { | 73 | { |
54 | struct nft_hash *priv = nft_expr_priv(expr); | 74 | struct nft_jhash *priv = nft_expr_priv(expr); |
55 | u32 len; | 75 | u32 len; |
56 | int err; | 76 | int err; |
57 | 77 | ||
@@ -92,10 +112,36 @@ static int nft_hash_init(const struct nft_ctx *ctx, | |||
92 | NFT_DATA_VALUE, sizeof(u32)); | 112 | NFT_DATA_VALUE, sizeof(u32)); |
93 | } | 113 | } |
94 | 114 | ||
95 | static int nft_hash_dump(struct sk_buff *skb, | 115 | static int nft_symhash_init(const struct nft_ctx *ctx, |
96 | const struct nft_expr *expr) | 116 | const struct nft_expr *expr, |
117 | const struct nlattr * const tb[]) | ||
97 | { | 118 | { |
98 | const struct nft_hash *priv = nft_expr_priv(expr); | 119 | struct nft_symhash *priv = nft_expr_priv(expr); |
120 | |||
121 | if (!tb[NFTA_HASH_DREG] || | ||
122 | !tb[NFTA_HASH_MODULUS]) | ||
123 | return -EINVAL; | ||
124 | |||
125 | if (tb[NFTA_HASH_OFFSET]) | ||
126 | priv->offset = ntohl(nla_get_be32(tb[NFTA_HASH_OFFSET])); | ||
127 | |||
128 | priv->dreg = nft_parse_register(tb[NFTA_HASH_DREG]); | ||
129 | |||
130 | priv->modulus = ntohl(nla_get_be32(tb[NFTA_HASH_MODULUS])); | ||
131 | if (priv->modulus <= 1) | ||
132 | return -ERANGE; | ||
133 | |||
134 | if (priv->offset + priv->modulus - 1 < priv->offset) | ||
135 | return -EOVERFLOW; | ||
136 | |||
137 | return nft_validate_register_store(ctx, priv->dreg, NULL, | ||
138 | NFT_DATA_VALUE, sizeof(u32)); | ||
139 | } | ||
140 | |||
141 | static int nft_jhash_dump(struct sk_buff *skb, | ||
142 | const struct nft_expr *expr) | ||
143 | { | ||
144 | const struct nft_jhash *priv = nft_expr_priv(expr); | ||
99 | 145 | ||
100 | if (nft_dump_register(skb, NFTA_HASH_SREG, priv->sreg)) | 146 | if (nft_dump_register(skb, NFTA_HASH_SREG, priv->sreg)) |
101 | goto nla_put_failure; | 147 | goto nla_put_failure; |
@@ -110,6 +156,28 @@ static int nft_hash_dump(struct sk_buff *skb, | |||
110 | if (priv->offset != 0) | 156 | if (priv->offset != 0) |
111 | if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset))) | 157 | if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset))) |
112 | goto nla_put_failure; | 158 | goto nla_put_failure; |
159 | if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_JENKINS))) | ||
160 | goto nla_put_failure; | ||
161 | return 0; | ||
162 | |||
163 | nla_put_failure: | ||
164 | return -1; | ||
165 | } | ||
166 | |||
167 | static int nft_symhash_dump(struct sk_buff *skb, | ||
168 | const struct nft_expr *expr) | ||
169 | { | ||
170 | const struct nft_symhash *priv = nft_expr_priv(expr); | ||
171 | |||
172 | if (nft_dump_register(skb, NFTA_HASH_DREG, priv->dreg)) | ||
173 | goto nla_put_failure; | ||
174 | if (nla_put_be32(skb, NFTA_HASH_MODULUS, htonl(priv->modulus))) | ||
175 | goto nla_put_failure; | ||
176 | if (priv->offset != 0) | ||
177 | if (nla_put_be32(skb, NFTA_HASH_OFFSET, htonl(priv->offset))) | ||
178 | goto nla_put_failure; | ||
179 | if (nla_put_be32(skb, NFTA_HASH_TYPE, htonl(NFT_HASH_SYM))) | ||
180 | goto nla_put_failure; | ||
113 | return 0; | 181 | return 0; |
114 | 182 | ||
115 | nla_put_failure: | 183 | nla_put_failure: |
@@ -117,17 +185,46 @@ nla_put_failure: | |||
117 | } | 185 | } |
118 | 186 | ||
119 | static struct nft_expr_type nft_hash_type; | 187 | static struct nft_expr_type nft_hash_type; |
120 | static const struct nft_expr_ops nft_hash_ops = { | 188 | static const struct nft_expr_ops nft_jhash_ops = { |
121 | .type = &nft_hash_type, | 189 | .type = &nft_hash_type, |
122 | .size = NFT_EXPR_SIZE(sizeof(struct nft_hash)), | 190 | .size = NFT_EXPR_SIZE(sizeof(struct nft_jhash)), |
123 | .eval = nft_hash_eval, | 191 | .eval = nft_jhash_eval, |
124 | .init = nft_hash_init, | 192 | .init = nft_jhash_init, |
125 | .dump = nft_hash_dump, | 193 | .dump = nft_jhash_dump, |
126 | }; | 194 | }; |
127 | 195 | ||
196 | static const struct nft_expr_ops nft_symhash_ops = { | ||
197 | .type = &nft_hash_type, | ||
198 | .size = NFT_EXPR_SIZE(sizeof(struct nft_symhash)), | ||
199 | .eval = nft_symhash_eval, | ||
200 | .init = nft_symhash_init, | ||
201 | .dump = nft_symhash_dump, | ||
202 | }; | ||
203 | |||
204 | static const struct nft_expr_ops * | ||
205 | nft_hash_select_ops(const struct nft_ctx *ctx, | ||
206 | const struct nlattr * const tb[]) | ||
207 | { | ||
208 | u32 type; | ||
209 | |||
210 | if (!tb[NFTA_HASH_TYPE]) | ||
211 | return &nft_jhash_ops; | ||
212 | |||
213 | type = ntohl(nla_get_be32(tb[NFTA_HASH_TYPE])); | ||
214 | switch (type) { | ||
215 | case NFT_HASH_SYM: | ||
216 | return &nft_symhash_ops; | ||
217 | case NFT_HASH_JENKINS: | ||
218 | return &nft_jhash_ops; | ||
219 | default: | ||
220 | break; | ||
221 | } | ||
222 | return ERR_PTR(-EOPNOTSUPP); | ||
223 | } | ||
224 | |||
128 | static struct nft_expr_type nft_hash_type __read_mostly = { | 225 | static struct nft_expr_type nft_hash_type __read_mostly = { |
129 | .name = "hash", | 226 | .name = "hash", |
130 | .ops = &nft_hash_ops, | 227 | .select_ops = &nft_hash_select_ops, |
131 | .policy = nft_hash_policy, | 228 | .policy = nft_hash_policy, |
132 | .maxattr = NFTA_HASH_MAX, | 229 | .maxattr = NFTA_HASH_MAX, |
133 | .owner = THIS_MODULE, | 230 | .owner = THIS_MODULE, |
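The new symmetric hash variant feeds __skb_get_hash_symmetric(), which returns the same value for both directions of a flow, into reciprocal_scale() to pick a slot in [offset, offset + modulus). A small user-space sketch of that final mapping step; the constant values are made up, and reciprocal_scale() is reproduced here only to show the multiply-shift that replaces a division:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the kernel's reciprocal_scale(): map a full 32-bit
 * hash value into the range [0, ep_ro) without dividing. */
static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
	return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
	uint32_t modulus   = 16;		/* e.g. number of backends */
	uint32_t offset    = 100;		/* optional NFTA_HASH_OFFSET base */
	uint32_t flow_hash = 0xdeadbeef;	/* symmetric hash: identical for
						 * A->B and B->A of one flow */

	printf("slot = %u\n", offset + reciprocal_scale(flow_hash, modulus));
	return 0;
}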
diff --git a/net/netfilter/nft_limit.c b/net/netfilter/nft_limit.c index c6baf412236d..18dd57a52651 100644 --- a/net/netfilter/nft_limit.c +++ b/net/netfilter/nft_limit.c | |||
@@ -17,9 +17,8 @@ | |||
17 | #include <linux/netfilter/nf_tables.h> | 17 | #include <linux/netfilter/nf_tables.h> |
18 | #include <net/netfilter/nf_tables.h> | 18 | #include <net/netfilter/nf_tables.h> |
19 | 19 | ||
20 | static DEFINE_SPINLOCK(limit_lock); | ||
21 | |||
22 | struct nft_limit { | 20 | struct nft_limit { |
21 | spinlock_t lock; | ||
23 | u64 last; | 22 | u64 last; |
24 | u64 tokens; | 23 | u64 tokens; |
25 | u64 tokens_max; | 24 | u64 tokens_max; |
@@ -34,7 +33,7 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost) | |||
34 | u64 now, tokens; | 33 | u64 now, tokens; |
35 | s64 delta; | 34 | s64 delta; |
36 | 35 | ||
37 | spin_lock_bh(&limit_lock); | 36 | spin_lock_bh(&limit->lock); |
38 | now = ktime_get_ns(); | 37 | now = ktime_get_ns(); |
39 | tokens = limit->tokens + now - limit->last; | 38 | tokens = limit->tokens + now - limit->last; |
40 | if (tokens > limit->tokens_max) | 39 | if (tokens > limit->tokens_max) |
@@ -44,11 +43,11 @@ static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost) | |||
44 | delta = tokens - cost; | 43 | delta = tokens - cost; |
45 | if (delta >= 0) { | 44 | if (delta >= 0) { |
46 | limit->tokens = delta; | 45 | limit->tokens = delta; |
47 | spin_unlock_bh(&limit_lock); | 46 | spin_unlock_bh(&limit->lock); |
48 | return limit->invert; | 47 | return limit->invert; |
49 | } | 48 | } |
50 | limit->tokens = tokens; | 49 | limit->tokens = tokens; |
51 | spin_unlock_bh(&limit_lock); | 50 | spin_unlock_bh(&limit->lock); |
52 | return !limit->invert; | 51 | return !limit->invert; |
53 | } | 52 | } |
54 | 53 | ||
@@ -86,6 +85,7 @@ static int nft_limit_init(struct nft_limit *limit, | |||
86 | limit->invert = true; | 85 | limit->invert = true; |
87 | } | 86 | } |
88 | limit->last = ktime_get_ns(); | 87 | limit->last = ktime_get_ns(); |
88 | spin_lock_init(&limit->lock); | ||
89 | 89 | ||
90 | return 0; | 90 | return 0; |
91 | } | 91 | } |
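nft_limit now keeps the spinlock inside each limit object instead of sharing one global limit_lock across all rules (xt_limit receives the same treatment at the end of this diff), so unrelated limit expressions stop serializing each other. A minimal sketch of the per-object pattern with an invented struct name; the token-bucket arithmetic mirrors nft_limit_eval() shown above:

#include <linux/ktime.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_limit {
	spinlock_t lock;	/* per-object, no longer global */
	u64 last;		/* timestamp of last refill, in ns */
	u64 tokens;		/* current budget, in ns */
	u64 tokens_max;
};

static void example_limit_init(struct example_limit *limit, u64 tokens_max)
{
	spin_lock_init(&limit->lock);	/* must run before the first eval */
	limit->tokens = tokens_max;
	limit->tokens_max = tokens_max;
	limit->last = ktime_get_ns();
}

static bool example_limit_ok(struct example_limit *limit, u64 cost)
{
	u64 now, tokens;
	s64 delta;
	bool ok = false;

	spin_lock_bh(&limit->lock);
	now = ktime_get_ns();
	tokens = limit->tokens + now - limit->last;	/* refill by elapsed time */
	if (tokens > limit->tokens_max)
		tokens = limit->tokens_max;

	limit->last = now;
	delta = tokens - cost;
	if (delta >= 0) {
		limit->tokens = delta;			/* spend the cost */
		ok = true;
	} else {
		limit->tokens = tokens;
	}
	spin_unlock_bh(&limit->lock);

	return ok;
}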
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c index e21aea7e5ec8..475570e89ede 100644 --- a/net/netfilter/nft_lookup.c +++ b/net/netfilter/nft_lookup.c | |||
@@ -71,16 +71,10 @@ static int nft_lookup_init(const struct nft_ctx *ctx, | |||
71 | tb[NFTA_LOOKUP_SREG] == NULL) | 71 | tb[NFTA_LOOKUP_SREG] == NULL) |
72 | return -EINVAL; | 72 | return -EINVAL; |
73 | 73 | ||
74 | set = nf_tables_set_lookup(ctx->table, tb[NFTA_LOOKUP_SET], genmask); | 74 | set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_LOOKUP_SET], |
75 | if (IS_ERR(set)) { | 75 | tb[NFTA_LOOKUP_SET_ID], genmask); |
76 | if (tb[NFTA_LOOKUP_SET_ID]) { | 76 | if (IS_ERR(set)) |
77 | set = nf_tables_set_lookup_byid(ctx->net, | 77 | return PTR_ERR(set); |
78 | tb[NFTA_LOOKUP_SET_ID], | ||
79 | genmask); | ||
80 | } | ||
81 | if (IS_ERR(set)) | ||
82 | return PTR_ERR(set); | ||
83 | } | ||
84 | 78 | ||
85 | if (set->flags & NFT_SET_EVAL) | 79 | if (set->flags & NFT_SET_EVAL) |
86 | return -EOPNOTSUPP; | 80 | return -EOPNOTSUPP; |
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index 11ce016cd479..6ac03d4266c9 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c | |||
@@ -46,10 +46,6 @@ int nft_masq_init(const struct nft_ctx *ctx, | |||
46 | struct nft_masq *priv = nft_expr_priv(expr); | 46 | struct nft_masq *priv = nft_expr_priv(expr); |
47 | int err; | 47 | int err; |
48 | 48 | ||
49 | err = nft_masq_validate(ctx, expr, NULL); | ||
50 | if (err) | ||
51 | return err; | ||
52 | |||
53 | if (tb[NFTA_MASQ_FLAGS]) { | 49 | if (tb[NFTA_MASQ_FLAGS]) { |
54 | priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS])); | 50 | priv->flags = ntohl(nla_get_be32(tb[NFTA_MASQ_FLAGS])); |
55 | if (priv->flags & ~NF_NAT_RANGE_MASK) | 51 | if (priv->flags & ~NF_NAT_RANGE_MASK) |
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index e1f5ca9b423b..d14417aaf5d4 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c | |||
@@ -370,10 +370,6 @@ int nft_meta_set_init(const struct nft_ctx *ctx, | |||
370 | return -EOPNOTSUPP; | 370 | return -EOPNOTSUPP; |
371 | } | 371 | } |
372 | 372 | ||
373 | err = nft_meta_set_validate(ctx, expr, NULL); | ||
374 | if (err < 0) | ||
375 | return err; | ||
376 | |||
377 | priv->sreg = nft_parse_register(tb[NFTA_META_SREG]); | 373 | priv->sreg = nft_parse_register(tb[NFTA_META_SREG]); |
378 | err = nft_validate_register_load(priv->sreg, len); | 374 | err = nft_validate_register_load(priv->sreg, len); |
379 | if (err < 0) | 375 | if (err < 0) |
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 19a7bf3236f9..26a74dfb3b7a 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c | |||
@@ -138,10 +138,6 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, | |||
138 | return -EINVAL; | 138 | return -EINVAL; |
139 | } | 139 | } |
140 | 140 | ||
141 | err = nft_nat_validate(ctx, expr, NULL); | ||
142 | if (err < 0) | ||
143 | return err; | ||
144 | |||
145 | if (tb[NFTA_NAT_FAMILY] == NULL) | 141 | if (tb[NFTA_NAT_FAMILY] == NULL) |
146 | return -EINVAL; | 142 | return -EINVAL; |
147 | 143 | ||
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index 1ae8c49ca4a1..1dd428fbaaa3 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c | |||
@@ -116,16 +116,10 @@ static int nft_objref_map_init(const struct nft_ctx *ctx, | |||
116 | struct nft_set *set; | 116 | struct nft_set *set; |
117 | int err; | 117 | int err; |
118 | 118 | ||
119 | set = nf_tables_set_lookup(ctx->table, tb[NFTA_OBJREF_SET_NAME], genmask); | 119 | set = nft_set_lookup(ctx->net, ctx->table, tb[NFTA_OBJREF_SET_NAME], |
120 | if (IS_ERR(set)) { | 120 | tb[NFTA_OBJREF_SET_ID], genmask); |
121 | if (tb[NFTA_OBJREF_SET_ID]) { | 121 | if (IS_ERR(set)) |
122 | set = nf_tables_set_lookup_byid(ctx->net, | 122 | return PTR_ERR(set); |
123 | tb[NFTA_OBJREF_SET_ID], | ||
124 | genmask); | ||
125 | } | ||
126 | if (IS_ERR(set)) | ||
127 | return PTR_ERR(set); | ||
128 | } | ||
129 | 123 | ||
130 | if (!(set->flags & NFT_SET_OBJECT)) | 124 | if (!(set->flags & NFT_SET_OBJECT)) |
131 | return -EINVAL; | 125 | return -EINVAL; |
diff --git a/net/netfilter/nft_quota.c b/net/netfilter/nft_quota.c index 2d6fe3559912..25e33159be57 100644 --- a/net/netfilter/nft_quota.c +++ b/net/netfilter/nft_quota.c | |||
@@ -99,7 +99,8 @@ static int nft_quota_do_init(const struct nlattr * const tb[], | |||
99 | return 0; | 99 | return 0; |
100 | } | 100 | } |
101 | 101 | ||
102 | static int nft_quota_obj_init(const struct nlattr * const tb[], | 102 | static int nft_quota_obj_init(const struct nft_ctx *ctx, |
103 | const struct nlattr * const tb[], | ||
103 | struct nft_object *obj) | 104 | struct nft_object *obj) |
104 | { | 105 | { |
105 | struct nft_quota *priv = nft_obj_data(obj); | 106 | struct nft_quota *priv = nft_obj_data(obj); |
diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c index 40dcd05146d5..1e66538bf0ff 100644 --- a/net/netfilter/nft_redir.c +++ b/net/netfilter/nft_redir.c | |||
@@ -47,10 +47,6 @@ int nft_redir_init(const struct nft_ctx *ctx, | |||
47 | unsigned int plen; | 47 | unsigned int plen; |
48 | int err; | 48 | int err; |
49 | 49 | ||
50 | err = nft_redir_validate(ctx, expr, NULL); | ||
51 | if (err < 0) | ||
52 | return err; | ||
53 | |||
54 | plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all); | 50 | plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all); |
55 | if (tb[NFTA_REDIR_REG_PROTO_MIN]) { | 51 | if (tb[NFTA_REDIR_REG_PROTO_MIN]) { |
56 | priv->sreg_proto_min = | 52 | priv->sreg_proto_min = |
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c index c64de3f7379d..29f5bd2377b0 100644 --- a/net/netfilter/nft_reject.c +++ b/net/netfilter/nft_reject.c | |||
@@ -42,11 +42,6 @@ int nft_reject_init(const struct nft_ctx *ctx, | |||
42 | const struct nlattr * const tb[]) | 42 | const struct nlattr * const tb[]) |
43 | { | 43 | { |
44 | struct nft_reject *priv = nft_expr_priv(expr); | 44 | struct nft_reject *priv = nft_expr_priv(expr); |
45 | int err; | ||
46 | |||
47 | err = nft_reject_validate(ctx, expr, NULL); | ||
48 | if (err < 0) | ||
49 | return err; | ||
50 | 45 | ||
51 | if (tb[NFTA_REJECT_TYPE] == NULL) | 46 | if (tb[NFTA_REJECT_TYPE] == NULL) |
52 | return -EINVAL; | 47 | return -EINVAL; |
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c index 9e90a02cb104..5a7fb5ff867d 100644 --- a/net/netfilter/nft_reject_inet.c +++ b/net/netfilter/nft_reject_inet.c | |||
@@ -66,11 +66,7 @@ static int nft_reject_inet_init(const struct nft_ctx *ctx, | |||
66 | const struct nlattr * const tb[]) | 66 | const struct nlattr * const tb[]) |
67 | { | 67 | { |
68 | struct nft_reject *priv = nft_expr_priv(expr); | 68 | struct nft_reject *priv = nft_expr_priv(expr); |
69 | int icmp_code, err; | 69 | int icmp_code; |
70 | |||
71 | err = nft_reject_validate(ctx, expr, NULL); | ||
72 | if (err < 0) | ||
73 | return err; | ||
74 | 70 | ||
75 | if (tb[NFTA_REJECT_TYPE] == NULL) | 71 | if (tb[NFTA_REJECT_TYPE] == NULL) |
76 | return -EINVAL; | 72 | return -EINVAL; |
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 78dfbf9588b3..e97e2fb53f0a 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c | |||
@@ -18,9 +18,8 @@ | |||
18 | #include <linux/netfilter/nf_tables.h> | 18 | #include <linux/netfilter/nf_tables.h> |
19 | #include <net/netfilter/nf_tables.h> | 19 | #include <net/netfilter/nf_tables.h> |
20 | 20 | ||
21 | static DEFINE_SPINLOCK(nft_rbtree_lock); | ||
22 | |||
23 | struct nft_rbtree { | 21 | struct nft_rbtree { |
22 | rwlock_t lock; | ||
24 | struct rb_root root; | 23 | struct rb_root root; |
25 | }; | 24 | }; |
26 | 25 | ||
@@ -44,14 +43,14 @@ static bool nft_rbtree_equal(const struct nft_set *set, const void *this, | |||
44 | static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, | 43 | static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, |
45 | const u32 *key, const struct nft_set_ext **ext) | 44 | const u32 *key, const struct nft_set_ext **ext) |
46 | { | 45 | { |
47 | const struct nft_rbtree *priv = nft_set_priv(set); | 46 | struct nft_rbtree *priv = nft_set_priv(set); |
48 | const struct nft_rbtree_elem *rbe, *interval = NULL; | 47 | const struct nft_rbtree_elem *rbe, *interval = NULL; |
49 | u8 genmask = nft_genmask_cur(net); | 48 | u8 genmask = nft_genmask_cur(net); |
50 | const struct rb_node *parent; | 49 | const struct rb_node *parent; |
51 | const void *this; | 50 | const void *this; |
52 | int d; | 51 | int d; |
53 | 52 | ||
54 | spin_lock_bh(&nft_rbtree_lock); | 53 | read_lock_bh(&priv->lock); |
55 | parent = priv->root.rb_node; | 54 | parent = priv->root.rb_node; |
56 | while (parent != NULL) { | 55 | while (parent != NULL) { |
57 | rbe = rb_entry(parent, struct nft_rbtree_elem, node); | 56 | rbe = rb_entry(parent, struct nft_rbtree_elem, node); |
@@ -75,7 +74,7 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, | |||
75 | } | 74 | } |
76 | if (nft_rbtree_interval_end(rbe)) | 75 | if (nft_rbtree_interval_end(rbe)) |
77 | goto out; | 76 | goto out; |
78 | spin_unlock_bh(&nft_rbtree_lock); | 77 | read_unlock_bh(&priv->lock); |
79 | 78 | ||
80 | *ext = &rbe->ext; | 79 | *ext = &rbe->ext; |
81 | return true; | 80 | return true; |
@@ -85,12 +84,12 @@ static bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set, | |||
85 | if (set->flags & NFT_SET_INTERVAL && interval != NULL && | 84 | if (set->flags & NFT_SET_INTERVAL && interval != NULL && |
86 | nft_set_elem_active(&interval->ext, genmask) && | 85 | nft_set_elem_active(&interval->ext, genmask) && |
87 | !nft_rbtree_interval_end(interval)) { | 86 | !nft_rbtree_interval_end(interval)) { |
88 | spin_unlock_bh(&nft_rbtree_lock); | 87 | read_unlock_bh(&priv->lock); |
89 | *ext = &interval->ext; | 88 | *ext = &interval->ext; |
90 | return true; | 89 | return true; |
91 | } | 90 | } |
92 | out: | 91 | out: |
93 | spin_unlock_bh(&nft_rbtree_lock); | 92 | read_unlock_bh(&priv->lock); |
94 | return false; | 93 | return false; |
95 | } | 94 | } |
96 | 95 | ||
@@ -140,12 +139,13 @@ static int nft_rbtree_insert(const struct net *net, const struct nft_set *set, | |||
140 | const struct nft_set_elem *elem, | 139 | const struct nft_set_elem *elem, |
141 | struct nft_set_ext **ext) | 140 | struct nft_set_ext **ext) |
142 | { | 141 | { |
142 | struct nft_rbtree *priv = nft_set_priv(set); | ||
143 | struct nft_rbtree_elem *rbe = elem->priv; | 143 | struct nft_rbtree_elem *rbe = elem->priv; |
144 | int err; | 144 | int err; |
145 | 145 | ||
146 | spin_lock_bh(&nft_rbtree_lock); | 146 | write_lock_bh(&priv->lock); |
147 | err = __nft_rbtree_insert(net, set, rbe, ext); | 147 | err = __nft_rbtree_insert(net, set, rbe, ext); |
148 | spin_unlock_bh(&nft_rbtree_lock); | 148 | write_unlock_bh(&priv->lock); |
149 | 149 | ||
150 | return err; | 150 | return err; |
151 | } | 151 | } |
@@ -157,9 +157,9 @@ static void nft_rbtree_remove(const struct net *net, | |||
157 | struct nft_rbtree *priv = nft_set_priv(set); | 157 | struct nft_rbtree *priv = nft_set_priv(set); |
158 | struct nft_rbtree_elem *rbe = elem->priv; | 158 | struct nft_rbtree_elem *rbe = elem->priv; |
159 | 159 | ||
160 | spin_lock_bh(&nft_rbtree_lock); | 160 | write_lock_bh(&priv->lock); |
161 | rb_erase(&rbe->node, &priv->root); | 161 | rb_erase(&rbe->node, &priv->root); |
162 | spin_unlock_bh(&nft_rbtree_lock); | 162 | write_unlock_bh(&priv->lock); |
163 | } | 163 | } |
164 | 164 | ||
165 | static void nft_rbtree_activate(const struct net *net, | 165 | static void nft_rbtree_activate(const struct net *net, |
@@ -224,12 +224,12 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, | |||
224 | struct nft_set *set, | 224 | struct nft_set *set, |
225 | struct nft_set_iter *iter) | 225 | struct nft_set_iter *iter) |
226 | { | 226 | { |
227 | const struct nft_rbtree *priv = nft_set_priv(set); | 227 | struct nft_rbtree *priv = nft_set_priv(set); |
228 | struct nft_rbtree_elem *rbe; | 228 | struct nft_rbtree_elem *rbe; |
229 | struct nft_set_elem elem; | 229 | struct nft_set_elem elem; |
230 | struct rb_node *node; | 230 | struct rb_node *node; |
231 | 231 | ||
232 | spin_lock_bh(&nft_rbtree_lock); | 232 | read_lock_bh(&priv->lock); |
233 | for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { | 233 | for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) { |
234 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | 234 | rbe = rb_entry(node, struct nft_rbtree_elem, node); |
235 | 235 | ||
@@ -242,13 +242,13 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx, | |||
242 | 242 | ||
243 | iter->err = iter->fn(ctx, set, iter, &elem); | 243 | iter->err = iter->fn(ctx, set, iter, &elem); |
244 | if (iter->err < 0) { | 244 | if (iter->err < 0) { |
245 | spin_unlock_bh(&nft_rbtree_lock); | 245 | read_unlock_bh(&priv->lock); |
246 | return; | 246 | return; |
247 | } | 247 | } |
248 | cont: | 248 | cont: |
249 | iter->count++; | 249 | iter->count++; |
250 | } | 250 | } |
251 | spin_unlock_bh(&nft_rbtree_lock); | 251 | read_unlock_bh(&priv->lock); |
252 | } | 252 | } |
253 | 253 | ||
254 | static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[]) | 254 | static unsigned int nft_rbtree_privsize(const struct nlattr * const nla[]) |
@@ -262,6 +262,7 @@ static int nft_rbtree_init(const struct nft_set *set, | |||
262 | { | 262 | { |
263 | struct nft_rbtree *priv = nft_set_priv(set); | 263 | struct nft_rbtree *priv = nft_set_priv(set); |
264 | 264 | ||
265 | rwlock_init(&priv->lock); | ||
265 | priv->root = RB_ROOT; | 266 | priv->root = RB_ROOT; |
266 | return 0; | 267 | return 0; |
267 | } | 268 | } |
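nft_set_rbtree likewise drops the global nft_rbtree_lock in favor of a per-set lock, and switches it to an rwlock so concurrent lookups and walks no longer exclude each other; only insertions and removals take it exclusively. A compressed sketch of the locking pattern with invented names (the traversal and rebalancing details are elided):

#include <linux/rbtree.h>
#include <linux/spinlock.h>

struct example_tree {
	rwlock_t lock;		/* per-set, replaces the global spinlock */
	struct rb_root root;
};

static void example_tree_init(struct example_tree *t)
{
	rwlock_init(&t->lock);
	t->root = RB_ROOT;
}

/* Lookups and walks only read the tree, so they may now run in
 * parallel on different CPUs. */
static void example_tree_lookup(struct example_tree *t)
{
	read_lock_bh(&t->lock);
	/* ... rb_first()/rb_next() traversal elided ... */
	read_unlock_bh(&t->lock);
}

/* Insertions and removals still require exclusive access. */
static void example_tree_modify(struct example_tree *t)
{
	write_lock_bh(&t->lock);
	/* ... rb_erase()/insert path elided ... */
	write_unlock_bh(&t->lock);
}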
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index dab962df1787..d27b5f1ea619 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/netfilter/xt_limit.h> | 18 | #include <linux/netfilter/xt_limit.h> |
19 | 19 | ||
20 | struct xt_limit_priv { | 20 | struct xt_limit_priv { |
21 | spinlock_t lock; | ||
21 | unsigned long prev; | 22 | unsigned long prev; |
22 | uint32_t credit; | 23 | uint32_t credit; |
23 | }; | 24 | }; |
@@ -32,8 +33,6 @@ MODULE_ALIAS("ip6t_limit"); | |||
32 | * see net/sched/sch_tbf.c in the linux source tree | 33 | * see net/sched/sch_tbf.c in the linux source tree |
33 | */ | 34 | */ |
34 | 35 | ||
35 | static DEFINE_SPINLOCK(limit_lock); | ||
36 | |||
37 | /* Rusty: This is my (non-mathematically-inclined) understanding of | 36 | /* Rusty: This is my (non-mathematically-inclined) understanding of |
38 | this algorithm. The `average rate' in jiffies becomes your initial | 37 | this algorithm. The `average rate' in jiffies becomes your initial |
39 | amount of credit `credit' and the most credit you can ever have | 38 | amount of credit `credit' and the most credit you can ever have |
@@ -72,7 +71,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
72 | struct xt_limit_priv *priv = r->master; | 71 | struct xt_limit_priv *priv = r->master; |
73 | unsigned long now = jiffies; | 72 | unsigned long now = jiffies; |
74 | 73 | ||
75 | spin_lock_bh(&limit_lock); | 74 | spin_lock_bh(&priv->lock); |
76 | priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY; | 75 | priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY; |
77 | if (priv->credit > r->credit_cap) | 76 | if (priv->credit > r->credit_cap) |
78 | priv->credit = r->credit_cap; | 77 | priv->credit = r->credit_cap; |
@@ -80,11 +79,11 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
80 | if (priv->credit >= r->cost) { | 79 | if (priv->credit >= r->cost) { |
81 | /* We're not limited. */ | 80 | /* We're not limited. */ |
82 | priv->credit -= r->cost; | 81 | priv->credit -= r->cost; |
83 | spin_unlock_bh(&limit_lock); | 82 | spin_unlock_bh(&priv->lock); |
84 | return true; | 83 | return true; |
85 | } | 84 | } |
86 | 85 | ||
87 | spin_unlock_bh(&limit_lock); | 86 | spin_unlock_bh(&priv->lock); |
88 | return false; | 87 | return false; |
89 | } | 88 | } |
90 | 89 | ||
@@ -126,6 +125,8 @@ static int limit_mt_check(const struct xt_mtchk_param *par) | |||
126 | r->credit_cap = priv->credit; /* Credits full. */ | 125 | r->credit_cap = priv->credit; /* Credits full. */ |
127 | r->cost = user2credits(r->avg); | 126 | r->cost = user2credits(r->avg); |
128 | } | 127 | } |
128 | spin_lock_init(&priv->lock); | ||
129 | |||
129 | return 0; | 130 | return 0; |
130 | } | 131 | } |
131 | 132 | ||