diff options
author | Aaron Conole <aconole@bytheb.org> | 2016-09-21 11:35:07 -0400 |
---|---|---|
committer | Pablo Neira Ayuso <pablo@netfilter.org> | 2016-09-25 08:38:48 -0400 |
commit | e3b37f11e6e4e6b6f02cc762f182ce233d2c1c9d (patch) | |
tree | 09f9f1a8b9e8e7e173e1059c5e251c19a9852df6 /net | |
parent | 54f17bbc52f71e2d313721046627c383d6c5c7da (diff) |
netfilter: replace list_head with single linked list
The netfilter hook list never uses the prev pointer, and so can be trimmed to
be a simple singly-linked list.
In addition to having a more lightweight structure for hook traversal,
struct net becomes 5568 bytes (down from 6400) and struct net_device becomes
2176 bytes (down from 2240).
Signed-off-by: Aaron Conole <aconole@bytheb.org>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Diffstat (limited to 'net')
-rw-r--r-- | net/bridge/br_netfilter_hooks.c | 19 | ||||
-rw-r--r-- | net/netfilter/core.c | 141 | ||||
-rw-r--r-- | net/netfilter/nf_internals.h | 10 | ||||
-rw-r--r-- | net/netfilter/nf_queue.c | 18 | ||||
-rw-r--r-- | net/netfilter/nfnetlink_queue.c | 8 |
5 files changed, 118 insertions, 78 deletions
diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index 6029af47377d..2fe9345c1407 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c | |||
@@ -1002,28 +1002,21 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net, | |||
1002 | int (*okfn)(struct net *, struct sock *, | 1002 | int (*okfn)(struct net *, struct sock *, |
1003 | struct sk_buff *)) | 1003 | struct sk_buff *)) |
1004 | { | 1004 | { |
1005 | struct nf_hook_ops *elem; | 1005 | struct nf_hook_entry *elem; |
1006 | struct nf_hook_state state; | 1006 | struct nf_hook_state state; |
1007 | struct list_head *head; | ||
1008 | int ret; | 1007 | int ret; |
1009 | 1008 | ||
1010 | head = &net->nf.hooks[NFPROTO_BRIDGE][hook]; | 1009 | elem = rcu_dereference(net->nf.hooks[NFPROTO_BRIDGE][hook]); |
1011 | 1010 | ||
1012 | list_for_each_entry_rcu(elem, head, list) { | 1011 | while (elem && (elem->ops.priority <= NF_BR_PRI_BRNF)) |
1013 | struct nf_hook_ops *next; | 1012 | elem = rcu_dereference(elem->next); |
1014 | 1013 | ||
1015 | next = list_entry_rcu(list_next_rcu(&elem->list), | 1014 | if (!elem) |
1016 | struct nf_hook_ops, list); | ||
1017 | if (next->priority <= NF_BR_PRI_BRNF) | ||
1018 | continue; | ||
1019 | } | ||
1020 | |||
1021 | if (&elem->list == head) | ||
1022 | return okfn(net, sk, skb); | 1015 | return okfn(net, sk, skb); |
1023 | 1016 | ||
1024 | /* We may already have this, but read-locks nest anyway */ | 1017 | /* We may already have this, but read-locks nest anyway */ |
1025 | rcu_read_lock(); | 1018 | rcu_read_lock(); |
1026 | nf_hook_state_init(&state, head, hook, NF_BR_PRI_BRNF + 1, | 1019 | nf_hook_state_init(&state, elem, hook, NF_BR_PRI_BRNF + 1, |
1027 | NFPROTO_BRIDGE, indev, outdev, sk, net, okfn); | 1020 | NFPROTO_BRIDGE, indev, outdev, sk, net, okfn); |
1028 | 1021 | ||
1029 | ret = nf_hook_slow(skb, &state); | 1022 | ret = nf_hook_slow(skb, &state); |
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 67b74287535d..72fc514ec676 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/proc_fs.h> | 22 | #include <linux/proc_fs.h> |
23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/rcupdate.h> | ||
25 | #include <net/net_namespace.h> | 26 | #include <net/net_namespace.h> |
26 | #include <net/sock.h> | 27 | #include <net/sock.h> |
27 | 28 | ||
@@ -61,33 +62,50 @@ EXPORT_SYMBOL(nf_hooks_needed); | |||
61 | #endif | 62 | #endif |
62 | 63 | ||
63 | static DEFINE_MUTEX(nf_hook_mutex); | 64 | static DEFINE_MUTEX(nf_hook_mutex); |
65 | #define nf_entry_dereference(e) \ | ||
66 | rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex)) | ||
64 | 67 | ||
65 | static struct list_head *nf_find_hook_list(struct net *net, | 68 | static struct nf_hook_entry *nf_hook_entry_head(struct net *net, |
66 | const struct nf_hook_ops *reg) | 69 | const struct nf_hook_ops *reg) |
67 | { | 70 | { |
68 | struct list_head *hook_list = NULL; | 71 | struct nf_hook_entry *hook_head = NULL; |
69 | 72 | ||
70 | if (reg->pf != NFPROTO_NETDEV) | 73 | if (reg->pf != NFPROTO_NETDEV) |
71 | hook_list = &net->nf.hooks[reg->pf][reg->hooknum]; | 74 | hook_head = nf_entry_dereference(net->nf.hooks[reg->pf] |
75 | [reg->hooknum]); | ||
72 | else if (reg->hooknum == NF_NETDEV_INGRESS) { | 76 | else if (reg->hooknum == NF_NETDEV_INGRESS) { |
73 | #ifdef CONFIG_NETFILTER_INGRESS | 77 | #ifdef CONFIG_NETFILTER_INGRESS |
74 | if (reg->dev && dev_net(reg->dev) == net) | 78 | if (reg->dev && dev_net(reg->dev) == net) |
75 | hook_list = ®->dev->nf_hooks_ingress; | 79 | hook_head = |
80 | nf_entry_dereference( | ||
81 | reg->dev->nf_hooks_ingress); | ||
76 | #endif | 82 | #endif |
77 | } | 83 | } |
78 | return hook_list; | 84 | return hook_head; |
79 | } | 85 | } |
80 | 86 | ||
81 | struct nf_hook_entry { | 87 | /* must hold nf_hook_mutex */ |
82 | const struct nf_hook_ops *orig_ops; | 88 | static void nf_set_hooks_head(struct net *net, const struct nf_hook_ops *reg, |
83 | struct nf_hook_ops ops; | 89 | struct nf_hook_entry *entry) |
84 | }; | 90 | { |
91 | switch (reg->pf) { | ||
92 | case NFPROTO_NETDEV: | ||
93 | /* We already checked in nf_register_net_hook() that this is | ||
94 | * used from ingress. | ||
95 | */ | ||
96 | rcu_assign_pointer(reg->dev->nf_hooks_ingress, entry); | ||
97 | break; | ||
98 | default: | ||
99 | rcu_assign_pointer(net->nf.hooks[reg->pf][reg->hooknum], | ||
100 | entry); | ||
101 | break; | ||
102 | } | ||
103 | } | ||
85 | 104 | ||
86 | int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) | 105 | int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) |
87 | { | 106 | { |
88 | struct list_head *hook_list; | 107 | struct nf_hook_entry *hooks_entry; |
89 | struct nf_hook_entry *entry; | 108 | struct nf_hook_entry *entry; |
90 | struct nf_hook_ops *elem; | ||
91 | 109 | ||
92 | if (reg->pf == NFPROTO_NETDEV && | 110 | if (reg->pf == NFPROTO_NETDEV && |
93 | (reg->hooknum != NF_NETDEV_INGRESS || | 111 | (reg->hooknum != NF_NETDEV_INGRESS || |
@@ -100,19 +118,30 @@ int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg) | |||
100 | 118 | ||
101 | entry->orig_ops = reg; | 119 | entry->orig_ops = reg; |
102 | entry->ops = *reg; | 120 | entry->ops = *reg; |
121 | entry->next = NULL; | ||
122 | |||
123 | mutex_lock(&nf_hook_mutex); | ||
124 | hooks_entry = nf_hook_entry_head(net, reg); | ||
103 | 125 | ||
104 | hook_list = nf_find_hook_list(net, reg); | 126 | if (hooks_entry && hooks_entry->orig_ops->priority > reg->priority) { |
105 | if (!hook_list) { | 127 | /* This is the case where we need to insert at the head */ |
106 | kfree(entry); | 128 | entry->next = hooks_entry; |
107 | return -ENOENT; | 129 | hooks_entry = NULL; |
108 | } | 130 | } |
109 | 131 | ||
110 | mutex_lock(&nf_hook_mutex); | 132 | while (hooks_entry && |
111 | list_for_each_entry(elem, hook_list, list) { | 133 | reg->priority >= hooks_entry->orig_ops->priority && |
112 | if (reg->priority < elem->priority) | 134 | nf_entry_dereference(hooks_entry->next)) { |
113 | break; | 135 | hooks_entry = nf_entry_dereference(hooks_entry->next); |
136 | } | ||
137 | |||
138 | if (hooks_entry) { | ||
139 | entry->next = nf_entry_dereference(hooks_entry->next); | ||
140 | rcu_assign_pointer(hooks_entry->next, entry); | ||
141 | } else { | ||
142 | nf_set_hooks_head(net, reg, entry); | ||
114 | } | 143 | } |
115 | list_add_rcu(&entry->ops.list, elem->list.prev); | 144 | |
116 | mutex_unlock(&nf_hook_mutex); | 145 | mutex_unlock(&nf_hook_mutex); |
117 | #ifdef CONFIG_NETFILTER_INGRESS | 146 | #ifdef CONFIG_NETFILTER_INGRESS |
118 | if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) | 147 | if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS) |
@@ -127,24 +156,33 @@ EXPORT_SYMBOL(nf_register_net_hook); | |||
127 | 156 | ||
128 | void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) | 157 | void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) |
129 | { | 158 | { |
130 | struct list_head *hook_list; | 159 | struct nf_hook_entry *hooks_entry; |
131 | struct nf_hook_entry *entry; | ||
132 | struct nf_hook_ops *elem; | ||
133 | |||
134 | hook_list = nf_find_hook_list(net, reg); | ||
135 | if (!hook_list) | ||
136 | return; | ||
137 | 160 | ||
138 | mutex_lock(&nf_hook_mutex); | 161 | mutex_lock(&nf_hook_mutex); |
139 | list_for_each_entry(elem, hook_list, list) { | 162 | hooks_entry = nf_hook_entry_head(net, reg); |
140 | entry = container_of(elem, struct nf_hook_entry, ops); | 163 | if (hooks_entry->orig_ops == reg) { |
141 | if (entry->orig_ops == reg) { | 164 | nf_set_hooks_head(net, reg, |
142 | list_del_rcu(&entry->ops.list); | 165 | nf_entry_dereference(hooks_entry->next)); |
143 | break; | 166 | goto unlock; |
167 | } | ||
168 | while (hooks_entry && nf_entry_dereference(hooks_entry->next)) { | ||
169 | struct nf_hook_entry *next = | ||
170 | nf_entry_dereference(hooks_entry->next); | ||
171 | struct nf_hook_entry *nnext; | ||
172 | |||
173 | if (next->orig_ops != reg) { | ||
174 | hooks_entry = next; | ||
175 | continue; | ||
144 | } | 176 | } |
177 | nnext = nf_entry_dereference(next->next); | ||
178 | rcu_assign_pointer(hooks_entry->next, nnext); | ||
179 | hooks_entry = next; | ||
180 | break; | ||
145 | } | 181 | } |
182 | |||
183 | unlock: | ||
146 | mutex_unlock(&nf_hook_mutex); | 184 | mutex_unlock(&nf_hook_mutex); |
147 | if (&elem->list == hook_list) { | 185 | if (!hooks_entry) { |
148 | WARN(1, "nf_unregister_net_hook: hook not found!\n"); | 186 | WARN(1, "nf_unregister_net_hook: hook not found!\n"); |
149 | return; | 187 | return; |
150 | } | 188 | } |
@@ -156,10 +194,10 @@ void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg) | |||
156 | static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); | 194 | static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); |
157 | #endif | 195 | #endif |
158 | synchronize_net(); | 196 | synchronize_net(); |
159 | nf_queue_nf_hook_drop(net, &entry->ops); | 197 | nf_queue_nf_hook_drop(net, hooks_entry); |
160 | /* other cpu might still process nfqueue verdict that used reg */ | 198 | /* other cpu might still process nfqueue verdict that used reg */ |
161 | synchronize_net(); | 199 | synchronize_net(); |
162 | kfree(entry); | 200 | kfree(hooks_entry); |
163 | } | 201 | } |
164 | EXPORT_SYMBOL(nf_unregister_net_hook); | 202 | EXPORT_SYMBOL(nf_unregister_net_hook); |
165 | 203 | ||
@@ -258,10 +296,9 @@ void nf_unregister_hooks(struct nf_hook_ops *reg, unsigned int n) | |||
258 | } | 296 | } |
259 | EXPORT_SYMBOL(nf_unregister_hooks); | 297 | EXPORT_SYMBOL(nf_unregister_hooks); |
260 | 298 | ||
261 | unsigned int nf_iterate(struct list_head *head, | 299 | unsigned int nf_iterate(struct sk_buff *skb, |
262 | struct sk_buff *skb, | ||
263 | struct nf_hook_state *state, | 300 | struct nf_hook_state *state, |
264 | struct nf_hook_ops **elemp) | 301 | struct nf_hook_entry **entryp) |
265 | { | 302 | { |
266 | unsigned int verdict; | 303 | unsigned int verdict; |
267 | 304 | ||
@@ -269,20 +306,23 @@ unsigned int nf_iterate(struct list_head *head, | |||
269 | * The caller must not block between calls to this | 306 | * The caller must not block between calls to this |
270 | * function because of risk of continuing from deleted element. | 307 | * function because of risk of continuing from deleted element. |
271 | */ | 308 | */ |
272 | list_for_each_entry_continue_rcu((*elemp), head, list) { | 309 | while (*entryp) { |
273 | if (state->thresh > (*elemp)->priority) | 310 | if (state->thresh > (*entryp)->ops.priority) { |
311 | *entryp = rcu_dereference((*entryp)->next); | ||
274 | continue; | 312 | continue; |
313 | } | ||
275 | 314 | ||
276 | /* Optimization: we don't need to hold module | 315 | /* Optimization: we don't need to hold module |
277 | reference here, since function can't sleep. --RR */ | 316 | reference here, since function can't sleep. --RR */ |
278 | repeat: | 317 | repeat: |
279 | verdict = (*elemp)->hook((*elemp)->priv, skb, state); | 318 | verdict = (*entryp)->ops.hook((*entryp)->ops.priv, skb, state); |
280 | if (verdict != NF_ACCEPT) { | 319 | if (verdict != NF_ACCEPT) { |
281 | #ifdef CONFIG_NETFILTER_DEBUG | 320 | #ifdef CONFIG_NETFILTER_DEBUG |
282 | if (unlikely((verdict & NF_VERDICT_MASK) | 321 | if (unlikely((verdict & NF_VERDICT_MASK) |
283 | > NF_MAX_VERDICT)) { | 322 | > NF_MAX_VERDICT)) { |
284 | NFDEBUG("Evil return from %p(%u).\n", | 323 | NFDEBUG("Evil return from %p(%u).\n", |
285 | (*elemp)->hook, state->hook); | 324 | (*entryp)->ops.hook, state->hook); |
325 | *entryp = rcu_dereference((*entryp)->next); | ||
286 | continue; | 326 | continue; |
287 | } | 327 | } |
288 | #endif | 328 | #endif |
@@ -290,6 +330,7 @@ repeat: | |||
290 | return verdict; | 330 | return verdict; |
291 | goto repeat; | 331 | goto repeat; |
292 | } | 332 | } |
333 | *entryp = rcu_dereference((*entryp)->next); | ||
293 | } | 334 | } |
294 | return NF_ACCEPT; | 335 | return NF_ACCEPT; |
295 | } | 336 | } |
@@ -299,13 +340,13 @@ repeat: | |||
299 | * -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */ | 340 | * -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */ |
300 | int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state) | 341 | int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state) |
301 | { | 342 | { |
302 | struct nf_hook_ops *elem; | 343 | struct nf_hook_entry *entry; |
303 | unsigned int verdict; | 344 | unsigned int verdict; |
304 | int ret = 0; | 345 | int ret = 0; |
305 | 346 | ||
306 | elem = list_entry_rcu(state->hook_list, struct nf_hook_ops, list); | 347 | entry = rcu_dereference(state->hook_entries); |
307 | next_hook: | 348 | next_hook: |
308 | verdict = nf_iterate(state->hook_list, skb, state, &elem); | 349 | verdict = nf_iterate(skb, state, &entry); |
309 | if (verdict == NF_ACCEPT || verdict == NF_STOP) { | 350 | if (verdict == NF_ACCEPT || verdict == NF_STOP) { |
310 | ret = 1; | 351 | ret = 1; |
311 | } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) { | 352 | } else if ((verdict & NF_VERDICT_MASK) == NF_DROP) { |
@@ -314,8 +355,10 @@ next_hook: | |||
314 | if (ret == 0) | 355 | if (ret == 0) |
315 | ret = -EPERM; | 356 | ret = -EPERM; |
316 | } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { | 357 | } else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) { |
317 | int err = nf_queue(skb, elem, state, | 358 | int err; |
318 | verdict >> NF_VERDICT_QBITS); | 359 | |
360 | RCU_INIT_POINTER(state->hook_entries, entry); | ||
361 | err = nf_queue(skb, state, verdict >> NF_VERDICT_QBITS); | ||
319 | if (err < 0) { | 362 | if (err < 0) { |
320 | if (err == -ESRCH && | 363 | if (err == -ESRCH && |
321 | (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) | 364 | (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS)) |
@@ -442,7 +485,7 @@ static int __net_init netfilter_net_init(struct net *net) | |||
442 | 485 | ||
443 | for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) { | 486 | for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) { |
444 | for (h = 0; h < NF_MAX_HOOKS; h++) | 487 | for (h = 0; h < NF_MAX_HOOKS; h++) |
445 | INIT_LIST_HEAD(&net->nf.hooks[i][h]); | 488 | RCU_INIT_POINTER(net->nf.hooks[i][h], NULL); |
446 | } | 489 | } |
447 | 490 | ||
448 | #ifdef CONFIG_PROC_FS | 491 | #ifdef CONFIG_PROC_FS |
diff --git a/net/netfilter/nf_internals.h b/net/netfilter/nf_internals.h index 065522564ac6..e0adb5959342 100644 --- a/net/netfilter/nf_internals.h +++ b/net/netfilter/nf_internals.h | |||
@@ -13,13 +13,13 @@ | |||
13 | 13 | ||
14 | 14 | ||
15 | /* core.c */ | 15 | /* core.c */ |
16 | unsigned int nf_iterate(struct list_head *head, struct sk_buff *skb, | 16 | unsigned int nf_iterate(struct sk_buff *skb, struct nf_hook_state *state, |
17 | struct nf_hook_state *state, struct nf_hook_ops **elemp); | 17 | struct nf_hook_entry **entryp); |
18 | 18 | ||
19 | /* nf_queue.c */ | 19 | /* nf_queue.c */ |
20 | int nf_queue(struct sk_buff *skb, struct nf_hook_ops *elem, | 20 | int nf_queue(struct sk_buff *skb, struct nf_hook_state *state, |
21 | struct nf_hook_state *state, unsigned int queuenum); | 21 | unsigned int queuenum); |
22 | void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops); | 22 | void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry); |
23 | int __init netfilter_queue_init(void); | 23 | int __init netfilter_queue_init(void); |
24 | 24 | ||
25 | /* nf_log.c */ | 25 | /* nf_log.c */ |
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index b19ad20a705c..96964a0070e1 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
@@ -96,14 +96,14 @@ void nf_queue_entry_get_refs(struct nf_queue_entry *entry) | |||
96 | } | 96 | } |
97 | EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs); | 97 | EXPORT_SYMBOL_GPL(nf_queue_entry_get_refs); |
98 | 98 | ||
99 | void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops) | 99 | void nf_queue_nf_hook_drop(struct net *net, const struct nf_hook_entry *entry) |
100 | { | 100 | { |
101 | const struct nf_queue_handler *qh; | 101 | const struct nf_queue_handler *qh; |
102 | 102 | ||
103 | rcu_read_lock(); | 103 | rcu_read_lock(); |
104 | qh = rcu_dereference(net->nf.queue_handler); | 104 | qh = rcu_dereference(net->nf.queue_handler); |
105 | if (qh) | 105 | if (qh) |
106 | qh->nf_hook_drop(net, ops); | 106 | qh->nf_hook_drop(net, entry); |
107 | rcu_read_unlock(); | 107 | rcu_read_unlock(); |
108 | } | 108 | } |
109 | 109 | ||
@@ -112,7 +112,6 @@ void nf_queue_nf_hook_drop(struct net *net, struct nf_hook_ops *ops) | |||
112 | * through nf_reinject(). | 112 | * through nf_reinject(). |
113 | */ | 113 | */ |
114 | int nf_queue(struct sk_buff *skb, | 114 | int nf_queue(struct sk_buff *skb, |
115 | struct nf_hook_ops *elem, | ||
116 | struct nf_hook_state *state, | 115 | struct nf_hook_state *state, |
117 | unsigned int queuenum) | 116 | unsigned int queuenum) |
118 | { | 117 | { |
@@ -141,7 +140,6 @@ int nf_queue(struct sk_buff *skb, | |||
141 | 140 | ||
142 | *entry = (struct nf_queue_entry) { | 141 | *entry = (struct nf_queue_entry) { |
143 | .skb = skb, | 142 | .skb = skb, |
144 | .elem = elem, | ||
145 | .state = *state, | 143 | .state = *state, |
146 | .size = sizeof(*entry) + afinfo->route_key_size, | 144 | .size = sizeof(*entry) + afinfo->route_key_size, |
147 | }; | 145 | }; |
@@ -165,11 +163,15 @@ err: | |||
165 | 163 | ||
166 | void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) | 164 | void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) |
167 | { | 165 | { |
166 | struct nf_hook_entry *hook_entry; | ||
168 | struct sk_buff *skb = entry->skb; | 167 | struct sk_buff *skb = entry->skb; |
169 | struct nf_hook_ops *elem = entry->elem; | ||
170 | const struct nf_afinfo *afinfo; | 168 | const struct nf_afinfo *afinfo; |
169 | struct nf_hook_ops *elem; | ||
171 | int err; | 170 | int err; |
172 | 171 | ||
172 | hook_entry = rcu_dereference(entry->state.hook_entries); | ||
173 | elem = &hook_entry->ops; | ||
174 | |||
173 | nf_queue_entry_release_refs(entry); | 175 | nf_queue_entry_release_refs(entry); |
174 | 176 | ||
175 | /* Continue traversal iff userspace said ok... */ | 177 | /* Continue traversal iff userspace said ok... */ |
@@ -186,8 +188,7 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) | |||
186 | 188 | ||
187 | if (verdict == NF_ACCEPT) { | 189 | if (verdict == NF_ACCEPT) { |
188 | next_hook: | 190 | next_hook: |
189 | verdict = nf_iterate(entry->state.hook_list, | 191 | verdict = nf_iterate(skb, &entry->state, &hook_entry); |
190 | skb, &entry->state, &elem); | ||
191 | } | 192 | } |
192 | 193 | ||
193 | switch (verdict & NF_VERDICT_MASK) { | 194 | switch (verdict & NF_VERDICT_MASK) { |
@@ -198,7 +199,8 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict) | |||
198 | local_bh_enable(); | 199 | local_bh_enable(); |
199 | break; | 200 | break; |
200 | case NF_QUEUE: | 201 | case NF_QUEUE: |
201 | err = nf_queue(skb, elem, &entry->state, | 202 | RCU_INIT_POINTER(entry->state.hook_entries, hook_entry); |
203 | err = nf_queue(skb, &entry->state, | ||
202 | verdict >> NF_VERDICT_QBITS); | 204 | verdict >> NF_VERDICT_QBITS); |
203 | if (err < 0) { | 205 | if (err < 0) { |
204 | if (err == -ESRCH && | 206 | if (err == -ESRCH && |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index 7caa8b082c41..af832c526048 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
@@ -917,12 +917,14 @@ static struct notifier_block nfqnl_dev_notifier = { | |||
917 | .notifier_call = nfqnl_rcv_dev_event, | 917 | .notifier_call = nfqnl_rcv_dev_event, |
918 | }; | 918 | }; |
919 | 919 | ||
920 | static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long ops_ptr) | 920 | static int nf_hook_cmp(struct nf_queue_entry *entry, unsigned long entry_ptr) |
921 | { | 921 | { |
922 | return entry->elem == (struct nf_hook_ops *)ops_ptr; | 922 | return rcu_access_pointer(entry->state.hook_entries) == |
923 | (struct nf_hook_entry *)entry_ptr; | ||
923 | } | 924 | } |
924 | 925 | ||
925 | static void nfqnl_nf_hook_drop(struct net *net, struct nf_hook_ops *hook) | 926 | static void nfqnl_nf_hook_drop(struct net *net, |
927 | const struct nf_hook_entry *hook) | ||
926 | { | 928 | { |
927 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | 929 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); |
928 | int i; | 930 | int i; |