author     Florian Westphal <fw@strlen.de>          2012-11-23 01:22:21 -0500
committer  Pablo Neira Ayuso <pablo@netfilter.org>  2012-12-03 09:07:48 -0500
commit     0360ae412d09bc6f4864c801effcb20bfd84520e
tree       2c97b64015e7d0f6c6d554e99ac6e4f56bb39227 /net
parent     d871befe357ccc262edbb0a4f9aeea650012edf5
netfilter: kill support for per-af queue backends
We used to have several queueing backends, but nowadays only
nfnetlink_queue remains.

In light of this there doesn't seem to be a good reason to support
per-af registering -- just hook up nfnetlink_queue on module load and
remove it on unload.

This means that the userspace BIND/UNBIND_PF commands are now obsolete;
the kernel will ignore them.

Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
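For reference, the registration model after this patch: a backend hooks
itself up unconditionally at module init and tears down at exit, with no
protocol-family argument and no return value to check. Below is a minimal
sketch of a hypothetical backend module written against the API as changed
here; the "example_" names are invented, the inline nf_reinject() call is
illustrative (a real backend stores the entry and reinjects later once a
verdict arrives), and the outfn() signature is assumed from this kernel
version's struct nf_queue_handler.

/*
 * Hypothetical sketch, not part of this patch.  Only
 * nf_register_queue_handler(), nf_unregister_queue_handler(),
 * nf_reinject() and struct nf_queue_handler come from the kernel;
 * everything prefixed "example_" is invented for illustration.
 */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_queue.h>

static int example_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
        /* every queued packet must eventually be passed back via
         * nf_reinject(), no matter what; done inline here only to
         * keep the sketch short */
        nf_reinject(entry, NF_ACCEPT);
        return 0;
}

static const struct nf_queue_handler example_qh = {
        .outfn  = &example_outfn,
};

static int __init example_init(void)
{
        /* single global slot now: the kernel WARNs if one is set */
        nf_register_queue_handler(&example_qh);
        return 0;
}

static void __exit example_exit(void)
{
        /* caller must flush its queue first; this clears the slot
         * and waits out an RCU grace period before returning */
        nf_unregister_queue_handler();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");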
Diffstat (limited to 'net')
-rw-r--r--  net/netfilter/core.c                  |   2
-rw-r--r--  net/netfilter/nf_queue.c              | 152
-rw-r--r--  net/netfilter/nfnetlink_queue_core.c  |  14
3 files changed, 18 insertions(+), 150 deletions(-)
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 68912dadf13d..a9c488b6c50d 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -295,8 +295,6 @@ void __init netfilter_init(void)
295 panic("cannot create netfilter proc entry"); 295 panic("cannot create netfilter proc entry");
296#endif 296#endif
297 297
298 if (netfilter_queue_init() < 0)
299 panic("cannot initialize nf_queue");
300 if (netfilter_log_init() < 0) 298 if (netfilter_log_init() < 0)
301 panic("cannot initialize nf_log"); 299 panic("cannot initialize nf_log");
302} 300}
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index 8d2cf9ec37a8..d812c1235b30 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -14,84 +14,32 @@
14#include "nf_internals.h" 14#include "nf_internals.h"
15 15
16/* 16/*
17 * A queue handler may be registered for each protocol. Each is protected by 17 * Hook for nfnetlink_queue to register its queue handler.
18 * long term mutex. The handler must provide an an outfn() to accept packets 18 * We do this so that most of the NFQUEUE code can be modular.
19 * for queueing and must reinject all packets it receives, no matter what. 19 *
20 * Once the queue is registered it must reinject all packets it
21 * receives, no matter what.
20 */ 22 */
21static const struct nf_queue_handler __rcu *queue_handler[NFPROTO_NUMPROTO] __read_mostly; 23static const struct nf_queue_handler __rcu *queue_handler __read_mostly;
22
23static DEFINE_MUTEX(queue_handler_mutex);
24 24
25/* return EBUSY when somebody else is registered, return EEXIST if the 25/* return EBUSY when somebody else is registered, return EEXIST if the
26 * same handler is registered, return 0 in case of success. */ 26 * same handler is registered, return 0 in case of success. */
27int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) 27void nf_register_queue_handler(const struct nf_queue_handler *qh)
28{ 28{
29 int ret; 29 /* should never happen, we only have one queueing backend in kernel */
30 const struct nf_queue_handler *old; 30 WARN_ON(rcu_access_pointer(queue_handler));
31 31 rcu_assign_pointer(queue_handler, qh);
32 if (pf >= ARRAY_SIZE(queue_handler))
33 return -EINVAL;
34
35 mutex_lock(&queue_handler_mutex);
36 old = rcu_dereference_protected(queue_handler[pf],
37 lockdep_is_held(&queue_handler_mutex));
38 if (old == qh)
39 ret = -EEXIST;
40 else if (old)
41 ret = -EBUSY;
42 else {
43 rcu_assign_pointer(queue_handler[pf], qh);
44 ret = 0;
45 }
46 mutex_unlock(&queue_handler_mutex);
47
48 return ret;
49} 32}
50EXPORT_SYMBOL(nf_register_queue_handler); 33EXPORT_SYMBOL(nf_register_queue_handler);
51 34
52/* The caller must flush their queue before this */ 35/* The caller must flush their queue before this */
53int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh) 36void nf_unregister_queue_handler(void)
54{ 37{
55 const struct nf_queue_handler *old; 38 RCU_INIT_POINTER(queue_handler, NULL);
56
57 if (pf >= ARRAY_SIZE(queue_handler))
58 return -EINVAL;
59
60 mutex_lock(&queue_handler_mutex);
61 old = rcu_dereference_protected(queue_handler[pf],
62 lockdep_is_held(&queue_handler_mutex));
63 if (old && old != qh) {
64 mutex_unlock(&queue_handler_mutex);
65 return -EINVAL;
66 }
67
68 RCU_INIT_POINTER(queue_handler[pf], NULL);
69 mutex_unlock(&queue_handler_mutex);
70
71 synchronize_rcu(); 39 synchronize_rcu();
72
73 return 0;
74} 40}
75EXPORT_SYMBOL(nf_unregister_queue_handler); 41EXPORT_SYMBOL(nf_unregister_queue_handler);
76 42
77void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
78{
79 u_int8_t pf;
80
81 mutex_lock(&queue_handler_mutex);
82 for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
83 if (rcu_dereference_protected(
84 queue_handler[pf],
85 lockdep_is_held(&queue_handler_mutex)
86 ) == qh)
87 RCU_INIT_POINTER(queue_handler[pf], NULL);
88 }
89 mutex_unlock(&queue_handler_mutex);
90
91 synchronize_rcu();
92}
93EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
94
95static void nf_queue_entry_release_refs(struct nf_queue_entry *entry) 43static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
96{ 44{
97 /* Release those devices we held, or Alexey will kill me. */ 45 /* Release those devices we held, or Alexey will kill me. */
@@ -137,7 +85,7 @@ static int __nf_queue(struct sk_buff *skb,
 	/* QUEUE == DROP if no one is waiting, to be safe. */
 	rcu_read_lock();
 
-	qh = rcu_dereference(queue_handler[pf]);
+	qh = rcu_dereference(queue_handler);
 	if (!qh) {
 		status = -ESRCH;
 		goto err_unlock;
@@ -344,77 +292,3 @@ void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
 	kfree(entry);
 }
 EXPORT_SYMBOL(nf_reinject);
-
-#ifdef CONFIG_PROC_FS
-static void *seq_start(struct seq_file *seq, loff_t *pos)
-{
-	if (*pos >= ARRAY_SIZE(queue_handler))
-		return NULL;
-
-	return pos;
-}
-
-static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
-	(*pos)++;
-
-	if (*pos >= ARRAY_SIZE(queue_handler))
-		return NULL;
-
-	return pos;
-}
-
-static void seq_stop(struct seq_file *s, void *v)
-{
-
-}
-
-static int seq_show(struct seq_file *s, void *v)
-{
-	int ret;
-	loff_t *pos = v;
-	const struct nf_queue_handler *qh;
-
-	rcu_read_lock();
-	qh = rcu_dereference(queue_handler[*pos]);
-	if (!qh)
-		ret = seq_printf(s, "%2lld NONE\n", *pos);
-	else
-		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
-	rcu_read_unlock();
-
-	return ret;
-}
-
-static const struct seq_operations nfqueue_seq_ops = {
-	.start	= seq_start,
-	.next	= seq_next,
-	.stop	= seq_stop,
-	.show	= seq_show,
-};
-
-static int nfqueue_open(struct inode *inode, struct file *file)
-{
-	return seq_open(file, &nfqueue_seq_ops);
-}
-
-static const struct file_operations nfqueue_file_ops = {
-	.owner	 = THIS_MODULE,
-	.open	 = nfqueue_open,
-	.read	 = seq_read,
-	.llseek	 = seq_lseek,
-	.release = seq_release,
-};
-#endif /* PROC_FS */
-
-
-int __init netfilter_queue_init(void)
-{
-#ifdef CONFIG_PROC_FS
-	if (!proc_create("nf_queue", S_IRUGO,
-			 proc_net_netfilter, &nfqueue_file_ops))
-		return -1;
-#endif
-	return 0;
-}
-
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index e12d44e75b21..3158d87b56a8 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -809,7 +809,6 @@ static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
 };
 
 static const struct nf_queue_handler nfqh = {
-	.name	= "nf_queue",
 	.outfn	= &nfqnl_enqueue_packet,
 };
 
@@ -827,14 +826,10 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 	if (nfqa[NFQA_CFG_CMD]) {
 		cmd = nla_data(nfqa[NFQA_CFG_CMD]);
 
-		/* Commands without queue context - might sleep */
+		/* Obsolete commands without queue context */
 		switch (cmd->command) {
-		case NFQNL_CFG_CMD_PF_BIND:
-			return nf_register_queue_handler(ntohs(cmd->pf),
-							 &nfqh);
-		case NFQNL_CFG_CMD_PF_UNBIND:
-			return nf_unregister_queue_handler(ntohs(cmd->pf),
-							   &nfqh);
+		case NFQNL_CFG_CMD_PF_BIND: return 0;
+		case NFQNL_CFG_CMD_PF_UNBIND: return 0;
 		}
 	}
 
@@ -1074,6 +1069,7 @@ static int __init nfnetlink_queue_init(void)
 #endif
 
 	register_netdevice_notifier(&nfqnl_dev_notifier);
+	nf_register_queue_handler(&nfqh);
 	return status;
 
 #ifdef CONFIG_PROC_FS
@@ -1087,7 +1083,7 @@ cleanup_netlink_notifier:
 
 static void __exit nfnetlink_queue_fini(void)
 {
-	nf_unregister_queue_handlers(&nfqh);
+	nf_unregister_queue_handler();
 	unregister_netdevice_notifier(&nfqnl_dev_notifier);
 #ifdef CONFIG_PROC_FS
 	remove_proc_entry("nfnetlink_queue", proc_net_netfilter);