aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorGao feng <gaofeng@cn.fujitsu.com>2013-03-24 19:50:47 -0400
committerPablo Neira Ayuso <pablo@netfilter.org>2013-04-05 15:08:11 -0400
commite817961048ecd12cf9cdfcec0062deb5f7970592 (patch)
tree4d57d15706a8ca0e2579428818f046bf4fc629cd /net
parent5b023fc8d8e0997e0b7ea6506d243afd5478c96e (diff)
netfilter: nfnetlink_queue: add net namespace support for nfnetlink_queue
This patch makes /proc/net/netfilter/nfnetlink_queue pernet. Moreover, each network namespace now gets its own instance table and instances lock. Signed-off-by: Gao feng <gaofeng@cn.fujitsu.com> Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Diffstat (limited to 'net')
-rw-r--r--net/netfilter/nfnetlink_queue_core.c173
1 file changed, 113 insertions, 60 deletions
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 19845e34f70f..d20c52cc80c0 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -30,6 +30,7 @@
30#include <linux/list.h> 30#include <linux/list.h>
31#include <net/sock.h> 31#include <net/sock.h>
32#include <net/netfilter/nf_queue.h> 32#include <net/netfilter/nf_queue.h>
33#include <net/netns/generic.h>
33#include <net/netfilter/nfnetlink_queue.h> 34#include <net/netfilter/nfnetlink_queue.h>
34 35
35#include <linux/atomic.h> 36#include <linux/atomic.h>
@@ -66,10 +67,18 @@ struct nfqnl_instance {
66 67
67typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long); 68typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
68 69
69static DEFINE_SPINLOCK(instances_lock); 70static int nfnl_queue_net_id __read_mostly;
70 71
71#define INSTANCE_BUCKETS 16 72#define INSTANCE_BUCKETS 16
72static struct hlist_head instance_table[INSTANCE_BUCKETS] __read_mostly; 73struct nfnl_queue_net {
74 spinlock_t instances_lock;
75 struct hlist_head instance_table[INSTANCE_BUCKETS];
76};
77
78static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
79{
80 return net_generic(net, nfnl_queue_net_id);
81}
73 82
74static inline u_int8_t instance_hashfn(u_int16_t queue_num) 83static inline u_int8_t instance_hashfn(u_int16_t queue_num)
75{ 84{
@@ -77,12 +86,12 @@ static inline u_int8_t instance_hashfn(u_int16_t queue_num)
77} 86}
78 87
79static struct nfqnl_instance * 88static struct nfqnl_instance *
80instance_lookup(u_int16_t queue_num) 89instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
81{ 90{
82 struct hlist_head *head; 91 struct hlist_head *head;
83 struct nfqnl_instance *inst; 92 struct nfqnl_instance *inst;
84 93
85 head = &instance_table[instance_hashfn(queue_num)]; 94 head = &q->instance_table[instance_hashfn(queue_num)];
86 hlist_for_each_entry_rcu(inst, head, hlist) { 95 hlist_for_each_entry_rcu(inst, head, hlist) {
87 if (inst->queue_num == queue_num) 96 if (inst->queue_num == queue_num)
88 return inst; 97 return inst;
@@ -91,14 +100,15 @@ instance_lookup(u_int16_t queue_num)
91} 100}
92 101
93static struct nfqnl_instance * 102static struct nfqnl_instance *
94instance_create(u_int16_t queue_num, int portid) 103instance_create(struct nfnl_queue_net *q, u_int16_t queue_num,
104 int portid)
95{ 105{
96 struct nfqnl_instance *inst; 106 struct nfqnl_instance *inst;
97 unsigned int h; 107 unsigned int h;
98 int err; 108 int err;
99 109
100 spin_lock(&instances_lock); 110 spin_lock(&q->instances_lock);
101 if (instance_lookup(queue_num)) { 111 if (instance_lookup(q, queue_num)) {
102 err = -EEXIST; 112 err = -EEXIST;
103 goto out_unlock; 113 goto out_unlock;
104 } 114 }
@@ -123,16 +133,16 @@ instance_create(u_int16_t queue_num, int portid)
123 } 133 }
124 134
125 h = instance_hashfn(queue_num); 135 h = instance_hashfn(queue_num);
126 hlist_add_head_rcu(&inst->hlist, &instance_table[h]); 136 hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);
127 137
128 spin_unlock(&instances_lock); 138 spin_unlock(&q->instances_lock);
129 139
130 return inst; 140 return inst;
131 141
132out_free: 142out_free:
133 kfree(inst); 143 kfree(inst);
134out_unlock: 144out_unlock:
135 spin_unlock(&instances_lock); 145 spin_unlock(&q->instances_lock);
136 return ERR_PTR(err); 146 return ERR_PTR(err);
137} 147}
138 148
@@ -158,11 +168,11 @@ __instance_destroy(struct nfqnl_instance *inst)
158} 168}
159 169
160static void 170static void
161instance_destroy(struct nfqnl_instance *inst) 171instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
162{ 172{
163 spin_lock(&instances_lock); 173 spin_lock(&q->instances_lock);
164 __instance_destroy(inst); 174 __instance_destroy(inst);
165 spin_unlock(&instances_lock); 175 spin_unlock(&q->instances_lock);
166} 176}
167 177
168static inline void 178static inline void
@@ -473,9 +483,12 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
473 int err = -ENOBUFS; 483 int err = -ENOBUFS;
474 __be32 *packet_id_ptr; 484 __be32 *packet_id_ptr;
475 int failopen = 0; 485 int failopen = 0;
486 struct net *net = dev_net(entry->indev ?
487 entry->indev : entry->outdev);
488 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
476 489
477 /* rcu_read_lock()ed by nf_hook_slow() */ 490 /* rcu_read_lock()ed by nf_hook_slow() */
478 queue = instance_lookup(queuenum); 491 queue = instance_lookup(q, queuenum);
479 if (!queue) { 492 if (!queue) {
480 err = -ESRCH; 493 err = -ESRCH;
481 goto err_out; 494 goto err_out;
@@ -512,7 +525,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
512 *packet_id_ptr = htonl(entry->id); 525 *packet_id_ptr = htonl(entry->id);
513 526
514 /* nfnetlink_unicast will either free the nskb or add it to a socket */ 527 /* nfnetlink_unicast will either free the nskb or add it to a socket */
515 err = nfnetlink_unicast(nskb, &init_net, queue->peer_portid, MSG_DONTWAIT); 528 err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
516 if (err < 0) { 529 if (err < 0) {
517 queue->queue_user_dropped++; 530 queue->queue_user_dropped++;
518 goto err_out_unlock; 531 goto err_out_unlock;
@@ -625,15 +638,16 @@ dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
625/* drop all packets with either indev or outdev == ifindex from all queue 638/* drop all packets with either indev or outdev == ifindex from all queue
626 * instances */ 639 * instances */
627static void 640static void
628nfqnl_dev_drop(int ifindex) 641nfqnl_dev_drop(struct net *net, int ifindex)
629{ 642{
630 int i; 643 int i;
644 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
631 645
632 rcu_read_lock(); 646 rcu_read_lock();
633 647
634 for (i = 0; i < INSTANCE_BUCKETS; i++) { 648 for (i = 0; i < INSTANCE_BUCKETS; i++) {
635 struct nfqnl_instance *inst; 649 struct nfqnl_instance *inst;
636 struct hlist_head *head = &instance_table[i]; 650 struct hlist_head *head = &q->instance_table[i];
637 651
638 hlist_for_each_entry_rcu(inst, head, hlist) 652 hlist_for_each_entry_rcu(inst, head, hlist)
639 nfqnl_flush(inst, dev_cmp, ifindex); 653 nfqnl_flush(inst, dev_cmp, ifindex);
@@ -650,12 +664,9 @@ nfqnl_rcv_dev_event(struct notifier_block *this,
650{ 664{
651 struct net_device *dev = ptr; 665 struct net_device *dev = ptr;
652 666
653 if (!net_eq(dev_net(dev), &init_net))
654 return NOTIFY_DONE;
655
656 /* Drop any packets associated with the downed device */ 667 /* Drop any packets associated with the downed device */
657 if (event == NETDEV_DOWN) 668 if (event == NETDEV_DOWN)
658 nfqnl_dev_drop(dev->ifindex); 669 nfqnl_dev_drop(dev_net(dev), dev->ifindex);
659 return NOTIFY_DONE; 670 return NOTIFY_DONE;
660} 671}
661 672
@@ -668,24 +679,24 @@ nfqnl_rcv_nl_event(struct notifier_block *this,
668 unsigned long event, void *ptr) 679 unsigned long event, void *ptr)
669{ 680{
670 struct netlink_notify *n = ptr; 681 struct netlink_notify *n = ptr;
682 struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);
671 683
672 if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) { 684 if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
673 int i; 685 int i;
674 686
675 /* destroy all instances for this portid */ 687 /* destroy all instances for this portid */
676 spin_lock(&instances_lock); 688 spin_lock(&q->instances_lock);
677 for (i = 0; i < INSTANCE_BUCKETS; i++) { 689 for (i = 0; i < INSTANCE_BUCKETS; i++) {
678 struct hlist_node *t2; 690 struct hlist_node *t2;
679 struct nfqnl_instance *inst; 691 struct nfqnl_instance *inst;
680 struct hlist_head *head = &instance_table[i]; 692 struct hlist_head *head = &q->instance_table[i];
681 693
682 hlist_for_each_entry_safe(inst, t2, head, hlist) { 694 hlist_for_each_entry_safe(inst, t2, head, hlist) {
683 if ((n->net == &init_net) && 695 if (n->portid == inst->peer_portid)
684 (n->portid == inst->peer_portid))
685 __instance_destroy(inst); 696 __instance_destroy(inst);
686 } 697 }
687 } 698 }
688 spin_unlock(&instances_lock); 699 spin_unlock(&q->instances_lock);
689 } 700 }
690 return NOTIFY_DONE; 701 return NOTIFY_DONE;
691} 702}
@@ -706,11 +717,12 @@ static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
706 [NFQA_MARK] = { .type = NLA_U32 }, 717 [NFQA_MARK] = { .type = NLA_U32 },
707}; 718};
708 719
709static struct nfqnl_instance *verdict_instance_lookup(u16 queue_num, int nlportid) 720static struct nfqnl_instance *
721verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, int nlportid)
710{ 722{
711 struct nfqnl_instance *queue; 723 struct nfqnl_instance *queue;
712 724
713 queue = instance_lookup(queue_num); 725 queue = instance_lookup(q, queue_num);
714 if (!queue) 726 if (!queue)
715 return ERR_PTR(-ENODEV); 727 return ERR_PTR(-ENODEV);
716 728
@@ -754,7 +766,11 @@ nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
754 LIST_HEAD(batch_list); 766 LIST_HEAD(batch_list);
755 u16 queue_num = ntohs(nfmsg->res_id); 767 u16 queue_num = ntohs(nfmsg->res_id);
756 768
757 queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid); 769 struct net *net = sock_net(ctnl);
770 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
771
772 queue = verdict_instance_lookup(q, queue_num,
773 NETLINK_CB(skb).portid);
758 if (IS_ERR(queue)) 774 if (IS_ERR(queue))
759 return PTR_ERR(queue); 775 return PTR_ERR(queue);
760 776
@@ -802,10 +818,13 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
802 enum ip_conntrack_info uninitialized_var(ctinfo); 818 enum ip_conntrack_info uninitialized_var(ctinfo);
803 struct nf_conn *ct = NULL; 819 struct nf_conn *ct = NULL;
804 820
805 queue = instance_lookup(queue_num); 821 struct net *net = sock_net(ctnl);
806 if (!queue) 822 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
807 823
808 queue = verdict_instance_lookup(queue_num, NETLINK_CB(skb).portid); 824 queue = instance_lookup(q, queue_num);
825 if (!queue)
826 queue = verdict_instance_lookup(q, queue_num,
827 NETLINK_CB(skb).portid);
809 if (IS_ERR(queue)) 828 if (IS_ERR(queue))
810 return PTR_ERR(queue); 829 return PTR_ERR(queue);
811 830
@@ -869,6 +888,8 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
869 u_int16_t queue_num = ntohs(nfmsg->res_id); 888 u_int16_t queue_num = ntohs(nfmsg->res_id);
870 struct nfqnl_instance *queue; 889 struct nfqnl_instance *queue;
871 struct nfqnl_msg_config_cmd *cmd = NULL; 890 struct nfqnl_msg_config_cmd *cmd = NULL;
891 struct net *net = sock_net(ctnl);
892 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
872 int ret = 0; 893 int ret = 0;
873 894
874 if (nfqa[NFQA_CFG_CMD]) { 895 if (nfqa[NFQA_CFG_CMD]) {
@@ -882,7 +903,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
882 } 903 }
883 904
884 rcu_read_lock(); 905 rcu_read_lock();
885 queue = instance_lookup(queue_num); 906 queue = instance_lookup(q, queue_num);
886 if (queue && queue->peer_portid != NETLINK_CB(skb).portid) { 907 if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
887 ret = -EPERM; 908 ret = -EPERM;
888 goto err_out_unlock; 909 goto err_out_unlock;
@@ -895,7 +916,8 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
895 ret = -EBUSY; 916 ret = -EBUSY;
896 goto err_out_unlock; 917 goto err_out_unlock;
897 } 918 }
898 queue = instance_create(queue_num, NETLINK_CB(skb).portid); 919 queue = instance_create(q, queue_num,
920 NETLINK_CB(skb).portid);
899 if (IS_ERR(queue)) { 921 if (IS_ERR(queue)) {
900 ret = PTR_ERR(queue); 922 ret = PTR_ERR(queue);
901 goto err_out_unlock; 923 goto err_out_unlock;
@@ -906,7 +928,7 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
906 ret = -ENODEV; 928 ret = -ENODEV;
907 goto err_out_unlock; 929 goto err_out_unlock;
908 } 930 }
909 instance_destroy(queue); 931 instance_destroy(q, queue);
910 break; 932 break;
911 case NFQNL_CFG_CMD_PF_BIND: 933 case NFQNL_CFG_CMD_PF_BIND:
912 case NFQNL_CFG_CMD_PF_UNBIND: 934 case NFQNL_CFG_CMD_PF_UNBIND:
@@ -1000,19 +1022,24 @@ static const struct nfnetlink_subsystem nfqnl_subsys = {
1000 1022
1001#ifdef CONFIG_PROC_FS 1023#ifdef CONFIG_PROC_FS
1002struct iter_state { 1024struct iter_state {
1025 struct seq_net_private p;
1003 unsigned int bucket; 1026 unsigned int bucket;
1004}; 1027};
1005 1028
1006static struct hlist_node *get_first(struct seq_file *seq) 1029static struct hlist_node *get_first(struct seq_file *seq)
1007{ 1030{
1008 struct iter_state *st = seq->private; 1031 struct iter_state *st = seq->private;
1032 struct net *net;
1033 struct nfnl_queue_net *q;
1009 1034
1010 if (!st) 1035 if (!st)
1011 return NULL; 1036 return NULL;
1012 1037
1038 net = seq_file_net(seq);
1039 q = nfnl_queue_pernet(net);
1013 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) { 1040 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
1014 if (!hlist_empty(&instance_table[st->bucket])) 1041 if (!hlist_empty(&q->instance_table[st->bucket]))
1015 return instance_table[st->bucket].first; 1042 return q->instance_table[st->bucket].first;
1016 } 1043 }
1017 return NULL; 1044 return NULL;
1018} 1045}
@@ -1020,13 +1047,17 @@ static struct hlist_node *get_first(struct seq_file *seq)
1020static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h) 1047static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
1021{ 1048{
1022 struct iter_state *st = seq->private; 1049 struct iter_state *st = seq->private;
1050 struct net *net = seq_file_net(seq);
1023 1051
1024 h = h->next; 1052 h = h->next;
1025 while (!h) { 1053 while (!h) {
1054 struct nfnl_queue_net *q;
1055
1026 if (++st->bucket >= INSTANCE_BUCKETS) 1056 if (++st->bucket >= INSTANCE_BUCKETS)
1027 return NULL; 1057 return NULL;
1028 1058
1029 h = instance_table[st->bucket].first; 1059 q = nfnl_queue_pernet(net);
1060 h = q->instance_table[st->bucket].first;
1030 } 1061 }
1031 return h; 1062 return h;
1032} 1063}
@@ -1042,11 +1073,11 @@ static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
1042 return pos ? NULL : head; 1073 return pos ? NULL : head;
1043} 1074}
1044 1075
1045static void *seq_start(struct seq_file *seq, loff_t *pos) 1076static void *seq_start(struct seq_file *s, loff_t *pos)
1046 __acquires(instances_lock) 1077 __acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1047{ 1078{
1048 spin_lock(&instances_lock); 1079 spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1049 return get_idx(seq, *pos); 1080 return get_idx(s, *pos);
1050} 1081}
1051 1082
1052static void *seq_next(struct seq_file *s, void *v, loff_t *pos) 1083static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
@@ -1056,9 +1087,9 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
1056} 1087}
1057 1088
1058static void seq_stop(struct seq_file *s, void *v) 1089static void seq_stop(struct seq_file *s, void *v)
1059 __releases(instances_lock) 1090 __releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
1060{ 1091{
1061 spin_unlock(&instances_lock); 1092 spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
1062} 1093}
1063 1094
1064static int seq_show(struct seq_file *s, void *v) 1095static int seq_show(struct seq_file *s, void *v)
@@ -1082,7 +1113,7 @@ static const struct seq_operations nfqnl_seq_ops = {
1082 1113
1083static int nfqnl_open(struct inode *inode, struct file *file) 1114static int nfqnl_open(struct inode *inode, struct file *file)
1084{ 1115{
1085 return seq_open_private(file, &nfqnl_seq_ops, 1116 return seq_open_net(inode, file, &nfqnl_seq_ops,
1086 sizeof(struct iter_state)); 1117 sizeof(struct iter_state));
1087} 1118}
1088 1119
@@ -1091,39 +1122,63 @@ static const struct file_operations nfqnl_file_ops = {
1091 .open = nfqnl_open, 1122 .open = nfqnl_open,
1092 .read = seq_read, 1123 .read = seq_read,
1093 .llseek = seq_lseek, 1124 .llseek = seq_lseek,
1094 .release = seq_release_private, 1125 .release = seq_release_net,
1095}; 1126};
1096 1127
1097#endif /* PROC_FS */ 1128#endif /* PROC_FS */
1098 1129
1099static int __init nfnetlink_queue_init(void) 1130static int __net_init nfnl_queue_net_init(struct net *net)
1100{ 1131{
1101 int i, status = -ENOMEM; 1132 unsigned int i;
1133 struct nfnl_queue_net *q = nfnl_queue_pernet(net);
1102 1134
1103 for (i = 0; i < INSTANCE_BUCKETS; i++) 1135 for (i = 0; i < INSTANCE_BUCKETS; i++)
1104 INIT_HLIST_HEAD(&instance_table[i]); 1136 INIT_HLIST_HEAD(&q->instance_table[i]);
1137
1138 spin_lock_init(&q->instances_lock);
1139
1140#ifdef CONFIG_PROC_FS
1141 if (!proc_create("nfnetlink_queue", 0440,
1142 net->nf.proc_netfilter, &nfqnl_file_ops))
1143 return -ENOMEM;
1144#endif
1145 return 0;
1146}
1147
1148static void __net_exit nfnl_queue_net_exit(struct net *net)
1149{
1150 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1151}
1152
1153static struct pernet_operations nfnl_queue_net_ops = {
1154 .init = nfnl_queue_net_init,
1155 .exit = nfnl_queue_net_exit,
1156 .id = &nfnl_queue_net_id,
1157 .size = sizeof(struct nfnl_queue_net),
1158};
1159
1160static int __init nfnetlink_queue_init(void)
1161{
1162 int status = -ENOMEM;
1105 1163
1106 netlink_register_notifier(&nfqnl_rtnl_notifier); 1164 netlink_register_notifier(&nfqnl_rtnl_notifier);
1107 status = nfnetlink_subsys_register(&nfqnl_subsys); 1165 status = nfnetlink_subsys_register(&nfqnl_subsys);
1108 if (status < 0) { 1166 if (status < 0) {
1109 printk(KERN_ERR "nf_queue: failed to create netlink socket\n"); 1167 pr_err("nf_queue: failed to create netlink socket\n");
1110 goto cleanup_netlink_notifier; 1168 goto cleanup_netlink_notifier;
1111 } 1169 }
1112 1170
1113#ifdef CONFIG_PROC_FS 1171 status = register_pernet_subsys(&nfnl_queue_net_ops);
1114 if (!proc_create("nfnetlink_queue", 0440, 1172 if (status < 0) {
1115 proc_net_netfilter, &nfqnl_file_ops)) 1173 pr_err("nf_queue: failed to register pernet ops\n");
1116 goto cleanup_subsys; 1174 goto cleanup_subsys;
1117#endif 1175 }
1118
1119 register_netdevice_notifier(&nfqnl_dev_notifier); 1176 register_netdevice_notifier(&nfqnl_dev_notifier);
1120 nf_register_queue_handler(&nfqh); 1177 nf_register_queue_handler(&nfqh);
1121 return status; 1178 return status;
1122 1179
1123#ifdef CONFIG_PROC_FS
1124cleanup_subsys: 1180cleanup_subsys:
1125 nfnetlink_subsys_unregister(&nfqnl_subsys); 1181 nfnetlink_subsys_unregister(&nfqnl_subsys);
1126#endif
1127cleanup_netlink_notifier: 1182cleanup_netlink_notifier:
1128 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 1183 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1129 return status; 1184 return status;
@@ -1133,9 +1188,7 @@ static void __exit nfnetlink_queue_fini(void)
1133{ 1188{
1134 nf_unregister_queue_handler(); 1189 nf_unregister_queue_handler();
1135 unregister_netdevice_notifier(&nfqnl_dev_notifier); 1190 unregister_netdevice_notifier(&nfqnl_dev_notifier);
1136#ifdef CONFIG_PROC_FS 1191 unregister_pernet_subsys(&nfnl_queue_net_ops);
1137 remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
1138#endif
1139 nfnetlink_subsys_unregister(&nfqnl_subsys); 1192 nfnetlink_subsys_unregister(&nfqnl_subsys);
1140 netlink_unregister_notifier(&nfqnl_rtnl_notifier); 1193 netlink_unregister_notifier(&nfqnl_rtnl_notifier);
1141 1194