author    Harald Welte <laforge@netfilter.org>    2005-08-09 22:58:39 -0400
committer David S. Miller <davem@sunset.davemloft.net>    2005-08-29 18:38:12 -0400
commit    0597f2680d666a3bcf101ac0c771ba7e50016bbd (patch)
tree      ce43bf3284ce162ccb34d4ee5a9981089df5bce0
parent    608c8e4f7b6e61cc783283e9dff8a465a5ad59bb (diff)
[NETFILTER]: Add new "nfnetlink_log" userspace packet logging facility
This is a generic (layer 3 independent) version of what ipt_ULOG is already
doing for IPv4 today.  ipt_ULOG, ebt_ulog and finally also ip[6]t_LOG will be
deprecated by this mechanism in the long term.

Signed-off-by: Harald Welte <laforge@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
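For orientation, a minimal sketch of the userspace side of this facility.  It is
not part of this patch: it assumes the separate libnetfilter_log library, and the
group number 0, buffer size and compile command are purely illustrative.  The
sketch binds to one log group, requests full packet copies (NFULNL_COPY_PACKET
from the header added below) and prints the log prefix and payload length of each
packet the kernel delivers.

/* Hypothetical nfnetlink_log reader built on libnetfilter_log (not part of
 * this patch).  Typically built with:
 *   gcc -o nflog_reader nflog_reader.c -lnetfilter_log -lnfnetlink
 */
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <libnetfilter_log/libnetfilter_log.h>

static int cb(struct nflog_g_handle *gh, struct nfgenmsg *nfmsg,
              struct nflog_data *nfa, void *data)
{
        char *prefix = nflog_get_prefix(nfa);         /* NFULA_PREFIX */
        char *payload;
        int len = nflog_get_payload(nfa, &payload);   /* NFULA_PAYLOAD */

        printf("packet: prefix='%s' payload_len=%d\n",
               prefix ? prefix : "", len);
        return 0;
}

int main(void)
{
        struct nflog_handle *h;
        struct nflog_g_handle *gh;
        char buf[4096];
        int rv;

        h = nflog_open();                  /* opens a NETLINK_NETFILTER socket */
        if (!h)
                exit(1);

        /* sends NFULNL_CFG_CMD_PF_BIND and NFULNL_CFG_CMD_BIND config messages */
        nflog_bind_pf(h, AF_INET);
        gh = nflog_bind_group(h, 0);       /* group 0 is an arbitrary example */
        if (!gh)
                exit(1);

        /* NFULA_CFG_MODE: copy whole packets, up to 0xffff bytes */
        nflog_set_mode(gh, NFULNL_COPY_PACKET, 0xffff);
        nflog_callback_register(gh, &cb, NULL);

        /* NFULNL_MSG_PACKET messages arrive on the netlink socket */
        while ((rv = recv(nflog_fd(h), buf, sizeof(buf), 0)) > 0)
                nflog_handle_packet(h, buf, rv);

        nflog_unbind_group(gh);
        nflog_close(h);
        return 0;
}

On the kernel side, packets reach such a group once nfulnl_log_packet() (registered
as the nf_logger for the bound protocol family, see nfulnl_recv_config() below) is
invoked for them.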
-rw-r--r--  include/linux/netfilter/nfnetlink_log.h   85
-rw-r--r--  net/netfilter/Kconfig                      11
-rw-r--r--  net/netfilter/Makefile                      1
-rw-r--r--  net/netfilter/nfnetlink_log.c             995
-rw-r--r--  net/netfilter/nfnetlink_queue.c             1
5 files changed, 1093 insertions, 0 deletions
diff --git a/include/linux/netfilter/nfnetlink_log.h b/include/linux/netfilter/nfnetlink_log.h
new file mode 100644
index 000000000000..420ff4625cbf
--- /dev/null
+++ b/include/linux/netfilter/nfnetlink_log.h
@@ -0,0 +1,85 @@
1#ifndef _NFNETLINK_LOG_H
2#define _NFNETLINK_LOG_H
3
4/* This file describes the netlink messages (i.e. 'protocol packets'),
5 * and not any kind of function definitions. It is shared between kernel and
6 * userspace. Don't put kernel specific stuff in here */
7
8#include <linux/netfilter/nfnetlink.h>
9
10enum nfulnl_msg_types {
11 NFULNL_MSG_PACKET, /* packet from kernel to userspace */
12 NFULNL_MSG_CONFIG, /* connect to a particular queue */
13
14 NFULNL_MSG_MAX
15};
16
17struct nfulnl_msg_packet_hdr {
18 u_int16_t hw_protocol; /* hw protocol (network order) */
19 u_int8_t hook; /* netfilter hook */
20 u_int8_t _pad;
21} __attribute__ ((packed));
22
23struct nfulnl_msg_packet_hw {
24 u_int16_t hw_addrlen;
25 u_int16_t _pad;
26 u_int8_t hw_addr[8];
27} __attribute__ ((packed));
28
29struct nfulnl_msg_packet_timestamp {
30 u_int64_t sec;
31 u_int64_t usec;
32} __attribute__ ((packed));
33
34#define NFULNL_PREFIXLEN 30 /* just like old log target */
35
36enum nfulnl_attr_type {
37 NFULA_UNSPEC,
38 NFULA_PACKET_HDR,
39 NFULA_MARK, /* u_int32_t nfmark */
40 NFULA_TIMESTAMP, /* nfulnl_msg_packet_timestamp */
41 NFULA_IFINDEX_INDEV, /* u_int32_t ifindex */
42 NFULA_IFINDEX_OUTDEV, /* u_int32_t ifindex */
43 NFULA_HWADDR, /* nfulnl_msg_packet_hw */
44 NFULA_PAYLOAD, /* opaque data payload */
45 NFULA_PREFIX, /* string prefix */
46 NFULA_UID, /* user id of socket */
47
48 __NFULA_MAX
49};
50#define NFULA_MAX (__NFULA_MAX - 1)
51
52enum nfulnl_msg_config_cmds {
53 NFULNL_CFG_CMD_NONE,
54 NFULNL_CFG_CMD_BIND,
55 NFULNL_CFG_CMD_UNBIND,
56 NFULNL_CFG_CMD_PF_BIND,
57 NFULNL_CFG_CMD_PF_UNBIND,
58};
59
60struct nfulnl_msg_config_cmd {
61 u_int8_t command; /* nfulnl_msg_config_cmds */
62} __attribute__ ((packed));
63
64struct nfulnl_msg_config_mode {
65 u_int32_t copy_range;
66 u_int8_t copy_mode;
67 u_int8_t _pad;
68} __attribute__ ((packed));
69
70enum nfulnl_attr_config {
71 NFULA_CFG_UNSPEC,
72 NFULA_CFG_CMD, /* nfulnl_msg_config_cmd */
73 NFULA_CFG_MODE, /* nfulnl_msg_config_mode */
74 NFULA_CFG_NLBUFSIZ, /* u_int32_t buffer size */
75 NFULA_CFG_TIMEOUT, /* u_int32_t in 1/100 s */
76 NFULA_CFG_QTHRESH, /* u_int32_t */
77 __NFULA_CFG_MAX
78};
79#define NFULA_CFG_MAX (__NFULA_CFG_MAX -1)
80
81#define NFULNL_COPY_NONE 0x00
82#define NFULNL_COPY_META 0x01
83#define NFULNL_COPY_PACKET 0x02
84
85#endif /* _NFNETLINK_LOG_H */
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f0eb23e5c5f1..8296b38bf270 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -11,3 +11,14 @@ config NETFILTER_NETLINK_QUEUE
11 If this option is enabled, the kernel will include support
12 for queueing packets via NFNETLINK.
13
14config NETFILTER_NETLINK_LOG
15 tristate "Netfilter LOG over NFNETLINK interface"
16 depends on NETFILTER_NETLINK
17 help
18 If this option is enabled, the kernel will include support
19 for logging packets via NFNETLINK.
20
21 This obsoletes the existing ipt_ULOG and ebt_ulog mechanisms,
22 and is also scheduled to replace the old syslog-based ipt_LOG
23 and ip6t_LOG modules.
24
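Usage note (not part of the patch): the option builds the logging backend as the
nfnetlink_log module.  A configuration along these lines is assumed for
illustration:

    CONFIG_NETFILTER_NETLINK=m
    CONFIG_NETFILTER_NETLINK_LOG=m

The module can then be loaded with "modprobe nfnetlink_log"; binding a log group
from userspace (see the sketch above) pins it for as long as the instance exists,
via the try_module_get() call in instance_create() below.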
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 14a0b187e75e..c41caebc4a7c 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,2 +1,3 @@
1obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
2obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
3obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
new file mode 100644
index 000000000000..f41045e385ae
--- /dev/null
+++ b/net/netfilter/nfnetlink_log.c
@@ -0,0 +1,995 @@
1/*
2 * This is a module which is used for logging packets to userspace via
3 * nfnetlink.
4 *
5 * (C) 2005 by Harald Welte <laforge@netfilter.org>
6 *
7 * Based on the old ipv4-only ipt_ULOG.c:
8 * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 */
15#include <linux/module.h>
16#include <linux/skbuff.h>
17#include <linux/init.h>
18#include <linux/ip.h>
19#include <linux/ipv6.h>
20#include <linux/netdevice.h>
21#include <linux/netfilter.h>
22#include <linux/netlink.h>
23#include <linux/netfilter/nfnetlink.h>
24#include <linux/netfilter/nfnetlink_log.h>
25#include <linux/spinlock.h>
26#include <linux/sysctl.h>
27#include <linux/proc_fs.h>
28#include <linux/security.h>
29#include <linux/list.h>
30#include <linux/jhash.h>
31#include <linux/random.h>
32#include <net/sock.h>
33
34#include <asm/atomic.h>
35
36#define NFULNL_NLBUFSIZ_DEFAULT 4096
37#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
38#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
39
40#define PRINTR(x, args...) do { if (net_ratelimit()) \
41 printk(x, ## args); } while (0);
42
43#if 0
44#define UDEBUG(x, args ...) printk(KERN_DEBUG "%s(%d):%s(): " x, \
45 __FILE__, __LINE__, __FUNCTION__, \
46 ## args)
47#else
48#define UDEBUG(x, ...)
49#endif
50
51struct nfulnl_instance {
52 struct hlist_node hlist; /* global list of instances */
53 spinlock_t lock;
54 atomic_t use; /* use count */
55
56 unsigned int qlen; /* number of nlmsgs in skb */
57 struct sk_buff *skb; /* pre-allocated skb */
58 struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
59 struct timer_list timer;
60 int peer_pid; /* PID of the peer process */
61
62 /* configurable parameters */
63 unsigned int flushtimeout; /* timeout until queue flush */
64 unsigned int nlbufsiz; /* netlink buffer allocation size */
65 unsigned int qthreshold; /* threshold of the queue */
66 u_int32_t copy_range;
67 u_int16_t group_num; /* number of this queue */
68 u_int8_t copy_mode;
69};
70
71static DEFINE_RWLOCK(instances_lock);
72
73#define INSTANCE_BUCKETS 16
74static struct hlist_head instance_table[INSTANCE_BUCKETS];
75static unsigned int hash_init;
76
77static inline u_int8_t instance_hashfn(u_int16_t group_num)
78{
79 return ((group_num & 0xff) % INSTANCE_BUCKETS);
80}
81
82static struct nfulnl_instance *
83__instance_lookup(u_int16_t group_num)
84{
85 struct hlist_head *head;
86 struct hlist_node *pos;
87 struct nfulnl_instance *inst;
88
89 UDEBUG("entering (group_num=%u)\n", group_num);
90
91 head = &instance_table[instance_hashfn(group_num)];
92 hlist_for_each_entry(inst, pos, head, hlist) {
93 if (inst->group_num == group_num)
94 return inst;
95 }
96 return NULL;
97}
98
99static inline void
100instance_get(struct nfulnl_instance *inst)
101{
102 atomic_inc(&inst->use);
103}
104
105static struct nfulnl_instance *
106instance_lookup_get(u_int16_t group_num)
107{
108 struct nfulnl_instance *inst;
109
110 read_lock_bh(&instances_lock);
111 inst = __instance_lookup(group_num);
112 if (inst)
113 instance_get(inst);
114 read_unlock_bh(&instances_lock);
115
116 return inst;
117}
118
119static void
120instance_put(struct nfulnl_instance *inst)
121{
122 if (inst && atomic_dec_and_test(&inst->use)) {
123 UDEBUG("kfree(inst=%p)\n", inst);
124 kfree(inst);
125 }
126}
127
128static void nfulnl_timer(unsigned long data);
129
130static struct nfulnl_instance *
131instance_create(u_int16_t group_num, int pid)
132{
133 struct nfulnl_instance *inst;
134
135 UDEBUG("entering (group_num=%u, pid=%d)\n", group_num,
136 pid);
137
138 write_lock_bh(&instances_lock);
139 if (__instance_lookup(group_num)) {
140 inst = NULL;
141 UDEBUG("aborting, instance already exists\n");
142 goto out_unlock;
143 }
144
145 inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
146 if (!inst)
147 goto out_unlock;
148
149 memset(inst, 0, sizeof(*inst));
150 INIT_HLIST_NODE(&inst->hlist);
151 inst->lock = SPIN_LOCK_UNLOCKED;
152 /* needs to be two, since we _put() after creation */
153 atomic_set(&inst->use, 2);
154
155 init_timer(&inst->timer);
156 inst->timer.function = nfulnl_timer;
157 inst->timer.data = (unsigned long)inst;
158 /* don't start timer yet. (re)start it with every packet */
159
160 inst->peer_pid = pid;
161 inst->group_num = group_num;
162
163 inst->qthreshold = NFULNL_QTHRESH_DEFAULT;
164 inst->flushtimeout = NFULNL_TIMEOUT_DEFAULT;
165 inst->nlbufsiz = NFULNL_NLBUFSIZ_DEFAULT;
166 inst->copy_mode = NFULNL_COPY_PACKET;
167 inst->copy_range = 0xffff;
168
169 if (!try_module_get(THIS_MODULE))
170 goto out_free;
171
172 hlist_add_head(&inst->hlist,
173 &instance_table[instance_hashfn(group_num)]);
174
175 UDEBUG("newly added node: %p, next=%p\n", &inst->hlist,
176 inst->hlist.next);
177
178 write_unlock_bh(&instances_lock);
179
180 return inst;
181
182out_free:
183 instance_put(inst);
184out_unlock:
185 write_unlock_bh(&instances_lock);
186 return NULL;
187}
188
189static int __nfulnl_send(struct nfulnl_instance *inst);
190
191static void
192_instance_destroy2(struct nfulnl_instance *inst, int lock)
193{
194 /* first pull it out of the global list */
195 if (lock)
196 write_lock_bh(&instances_lock);
197
198 UDEBUG("removing instance %p (queuenum=%u) from hash\n",
199 inst, inst->group_num);
200
201 hlist_del(&inst->hlist);
202
203 if (lock)
204 write_unlock_bh(&instances_lock);
205
206 /* then flush all pending packets from skb */
207
208 spin_lock_bh(&inst->lock);
209 if (inst->skb) {
210 if (inst->qlen)
211 __nfulnl_send(inst);
212 if (inst->skb) {
213 kfree_skb(inst->skb);
214 inst->skb = NULL;
215 }
216 }
217 spin_unlock_bh(&inst->lock);
218
219 /* and finally put the refcount */
220 instance_put(inst);
221
222 module_put(THIS_MODULE);
223}
224
225static inline void
226__instance_destroy(struct nfulnl_instance *inst)
227{
228 _instance_destroy2(inst, 0);
229}
230
231static inline void
232instance_destroy(struct nfulnl_instance *inst)
233{
234 _instance_destroy2(inst, 1);
235}
236
237static int
238nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
239 unsigned int range)
240{
241 int status = 0;
242
243 spin_lock_bh(&inst->lock);
244
245 switch (mode) {
246 case NFULNL_COPY_NONE:
247 case NFULNL_COPY_META:
248 inst->copy_mode = mode;
249 inst->copy_range = 0;
250 break;
251
252 case NFULNL_COPY_PACKET:
253 inst->copy_mode = mode;
254 /* we're using struct nfattr which has 16bit nfa_len */
255 if (range > 0xffff)
256 inst->copy_range = 0xffff;
257 else
258 inst->copy_range = range;
259 break;
260
261 default:
262 status = -EINVAL;
263 break;
264 }
265
266 spin_unlock_bh(&inst->lock);
267
268 return status;
269}
270
271static int
272nfulnl_set_nlbufsiz(struct nfulnl_instance *inst, u_int32_t nlbufsiz)
273{
274 int status;
275
276 spin_lock_bh(&inst->lock);
277 if (nlbufsiz < NFULNL_NLBUFSIZ_DEFAULT)
278 status = -ERANGE;
279 else if (nlbufsiz > 131072)
280 status = -ERANGE;
281 else {
282 inst->nlbufsiz = nlbufsiz;
283 status = 0;
284 }
285 spin_unlock_bh(&inst->lock);
286
287 return status;
288}
289
290static int
291nfulnl_set_timeout(struct nfulnl_instance *inst, u_int32_t timeout)
292{
293 spin_lock_bh(&inst->lock);
294 inst->flushtimeout = timeout;
295 spin_unlock_bh(&inst->lock);
296
297 return 0;
298}
299
300static int
301nfulnl_set_qthresh(struct nfulnl_instance *inst, u_int32_t qthresh)
302{
303 spin_lock_bh(&inst->lock);
304 inst->qthreshold = qthresh;
305 spin_unlock_bh(&inst->lock);
306
307 return 0;
308}
309
310static struct sk_buff *nfulnl_alloc_skb(unsigned int inst_size,
311 unsigned int pkt_size)
312{
313 struct sk_buff *skb;
314
315 UDEBUG("entered (%u, %u)\n", inst_size, pkt_size);
316
317 /* alloc skb which should be big enough for a whole multipart
318 * message. WARNING: has to be <= 128k due to slab restrictions */
319
320 skb = alloc_skb(inst_size, GFP_ATOMIC);
321 if (!skb) {
322 PRINTR("nfnetlink_log: can't alloc whole buffer (%u bytes)\n",
323 inst_size);
324
325 /* try to allocate only as much as we need for current
326 * packet */
327
328 skb = alloc_skb(pkt_size, GFP_ATOMIC);
329 if (!skb)
330 PRINTR("nfnetlink_log: can't even alloc %u bytes\n",
331 pkt_size);
332 }
333
334 return skb;
335}
336
337static int
338__nfulnl_send(struct nfulnl_instance *inst)
339{
340 int status;
341
342 if (timer_pending(&inst->timer))
343 del_timer(&inst->timer);
344
345 if (inst->qlen > 1)
346 inst->lastnlh->nlmsg_type = NLMSG_DONE;
347
348 status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT);
349 if (status < 0) {
350 UDEBUG("netlink_unicast() failed\n");
351 /* FIXME: statistics */
352 }
353
354 inst->qlen = 0;
355 inst->skb = NULL;
356 inst->lastnlh = NULL;
357
358 return status;
359}
360
361static void nfulnl_timer(unsigned long data)
362{
363 struct nfulnl_instance *inst = (struct nfulnl_instance *)data;
364
365 UDEBUG("timer function called, flushing buffer\n");
366
367 spin_lock_bh(&inst->lock);
368 __nfulnl_send(inst);
369 instance_put(inst);
370 spin_unlock_bh(&inst->lock);
371}
372
373static inline int
374__build_packet_message(struct nfulnl_instance *inst,
375 const struct sk_buff *skb,
376 unsigned int data_len,
377 unsigned int pf,
378 unsigned int hooknum,
379 const struct net_device *indev,
380 const struct net_device *outdev,
381 const struct nf_loginfo *li,
382 const char *prefix)
383{
384 unsigned char *old_tail;
385 struct nfulnl_msg_packet_hdr pmsg;
386 struct nlmsghdr *nlh;
387 struct nfgenmsg *nfmsg;
388 u_int32_t tmp_uint;
389
390 UDEBUG("entered\n");
391
392 old_tail = inst->skb->tail;
393 nlh = NLMSG_PUT(inst->skb, 0, 0,
394 NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
395 sizeof(struct nfgenmsg));
396 nfmsg = NLMSG_DATA(nlh);
397 nfmsg->nfgen_family = pf;
398 nfmsg->version = NFNETLINK_V0;
399 nfmsg->res_id = htons(inst->group_num);
400
401 pmsg.hw_protocol = htons(skb->protocol);
402 pmsg.hook = hooknum;
403
404 NFA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);
405
406 if (prefix) {
407 int slen = strlen(prefix);
408 if (slen > NFULNL_PREFIXLEN)
409 slen = NFULNL_PREFIXLEN;
410 NFA_PUT(inst->skb, NFULA_PREFIX, slen, prefix);
411 }
412
413 if (indev) {
414 tmp_uint = htonl(indev->ifindex);
415 NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, sizeof(tmp_uint),
416 &tmp_uint);
417 }
418
419 if (outdev) {
420 tmp_uint = htonl(outdev->ifindex);
421 NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, sizeof(tmp_uint),
422 &tmp_uint);
423 }
424
425 if (skb->nfmark) {
426 tmp_uint = htonl(skb->nfmark);
427 NFA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint);
428 }
429
430 if (indev && skb->dev && skb->dev->hard_header_parse) {
431 struct nfulnl_msg_packet_hw phw;
432
433 phw.hw_addrlen =
434 skb->dev->hard_header_parse((struct sk_buff *)skb,
435 phw.hw_addr);
436 phw.hw_addrlen = htons(phw.hw_addrlen);
437 NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
438 }
439
440 if (skb->stamp.tv_sec) {
441 struct nfulnl_msg_packet_timestamp ts;
442
443 ts.sec = cpu_to_be64(skb->stamp.tv_sec);
444 ts.usec = cpu_to_be64(skb->stamp.tv_usec);
445
446 NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
447 }
448
449 /* UID */
450 if (skb->sk) {
451 read_lock_bh(&skb->sk->sk_callback_lock);
452 if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
453 u_int32_t uid = htonl(skb->sk->sk_socket->file->f_uid);
454 /* need to unlock here since NFA_PUT may goto */
455 read_unlock_bh(&skb->sk->sk_callback_lock);
456 NFA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid);
457 } else
458 read_unlock_bh(&skb->sk->sk_callback_lock);
459 }
460
461 if (data_len) {
462 struct nfattr *nfa;
463 int size = NFA_LENGTH(data_len);
464
465 if (skb_tailroom(inst->skb) < (int)NFA_SPACE(data_len)) {
466 printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
467 goto nlmsg_failure;
468 }
469
470 nfa = (struct nfattr *)skb_put(inst->skb, NFA_ALIGN(size));
471 nfa->nfa_type = NFULA_PAYLOAD;
472 nfa->nfa_len = size;
473
474 if (skb_copy_bits(skb, 0, NFA_DATA(nfa), data_len))
475 BUG();
476 }
477
478 nlh->nlmsg_len = inst->skb->tail - old_tail;
479 return 0;
480
481nlmsg_failure:
482 UDEBUG("nlmsg_failure\n");
483nfattr_failure:
484 PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
485 return -1;
486}
487
488#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)
489
490static struct nf_loginfo default_loginfo = {
491 .type = NF_LOG_TYPE_ULOG,
492 .u = {
493 .ulog = {
494 .copy_len = 0xffff,
495 .group = 0,
496 .qthreshold = 1,
497 },
498 },
499};
500
501/* log handler for internal netfilter logging api */
502static void
503nfulnl_log_packet(unsigned int pf,
504 unsigned int hooknum,
505 const struct sk_buff *skb,
506 const struct net_device *in,
507 const struct net_device *out,
508 const struct nf_loginfo *li_user,
509 const char *prefix)
510{
511 unsigned int size, data_len;
512 struct nfulnl_instance *inst;
513 const struct nf_loginfo *li;
514 unsigned int qthreshold;
515 unsigned int nlbufsiz;
516
517 if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
518 li = li_user;
519 else
520 li = &default_loginfo;
521
522 inst = instance_lookup_get(li->u.ulog.group);
523 if (!inst)
524 inst = instance_lookup_get(0);
525 if (!inst) {
526 PRINTR("nfnetlink_log: trying to log packet, "
527 "but no instance for group %u\n", li->u.ulog.group);
528 return;
529 }
530
531 /* all macros expand to constant values at compile time */
532 /* FIXME: do we want to make the size calculation conditional based on
533 * what is actually present? way more branches and checks, but more
534 * memory efficient... */
535 size = NLMSG_SPACE(sizeof(struct nfgenmsg))
536 + NFA_SPACE(sizeof(struct nfulnl_msg_packet_hdr))
537 + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */
538 + NFA_SPACE(sizeof(u_int32_t)) /* ifindex */
539 + NFA_SPACE(sizeof(u_int32_t)) /* mark */
540 + NFA_SPACE(sizeof(u_int32_t)) /* uid */
541 + NFA_SPACE(NFULNL_PREFIXLEN) /* prefix */
542 + NFA_SPACE(sizeof(struct nfulnl_msg_packet_hw))
543 + NFA_SPACE(sizeof(struct nfulnl_msg_packet_timestamp));
544
545 UDEBUG("initial size=%u\n", size);
546
547 spin_lock_bh(&inst->lock);
548
549 qthreshold = inst->qthreshold;
550 /* per-rule qthreshold overrides per-instance */
551 if (qthreshold > li->u.ulog.qthreshold)
552 qthreshold = li->u.ulog.qthreshold;
553
554 switch (inst->copy_mode) {
555 case NFULNL_COPY_META:
556 case NFULNL_COPY_NONE:
557 data_len = 0;
558 break;
559
560 case NFULNL_COPY_PACKET:
561 if (inst->copy_range == 0
562 || inst->copy_range > skb->len)
563 data_len = skb->len;
564 else
565 data_len = inst->copy_range;
566
567 size += NFA_SPACE(data_len);
568 UDEBUG("copy_packet, therefore size now %u\n", size);
569 break;
570
571 default:
572 spin_unlock_bh(&inst->lock);
573 instance_put(inst);
574 return;
575 }
576
577 if (size > inst->nlbufsiz)
578 nlbufsiz = size;
579 else
580 nlbufsiz = inst->nlbufsiz;
581
582 if (!inst->skb) {
583 if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
584 UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
585 inst->nlbufsiz, size);
586 goto alloc_failure;
587 }
588 } else if (inst->qlen >= qthreshold ||
589 size > skb_tailroom(inst->skb)) {
590 /* either the queue len is too high or we don't have
591 * enough room in the skb left. flush to userspace. */
592 UDEBUG("flushing old skb\n");
593
594 __nfulnl_send(inst);
595
596 if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
597 UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
598 inst->nlbufsiz, size);
599 goto alloc_failure;
600 }
601 }
602
603 UDEBUG("qlen %d, qthreshold %d\n", inst->qlen, qthreshold);
604 inst->qlen++;
605
606 __build_packet_message(inst, skb, data_len, pf,
607 hooknum, in, out, li, prefix);
608
609 /* timer_pending always called within inst->lock, so there
610 * is no chance of a race here */
611 if (!timer_pending(&inst->timer)) {
612 instance_get(inst);
613 inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
614 add_timer(&inst->timer);
615 }
616 spin_unlock_bh(&inst->lock);
617
618 return;
619
620alloc_failure:
621 spin_unlock_bh(&inst->lock);
622 instance_put(inst);
623 UDEBUG("error allocating skb\n");
624 /* FIXME: statistics */
625}
626
627static int
628nfulnl_rcv_nl_event(struct notifier_block *this,
629 unsigned long event, void *ptr)
630{
631 struct netlink_notify *n = ptr;
632
633 if (event == NETLINK_URELEASE &&
634 n->protocol == NETLINK_NETFILTER && n->pid) {
635 int i;
636
637 /* destroy all instances for this pid */
638 write_lock_bh(&instances_lock);
639 for (i = 0; i < INSTANCE_BUCKETS; i++) {
640 struct hlist_node *tmp, *t2;
641 struct nfulnl_instance *inst;
642 struct hlist_head *head = &instance_table[i];
643
644 hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
645 UDEBUG("node = %p\n", inst);
646 if (n->pid == inst->peer_pid)
647 __instance_destroy(inst);
648 }
649 }
650 write_unlock_bh(&instances_lock);
651 }
652 return NOTIFY_DONE;
653}
654
655static struct notifier_block nfulnl_rtnl_notifier = {
656 .notifier_call = nfulnl_rcv_nl_event,
657};
658
659static int
660nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
661 struct nlmsghdr *nlh, struct nfattr *nfqa[], int *errp)
662{
663 return -ENOTSUPP;
664}
665
666static struct nf_logger nfulnl_logger = {
667 .name = "nfnetlink_log",
668 .logfn = &nfulnl_log_packet,
669 .me = THIS_MODULE,
670};
671
672static const int nfula_min[NFULA_MAX] = {
673 [NFULA_PACKET_HDR-1] = sizeof(struct nfulnl_msg_packet_hdr),
674 [NFULA_MARK-1] = sizeof(u_int32_t),
675 [NFULA_TIMESTAMP-1] = sizeof(struct nfulnl_msg_packet_timestamp),
676 [NFULA_IFINDEX_INDEV-1] = sizeof(u_int32_t),
677 [NFULA_IFINDEX_OUTDEV-1]= sizeof(u_int32_t),
678 [NFULA_HWADDR-1] = sizeof(struct nfulnl_msg_packet_hw),
679 [NFULA_PAYLOAD-1] = 0,
680 [NFULA_PREFIX-1] = 0,
681 [NFULA_UID-1] = sizeof(u_int32_t),
682};
683
684static const int nfula_cfg_min[NFULA_CFG_MAX] = {
685 [NFULA_CFG_CMD-1] = sizeof(struct nfulnl_msg_config_cmd),
686 [NFULA_CFG_MODE-1] = sizeof(struct nfulnl_msg_config_mode),
687 [NFULA_CFG_TIMEOUT-1] = sizeof(u_int32_t),
688 [NFULA_CFG_QTHRESH-1] = sizeof(u_int32_t),
689 [NFULA_CFG_NLBUFSIZ-1] = sizeof(u_int32_t),
690};
691
692static int
693nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
694 struct nlmsghdr *nlh, struct nfattr *nfula[], int *errp)
695{
696 struct nfgenmsg *nfmsg = NLMSG_DATA(nlh);
697 u_int16_t group_num = ntohs(nfmsg->res_id);
698 struct nfulnl_instance *inst;
699 int ret = 0;
700
701 UDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));
702
703 if (nfattr_bad_size(nfula, NFULA_CFG_MAX, nfula_cfg_min)) {
704 UDEBUG("bad attribute size\n");
705 return -EINVAL;
706 }
707
708 inst = instance_lookup_get(group_num);
709 if (nfula[NFULA_CFG_CMD-1]) {
710 u_int8_t pf = nfmsg->nfgen_family;
711 struct nfulnl_msg_config_cmd *cmd;
712 cmd = NFA_DATA(nfula[NFULA_CFG_CMD-1]);
713 UDEBUG("found CFG_CMD for\n");
714
715 switch (cmd->command) {
716 case NFULNL_CFG_CMD_BIND:
717 if (inst) {
718 ret = -EBUSY;
719 goto out_put;
720 }
721
722 inst = instance_create(group_num,
723 NETLINK_CB(skb).pid);
724 if (!inst) {
725 ret = -EINVAL;
726 goto out_put;
727 }
728 break;
729 case NFULNL_CFG_CMD_UNBIND:
730 if (!inst) {
731 ret = -ENODEV;
732 goto out_put;
733 }
734
735 if (inst->peer_pid != NETLINK_CB(skb).pid) {
736 ret = -EPERM;
737 goto out_put;
738 }
739
740 instance_destroy(inst);
741 break;
742 case NFULNL_CFG_CMD_PF_BIND:
743 UDEBUG("registering log handler for pf=%u\n", pf);
744 ret = nf_log_register(pf, &nfulnl_logger);
745 break;
746 case NFULNL_CFG_CMD_PF_UNBIND:
747 UDEBUG("unregistering log handler for pf=%u\n", pf);
748 /* This is a bug and a feature. We cannot unregister
749 * other handlers, like nfnetlink_inst can */
750 nf_log_unregister_pf(pf);
751 break;
752 default:
753 ret = -EINVAL;
754 break;
755 }
756 } else {
757 if (!inst) {
758 UDEBUG("no config command, and no instance for "
759 "group=%u pid=%u =>ENOENT\n",
760 group_num, NETLINK_CB(skb).pid);
761 ret = -ENOENT;
762 goto out_put;
763 }
764
765 if (inst->peer_pid != NETLINK_CB(skb).pid) {
766 UDEBUG("no config command, and wrong pid\n");
767 ret = -EPERM;
768 goto out_put;
769 }
770 }
771
772 if (nfula[NFULA_CFG_MODE-1]) {
773 struct nfulnl_msg_config_mode *params;
774 params = NFA_DATA(nfula[NFULA_CFG_MODE-1]);
775
776 nfulnl_set_mode(inst, params->copy_mode,
777 ntohs(params->copy_range));
778 }
779
780 if (nfula[NFULA_CFG_TIMEOUT-1]) {
781 u_int32_t timeout =
782 *(u_int32_t *)NFA_DATA(nfula[NFULA_CFG_TIMEOUT-1]);
783
784 nfulnl_set_timeout(inst, ntohl(timeout));
785 }
786
787 if (nfula[NFULA_CFG_NLBUFSIZ-1]) {
788 u_int32_t nlbufsiz =
789 *(u_int32_t *)NFA_DATA(nfula[NFULA_CFG_NLBUFSIZ-1]);
790
791 nfulnl_set_nlbufsiz(inst, ntohl(nlbufsiz));
792 }
793
794 if (nfula[NFULA_CFG_QTHRESH-1]) {
795 u_int32_t qthresh =
796 *(u_int32_t *)NFA_DATA(nfula[NFULA_CFG_QTHRESH-1]);
797
798 nfulnl_set_qthresh(inst, ntohl(qthresh));
799 }
800
801out_put:
802 instance_put(inst);
803 return ret;
804}
805
806static struct nfnl_callback nfulnl_cb[NFULNL_MSG_MAX] = {
807 [NFULNL_MSG_PACKET] = { .call = nfulnl_recv_unsupp,
808 .cap_required = CAP_NET_ADMIN },
809 [NFULNL_MSG_CONFIG] = { .call = nfulnl_recv_config,
810 .cap_required = CAP_NET_ADMIN },
811};
812
813static struct nfnetlink_subsystem nfulnl_subsys = {
814 .name = "log",
815 .subsys_id = NFNL_SUBSYS_ULOG,
816 .cb_count = NFULNL_MSG_MAX,
817 .attr_count = NFULA_MAX,
818 .cb = nfulnl_cb,
819};
820
821#ifdef CONFIG_PROC_FS
822struct iter_state {
823 unsigned int bucket;
824};
825
826static struct hlist_node *get_first(struct seq_file *seq)
827{
828 struct iter_state *st = seq->private;
829
830 if (!st)
831 return NULL;
832
833 for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
834 if (!hlist_empty(&instance_table[st->bucket]))
835 return instance_table[st->bucket].first;
836 }
837 return NULL;
838}
839
840static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
841{
842 struct iter_state *st = seq->private;
843
844 h = h->next;
845 while (!h) {
846 if (++st->bucket >= INSTANCE_BUCKETS)
847 return NULL;
848
849 h = instance_table[st->bucket].first;
850 }
851 return h;
852}
853
854static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
855{
856 struct hlist_node *head;
857 head = get_first(seq);
858
859 if (head)
860 while (pos && (head = get_next(seq, head)))
861 pos--;
862 return pos ? NULL : head;
863}
864
865static void *seq_start(struct seq_file *seq, loff_t *pos)
866{
867 read_lock_bh(&instances_lock);
868 return get_idx(seq, *pos);
869}
870
871static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
872{
873 (*pos)++;
874 return get_next(s, v);
875}
876
877static void seq_stop(struct seq_file *s, void *v)
878{
879 read_unlock_bh(&instances_lock);
880}
881
882static int seq_show(struct seq_file *s, void *v)
883{
884 const struct nfulnl_instance *inst = v;
885
886 return seq_printf(s, "%5d %6d %5d %1d %5d %6d %2d\n",
887 inst->group_num,
888 inst->peer_pid, inst->qlen,
889 inst->copy_mode, inst->copy_range,
890 inst->flushtimeout, atomic_read(&inst->use));
891}
892
893static struct seq_operations nful_seq_ops = {
894 .start = seq_start,
895 .next = seq_next,
896 .stop = seq_stop,
897 .show = seq_show,
898};
899
900static int nful_open(struct inode *inode, struct file *file)
901{
902 struct seq_file *seq;
903 struct iter_state *is;
904 int ret;
905
906 is = kmalloc(sizeof(*is), GFP_KERNEL);
907 if (!is)
908 return -ENOMEM;
909 memset(is, 0, sizeof(*is));
910 ret = seq_open(file, &nful_seq_ops);
911 if (ret < 0)
912 goto out_free;
913 seq = file->private_data;
914 seq->private = is;
915 return ret;
916out_free:
917 kfree(is);
918 return ret;
919}
920
921static struct file_operations nful_file_ops = {
922 .owner = THIS_MODULE,
923 .open = nful_open,
924 .read = seq_read,
925 .llseek = seq_lseek,
926 .release = seq_release_private,
927};
928
929#endif /* PROC_FS */
930
931static int
932init_or_cleanup(int init)
933{
934 int i, status = -ENOMEM;
935#ifdef CONFIG_PROC_FS
936 struct proc_dir_entry *proc_nful;
937#endif
938
939 if (!init)
940 goto cleanup;
941
942 for (i = 0; i < INSTANCE_BUCKETS; i++)
943 INIT_HLIST_HEAD(&instance_table[i]);
944
945 /* it's not really all that important to have a random value, so
946 * we can do this from the init function, even if there hasn't
947 * been that much entropy yet */
948 get_random_bytes(&hash_init, sizeof(hash_init));
949
950 netlink_register_notifier(&nfulnl_rtnl_notifier);
951 status = nfnetlink_subsys_register(&nfulnl_subsys);
952 if (status < 0) {
953 printk(KERN_ERR "log: failed to create netlink socket\n");
954 goto cleanup_netlink_notifier;
955 }
956
957#ifdef CONFIG_PROC_FS
958 proc_nful = create_proc_entry("nfnetlink_log", 0440,
959 proc_net_netfilter);
960 if (!proc_nful)
961 goto cleanup_subsys;
962 proc_nful->proc_fops = &nful_file_ops;
963#endif
964
965 return status;
966
967cleanup:
968 nf_log_unregister_logger(&nfulnl_logger);
969#ifdef CONFIG_PROC_FS
970 remove_proc_entry("nfnetlink_log", proc_net_netfilter);
971cleanup_subsys:
972#endif
973 nfnetlink_subsys_unregister(&nfulnl_subsys);
974cleanup_netlink_notifier:
975 netlink_unregister_notifier(&nfulnl_rtnl_notifier);
976 return status;
977}
978
979static int __init init(void)
980{
981
982 return init_or_cleanup(1);
983}
984
985static void __exit fini(void)
986{
987 init_or_cleanup(0);
988}
989
990MODULE_DESCRIPTION("netfilter userspace logging");
991MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
992MODULE_LICENSE("GPL");
993
994module_init(init);
995module_exit(fini);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index eab309e3d42e..d7b0330d64b4 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1041,6 +1041,7 @@ cleanup:
1041 nf_unregister_queue_handlers(nfqnl_enqueue_packet);
1042 unregister_netdevice_notifier(&nfqnl_dev_notifier);
1043#ifdef CONFIG_PROC_FS
1044 remove_proc_entry("nfnetlink_queue", proc_net_netfilter);
1045cleanup_subsys:
1046#endif
1047 nfnetlink_subsys_unregister(&nfqnl_subsys);