author     Patrick McHardy <kaber@trash.net>      2007-12-18 01:41:02 -0500
committer  David S. Miller <davem@davemloft.net>  2008-01-28 17:59:02 -0500
commit     1792bab4caaa85bae858799bb6231f171f59b58a
tree       2e4b51819d035d80d0fd969af5b0b60a92834a70
parent     cd21f0ac43f8d7f23573a01ada7fb2c96e686ff8
[NETFILTER]: nfnetlink_log: remove excessive debugging
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
 net/netfilter/nfnetlink_log.c | 45 -
 1 file changed, 0 insertions, 45 deletions
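The UDEBUG() calls deleted here were already dead code in normal builds: the macro definition (removed in the first hunk below) sits behind an "#if 0" guard, so UDEBUG() expands to nothing unless that guard is edited by hand. For reference, the removed pattern, reproduced from that hunk:

    /* Compile-time debug switch removed by this patch: the "#if 0" branch
     * is never taken, so UDEBUG() expands to nothing and every call site
     * below compiles away. */
    #if 0
    #define UDEBUG(x, args ...) printk(KERN_DEBUG "%s(%d):%s(): " x, \
                                       __FILE__, __LINE__, __FUNCTION__, \
                                       ## args)
    #else
    #define UDEBUG(x, ...)
    #endif

Because the macro expands to nothing, dropping the call sites has no functional effect; it only removes clutter.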
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 325e93a9059b..c12e1d1bd00e 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -45,14 +45,6 @@
 #define PRINTR(x, args...) do { if (net_ratelimit()) \
 				printk(x, ## args); } while (0);
 
-#if 0
-#define UDEBUG(x, args ...) printk(KERN_DEBUG "%s(%d):%s(): " x, \
-				__FILE__, __LINE__, __FUNCTION__, \
-				## args)
-#else
-#define UDEBUG(x, ...)
-#endif
-
 struct nfulnl_instance {
 	struct hlist_node hlist;	/* global list of instances */
 	spinlock_t lock;
@@ -93,8 +85,6 @@ __instance_lookup(u_int16_t group_num)
 	struct hlist_node *pos;
 	struct nfulnl_instance *inst;
 
-	UDEBUG("entering (group_num=%u)\n", group_num);
-
 	head = &instance_table[instance_hashfn(group_num)];
 	hlist_for_each_entry(inst, pos, head, hlist) {
 		if (inst->group_num == group_num)
@@ -127,7 +117,6 @@ static void
 instance_put(struct nfulnl_instance *inst)
 {
 	if (inst && atomic_dec_and_test(&inst->use)) {
-		UDEBUG("kfree(inst=%p)\n", inst);
 		kfree(inst);
 		module_put(THIS_MODULE);
 	}
@@ -140,13 +129,9 @@ instance_create(u_int16_t group_num, int pid)
 {
 	struct nfulnl_instance *inst;
 
-	UDEBUG("entering (group_num=%u, pid=%d)\n", group_num,
-		pid);
-
 	write_lock_bh(&instances_lock);
 	if (__instance_lookup(group_num)) {
 		inst = NULL;
-		UDEBUG("aborting, instance already exists\n");
 		goto out_unlock;
 	}
 
@@ -178,9 +163,6 @@ instance_create(u_int16_t group_num, int pid)
 	hlist_add_head(&inst->hlist,
 		       &instance_table[instance_hashfn(group_num)]);
 
-	UDEBUG("newly added node: %p, next=%p\n", &inst->hlist,
-		inst->hlist.next);
-
 	write_unlock_bh(&instances_lock);
 
 	return inst;
@@ -196,9 +178,6 @@ static void
 __instance_destroy(struct nfulnl_instance *inst)
 {
 	/* first pull it out of the global list */
-	UDEBUG("removing instance %p (queuenum=%u) from hash\n",
-		inst, inst->group_num);
-
 	hlist_del(&inst->hlist);
 
 	/* then flush all pending packets from skb */
@@ -306,8 +285,6 @@ nfulnl_alloc_skb(unsigned int inst_size, unsigned int pkt_size)
 	struct sk_buff *skb;
 	unsigned int n;
 
-	UDEBUG("entered (%u, %u)\n", inst_size, pkt_size);
-
 	/* alloc skb which should be big enough for a whole multipart
 	 * message. WARNING: has to be <= 128k due to slab restrictions */
 
@@ -342,10 +319,6 @@ __nfulnl_send(struct nfulnl_instance *inst)
 				  sizeof(struct nfgenmsg));
 
 	status = nfnetlink_unicast(inst->skb, inst->peer_pid, MSG_DONTWAIT);
-	if (status < 0) {
-		UDEBUG("netlink_unicast() failed\n");
-		/* FIXME: statistics */
-	}
 
 	inst->qlen = 0;
 	inst->skb = NULL;
@@ -369,8 +342,6 @@ nfulnl_timer(unsigned long data)
 {
 	struct nfulnl_instance *inst = (struct nfulnl_instance *)data;
 
-	UDEBUG("timer function called, flushing buffer\n");
-
 	spin_lock_bh(&inst->lock);
 	if (inst->skb)
 		__nfulnl_send(inst);
@@ -397,8 +368,6 @@ __build_packet_message(struct nfulnl_instance *inst,
 	__be32 tmp_uint;
 	sk_buff_data_t old_tail = inst->skb->tail;
 
-	UDEBUG("entered\n");
-
 	nlh = NLMSG_PUT(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
@@ -544,7 +513,6 @@ __build_packet_message(struct nfulnl_instance *inst,
 	return 0;
 
 nlmsg_failure:
-	UDEBUG("nlmsg_failure\n");
 nla_put_failure:
 	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
 	return -1;
@@ -609,8 +577,6 @@ nfulnl_log_packet(unsigned int pf,
 		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
 		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));
 
-	UDEBUG("initial size=%u\n", size);
-
 	spin_lock_bh(&inst->lock);
 
 	if (inst->flags & NFULNL_CFG_F_SEQ)
@@ -637,7 +603,6 @@ nfulnl_log_packet(unsigned int pf,
 			data_len = inst->copy_range;
 
 		size += nla_total_size(data_len);
-		UDEBUG("copy_packet, therefore size now %u\n", size);
 		break;
 
 	default:
@@ -648,8 +613,6 @@ nfulnl_log_packet(unsigned int pf,
 	    size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
 		/* either the queue len is too high or we don't have
 		 * enough room in the skb left. flush to userspace. */
-		UDEBUG("flushing old skb\n");
-
 		__nfulnl_flush(inst);
 	}
 
@@ -659,7 +622,6 @@ nfulnl_log_packet(unsigned int pf,
 			goto alloc_failure;
 	}
 
-	UDEBUG("qlen %d, qthreshold %d\n", inst->qlen, qthreshold);
 	inst->qlen++;
 
 	__build_packet_message(inst, skb, data_len, pf,
@@ -681,7 +643,6 @@ unlock_and_release:
 	return;
 
 alloc_failure:
-	UDEBUG("error allocating skb\n");
 	/* FIXME: statistics */
 	goto unlock_and_release;
 }
@@ -704,7 +665,6 @@ nfulnl_rcv_nl_event(struct notifier_block *this,
 			struct hlist_head *head = &instance_table[i];
 
 			hlist_for_each_entry_safe(inst, tmp, t2, head, hlist) {
-				UDEBUG("node = %p\n", inst);
 				if ((n->net == &init_net) &&
 				    (n->pid == inst->peer_pid))
 					__instance_destroy(inst);
@@ -750,8 +710,6 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 	struct nfulnl_instance *inst;
 	int ret = 0;
 
-	UDEBUG("entering for msg %u\n", NFNL_MSG_TYPE(nlh->nlmsg_type));
-
 	inst = instance_lookup_get(group_num);
 	if (inst && inst->peer_pid != NETLINK_CB(skb).pid) {
 		ret = -EPERM;
@@ -763,7 +721,6 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 		struct nfulnl_msg_config_cmd *cmd;
 
 		cmd = nla_data(nfula[NFULA_CFG_CMD]);
-		UDEBUG("found CFG_CMD for\n");
 
 		switch (cmd->command) {
 		case NFULNL_CFG_CMD_BIND:
@@ -788,11 +745,9 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
 			instance_destroy(inst);
 			goto out;
 		case NFULNL_CFG_CMD_PF_BIND:
-			UDEBUG("registering log handler for pf=%u\n", pf);
 			ret = nf_log_register(pf, &nfulnl_logger);
 			break;
 		case NFULNL_CFG_CMD_PF_UNBIND:
-			UDEBUG("unregistering log handler for pf=%u\n", pf);
 			/* This is a bug and a feature. We cannot unregister
 			 * other handlers, like nfnetlink_inst can */
 			nf_log_unregister_pf(pf);