diff options
author | Patrick McHardy <kaber@trash.net> | 2007-12-05 04:26:02 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2008-01-28 17:56:13 -0500 |
commit | 171b7fc4fc178a004aec8d06eb745c30ae726fb6 (patch) | |
tree | ce54f05818c0e87dcbe034bf3a80f057aa1cf206 | |
parent | 9521409265d3bae939ace4c259f765c29339730f (diff) |
[NETFILTER]: ip6_queue: deobfuscate entry lookups
A queue entry lookup currently looks like this:
ipq_find_dequeue_entry -> __ipq_find_dequeue_entry ->
__ipq_find_entry -> cmpfn -> id_cmp
Use simple open-coded list walking and kill the cmpfn for
ipq_find_dequeue_entry. Instead add it to ipq_flush (after
similar cleanups) and use ipq_flush for both complete flushes
and flushing entries related to a device.
Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r-- | net/ipv6/netfilter/ip6_queue.c | 101 |
1 file changed, 37 insertions, 64 deletions
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c index 243a00bcd3df..7d0780d02d0b 100644 --- a/net/ipv6/netfilter/ip6_queue.c +++ b/net/ipv6/netfilter/ip6_queue.c | |||
@@ -75,52 +75,6 @@ __ipq_enqueue_entry(struct ipq_queue_entry *entry) | |||
75 | queue_total++; | 75 | queue_total++; |
76 | } | 76 | } |
77 | 77 | ||
78 | /* | ||
79 | * Find and return a queued entry matched by cmpfn, or return the last | ||
80 | * entry if cmpfn is NULL. | ||
81 | */ | ||
82 | static inline struct ipq_queue_entry * | ||
83 | __ipq_find_entry(ipq_cmpfn cmpfn, unsigned long data) | ||
84 | { | ||
85 | struct ipq_queue_entry *entry; | ||
86 | |||
87 | list_for_each_entry(entry, &queue_list, list) { | ||
88 | if (!cmpfn || cmpfn(entry, data)) | ||
89 | return entry; | ||
90 | } | ||
91 | return NULL; | ||
92 | } | ||
93 | |||
94 | static inline void | ||
95 | __ipq_dequeue_entry(struct ipq_queue_entry *entry) | ||
96 | { | ||
97 | list_del(&entry->list); | ||
98 | queue_total--; | ||
99 | } | ||
100 | |||
101 | static inline struct ipq_queue_entry * | ||
102 | __ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data) | ||
103 | { | ||
104 | struct ipq_queue_entry *entry; | ||
105 | |||
106 | entry = __ipq_find_entry(cmpfn, data); | ||
107 | if (entry == NULL) | ||
108 | return NULL; | ||
109 | |||
110 | __ipq_dequeue_entry(entry); | ||
111 | return entry; | ||
112 | } | ||
113 | |||
114 | |||
115 | static inline void | ||
116 | __ipq_flush(int verdict) | ||
117 | { | ||
118 | struct ipq_queue_entry *entry; | ||
119 | |||
120 | while ((entry = __ipq_find_dequeue_entry(NULL, 0))) | ||
121 | ipq_issue_verdict(entry, verdict); | ||
122 | } | ||
123 | |||
124 | static inline int | 78 | static inline int |
125 | __ipq_set_mode(unsigned char mode, unsigned int range) | 79 | __ipq_set_mode(unsigned char mode, unsigned int range) |
126 | { | 80 | { |
@@ -147,31 +101,59 @@ __ipq_set_mode(unsigned char mode, unsigned int range) | |||
147 | return status; | 101 | return status; |
148 | } | 102 | } |
149 | 103 | ||
104 | static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data); | ||
105 | |||
150 | static inline void | 106 | static inline void |
151 | __ipq_reset(void) | 107 | __ipq_reset(void) |
152 | { | 108 | { |
153 | peer_pid = 0; | 109 | peer_pid = 0; |
154 | net_disable_timestamp(); | 110 | net_disable_timestamp(); |
155 | __ipq_set_mode(IPQ_COPY_NONE, 0); | 111 | __ipq_set_mode(IPQ_COPY_NONE, 0); |
156 | __ipq_flush(NF_DROP); | 112 | __ipq_flush(NULL, 0); |
157 | } | 113 | } |
158 | 114 | ||
159 | static struct ipq_queue_entry * | 115 | static struct ipq_queue_entry * |
160 | ipq_find_dequeue_entry(ipq_cmpfn cmpfn, unsigned long data) | 116 | ipq_find_dequeue_entry(unsigned long id) |
161 | { | 117 | { |
162 | struct ipq_queue_entry *entry; | 118 | struct ipq_queue_entry *entry = NULL, *i; |
163 | 119 | ||
164 | write_lock_bh(&queue_lock); | 120 | write_lock_bh(&queue_lock); |
165 | entry = __ipq_find_dequeue_entry(cmpfn, data); | 121 | |
122 | list_for_each_entry(i, &queue_list, list) { | ||
123 | if ((unsigned long)i == id) { | ||
124 | entry = i; | ||
125 | break; | ||
126 | } | ||
127 | } | ||
128 | |||
129 | if (entry) { | ||
130 | list_del(&entry->list); | ||
131 | queue_total--; | ||
132 | } | ||
133 | |||
166 | write_unlock_bh(&queue_lock); | 134 | write_unlock_bh(&queue_lock); |
167 | return entry; | 135 | return entry; |
168 | } | 136 | } |
169 | 137 | ||
170 | static void | 138 | static void |
171 | ipq_flush(int verdict) | 139 | __ipq_flush(ipq_cmpfn cmpfn, unsigned long data) |
140 | { | ||
141 | struct ipq_queue_entry *entry, *next; | ||
142 | |||
143 | list_for_each_entry_safe(entry, next, &queue_list, list) { | ||
144 | if (!cmpfn || cmpfn(entry, data)) { | ||
145 | list_del(&entry->list); | ||
146 | queue_total--; | ||
147 | ipq_issue_verdict(entry, NF_DROP); | ||
148 | } | ||
149 | } | ||
150 | } | ||
151 | |||
152 | static void | ||
153 | ipq_flush(ipq_cmpfn cmpfn, unsigned long data) | ||
172 | { | 154 | { |
173 | write_lock_bh(&queue_lock); | 155 | write_lock_bh(&queue_lock); |
174 | __ipq_flush(verdict); | 156 | __ipq_flush(cmpfn, data); |
175 | write_unlock_bh(&queue_lock); | 157 | write_unlock_bh(&queue_lock); |
176 | } | 158 | } |
177 | 159 | ||
@@ -364,12 +346,6 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) | |||
364 | return 0; | 346 | return 0; |
365 | } | 347 | } |
366 | 348 | ||
367 | static inline int | ||
368 | id_cmp(struct ipq_queue_entry *e, unsigned long id) | ||
369 | { | ||
370 | return (id == (unsigned long )e); | ||
371 | } | ||
372 | |||
373 | static int | 349 | static int |
374 | ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) | 350 | ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) |
375 | { | 351 | { |
@@ -378,7 +354,7 @@ ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) | |||
378 | if (vmsg->value > NF_MAX_VERDICT) | 354 | if (vmsg->value > NF_MAX_VERDICT) |
379 | return -EINVAL; | 355 | return -EINVAL; |
380 | 356 | ||
381 | entry = ipq_find_dequeue_entry(id_cmp, vmsg->id); | 357 | entry = ipq_find_dequeue_entry(vmsg->id); |
382 | if (entry == NULL) | 358 | if (entry == NULL) |
383 | return -ENOENT; | 359 | return -ENOENT; |
384 | else { | 360 | else { |
@@ -449,10 +425,7 @@ dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex) | |||
449 | static void | 425 | static void |
450 | ipq_dev_drop(int ifindex) | 426 | ipq_dev_drop(int ifindex) |
451 | { | 427 | { |
452 | struct ipq_queue_entry *entry; | 428 | ipq_flush(dev_cmp, ifindex); |
453 | |||
454 | while ((entry = ipq_find_dequeue_entry(dev_cmp, ifindex)) != NULL) | ||
455 | ipq_issue_verdict(entry, NF_DROP); | ||
456 | } | 429 | } |
457 | 430 | ||
458 | #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) | 431 | #define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0) |
@@ -689,7 +662,7 @@ static void __exit ip6_queue_fini(void) | |||
689 | { | 662 | { |
690 | nf_unregister_queue_handlers(&nfqh); | 663 | nf_unregister_queue_handlers(&nfqh); |
691 | synchronize_net(); | 664 | synchronize_net(); |
692 | ipq_flush(NF_DROP); | 665 | ipq_flush(NULL, 0); |
693 | 666 | ||
694 | unregister_sysctl_table(ipq_sysctl_header); | 667 | unregister_sysctl_table(ipq_sysctl_header); |
695 | unregister_netdevice_notifier(&ipq_dev_notifier); | 668 | unregister_netdevice_notifier(&ipq_dev_notifier); |