aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2007-12-05 04:26:33 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-28 17:56:14 -0500
commit02f014d88831f73b895c1fe09badb66c88e932d3 (patch)
tree09aa75b8edeb240e62c4269f20630f8206c0e6d4 /net/ipv4
parent7a6c6653b3a977087ec64d76817c7ee6e1df5b60 (diff)
[NETFILTER]: nf_queue: move list_head/skb/id to struct nf_info
Move common fields for queue management to struct nf_info and rename it to struct nf_queue_entry. This avoids one allocation/free per packet and simplifies the code a bit. Alternatively we could add some private room at the tail, but since all current users use identical structs this seems easier. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--net/ipv4/netfilter.c14
-rw-r--r--net/ipv4/netfilter/ip_queue.c68
2 files changed, 31 insertions, 51 deletions
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index f7166084a5ab..7bf5e4a199f0 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -123,11 +123,12 @@ struct ip_rt_info {
123 u_int8_t tos; 123 u_int8_t tos;
124}; 124};
125 125
126static void nf_ip_saveroute(const struct sk_buff *skb, struct nf_info *info) 126static void nf_ip_saveroute(const struct sk_buff *skb,
127 struct nf_queue_entry *entry)
127{ 128{
128 struct ip_rt_info *rt_info = nf_info_reroute(info); 129 struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
129 130
130 if (info->hook == NF_INET_LOCAL_OUT) { 131 if (entry->hook == NF_INET_LOCAL_OUT) {
131 const struct iphdr *iph = ip_hdr(skb); 132 const struct iphdr *iph = ip_hdr(skb);
132 133
133 rt_info->tos = iph->tos; 134 rt_info->tos = iph->tos;
@@ -136,11 +137,12 @@ static void nf_ip_saveroute(const struct sk_buff *skb, struct nf_info *info)
136 } 137 }
137} 138}
138 139
139static int nf_ip_reroute(struct sk_buff *skb, const struct nf_info *info) 140static int nf_ip_reroute(struct sk_buff *skb,
141 const struct nf_queue_entry *entry)
140{ 142{
141 const struct ip_rt_info *rt_info = nf_info_reroute(info); 143 const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
142 144
143 if (info->hook == NF_INET_LOCAL_OUT) { 145 if (entry->hook == NF_INET_LOCAL_OUT) {
144 const struct iphdr *iph = ip_hdr(skb); 146 const struct iphdr *iph = ip_hdr(skb);
145 147
146 if (!(iph->tos == rt_info->tos 148 if (!(iph->tos == rt_info->tos
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index df2957c5bcb4..f1affd2344a9 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -35,13 +35,7 @@
35#define NET_IPQ_QMAX 2088 35#define NET_IPQ_QMAX 2088
36#define NET_IPQ_QMAX_NAME "ip_queue_maxlen" 36#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"
37 37
38struct ipq_queue_entry { 38typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
39 struct list_head list;
40 struct nf_info *info;
41 struct sk_buff *skb;
42};
43
44typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);
45 39
46static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; 40static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
47static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; 41static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
@@ -56,7 +50,7 @@ static LIST_HEAD(queue_list);
56static DEFINE_MUTEX(ipqnl_mutex); 50static DEFINE_MUTEX(ipqnl_mutex);
57 51
58static void 52static void
59ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict) 53ipq_issue_verdict(struct nf_queue_entry *entry, int verdict)
60{ 54{
61 /* TCP input path (and probably other bits) assume to be called 55 /* TCP input path (and probably other bits) assume to be called
62 * from softirq context, not from syscall, like ipq_issue_verdict is 56 * from softirq context, not from syscall, like ipq_issue_verdict is
@@ -64,14 +58,12 @@ ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
64 * softirq, e.g. We therefore emulate this by local_bh_disable() */ 58 * softirq, e.g. We therefore emulate this by local_bh_disable() */
65 59
66 local_bh_disable(); 60 local_bh_disable();
67 nf_reinject(entry->skb, entry->info, verdict); 61 nf_reinject(entry, verdict);
68 local_bh_enable(); 62 local_bh_enable();
69
70 kfree(entry);
71} 63}
72 64
73static inline void 65static inline void
74__ipq_enqueue_entry(struct ipq_queue_entry *entry) 66__ipq_enqueue_entry(struct nf_queue_entry *entry)
75{ 67{
76 list_add_tail(&entry->list, &queue_list); 68 list_add_tail(&entry->list, &queue_list);
77 queue_total++; 69 queue_total++;
@@ -114,10 +106,10 @@ __ipq_reset(void)
114 __ipq_flush(NULL, 0); 106 __ipq_flush(NULL, 0);
115} 107}
116 108
117static struct ipq_queue_entry * 109static struct nf_queue_entry *
118ipq_find_dequeue_entry(unsigned long id) 110ipq_find_dequeue_entry(unsigned long id)
119{ 111{
120 struct ipq_queue_entry *entry = NULL, *i; 112 struct nf_queue_entry *entry = NULL, *i;
121 113
122 write_lock_bh(&queue_lock); 114 write_lock_bh(&queue_lock);
123 115
@@ -140,7 +132,7 @@ ipq_find_dequeue_entry(unsigned long id)
140static void 132static void
141__ipq_flush(ipq_cmpfn cmpfn, unsigned long data) 133__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
142{ 134{
143 struct ipq_queue_entry *entry, *next; 135 struct nf_queue_entry *entry, *next;
144 136
145 list_for_each_entry_safe(entry, next, &queue_list, list) { 137 list_for_each_entry_safe(entry, next, &queue_list, list) {
146 if (!cmpfn || cmpfn(entry, data)) { 138 if (!cmpfn || cmpfn(entry, data)) {
@@ -160,7 +152,7 @@ ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
160} 152}
161 153
162static struct sk_buff * 154static struct sk_buff *
163ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) 155ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
164{ 156{
165 sk_buff_data_t old_tail; 157 sk_buff_data_t old_tail;
166 size_t size = 0; 158 size_t size = 0;
@@ -217,20 +209,20 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
217 pmsg->timestamp_sec = tv.tv_sec; 209 pmsg->timestamp_sec = tv.tv_sec;
218 pmsg->timestamp_usec = tv.tv_usec; 210 pmsg->timestamp_usec = tv.tv_usec;
219 pmsg->mark = entry->skb->mark; 211 pmsg->mark = entry->skb->mark;
220 pmsg->hook = entry->info->hook; 212 pmsg->hook = entry->hook;
221 pmsg->hw_protocol = entry->skb->protocol; 213 pmsg->hw_protocol = entry->skb->protocol;
222 214
223 if (entry->info->indev) 215 if (entry->indev)
224 strcpy(pmsg->indev_name, entry->info->indev->name); 216 strcpy(pmsg->indev_name, entry->indev->name);
225 else 217 else
226 pmsg->indev_name[0] = '\0'; 218 pmsg->indev_name[0] = '\0';
227 219
228 if (entry->info->outdev) 220 if (entry->outdev)
229 strcpy(pmsg->outdev_name, entry->info->outdev->name); 221 strcpy(pmsg->outdev_name, entry->outdev->name);
230 else 222 else
231 pmsg->outdev_name[0] = '\0'; 223 pmsg->outdev_name[0] = '\0';
232 224
233 if (entry->info->indev && entry->skb->dev) { 225 if (entry->indev && entry->skb->dev) {
234 pmsg->hw_type = entry->skb->dev->type; 226 pmsg->hw_type = entry->skb->dev->type;
235 pmsg->hw_addrlen = dev_parse_header(entry->skb, 227 pmsg->hw_addrlen = dev_parse_header(entry->skb,
236 pmsg->hw_addr); 228 pmsg->hw_addr);
@@ -252,28 +244,17 @@ nlmsg_failure:
252} 244}
253 245
254static int 246static int
255ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, 247ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
256 unsigned int queuenum)
257{ 248{
258 int status = -EINVAL; 249 int status = -EINVAL;
259 struct sk_buff *nskb; 250 struct sk_buff *nskb;
260 struct ipq_queue_entry *entry;
261 251
262 if (copy_mode == IPQ_COPY_NONE) 252 if (copy_mode == IPQ_COPY_NONE)
263 return -EAGAIN; 253 return -EAGAIN;
264 254
265 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
266 if (entry == NULL) {
267 printk(KERN_ERR "ip_queue: OOM in ipq_enqueue_packet()\n");
268 return -ENOMEM;
269 }
270
271 entry->info = info;
272 entry->skb = skb;
273
274 nskb = ipq_build_packet_message(entry, &status); 255 nskb = ipq_build_packet_message(entry, &status);
275 if (nskb == NULL) 256 if (nskb == NULL)
276 goto err_out_free; 257 return status;
277 258
278 write_lock_bh(&queue_lock); 259 write_lock_bh(&queue_lock);
279 260
@@ -307,14 +288,11 @@ err_out_free_nskb:
307 288
308err_out_unlock: 289err_out_unlock:
309 write_unlock_bh(&queue_lock); 290 write_unlock_bh(&queue_lock);
310
311err_out_free:
312 kfree(entry);
313 return status; 291 return status;
314} 292}
315 293
316static int 294static int
317ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) 295ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
318{ 296{
319 int diff; 297 int diff;
320 int err; 298 int err;
@@ -352,7 +330,7 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
352static int 330static int
353ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) 331ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
354{ 332{
355 struct ipq_queue_entry *entry; 333 struct nf_queue_entry *entry;
356 334
357 if (vmsg->value > NF_MAX_VERDICT) 335 if (vmsg->value > NF_MAX_VERDICT)
358 return -EINVAL; 336 return -EINVAL;
@@ -412,13 +390,13 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
412} 390}
413 391
414static int 392static int
415dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex) 393dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
416{ 394{
417 if (entry->info->indev) 395 if (entry->indev)
418 if (entry->info->indev->ifindex == ifindex) 396 if (entry->indev->ifindex == ifindex)
419 return 1; 397 return 1;
420 if (entry->info->outdev) 398 if (entry->outdev)
421 if (entry->info->outdev->ifindex == ifindex) 399 if (entry->outdev->ifindex == ifindex)
422 return 1; 400 return 1;
423#ifdef CONFIG_BRIDGE_NETFILTER 401#ifdef CONFIG_BRIDGE_NETFILTER
424 if (entry->skb->nf_bridge) { 402 if (entry->skb->nf_bridge) {