aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2007-12-05 04:26:33 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-28 17:56:14 -0500
commit02f014d88831f73b895c1fe09badb66c88e932d3 (patch)
tree09aa75b8edeb240e62c4269f20630f8206c0e6d4 /net
parent7a6c6653b3a977087ec64d76817c7ee6e1df5b60 (diff)
[NETFILTER]: nf_queue: move list_head/skb/id to struct nf_info
Move common fields for queue management to struct nf_info and rename it to struct nf_queue_entry. This avoids one allocation/free per packet and simplifies the code a bit. Alternatively we could add some private room at the tail, but since all current users use identical structs this seems easier. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/netfilter.c14
-rw-r--r--net/ipv4/netfilter/ip_queue.c68
-rw-r--r--net/ipv6/netfilter.c14
-rw-r--r--net/ipv6/netfilter/ip6_queue.c67
-rw-r--r--net/netfilter/nf_queue.c65
-rw-r--r--net/netfilter/nfnetlink_queue.c74
6 files changed, 121 insertions, 181 deletions
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index f7166084a5ab..7bf5e4a199f0 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -123,11 +123,12 @@ struct ip_rt_info {
123 u_int8_t tos; 123 u_int8_t tos;
124}; 124};
125 125
126static void nf_ip_saveroute(const struct sk_buff *skb, struct nf_info *info) 126static void nf_ip_saveroute(const struct sk_buff *skb,
127 struct nf_queue_entry *entry)
127{ 128{
128 struct ip_rt_info *rt_info = nf_info_reroute(info); 129 struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
129 130
130 if (info->hook == NF_INET_LOCAL_OUT) { 131 if (entry->hook == NF_INET_LOCAL_OUT) {
131 const struct iphdr *iph = ip_hdr(skb); 132 const struct iphdr *iph = ip_hdr(skb);
132 133
133 rt_info->tos = iph->tos; 134 rt_info->tos = iph->tos;
@@ -136,11 +137,12 @@ static void nf_ip_saveroute(const struct sk_buff *skb, struct nf_info *info)
136 } 137 }
137} 138}
138 139
139static int nf_ip_reroute(struct sk_buff *skb, const struct nf_info *info) 140static int nf_ip_reroute(struct sk_buff *skb,
141 const struct nf_queue_entry *entry)
140{ 142{
141 const struct ip_rt_info *rt_info = nf_info_reroute(info); 143 const struct ip_rt_info *rt_info = nf_queue_entry_reroute(entry);
142 144
143 if (info->hook == NF_INET_LOCAL_OUT) { 145 if (entry->hook == NF_INET_LOCAL_OUT) {
144 const struct iphdr *iph = ip_hdr(skb); 146 const struct iphdr *iph = ip_hdr(skb);
145 147
146 if (!(iph->tos == rt_info->tos 148 if (!(iph->tos == rt_info->tos
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index df2957c5bcb4..f1affd2344a9 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -35,13 +35,7 @@
35#define NET_IPQ_QMAX 2088 35#define NET_IPQ_QMAX 2088
36#define NET_IPQ_QMAX_NAME "ip_queue_maxlen" 36#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"
37 37
38struct ipq_queue_entry { 38typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
39 struct list_head list;
40 struct nf_info *info;
41 struct sk_buff *skb;
42};
43
44typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);
45 39
46static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; 40static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
47static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; 41static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
@@ -56,7 +50,7 @@ static LIST_HEAD(queue_list);
56static DEFINE_MUTEX(ipqnl_mutex); 50static DEFINE_MUTEX(ipqnl_mutex);
57 51
58static void 52static void
59ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict) 53ipq_issue_verdict(struct nf_queue_entry *entry, int verdict)
60{ 54{
61 /* TCP input path (and probably other bits) assume to be called 55 /* TCP input path (and probably other bits) assume to be called
62 * from softirq context, not from syscall, like ipq_issue_verdict is 56 * from softirq context, not from syscall, like ipq_issue_verdict is
@@ -64,14 +58,12 @@ ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict)
64 * softirq, e.g. We therefore emulate this by local_bh_disable() */ 58 * softirq, e.g. We therefore emulate this by local_bh_disable() */
65 59
66 local_bh_disable(); 60 local_bh_disable();
67 nf_reinject(entry->skb, entry->info, verdict); 61 nf_reinject(entry, verdict);
68 local_bh_enable(); 62 local_bh_enable();
69
70 kfree(entry);
71} 63}
72 64
73static inline void 65static inline void
74__ipq_enqueue_entry(struct ipq_queue_entry *entry) 66__ipq_enqueue_entry(struct nf_queue_entry *entry)
75{ 67{
76 list_add_tail(&entry->list, &queue_list); 68 list_add_tail(&entry->list, &queue_list);
77 queue_total++; 69 queue_total++;
@@ -114,10 +106,10 @@ __ipq_reset(void)
114 __ipq_flush(NULL, 0); 106 __ipq_flush(NULL, 0);
115} 107}
116 108
117static struct ipq_queue_entry * 109static struct nf_queue_entry *
118ipq_find_dequeue_entry(unsigned long id) 110ipq_find_dequeue_entry(unsigned long id)
119{ 111{
120 struct ipq_queue_entry *entry = NULL, *i; 112 struct nf_queue_entry *entry = NULL, *i;
121 113
122 write_lock_bh(&queue_lock); 114 write_lock_bh(&queue_lock);
123 115
@@ -140,7 +132,7 @@ ipq_find_dequeue_entry(unsigned long id)
140static void 132static void
141__ipq_flush(ipq_cmpfn cmpfn, unsigned long data) 133__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
142{ 134{
143 struct ipq_queue_entry *entry, *next; 135 struct nf_queue_entry *entry, *next;
144 136
145 list_for_each_entry_safe(entry, next, &queue_list, list) { 137 list_for_each_entry_safe(entry, next, &queue_list, list) {
146 if (!cmpfn || cmpfn(entry, data)) { 138 if (!cmpfn || cmpfn(entry, data)) {
@@ -160,7 +152,7 @@ ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
160} 152}
161 153
162static struct sk_buff * 154static struct sk_buff *
163ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) 155ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
164{ 156{
165 sk_buff_data_t old_tail; 157 sk_buff_data_t old_tail;
166 size_t size = 0; 158 size_t size = 0;
@@ -217,20 +209,20 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
217 pmsg->timestamp_sec = tv.tv_sec; 209 pmsg->timestamp_sec = tv.tv_sec;
218 pmsg->timestamp_usec = tv.tv_usec; 210 pmsg->timestamp_usec = tv.tv_usec;
219 pmsg->mark = entry->skb->mark; 211 pmsg->mark = entry->skb->mark;
220 pmsg->hook = entry->info->hook; 212 pmsg->hook = entry->hook;
221 pmsg->hw_protocol = entry->skb->protocol; 213 pmsg->hw_protocol = entry->skb->protocol;
222 214
223 if (entry->info->indev) 215 if (entry->indev)
224 strcpy(pmsg->indev_name, entry->info->indev->name); 216 strcpy(pmsg->indev_name, entry->indev->name);
225 else 217 else
226 pmsg->indev_name[0] = '\0'; 218 pmsg->indev_name[0] = '\0';
227 219
228 if (entry->info->outdev) 220 if (entry->outdev)
229 strcpy(pmsg->outdev_name, entry->info->outdev->name); 221 strcpy(pmsg->outdev_name, entry->outdev->name);
230 else 222 else
231 pmsg->outdev_name[0] = '\0'; 223 pmsg->outdev_name[0] = '\0';
232 224
233 if (entry->info->indev && entry->skb->dev) { 225 if (entry->indev && entry->skb->dev) {
234 pmsg->hw_type = entry->skb->dev->type; 226 pmsg->hw_type = entry->skb->dev->type;
235 pmsg->hw_addrlen = dev_parse_header(entry->skb, 227 pmsg->hw_addrlen = dev_parse_header(entry->skb,
236 pmsg->hw_addr); 228 pmsg->hw_addr);
@@ -252,28 +244,17 @@ nlmsg_failure:
252} 244}
253 245
254static int 246static int
255ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, 247ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
256 unsigned int queuenum)
257{ 248{
258 int status = -EINVAL; 249 int status = -EINVAL;
259 struct sk_buff *nskb; 250 struct sk_buff *nskb;
260 struct ipq_queue_entry *entry;
261 251
262 if (copy_mode == IPQ_COPY_NONE) 252 if (copy_mode == IPQ_COPY_NONE)
263 return -EAGAIN; 253 return -EAGAIN;
264 254
265 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
266 if (entry == NULL) {
267 printk(KERN_ERR "ip_queue: OOM in ipq_enqueue_packet()\n");
268 return -ENOMEM;
269 }
270
271 entry->info = info;
272 entry->skb = skb;
273
274 nskb = ipq_build_packet_message(entry, &status); 255 nskb = ipq_build_packet_message(entry, &status);
275 if (nskb == NULL) 256 if (nskb == NULL)
276 goto err_out_free; 257 return status;
277 258
278 write_lock_bh(&queue_lock); 259 write_lock_bh(&queue_lock);
279 260
@@ -307,14 +288,11 @@ err_out_free_nskb:
307 288
308err_out_unlock: 289err_out_unlock:
309 write_unlock_bh(&queue_lock); 290 write_unlock_bh(&queue_lock);
310
311err_out_free:
312 kfree(entry);
313 return status; 291 return status;
314} 292}
315 293
316static int 294static int
317ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) 295ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
318{ 296{
319 int diff; 297 int diff;
320 int err; 298 int err;
@@ -352,7 +330,7 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
352static int 330static int
353ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) 331ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
354{ 332{
355 struct ipq_queue_entry *entry; 333 struct nf_queue_entry *entry;
356 334
357 if (vmsg->value > NF_MAX_VERDICT) 335 if (vmsg->value > NF_MAX_VERDICT)
358 return -EINVAL; 336 return -EINVAL;
@@ -412,13 +390,13 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
412} 390}
413 391
414static int 392static int
415dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex) 393dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
416{ 394{
417 if (entry->info->indev) 395 if (entry->indev)
418 if (entry->info->indev->ifindex == ifindex) 396 if (entry->indev->ifindex == ifindex)
419 return 1; 397 return 1;
420 if (entry->info->outdev) 398 if (entry->outdev)
421 if (entry->info->outdev->ifindex == ifindex) 399 if (entry->outdev->ifindex == ifindex)
422 return 1; 400 return 1;
423#ifdef CONFIG_BRIDGE_NETFILTER 401#ifdef CONFIG_BRIDGE_NETFILTER
424 if (entry->skb->nf_bridge) { 402 if (entry->skb->nf_bridge) {
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 55ea9c6ec744..945e6ae19569 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -57,11 +57,12 @@ struct ip6_rt_info {
57 struct in6_addr saddr; 57 struct in6_addr saddr;
58}; 58};
59 59
60static void nf_ip6_saveroute(const struct sk_buff *skb, struct nf_info *info) 60static void nf_ip6_saveroute(const struct sk_buff *skb,
61 struct nf_queue_entry *entry)
61{ 62{
62 struct ip6_rt_info *rt_info = nf_info_reroute(info); 63 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
63 64
64 if (info->hook == NF_INET_LOCAL_OUT) { 65 if (entry->hook == NF_INET_LOCAL_OUT) {
65 struct ipv6hdr *iph = ipv6_hdr(skb); 66 struct ipv6hdr *iph = ipv6_hdr(skb);
66 67
67 rt_info->daddr = iph->daddr; 68 rt_info->daddr = iph->daddr;
@@ -69,11 +70,12 @@ static void nf_ip6_saveroute(const struct sk_buff *skb, struct nf_info *info)
69 } 70 }
70} 71}
71 72
72static int nf_ip6_reroute(struct sk_buff *skb, const struct nf_info *info) 73static int nf_ip6_reroute(struct sk_buff *skb,
74 const struct nf_queue_entry *entry)
73{ 75{
74 struct ip6_rt_info *rt_info = nf_info_reroute(info); 76 struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);
75 77
76 if (info->hook == NF_INET_LOCAL_OUT) { 78 if (entry->hook == NF_INET_LOCAL_OUT) {
77 struct ipv6hdr *iph = ipv6_hdr(skb); 79 struct ipv6hdr *iph = ipv6_hdr(skb);
78 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) || 80 if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
79 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr)) 81 !ipv6_addr_equal(&iph->saddr, &rt_info->saddr))
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 9c50cb19b39b..9014adae4fb1 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -39,13 +39,7 @@
39#define NET_IPQ_QMAX 2088 39#define NET_IPQ_QMAX 2088
40#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen" 40#define NET_IPQ_QMAX_NAME "ip6_queue_maxlen"
41 41
42struct ipq_queue_entry { 42typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);
43 struct list_head list;
44 struct nf_info *info;
45 struct sk_buff *skb;
46};
47
48typedef int (*ipq_cmpfn)(struct ipq_queue_entry *, unsigned long);
49 43
50static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE; 44static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
51static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT; 45static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
@@ -60,16 +54,15 @@ static LIST_HEAD(queue_list);
60static DEFINE_MUTEX(ipqnl_mutex); 54static DEFINE_MUTEX(ipqnl_mutex);
61 55
62static void 56static void
63ipq_issue_verdict(struct ipq_queue_entry *entry, int verdict) 57ipq_issue_verdict(struct nf_queue_entry *entry, int verdict)
64{ 58{
65 local_bh_disable(); 59 local_bh_disable();
66 nf_reinject(entry->skb, entry->info, verdict); 60 nf_reinject(entry, verdict);
67 local_bh_enable(); 61 local_bh_enable();
68 kfree(entry);
69} 62}
70 63
71static inline void 64static inline void
72__ipq_enqueue_entry(struct ipq_queue_entry *entry) 65__ipq_enqueue_entry(struct nf_queue_entry *entry)
73{ 66{
74 list_add_tail(&entry->list, &queue_list); 67 list_add_tail(&entry->list, &queue_list);
75 queue_total++; 68 queue_total++;
@@ -112,10 +105,10 @@ __ipq_reset(void)
112 __ipq_flush(NULL, 0); 105 __ipq_flush(NULL, 0);
113} 106}
114 107
115static struct ipq_queue_entry * 108static struct nf_queue_entry *
116ipq_find_dequeue_entry(unsigned long id) 109ipq_find_dequeue_entry(unsigned long id)
117{ 110{
118 struct ipq_queue_entry *entry = NULL, *i; 111 struct nf_queue_entry *entry = NULL, *i;
119 112
120 write_lock_bh(&queue_lock); 113 write_lock_bh(&queue_lock);
121 114
@@ -138,7 +131,7 @@ ipq_find_dequeue_entry(unsigned long id)
138static void 131static void
139__ipq_flush(ipq_cmpfn cmpfn, unsigned long data) 132__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
140{ 133{
141 struct ipq_queue_entry *entry, *next; 134 struct nf_queue_entry *entry, *next;
142 135
143 list_for_each_entry_safe(entry, next, &queue_list, list) { 136 list_for_each_entry_safe(entry, next, &queue_list, list) {
144 if (!cmpfn || cmpfn(entry, data)) { 137 if (!cmpfn || cmpfn(entry, data)) {
@@ -158,7 +151,7 @@ ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
158} 151}
159 152
160static struct sk_buff * 153static struct sk_buff *
161ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) 154ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
162{ 155{
163 sk_buff_data_t old_tail; 156 sk_buff_data_t old_tail;
164 size_t size = 0; 157 size_t size = 0;
@@ -215,20 +208,20 @@ ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp)
215 pmsg->timestamp_sec = tv.tv_sec; 208 pmsg->timestamp_sec = tv.tv_sec;
216 pmsg->timestamp_usec = tv.tv_usec; 209 pmsg->timestamp_usec = tv.tv_usec;
217 pmsg->mark = entry->skb->mark; 210 pmsg->mark = entry->skb->mark;
218 pmsg->hook = entry->info->hook; 211 pmsg->hook = entry->hook;
219 pmsg->hw_protocol = entry->skb->protocol; 212 pmsg->hw_protocol = entry->skb->protocol;
220 213
221 if (entry->info->indev) 214 if (entry->indev)
222 strcpy(pmsg->indev_name, entry->info->indev->name); 215 strcpy(pmsg->indev_name, entry->indev->name);
223 else 216 else
224 pmsg->indev_name[0] = '\0'; 217 pmsg->indev_name[0] = '\0';
225 218
226 if (entry->info->outdev) 219 if (entry->outdev)
227 strcpy(pmsg->outdev_name, entry->info->outdev->name); 220 strcpy(pmsg->outdev_name, entry->outdev->name);
228 else 221 else
229 pmsg->outdev_name[0] = '\0'; 222 pmsg->outdev_name[0] = '\0';
230 223
231 if (entry->info->indev && entry->skb->dev) { 224 if (entry->indev && entry->skb->dev) {
232 pmsg->hw_type = entry->skb->dev->type; 225 pmsg->hw_type = entry->skb->dev->type;
233 pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr); 226 pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
234 } 227 }
@@ -249,28 +242,17 @@ nlmsg_failure:
249} 242}
250 243
251static int 244static int
252ipq_enqueue_packet(struct sk_buff *skb, struct nf_info *info, 245ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
253 unsigned int queuenum)
254{ 246{
255 int status = -EINVAL; 247 int status = -EINVAL;
256 struct sk_buff *nskb; 248 struct sk_buff *nskb;
257 struct ipq_queue_entry *entry;
258 249
259 if (copy_mode == IPQ_COPY_NONE) 250 if (copy_mode == IPQ_COPY_NONE)
260 return -EAGAIN; 251 return -EAGAIN;
261 252
262 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
263 if (entry == NULL) {
264 printk(KERN_ERR "ip6_queue: OOM in ipq_enqueue_packet()\n");
265 return -ENOMEM;
266 }
267
268 entry->info = info;
269 entry->skb = skb;
270
271 nskb = ipq_build_packet_message(entry, &status); 253 nskb = ipq_build_packet_message(entry, &status);
272 if (nskb == NULL) 254 if (nskb == NULL)
273 goto err_out_free; 255 return status;
274 256
275 write_lock_bh(&queue_lock); 257 write_lock_bh(&queue_lock);
276 258
@@ -304,14 +286,11 @@ err_out_free_nskb:
304 286
305err_out_unlock: 287err_out_unlock:
306 write_unlock_bh(&queue_lock); 288 write_unlock_bh(&queue_lock);
307
308err_out_free:
309 kfree(entry);
310 return status; 289 return status;
311} 290}
312 291
313static int 292static int
314ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e) 293ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
315{ 294{
316 int diff; 295 int diff;
317 int err; 296 int err;
@@ -349,7 +328,7 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct ipq_queue_entry *e)
349static int 328static int
350ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len) 329ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
351{ 330{
352 struct ipq_queue_entry *entry; 331 struct nf_queue_entry *entry;
353 332
354 if (vmsg->value > NF_MAX_VERDICT) 333 if (vmsg->value > NF_MAX_VERDICT)
355 return -EINVAL; 334 return -EINVAL;
@@ -409,14 +388,14 @@ ipq_receive_peer(struct ipq_peer_msg *pmsg,
409} 388}
410 389
411static int 390static int
412dev_cmp(struct ipq_queue_entry *entry, unsigned long ifindex) 391dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
413{ 392{
414 if (entry->info->indev) 393 if (entry->indev)
415 if (entry->info->indev->ifindex == ifindex) 394 if (entry->indev->ifindex == ifindex)
416 return 1; 395 return 1;
417 396
418 if (entry->info->outdev) 397 if (entry->outdev)
419 if (entry->info->outdev->ifindex == ifindex) 398 if (entry->outdev->ifindex == ifindex)
420 return 1; 399 return 1;
421#ifdef CONFIG_BRIDGE_NETFILTER 400#ifdef CONFIG_BRIDGE_NETFILTER
422 if (entry->skb->nf_bridge) { 401 if (entry->skb->nf_bridge) {
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
index bd71f433b85e..d9d3dc4ce1a3 100644
--- a/net/netfilter/nf_queue.c
+++ b/net/netfilter/nf_queue.c
@@ -93,7 +93,7 @@ static int __nf_queue(struct sk_buff *skb,
93 unsigned int queuenum) 93 unsigned int queuenum)
94{ 94{
95 int status; 95 int status;
96 struct nf_info *info; 96 struct nf_queue_entry *entry;
97#ifdef CONFIG_BRIDGE_NETFILTER 97#ifdef CONFIG_BRIDGE_NETFILTER
98 struct net_device *physindev = NULL; 98 struct net_device *physindev = NULL;
99 struct net_device *physoutdev = NULL; 99 struct net_device *physoutdev = NULL;
@@ -118,8 +118,8 @@ static int __nf_queue(struct sk_buff *skb,
118 return 1; 118 return 1;
119 } 119 }
120 120
121 info = kmalloc(sizeof(*info) + afinfo->route_key_size, GFP_ATOMIC); 121 entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
122 if (!info) { 122 if (!entry) {
123 if (net_ratelimit()) 123 if (net_ratelimit())
124 printk(KERN_ERR "OOM queueing packet %p\n", 124 printk(KERN_ERR "OOM queueing packet %p\n",
125 skb); 125 skb);
@@ -128,13 +128,20 @@ static int __nf_queue(struct sk_buff *skb,
128 return 1; 128 return 1;
129 } 129 }
130 130
131 *info = (struct nf_info) { 131 *entry = (struct nf_queue_entry) {
132 (struct nf_hook_ops *)elem, pf, hook, indev, outdev, okfn }; 132 .skb = skb,
133 .elem = list_entry(elem, struct nf_hook_ops, list),
134 .pf = pf,
135 .hook = hook,
136 .indev = indev,
137 .outdev = outdev,
138 .okfn = okfn,
139 };
133 140
134 /* If it's going away, ignore hook. */ 141 /* If it's going away, ignore hook. */
135 if (!try_module_get(info->elem->owner)) { 142 if (!try_module_get(entry->elem->owner)) {
136 rcu_read_unlock(); 143 rcu_read_unlock();
137 kfree(info); 144 kfree(entry);
138 return 0; 145 return 0;
139 } 146 }
140 147
@@ -153,8 +160,8 @@ static int __nf_queue(struct sk_buff *skb,
153 dev_hold(physoutdev); 160 dev_hold(physoutdev);
154 } 161 }
155#endif 162#endif
156 afinfo->saveroute(skb, info); 163 afinfo->saveroute(skb, entry);
157 status = qh->outfn(skb, info, queuenum); 164 status = qh->outfn(entry, queuenum);
158 165
159 rcu_read_unlock(); 166 rcu_read_unlock();
160 167
@@ -170,8 +177,8 @@ static int __nf_queue(struct sk_buff *skb,
170 if (physoutdev) 177 if (physoutdev)
171 dev_put(physoutdev); 178 dev_put(physoutdev);
172#endif 179#endif
173 module_put(info->elem->owner); 180 module_put(entry->elem->owner);
174 kfree(info); 181 kfree(entry);
175 kfree_skb(skb); 182 kfree_skb(skb);
176 183
177 return 1; 184 return 1;
@@ -220,19 +227,19 @@ int nf_queue(struct sk_buff *skb,
220 return 1; 227 return 1;
221} 228}
222 229
223void nf_reinject(struct sk_buff *skb, struct nf_info *info, 230void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
224 unsigned int verdict)
225{ 231{
226 struct list_head *elem = &info->elem->list; 232 struct sk_buff *skb = entry->skb;
233 struct list_head *elem = &entry->elem->list;
227 struct nf_afinfo *afinfo; 234 struct nf_afinfo *afinfo;
228 235
229 rcu_read_lock(); 236 rcu_read_lock();
230 237
231 /* Release those devices we held, or Alexey will kill me. */ 238 /* Release those devices we held, or Alexey will kill me. */
232 if (info->indev) 239 if (entry->indev)
233 dev_put(info->indev); 240 dev_put(entry->indev);
234 if (info->outdev) 241 if (entry->outdev)
235 dev_put(info->outdev); 242 dev_put(entry->outdev);
236#ifdef CONFIG_BRIDGE_NETFILTER 243#ifdef CONFIG_BRIDGE_NETFILTER
237 if (skb->nf_bridge) { 244 if (skb->nf_bridge) {
238 if (skb->nf_bridge->physindev) 245 if (skb->nf_bridge->physindev)
@@ -243,7 +250,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
243#endif 250#endif
244 251
245 /* Drop reference to owner of hook which queued us. */ 252 /* Drop reference to owner of hook which queued us. */
246 module_put(info->elem->owner); 253 module_put(entry->elem->owner);
247 254
248 /* Continue traversal iff userspace said ok... */ 255 /* Continue traversal iff userspace said ok... */
249 if (verdict == NF_REPEAT) { 256 if (verdict == NF_REPEAT) {
@@ -252,28 +259,28 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
252 } 259 }
253 260
254 if (verdict == NF_ACCEPT) { 261 if (verdict == NF_ACCEPT) {
255 afinfo = nf_get_afinfo(info->pf); 262 afinfo = nf_get_afinfo(entry->pf);
256 if (!afinfo || afinfo->reroute(skb, info) < 0) 263 if (!afinfo || afinfo->reroute(skb, entry) < 0)
257 verdict = NF_DROP; 264 verdict = NF_DROP;
258 } 265 }
259 266
260 if (verdict == NF_ACCEPT) { 267 if (verdict == NF_ACCEPT) {
261 next_hook: 268 next_hook:
262 verdict = nf_iterate(&nf_hooks[info->pf][info->hook], 269 verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
263 skb, info->hook, 270 skb, entry->hook,
264 info->indev, info->outdev, &elem, 271 entry->indev, entry->outdev, &elem,
265 info->okfn, INT_MIN); 272 entry->okfn, INT_MIN);
266 } 273 }
267 274
268 switch (verdict & NF_VERDICT_MASK) { 275 switch (verdict & NF_VERDICT_MASK) {
269 case NF_ACCEPT: 276 case NF_ACCEPT:
270 case NF_STOP: 277 case NF_STOP:
271 info->okfn(skb); 278 entry->okfn(skb);
272 case NF_STOLEN: 279 case NF_STOLEN:
273 break; 280 break;
274 case NF_QUEUE: 281 case NF_QUEUE:
275 if (!__nf_queue(skb, elem, info->pf, info->hook, 282 if (!__nf_queue(skb, elem, entry->pf, entry->hook,
276 info->indev, info->outdev, info->okfn, 283 entry->indev, entry->outdev, entry->okfn,
277 verdict >> NF_VERDICT_BITS)) 284 verdict >> NF_VERDICT_BITS))
278 goto next_hook; 285 goto next_hook;
279 break; 286 break;
@@ -281,7 +288,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
281 kfree_skb(skb); 288 kfree_skb(skb);
282 } 289 }
283 rcu_read_unlock(); 290 rcu_read_unlock();
284 kfree(info); 291 kfree(entry);
285 return; 292 return;
286} 293}
287EXPORT_SYMBOL(nf_reinject); 294EXPORT_SYMBOL(nf_reinject);
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index cb901cf75776..a4937649d006 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -45,13 +45,6 @@
45#define QDEBUG(x, ...) 45#define QDEBUG(x, ...)
46#endif 46#endif
47 47
48struct nfqnl_queue_entry {
49 struct list_head list;
50 struct nf_info *info;
51 struct sk_buff *skb;
52 unsigned int id;
53};
54
55struct nfqnl_instance { 48struct nfqnl_instance {
56 struct hlist_node hlist; /* global list of queues */ 49 struct hlist_node hlist; /* global list of queues */
57 atomic_t use; 50 atomic_t use;
@@ -73,7 +66,7 @@ struct nfqnl_instance {
73 struct list_head queue_list; /* packets in queue */ 66 struct list_head queue_list; /* packets in queue */
74}; 67};
75 68
76typedef int (*nfqnl_cmpfn)(struct nfqnl_queue_entry *, unsigned long); 69typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);
77 70
78static DEFINE_RWLOCK(instances_lock); 71static DEFINE_RWLOCK(instances_lock);
79 72
@@ -212,7 +205,7 @@ instance_destroy(struct nfqnl_instance *inst)
212 205
213 206
214static void 207static void
215issue_verdict(struct nfqnl_queue_entry *entry, int verdict) 208issue_verdict(struct nf_queue_entry *entry, int verdict)
216{ 209{
217 QDEBUG("entering for entry %p, verdict %u\n", entry, verdict); 210 QDEBUG("entering for entry %p, verdict %u\n", entry, verdict);
218 211
@@ -222,15 +215,12 @@ issue_verdict(struct nfqnl_queue_entry *entry, int verdict)
222 * softirq, e.g. We therefore emulate this by local_bh_disable() */ 215 * softirq, e.g. We therefore emulate this by local_bh_disable() */
223 216
224 local_bh_disable(); 217 local_bh_disable();
225 nf_reinject(entry->skb, entry->info, verdict); 218 nf_reinject(entry, verdict);
226 local_bh_enable(); 219 local_bh_enable();
227
228 kfree(entry);
229} 220}
230 221
231static inline void 222static inline void
232__enqueue_entry(struct nfqnl_instance *queue, 223__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
233 struct nfqnl_queue_entry *entry)
234{ 224{
235 list_add_tail(&entry->list, &queue->queue_list); 225 list_add_tail(&entry->list, &queue->queue_list);
236 queue->queue_total++; 226 queue->queue_total++;
@@ -265,10 +255,10 @@ __nfqnl_set_mode(struct nfqnl_instance *queue,
265 return status; 255 return status;
266} 256}
267 257
268static struct nfqnl_queue_entry * 258static struct nf_queue_entry *
269find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) 259find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
270{ 260{
271 struct nfqnl_queue_entry *entry = NULL, *i; 261 struct nf_queue_entry *entry = NULL, *i;
272 262
273 spin_lock_bh(&queue->lock); 263 spin_lock_bh(&queue->lock);
274 264
@@ -292,7 +282,7 @@ find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
292static void 282static void
293nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data) 283nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
294{ 284{
295 struct nfqnl_queue_entry *entry, *next; 285 struct nf_queue_entry *entry, *next;
296 286
297 spin_lock_bh(&queue->lock); 287 spin_lock_bh(&queue->lock);
298 list_for_each_entry_safe(entry, next, &queue->queue_list, list) { 288 list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
@@ -307,7 +297,7 @@ nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
307 297
308static struct sk_buff * 298static struct sk_buff *
309nfqnl_build_packet_message(struct nfqnl_instance *queue, 299nfqnl_build_packet_message(struct nfqnl_instance *queue,
310 struct nfqnl_queue_entry *entry, int *errp) 300 struct nf_queue_entry *entry, int *errp)
311{ 301{
312 sk_buff_data_t old_tail; 302 sk_buff_data_t old_tail;
313 size_t size; 303 size_t size;
@@ -316,7 +306,6 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
316 struct nfqnl_msg_packet_hdr pmsg; 306 struct nfqnl_msg_packet_hdr pmsg;
317 struct nlmsghdr *nlh; 307 struct nlmsghdr *nlh;
318 struct nfgenmsg *nfmsg; 308 struct nfgenmsg *nfmsg;
319 struct nf_info *entinf = entry->info;
320 struct sk_buff *entskb = entry->skb; 309 struct sk_buff *entskb = entry->skb;
321 struct net_device *indev; 310 struct net_device *indev;
322 struct net_device *outdev; 311 struct net_device *outdev;
@@ -336,7 +325,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
336 + nla_total_size(sizeof(struct nfqnl_msg_packet_hw)) 325 + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
337 + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp)); 326 + nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));
338 327
339 outdev = entinf->outdev; 328 outdev = entry->outdev;
340 329
341 spin_lock_bh(&queue->lock); 330 spin_lock_bh(&queue->lock);
342 331
@@ -379,23 +368,23 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
379 NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET, 368 NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
380 sizeof(struct nfgenmsg)); 369 sizeof(struct nfgenmsg));
381 nfmsg = NLMSG_DATA(nlh); 370 nfmsg = NLMSG_DATA(nlh);
382 nfmsg->nfgen_family = entinf->pf; 371 nfmsg->nfgen_family = entry->pf;
383 nfmsg->version = NFNETLINK_V0; 372 nfmsg->version = NFNETLINK_V0;
384 nfmsg->res_id = htons(queue->queue_num); 373 nfmsg->res_id = htons(queue->queue_num);
385 374
386 pmsg.packet_id = htonl(entry->id); 375 pmsg.packet_id = htonl(entry->id);
387 pmsg.hw_protocol = entskb->protocol; 376 pmsg.hw_protocol = entskb->protocol;
388 pmsg.hook = entinf->hook; 377 pmsg.hook = entry->hook;
389 378
390 NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg); 379 NLA_PUT(skb, NFQA_PACKET_HDR, sizeof(pmsg), &pmsg);
391 380
392 indev = entinf->indev; 381 indev = entry->indev;
393 if (indev) { 382 if (indev) {
394 tmp_uint = htonl(indev->ifindex); 383 tmp_uint = htonl(indev->ifindex);
395#ifndef CONFIG_BRIDGE_NETFILTER 384#ifndef CONFIG_BRIDGE_NETFILTER
396 NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint); 385 NLA_PUT(skb, NFQA_IFINDEX_INDEV, sizeof(tmp_uint), &tmp_uint);
397#else 386#else
398 if (entinf->pf == PF_BRIDGE) { 387 if (entry->pf == PF_BRIDGE) {
399 /* Case 1: indev is physical input device, we need to 388 /* Case 1: indev is physical input device, we need to
400 * look for bridge group (when called from 389 * look for bridge group (when called from
401 * netfilter_bridge) */ 390 * netfilter_bridge) */
@@ -425,7 +414,7 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
425#ifndef CONFIG_BRIDGE_NETFILTER 414#ifndef CONFIG_BRIDGE_NETFILTER
426 NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint); 415 NLA_PUT(skb, NFQA_IFINDEX_OUTDEV, sizeof(tmp_uint), &tmp_uint);
427#else 416#else
428 if (entinf->pf == PF_BRIDGE) { 417 if (entry->pf == PF_BRIDGE) {
429 /* Case 1: outdev is physical output device, we need to 418 /* Case 1: outdev is physical output device, we need to
430 * look for bridge group (when called from 419 * look for bridge group (when called from
431 * netfilter_bridge) */ 420 * netfilter_bridge) */
@@ -504,13 +493,11 @@ nla_put_failure:
504} 493}
505 494
506static int 495static int
507nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info, 496nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
508 unsigned int queuenum)
509{ 497{
510 int status = -EINVAL; 498 int status = -EINVAL;
511 struct sk_buff *nskb; 499 struct sk_buff *nskb;
512 struct nfqnl_instance *queue; 500 struct nfqnl_instance *queue;
513 struct nfqnl_queue_entry *entry;
514 501
515 QDEBUG("entered\n"); 502 QDEBUG("entered\n");
516 503
@@ -526,22 +513,11 @@ nfqnl_enqueue_packet(struct sk_buff *skb, struct nf_info *info,
526 goto err_out_put; 513 goto err_out_put;
527 } 514 }
528 515
529 entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
530 if (entry == NULL) {
531 if (net_ratelimit())
532 printk(KERN_ERR
533 "nf_queue: OOM in nfqnl_enqueue_packet()\n");
534 status = -ENOMEM;
535 goto err_out_put;
536 }
537
538 entry->info = info;
539 entry->skb = skb;
540 entry->id = atomic_inc_return(&queue->id_sequence); 516 entry->id = atomic_inc_return(&queue->id_sequence);
541 517
542 nskb = nfqnl_build_packet_message(queue, entry, &status); 518 nskb = nfqnl_build_packet_message(queue, entry, &status);
543 if (nskb == NULL) 519 if (nskb == NULL)
544 goto err_out_free; 520 goto err_out_put;
545 521
546 spin_lock_bh(&queue->lock); 522 spin_lock_bh(&queue->lock);
547 523
@@ -577,15 +553,13 @@ err_out_free_nskb:
577err_out_unlock: 553err_out_unlock:
578 spin_unlock_bh(&queue->lock); 554 spin_unlock_bh(&queue->lock);
579 555
580err_out_free:
581 kfree(entry);
582err_out_put: 556err_out_put:
583 instance_put(queue); 557 instance_put(queue);
584 return status; 558 return status;
585} 559}
586 560
587static int 561static int
588nfqnl_mangle(void *data, int data_len, struct nfqnl_queue_entry *e) 562nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e)
589{ 563{
590 int diff; 564 int diff;
591 int err; 565 int err;
@@ -630,15 +604,13 @@ nfqnl_set_mode(struct nfqnl_instance *queue,
630} 604}
631 605
632static int 606static int
633dev_cmp(struct nfqnl_queue_entry *entry, unsigned long ifindex) 607dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
634{ 608{
635 struct nf_info *entinf = entry->info; 609 if (entry->indev)
636 610 if (entry->indev->ifindex == ifindex)
637 if (entinf->indev)
638 if (entinf->indev->ifindex == ifindex)
639 return 1; 611 return 1;
640 if (entinf->outdev) 612 if (entry->outdev)
641 if (entinf->outdev->ifindex == ifindex) 613 if (entry->outdev->ifindex == ifindex)
642 return 1; 614 return 1;
643#ifdef CONFIG_BRIDGE_NETFILTER 615#ifdef CONFIG_BRIDGE_NETFILTER
644 if (entry->skb->nf_bridge) { 616 if (entry->skb->nf_bridge) {
@@ -748,7 +720,7 @@ nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
748 struct nfqnl_msg_verdict_hdr *vhdr; 720 struct nfqnl_msg_verdict_hdr *vhdr;
749 struct nfqnl_instance *queue; 721 struct nfqnl_instance *queue;
750 unsigned int verdict; 722 unsigned int verdict;
751 struct nfqnl_queue_entry *entry; 723 struct nf_queue_entry *entry;
752 int err; 724 int err;
753 725
754 queue = instance_lookup_get(queue_num); 726 queue = instance_lookup_get(queue_num);