author		Harald Welte <laforge@netfilter.org>	2005-08-09 22:42:34 -0400
committer	David S. Miller <davem@sunset.davemloft.net>	2005-08-29 18:36:19 -0400
commit		2cc7d5730957c4a3f3659d17d2ba5e06d5581c1f
tree		c2c3d03d8120831d487bb8fcc73e5dcbe13aebea	/net/core
parent		4fdb3bb723db469717c6d38fda667d8b0fa86ebd
[NETFILTER]: Move reroute-after-queue code up to the nf_queue layer.
The rerouting functionality is required by the core; therefore it has
to be implemented by the core and not in the individual queue handlers.
Signed-off-by: Harald Welte <laforge@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/netfilter.c	66
1 files changed, 54 insertions, 12 deletions
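The diff below only uses struct nf_queue_rerouter; its definition belongs to the matching include/linux/netfilter.h change, which is not part of this net/core diff. The following is a hedged sketch of the layout implied by the call sites in nf_queue() (rer_size extra bytes allocated behind struct nf_info, a save() hook invoked before queueing, a reroute() hook invoked on the verdict path); the exact prototypes are inferred, not quoted from the header.

/* Inferred from the call sites in net/core/netfilter.c below;
 * the authoritative definition lives in include/linux/netfilter.h. */
struct sk_buff;
struct nf_info;

struct nf_queue_rerouter {
	void (*save)(const struct sk_buff *skb, struct nf_info *info);
	int (*reroute)(struct sk_buff **skb, const struct nf_info *info);
	int rer_size;	/* private bytes reserved after struct nf_info */
};

extern int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer);
extern int nf_unregister_queue_rerouter(int pf);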
diff --git a/net/core/netfilter.c b/net/core/netfilter.c
index 9849357f6129..1ed4f3110421 100644
--- a/net/core/netfilter.c
+++ b/net/core/netfilter.c
@@ -53,6 +53,9 @@ static struct nf_queue_handler_t {
 	nf_queue_outfn_t outfn;
 	void *data;
 } queue_handler[NPROTO];
+
+static struct nf_queue_rerouter *queue_rerouter;
+
 static DEFINE_RWLOCK(queue_handler_lock);
 
 int nf_register_hook(struct nf_hook_ops *reg)
@@ -260,11 +263,34 @@ int nf_unregister_queue_handler(int pf)
 	return 0;
 }
 
+int nf_register_queue_rerouter(int pf, struct nf_queue_rerouter *rer)
+{
+	if (pf >= NPROTO)
+		return -EINVAL;
+
+	write_lock_bh(&queue_handler_lock);
+	memcpy(&queue_rerouter[pf], rer, sizeof(queue_rerouter[pf]));
+	write_unlock_bh(&queue_handler_lock);
+
+	return 0;
+}
+
+int nf_unregister_queue_rerouter(int pf)
+{
+	if (pf >= NPROTO)
+		return -EINVAL;
+
+	write_lock_bh(&queue_handler_lock);
+	memset(&queue_rerouter[pf], 0, sizeof(queue_rerouter[pf]));
+	write_unlock_bh(&queue_handler_lock);
+	return 0;
+}
+
 /*
  * Any packet that leaves via this function must come back
  * through nf_reinject().
  */
-static int nf_queue(struct sk_buff *skb,
+static int nf_queue(struct sk_buff **skb,
 		    struct list_head *elem,
 		    int pf, unsigned int hook,
 		    struct net_device *indev,
@@ -282,17 +308,17 @@ static int nf_queue(struct sk_buff *skb,
 	read_lock(&queue_handler_lock);
 	if (!queue_handler[pf].outfn) {
 		read_unlock(&queue_handler_lock);
-		kfree_skb(skb);
+		kfree_skb(*skb);
 		return 1;
 	}
 
-	info = kmalloc(sizeof(*info), GFP_ATOMIC);
+	info = kmalloc(sizeof(*info)+queue_rerouter[pf].rer_size, GFP_ATOMIC);
 	if (!info) {
 		if (net_ratelimit())
 			printk(KERN_ERR "OOM queueing packet %p\n",
-			       skb);
+			       *skb);
 		read_unlock(&queue_handler_lock);
-		kfree_skb(skb);
+		kfree_skb(*skb);
 		return 1;
 	}
 
@@ -311,15 +337,21 @@ static int nf_queue(struct sk_buff *skb,
 	if (outdev) dev_hold(outdev);
 
 #ifdef CONFIG_BRIDGE_NETFILTER
-	if (skb->nf_bridge) {
-		physindev = skb->nf_bridge->physindev;
+	if ((*skb)->nf_bridge) {
+		physindev = (*skb)->nf_bridge->physindev;
 		if (physindev) dev_hold(physindev);
-		physoutdev = skb->nf_bridge->physoutdev;
+		physoutdev = (*skb)->nf_bridge->physoutdev;
 		if (physoutdev) dev_hold(physoutdev);
 	}
 #endif
+	if (queue_rerouter[pf].save)
+		queue_rerouter[pf].save(*skb, info);
+
+	status = queue_handler[pf].outfn(*skb, info, queue_handler[pf].data);
+
+	if (status >= 0 && queue_rerouter[pf].reroute)
+		status = queue_rerouter[pf].reroute(skb, info);
 
-	status = queue_handler[pf].outfn(skb, info, queue_handler[pf].data);
 	read_unlock(&queue_handler_lock);
 
 	if (status < 0) {
@@ -332,9 +364,11 @@ static int nf_queue(struct sk_buff *skb,
 #endif
 		module_put(info->elem->owner);
 		kfree(info);
-		kfree_skb(skb);
+		kfree_skb(*skb);
+
 		return 1;
 	}
+
 	return 1;
 }
 
@@ -365,7 +399,7 @@ next_hook:
 		ret = -EPERM;
 	} else if (verdict == NF_QUEUE) {
 		NFDEBUG("nf_hook: Verdict = QUEUE.\n");
-		if (!nf_queue(*pskb, elem, pf, hook, indev, outdev, okfn))
+		if (!nf_queue(pskb, elem, pf, hook, indev, outdev, okfn))
 			goto next_hook;
 	}
 unlock:
@@ -428,7 +462,7 @@ void nf_reinject(struct sk_buff *skb, struct nf_info *info,
 		break;
 
 	case NF_QUEUE:
-		if (!nf_queue(skb, elem, info->pf, info->hook,
+		if (!nf_queue(&skb, elem, info->pf, info->hook,
 			      info->indev, info->outdev, info->okfn))
 			goto next_hook;
 		break;
@@ -555,6 +589,12 @@ void __init netfilter_init(void)
 {
 	int i, h;
 
+	queue_rerouter = kmalloc(NPROTO * sizeof(struct nf_queue_rerouter),
+				 GFP_KERNEL);
+	if (!queue_rerouter)
+		panic("netfilter: cannot allocate queue rerouter array\n");
+	memset(queue_rerouter, 0, NPROTO * sizeof(struct nf_queue_rerouter));
+
 	for (i = 0; i < NPROTO; i++) {
 		for (h = 0; h < NF_MAX_HOOKS; h++)
 			INIT_LIST_HEAD(&nf_hooks[i][h]);
@@ -573,4 +613,6 @@ EXPORT_SYMBOL(nf_reinject);
 EXPORT_SYMBOL(nf_setsockopt);
 EXPORT_SYMBOL(nf_unregister_hook);
 EXPORT_SYMBOL(nf_unregister_queue_handler);
+EXPORT_SYMBOL_GPL(nf_register_queue_rerouter);
+EXPORT_SYMBOL_GPL(nf_unregister_queue_rerouter);
 EXPORT_SYMBOL(nf_unregister_sockopt);
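With the core plumbing above in place, each protocol family registers its own save/reroute pair. The fragment below is a hedged sketch of what an IPv4-side user of this API could look like; the identifiers ip_rt_info, queue_save, queue_reroute and ip_rerouter are illustrative rather than taken from this commit, and skb->nh.iph / ip_route_me_harder() are used only as the 2.6-era IPv4 helpers of that time.

/*
 * Illustrative only: a per-family rerouter as an IPv4 queue handler
 * might register it.  The .save/.reroute/.rer_size fields match the
 * accesses made by nf_queue() above; everything else is assumed.
 */
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/skbuff.h>
#include <linux/ip.h>

/* Routing-relevant header bits stashed before the packet is queued.
 * nf_queue() allocates rer_size extra bytes behind struct nf_info
 * for exactly this purpose. */
struct ip_rt_info {
	u_int32_t daddr;
	u_int32_t saddr;
	u_int8_t  tos;
};

/* Called by nf_queue() before the skb is handed to the queue handler. */
static void queue_save(const struct sk_buff *skb, struct nf_info *info)
{
	struct ip_rt_info *rt_info = (void *)(info + 1);

	if (info->hook == NF_IP_LOCAL_OUT) {
		const struct iphdr *iph = skb->nh.iph;	/* 2.6-era accessor */

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->tos   = iph->tos;
	}
}

/* Called by nf_queue() after the verdict: if userspace rewrote the
 * addresses or TOS, redo output routing for the (possibly new) skb. */
static int queue_reroute(struct sk_buff **pskb, const struct nf_info *info)
{
	const struct ip_rt_info *rt_info = (const void *)(info + 1);

	if (info->hook == NF_IP_LOCAL_OUT) {
		const struct iphdr *iph = (*pskb)->nh.iph;

		if (iph->daddr != rt_info->daddr ||
		    iph->saddr != rt_info->saddr ||
		    iph->tos   != rt_info->tos)
			return ip_route_me_harder(pskb);
	}
	return 0;
}

static struct nf_queue_rerouter ip_rerouter = {
	.rer_size	= sizeof(struct ip_rt_info),
	.save		= queue_save,
	.reroute	= queue_reroute,
};

/* Registered once per family, typically at module init:
 *	nf_register_queue_rerouter(PF_INET, &ip_rerouter);
 * and torn down with nf_unregister_queue_rerouter(PF_INET). */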